Merge branch 'django:main' into ticket/34038

This commit is contained in:
William 2022-10-09 15:34:08 +02:00 committed by GitHub
commit 733eaa4af2
No known key found for this signature in database
GPG Key ID: 4AEE18F83AFDEB23
169 changed files with 1916 additions and 1343 deletions

View File

@ -1,6 +1,6 @@
repos:
- repo: https://github.com/psf/black
rev: 22.8.0
rev: 22.10.0
hooks:
- id: black
exclude: \.py-tpl$
@ -13,6 +13,6 @@ repos:
hooks:
- id: flake8
- repo: https://github.com/pre-commit/mirrors-eslint
rev: v8.23.0
rev: v8.24.0
hooks:
- id: eslint

View File

@ -2011,15 +2011,17 @@ class ModelAdmin(BaseModelAdmin):
)
if formset.is_valid():
changecount = 0
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(request, form, None)
self.log_change(request, obj, change_msg)
changecount += 1
with transaction.atomic(using=router.db_for_write(self.model)):
for form in formset.forms:
if form.has_changed():
obj = self.save_form(request, form, change=True)
self.save_model(request, obj, form, change=True)
self.save_related(request, form, formsets=[], change=True)
change_msg = self.construct_change_message(
request, form, None
)
self.log_change(request, obj, change_msg)
changecount += 1
if changecount:
msg = ngettext(
"%(count)s %(name)s was changed successfully.",

View File

@ -46,8 +46,8 @@ html[data-theme="light"],
--button-hover-bg: #609ab6;
--default-button-bg: var(--secondary);
--default-button-hover-bg: #205067;
--close-button-bg: #888; /* Previously #bbb, contrast 1.92 */
--close-button-hover-bg: #747474;
--close-button-bg: #747474;
--close-button-hover-bg: #333;
--delete-button-bg: #ba2121;
--delete-button-hover-bg: #a41515;
@ -1132,3 +1132,7 @@ a.deletelink:focus, a.deletelink:hover {
color: white;
background: var(--link-hover-color);
}
.base-svgs {
display: none;
}

View File

@ -96,7 +96,8 @@
// Extract the model from the popup url '.../<model>/add/' or
// '.../<model>/<id>/change/' depending the action (add or change).
const modelName = path.split('/')[path.split('/').length - (objId ? 4 : 3)];
const selectsRelated = document.querySelectorAll(`[data-model-ref="${modelName}"] select`);
// Exclude autocomplete selects.
const selectsRelated = document.querySelectorAll(`[data-model-ref="${modelName}"] select:not(.admin-autocomplete)`);
selectsRelated.forEach(function(select) {
if (currentSelect === select) {

View File

@ -69,11 +69,15 @@
</div>
{% endblock %}
<!-- END Header -->
{% block breadcrumbs %}
<div class="breadcrumbs">
<a href="{% url 'admin:index' %}">{% translate 'Home' %}</a>
{% if title %} &rsaquo; {{ title }}{% endif %}
</div>
{% block nav-breadcrumbs %}
<nav aria-label="{% translate 'Breadcrumbs' %}">
{% block breadcrumbs %}
<div class="breadcrumbs">
<a href="{% url 'admin:index' %}">{% translate 'Home' %}</a>
{% if title %} &rsaquo; {{ title }}{% endif %}
</div>
{% endblock %}
</nav>
{% endblock %}
{% endif %}
@ -111,7 +115,7 @@
<!-- END Container -->
<!-- SVGs -->
<svg xmlns="http://www.w3.org/2000/svg">
<svg xmlns="http://www.w3.org/2000/svg" class="base-svgs">
<symbol viewBox="0 0 24 24" width="16" height="16" id="icon-auto"><path d="M0 0h24v24H0z" fill="currentColor"/><path d="M12 22C6.477 22 2 17.523 2 12S6.477 2 12 2s10 4.477 10 10-4.477 10-10 10zm0-2V4a8 8 0 1 0 0 16z"/></symbol>
<symbol viewBox="0 0 24 24" width="16" height="16" id="icon-moon"><path d="M0 0h24v24H0z" fill="currentColor"/><path d="M10 7a7 7 0 0 0 12 4.9v.1c0 5.523-4.477 10-10 10S2 17.523 2 12 6.477 2 12 2h.1A6.979 6.979 0 0 0 10 7zm-6 5a8 8 0 0 0 15.062 3.762A9 9 0 0 1 8.238 4.938 7.999 7.999 0 0 0 4 12z"/></symbol>
<symbol viewBox="0 0 24 24" width="16" height="16" id="icon-sun"><path d="M0 0h24v24H0z" fill="currentColor"/><path d="M12 18a6 6 0 1 1 0-12 6 6 0 0 1 0 12zm0-2a4 4 0 1 0 0-8 4 4 0 0 0 0 8zM11 1h2v3h-2V1zm0 19h2v3h-2v-3zM3.515 4.929l1.414-1.414L7.05 5.636 5.636 7.05 3.515 4.93zM16.95 18.364l1.414-1.414 2.121 2.121-1.414 1.414-2.121-2.121zm2.121-14.85l1.414 1.415-2.121 2.121-1.414-1.414 2.121-2.121zM5.636 16.95l1.414 1.414-2.121 2.121-1.414-1.414 2.121-2.121zM23 11v2h-3v-2h3zM4 11v2H1v-2h3z"/></symbol>

View File

@ -7,7 +7,7 @@
{% block bodyclass %}{{ block.super }} dashboard{% endblock %}
{% block breadcrumbs %}{% endblock %}
{% block nav-breadcrumbs %}{% endblock %}
{% block nav-sidebar %}{% endblock %}

View File

@ -15,7 +15,7 @@
{% block content_title %}{% endblock %}
{% block breadcrumbs %}{% endblock %}
{% block nav-breadcrumbs %}{% endblock %}
{% block content %}
{% if form.errors and not form.non_field_errors %}

View File

@ -3,7 +3,7 @@
<div id="toolbar"><form id="changelist-search" method="get">
<div><!-- DIV needed for valid HTML -->
<label for="searchbar"><img src="{% static "admin/img/search.svg" %}" alt="Search"></label>
<input type="text" size="40" name="{{ search_var }}" value="{{ cl.query }}" id="searchbar" autofocus{% if cl.search_help_text %} aria-describedby="searchbar_helptext"{% endif %}>
<input type="text" size="40" name="{{ search_var }}" value="{{ cl.query }}" id="searchbar"{% if cl.search_help_text %} aria-describedby="searchbar_helptext"{% endif %}>
<input type="submit" value="{% translate 'Search' %}">
{% if show_result_count %}
<span class="small quiet">{% blocktranslate count counter=cl.result_count %}{{ counter }} result{% plural %}{{ counter }} results{% endblocktranslate %} (<a href="?{% if cl.is_popup %}{{ is_popup_var }}=1{% endif %}">{% if cl.show_full_result_count %}{% blocktranslate with full_result_count=cl.full_result_count %}{{ full_result_count }} total{% endblocktranslate %}{% else %}{% translate "Show all" %}{% endif %}</a>)</span>

View File

@ -282,13 +282,11 @@ class Command(BaseCommand):
def username_is_unique(self):
if self.username_field.unique:
return True
for unique_constraint in self.UserModel._meta.total_unique_constraints:
if (
len(unique_constraint.fields) == 1
and unique_constraint.fields[0] == self.username_field.name
):
return True
return False
return any(
len(unique_constraint.fields) == 1
and unique_constraint.fields[0] == self.username_field.name
for unique_constraint in self.UserModel._meta.total_unique_constraints
)
def _validate_username(self, username, verbose_field_name, database):
"""Validate username. If invalid, return a string error message."""

View File

@ -221,7 +221,7 @@ class CommonPasswordValidator:
The password is rejected if it occurs in a provided list of passwords,
which may be gzipped. The list Django ships with contains 20000 common
passwords (lowercased and deduplicated), created by Royce Williams:
https://gist.github.com/roycewilliams/281ce539915a947a23db17137d91aeb7
https://gist.github.com/roycewilliams/226886fd01572964e1431ac8afc999ce
The password list must be lowercased to match the comparison in validate().
"""

View File

@ -9,7 +9,7 @@ from django.utils.translation import gettext_lazy as _
class ASCIIUsernameValidator(validators.RegexValidator):
regex = r"^[\w.@+-]+\Z"
message = _(
"Enter a valid username. This value may contain only English letters, "
"Enter a valid username. This value may contain only ASCII letters, "
"numbers, and @/./+/-/_ characters."
)
flags = re.ASCII

View File

@ -552,14 +552,6 @@ class ReverseGenericManyToOneDescriptor(ReverseManyToOneDescriptor):
self.rel,
)
@cached_property
def related_manager_cache_key(self):
# By default, GenericRel instances will be marked as hidden unless
# related_query_name is given (their accessor name being "+" when
# hidden), which would cause multiple GenericRelations declared on a
# single model to collide, so always use the remote field's name.
return self.field.get_cache_name()
def create_generic_related_manager(superclass, rel):
"""

View File

@ -169,8 +169,11 @@ class _WKBReader(IOBase):
if isinstance(wkb, memoryview):
wkb_s = bytes(wkb)
return wkb_reader_read(self.ptr, wkb_s, len(wkb_s))
elif isinstance(wkb, (bytes, str)):
elif isinstance(wkb, bytes):
return wkb_reader_read_hex(self.ptr, wkb, len(wkb))
elif isinstance(wkb, str):
wkb_s = wkb.encode()
return wkb_reader_read_hex(self.ptr, wkb_s, len(wkb_s))
else:
raise TypeError

View File

@ -78,7 +78,7 @@ class RangeMaxValueValidator(MaxValueValidator):
return a.upper is None or a.upper > b
message = _(
"Ensure that this range is completely less than or equal to %(limit_value)s."
"Ensure that the upper bound of the range is not greater than %(limit_value)s."
)
@ -87,5 +87,5 @@ class RangeMinValueValidator(MinValueValidator):
return a.lower is None or a.lower < b
message = _(
"Ensure that this range is completely greater than or equal to %(limit_value)s."
"Ensure that the lower bound of the range is not less than %(limit_value)s."
)

View File

@ -4,7 +4,7 @@ from collections import defaultdict
from django.conf import settings
from django.template.backends.django import get_template_tag_modules
from . import Error, Tags, register
from . import Error, Tags, Warning, register
E001 = Error(
"You have 'APP_DIRS': True in your TEMPLATES but also specify 'loaders' "
@ -15,7 +15,7 @@ E002 = Error(
"'string_if_invalid' in TEMPLATES OPTIONS must be a string but got: {} ({}).",
id="templates.E002",
)
E003 = Error(
W003 = Warning(
"{} is used for multiple template tag modules: {}",
id="templates.E003",
)
@ -63,12 +63,12 @@ def check_for_template_tags_with_the_same_name(app_configs, **kwargs):
for library_name, items in libraries.items():
if len(items) > 1:
errors.append(
Error(
E003.msg.format(
Warning(
W003.msg.format(
repr(library_name),
", ".join(repr(item) for item in sorted(items)),
),
id=E003.id,
id=W003.id,
)
)

View File

@ -70,7 +70,10 @@ class Command(BaseCommand):
"--check",
action="store_true",
dest="check_changes",
help="Exit with a non-zero status if model changes are missing migrations.",
help=(
"Exit with a non-zero status if model changes are missing migrations "
"and don't actually write them."
),
)
parser.add_argument(
"--scriptable",
@ -248,12 +251,12 @@ class Command(BaseCommand):
else:
self.log("No changes detected")
else:
if check_changes:
sys.exit(1)
if self.update:
self.write_to_last_migration_files(changes)
else:
self.write_migration_files(changes)
if check_changes:
sys.exit(1)
def write_to_last_migration_files(self, changes):
loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])

View File

@ -81,7 +81,10 @@ class Command(BaseCommand):
"--check",
action="store_true",
dest="check_unapplied",
help="Exits with a non-zero status if unapplied migrations exist.",
help=(
"Exits with a non-zero status if unapplied migrations exist and does "
"not actually apply migrations."
),
)
parser.add_argument(
"--prune",
@ -237,23 +240,27 @@ class Command(BaseCommand):
self.stdout.write(" No migrations to prune.")
plan = executor.migration_plan(targets)
exit_dry = plan and options["check_unapplied"]
if options["plan"]:
self.stdout.write("Planned operations:", self.style.MIGRATE_LABEL)
if not plan:
self.stdout.write(" No planned migration operations.")
for migration, backwards in plan:
self.stdout.write(str(migration), self.style.MIGRATE_HEADING)
for operation in migration.operations:
message, is_error = self.describe_operation(operation, backwards)
style = self.style.WARNING if is_error else None
self.stdout.write(" " + message, style)
if exit_dry:
else:
for migration, backwards in plan:
self.stdout.write(str(migration), self.style.MIGRATE_HEADING)
for operation in migration.operations:
message, is_error = self.describe_operation(
operation, backwards
)
style = self.style.WARNING if is_error else None
self.stdout.write(" " + message, style)
if options["check_unapplied"]:
sys.exit(1)
return
if options["check_unapplied"]:
if plan:
sys.exit(1)
return
if exit_dry:
sys.exit(1)
if options["prune"]:
return

View File

@ -129,6 +129,13 @@ class Command(BaseCommand):
shutdown_message = options.get("shutdown_message", "")
quit_command = "CTRL-BREAK" if sys.platform == "win32" else "CONTROL-C"
if self._raw_ipv6:
addr = f"[{self.addr}]"
elif self.addr == "0":
addr = "0.0.0.0"
else:
addr = self.addr
if not options["skip_checks"]:
self.stdout.write("Performing system checks...\n\n")
self.check(display_num_errors=True)
@ -147,7 +154,7 @@ class Command(BaseCommand):
"version": self.get_version(),
"settings": settings.SETTINGS_MODULE,
"protocol": self.protocol,
"addr": "[%s]" % self.addr if self._raw_ipv6 else self.addr,
"addr": addr,
"port": self.port,
"quit_command": quit_command,
}

View File

@ -93,6 +93,7 @@ class BaseDatabaseWrapper:
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
self.rollback_exc = None
# Connection termination related attributes.
self.close_at = None
@ -526,7 +527,7 @@ class BaseDatabaseWrapper:
raise TransactionManagementError(
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
) from self.rollback_exc
# ##### Foreign key constraints checks handling #####

View File

@ -10,6 +10,7 @@ class BaseDatabaseFeatures:
allows_group_by_lob = True
allows_group_by_pk = False
allows_group_by_selected_pks = False
allows_group_by_refs = True
empty_fetchmany_value = []
update_can_self_select = True
@ -235,6 +236,7 @@ class BaseDatabaseFeatures:
supports_select_difference = True
supports_slicing_ordering_in_compound = False
supports_parentheses_in_compound = True
requires_compound_order_by_subquery = False
# Does the database support SQL 2003 FILTER (WHERE ...) in aggregate
# expressions?

View File

@ -81,7 +81,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
"swedish_ci": f"{charset}_swedish_ci",
}
test_now_utc_template = "UTC_TIMESTAMP"
test_now_utc_template = "UTC_TIMESTAMP(6)"
@cached_property
def django_test_skips(self):

View File

@ -8,6 +8,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
# Oracle crashes with "ORA-00932: inconsistent datatypes: expected - got
# BLOB" when grouping by LOBs (#24096).
allows_group_by_lob = False
allows_group_by_refs = False
interprets_empty_strings_as_nulls = True
has_select_for_update = True
has_select_for_update_nowait = True
@ -69,6 +70,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
supports_partial_indexes = False
can_rename_index = True
supports_slicing_ordering_in_compound = True
requires_compound_order_by_subquery = True
allows_multiple_constraints_on_same_fields = False
supports_boolean_expr_in_select_clause = False
supports_comparing_boolean_expr = False

View File

@ -13,7 +13,7 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
"UPDATE %(table)s SET %(column)s = %(default)s WHERE %(column)s IS NULL"
"; SET CONSTRAINTS ALL IMMEDIATE"
)
sql_alter_sequence_type = "ALTER SEQUENCE IF EXISTS %(sequence)s AS %(type)s"
sql_delete_sequence = "DROP SEQUENCE IF EXISTS %(sequence)s CASCADE"
sql_create_index = (
@ -130,7 +130,28 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
return using_sql
return ""
def _get_sequence_name(self, table, column):
with self.connection.cursor() as cursor:
for sequence in self.connection.introspection.get_sequences(cursor, table):
if sequence["column"] == column:
return sequence["name"]
return None
def _alter_column_type_sql(self, model, old_field, new_field, new_type):
# Drop indexes on varchar/text/citext columns that are changing to a
# different type.
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params["type"]
if (old_field.db_index or old_field.unique) and (
(old_type.startswith("varchar") and not new_type.startswith("varchar"))
or (old_type.startswith("text") and not new_type.startswith("text"))
or (old_type.startswith("citext") and not new_type.startswith("citext"))
):
index_name = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_name))
self.sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
# Cast when data type changed.
if using_sql := self._using_sql(new_field, old_field):
@ -179,21 +200,48 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
}
)
column = strip_quotes(new_field.column)
sequence_name = "%s_%s_seq" % (table, column)
fragment, _ = super()._alter_column_type_sql(
model, old_field, new_field, new_type
)
return fragment, [
(
# Drop the sequence if exists (Django 4.1+ identity columns
# don't have it).
self.sql_delete_sequence
% {
"sequence": self.quote_name(sequence_name),
},
[],
),
]
# Drop the sequence if exists (Django 4.1+ identity columns don't
# have it).
other_actions = []
if sequence_name := self._get_sequence_name(table, column):
other_actions = [
(
self.sql_delete_sequence
% {
"sequence": self.quote_name(sequence_name),
},
[],
)
]
return fragment, other_actions
elif new_is_auto and old_is_auto and old_internal_type != new_internal_type:
fragment, _ = super()._alter_column_type_sql(
model, old_field, new_field, new_type
)
column = strip_quotes(new_field.column)
db_types = {
"AutoField": "integer",
"BigAutoField": "bigint",
"SmallAutoField": "smallint",
}
# Alter the sequence type if exists (Django 4.1+ identity columns
# don't have it).
other_actions = []
if sequence_name := self._get_sequence_name(table, column):
other_actions = [
(
self.sql_alter_sequence_type
% {
"sequence": self.quote_name(sequence_name),
"type": db_types[new_internal_type],
},
[],
),
]
return fragment, other_actions
else:
return super()._alter_column_type_sql(model, old_field, new_field, new_type)
@ -227,18 +275,6 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
new_db_params,
strict=False,
):
# Drop indexes on varchar/text/citext columns that are changing to a
# different type.
if (old_field.db_index or old_field.unique) and (
(old_type.startswith("varchar") and not new_type.startswith("varchar"))
or (old_type.startswith("text") and not new_type.startswith("text"))
or (old_type.startswith("citext") and not new_type.startswith("citext"))
):
index_name = self._create_index_name(
model._meta.db_table, [old_field.column], suffix="_like"
)
self.execute(self._delete_index_sql(model, index_name))
super()._alter_field(
model,
old_field,

View File

@ -1,3 +1,5 @@
import re
from django.db.migrations.utils import get_migration_name_timestamp
from django.db.transaction import atomic
@ -205,7 +207,7 @@ class Migration:
return "initial"
raw_fragments = [op.migration_name_fragment for op in self.operations]
fragments = [name for name in raw_fragments if name]
fragments = [re.sub(r"\W+", "_", name) for name in raw_fragments if name]
if not fragments or len(fragments) != len(self.operations):
return "auto_%s" % get_migration_name_timestamp()

View File

@ -97,7 +97,7 @@ class Aggregate(Func):
return "%s__%s" % (expressions[0].name, self.name.lower())
raise TypeError("Complex expressions require an alias")
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
return []
def as_sql(self, compiler, connection, **extra_context):

View File

@ -434,18 +434,11 @@ class ModelBase(type):
return cls._meta.default_manager
class ModelStateCacheDescriptor:
"""
Upon first access, replace itself with an empty dictionary on the instance.
"""
def __set_name__(self, owner, name):
self.attribute_name = name
class ModelStateFieldsCacheDescriptor:
def __get__(self, instance, cls=None):
if instance is None:
return self
res = instance.__dict__[self.attribute_name] = {}
res = instance.fields_cache = {}
return res
@ -458,20 +451,7 @@ class ModelState:
# explicit (non-auto) PKs. This impacts validation only; it has no effect
# on the actual save.
adding = True
fields_cache = ModelStateCacheDescriptor()
related_managers_cache = ModelStateCacheDescriptor()
def __getstate__(self):
state = self.__dict__.copy()
if "fields_cache" in state:
state["fields_cache"] = self.fields_cache.copy()
# Manager instances stored in related_managers_cache won't necessarily
# be deserializable if they were dynamically created via an inner
# scope, e.g. create_forward_many_to_many_manager() and
# create_generic_related_manager().
if "related_managers_cache" in state:
state["related_managers_cache"] = {}
return state
fields_cache = ModelStateFieldsCacheDescriptor()
class Model(metaclass=ModelBase):
@ -633,6 +613,7 @@ class Model(metaclass=ModelBase):
"""Hook to allow choosing the attributes to pickle."""
state = self.__dict__.copy()
state["_state"] = copy.copy(state["_state"])
state["_state"].fields_cache = state["_state"].fields_cache.copy()
# memoryview cannot be pickled, so cast it to bytes and store
# separately.
_memoryview_attrs = []
@ -799,15 +780,7 @@ class Model(metaclass=ModelBase):
return
update_fields = frozenset(update_fields)
field_names = set()
for field in self._meta.concrete_fields:
if not field.primary_key:
field_names.add(field.name)
if field.name != field.attname:
field_names.add(field.attname)
field_names = self._meta._non_pk_concrete_field_names
non_model_fields = update_fields.difference(field_names)
if non_model_fields:

View File

@ -417,7 +417,7 @@ class BaseExpression:
)
return clone
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
if not self.contains_aggregate:
return [self]
cols = []
@ -533,6 +533,7 @@ _connector_combinations = [
Combinable.SUB,
Combinable.MUL,
Combinable.DIV,
Combinable.MOD,
)
},
# Bitwise operators.
@ -855,7 +856,7 @@ class ResolvedOuterRef(F):
def relabeled_clone(self, relabels):
return self
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
return []
@ -1018,7 +1019,7 @@ class Value(SQLiteNumericMixin, Expression):
c.for_save = for_save
return c
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
return []
def _resolve_output_field(self):
@ -1063,7 +1064,7 @@ class RawSQL(Expression):
def as_sql(self, compiler, connection):
return "(%s)" % self.sql, self.params
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
return [self]
def resolve_expression(
@ -1121,7 +1122,7 @@ class Col(Expression):
relabels.get(self.alias, self.alias), self.target, self.output_field
)
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
return [self]
def get_db_converters(self, connection):
@ -1164,7 +1165,7 @@ class Ref(Expression):
def as_sql(self, compiler, connection):
return connection.ops.quote_name(self.refs), []
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
return [self]
@ -1235,11 +1236,11 @@ class ExpressionWrapper(SQLiteNumericMixin, Expression):
def get_source_expressions(self):
return [self.expression]
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
if isinstance(self.expression, Expression):
expression = self.expression.copy()
expression.output_field = self.output_field
return expression.get_group_by_cols(alias=alias)
return expression.get_group_by_cols()
# For non-expressions e.g. an SQL WHERE clause, the entire
# `expression` must be included in the GROUP BY clause.
return super().get_group_by_cols()
@ -1327,7 +1328,7 @@ class When(Expression):
*result_params,
)
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
# This is not a complete expression and cannot be used in GROUP BY.
cols = []
for source in self.get_source_expressions():
@ -1423,10 +1424,10 @@ class Case(SQLiteNumericMixin, Expression):
sql = connection.ops.unification_cast_sql(self.output_field) % sql
return sql, sql_params
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
if not self.cases:
return self.default.get_group_by_cols(alias)
return super().get_group_by_cols(alias)
return self.default.get_group_by_cols()
return super().get_group_by_cols()
class Subquery(BaseExpression, Combinable):
@ -1467,25 +1468,18 @@ class Subquery(BaseExpression, Combinable):
def get_external_cols(self):
return self.query.get_external_cols()
def as_sql(self, compiler, connection, template=None, query=None, **extra_context):
def as_sql(self, compiler, connection, template=None, **extra_context):
connection.ops.check_expression_support(self)
template_params = {**self.extra, **extra_context}
query = query or self.query
subquery_sql, sql_params = query.as_sql(compiler, connection)
subquery_sql, sql_params = self.query.as_sql(compiler, connection)
template_params["subquery"] = subquery_sql[1:-1]
template = template or template_params.get("template", self.template)
sql = template % template_params
return sql, sql_params
def get_group_by_cols(self, alias=None):
# If this expression is referenced by an alias for an explicit GROUP BY
# through values() a reference to this expression and not the
# underlying .query must be returned to ensure external column
# references are not grouped against as well.
if alias:
return [Ref(alias, self)]
return self.query.get_group_by_cols()
def get_group_by_cols(self):
return self.query.get_group_by_cols(wrapper=self)
class Exists(Subquery):
@ -1495,28 +1489,18 @@ class Exists(Subquery):
def __init__(self, queryset, negated=False, **kwargs):
self.negated = negated
super().__init__(queryset, **kwargs)
self.query = self.query.exists()
def __invert__(self):
clone = self.copy()
clone.negated = not self.negated
return clone
def get_group_by_cols(self, alias=None):
# self.query only gets limited to a single row in the .exists() call
# from self.as_sql() so deferring to Query.get_group_by_cols() is
# inappropriate.
if alias is None:
return [self]
return super().get_group_by_cols(alias)
def as_sql(self, compiler, connection, template=None, **extra_context):
query = self.query.exists(using=connection.alias)
def as_sql(self, compiler, connection, **extra_context):
try:
sql, params = super().as_sql(
compiler,
connection,
template=template,
query=query,
**extra_context,
)
except EmptyResultSet:
@ -1616,7 +1600,7 @@ class OrderBy(Expression):
return copy.as_sql(compiler, connection)
return self.as_sql(compiler, connection)
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())
@ -1746,7 +1730,7 @@ class Window(SQLiteNumericMixin, Expression):
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
group_by_cols = []
if self.partition_by:
group_by_cols.extend(self.partition_by.get_group_by_cols())
@ -1794,7 +1778,7 @@ class WindowFrame(Expression):
def __repr__(self):
return "<%s: %s>" % (self.__class__.__name__, self)
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
return []
def __str__(self):

View File

@ -590,14 +590,6 @@ class ReverseManyToOneDescriptor:
self.rel = rel
self.field = rel.field
@cached_property
def related_manager_cache_key(self):
# Being able to access the manager instance precludes it from being
# hidden. The rel's accessor name is used to allow multiple managers
# to the same model to coexist. e.g. post.attached_comment_set and
# post.attached_link_set are separately cached.
return self.rel.get_cache_name()
@cached_property
def related_manager_cls(self):
related_model = self.rel.related_model
@ -619,11 +611,8 @@ class ReverseManyToOneDescriptor:
"""
if instance is None:
return self
key = self.related_manager_cache_key
instance_cache = instance._state.related_managers_cache
if key not in instance_cache:
instance_cache[key] = self.related_manager_cls(instance)
return instance_cache[key]
return self.related_manager_cls(instance)
def _get_set_deprecation_msg_params(self):
return (
@ -941,17 +930,6 @@ class ManyToManyDescriptor(ReverseManyToOneDescriptor):
reverse=self.reverse,
)
@cached_property
def related_manager_cache_key(self):
if self.reverse:
# Symmetrical M2Ms won't have an accessor name, but should never
# end up in the reverse branch anyway, as the related_name ends up
# being hidden, and no public manager is created.
return self.rel.get_cache_name()
else:
# For forward managers, defer to the field name.
return self.field.get_cache_name()
def _get_set_deprecation_msg_params(self):
return (
"%s side of a many-to-many set"

View File

@ -223,6 +223,19 @@ class Now(Func):
compiler, connection, template="STATEMENT_TIMESTAMP()", **extra_context
)
def as_mysql(self, compiler, connection, **extra_context):
return self.as_sql(
compiler, connection, template="CURRENT_TIMESTAMP(6)", **extra_context
)
def as_sqlite(self, compiler, connection, **extra_context):
return self.as_sql(
compiler,
connection,
template="STRFTIME('%%Y-%%m-%%d %%H:%%M:%%f', 'NOW')",
**extra_context,
)
class TruncBase(TimezoneMixin, Transform):
kind = None

View File

@ -169,7 +169,7 @@ class Random(NumericOutputFieldMixin, Func):
def as_sqlite(self, compiler, connection, **extra_context):
return super().as_sql(compiler, connection, function="RAND", **extra_context)
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
return []

View File

@ -128,7 +128,7 @@ class Lookup(Expression):
def rhs_is_direct_value(self):
return not hasattr(self.rhs, "as_sql")
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
cols = []
for source in self.get_source_expressions():
cols.extend(source.get_group_by_cols())

View File

@ -89,6 +89,7 @@ class Options:
"many_to_many",
"concrete_fields",
"local_concrete_fields",
"_non_pk_concrete_field_names",
"_forward_fields_map",
"managers",
"managers_map",
@ -982,6 +983,19 @@ class Options:
names.append(name)
return frozenset(names)
@cached_property
def _non_pk_concrete_field_names(self):
"""
Return a set of the non-pk concrete field names defined on the model.
"""
names = []
for field in self.concrete_fields:
if not field.primary_key:
names.append(field.name)
if field.name != field.attname:
names.append(field.attname)
return frozenset(names)
@cached_property
def db_returning_fields(self):
"""

View File

@ -20,7 +20,7 @@ from django.db import (
router,
transaction,
)
from django.db.models import AutoField, DateField, DateTimeField, sql
from django.db.models import AutoField, DateField, DateTimeField, Field, sql
from django.db.models.constants import LOOKUP_SEP, OnConflict
from django.db.models.deletion import Collector
from django.db.models.expressions import Case, F, Ref, Value, When
@ -963,7 +963,25 @@ class QuerySet:
return obj, created
for k, v in resolve_callables(defaults):
setattr(obj, k, v)
obj.save(using=self.db)
update_fields = set(defaults)
concrete_field_names = self.model._meta._non_pk_concrete_field_names
# update_fields does not support non-concrete fields.
if concrete_field_names.issuperset(update_fields):
# Add fields which are set on pre_save(), e.g. auto_now fields.
# This is to maintain backward compatibility as these fields
# are not updated unless explicitly specified in the
# update_fields list.
for field in self.model._meta.local_concrete_fields:
if not (
field.primary_key or field.__class__.pre_save is Field.pre_save
):
update_fields.add(field.name)
if field.name != field.attname:
update_fields.add(field.attname)
obj.save(using=self.db, update_fields=update_fields)
else:
obj.save(using=self.db)
return obj, False
async def aupdate_or_create(self, defaults=None, **kwargs):

View File

@ -123,6 +123,7 @@ class SQLCompiler:
if self.query.group_by is None:
return []
expressions = []
allows_group_by_refs = self.connection.features.allows_group_by_refs
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
@ -131,18 +132,22 @@ class SQLCompiler:
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, "as_sql"):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
expr = self.query.resolve_ref(expr)
if not allows_group_by_refs and isinstance(expr, Ref):
expr = expr.source
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
# set to group by. So, we need to add cols in select, order_by, and
# having into the select in any case.
ref_sources = {expr.source for expr in expressions if isinstance(expr, Ref)}
for expr, _, _ in select:
aliased_exprs = {}
for expr, _, alias in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
if alias:
aliased_exprs[expr] = alias
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
@ -160,6 +165,8 @@ class SQLCompiler:
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
if allows_group_by_refs and (alias := aliased_exprs.get(expr)):
expr = Ref(alias, expr)
try:
sql, params = self.compile(expr)
except EmptyResultSet:
@ -344,7 +351,13 @@ class SQLCompiler:
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
yield field, False
if isinstance(field.expression, F) and (
annotation := self.query.annotation_select.get(
field.expression.name
)
):
field.expression = Ref(field.expression.name, annotation)
yield field, isinstance(field.expression, Ref)
continue
if field == "?": # random
yield OrderBy(Random()), False
@ -432,24 +445,25 @@ class SQLCompiler:
"""
result = []
seen = set()
replacements = {
expr: Ref(alias, expr)
for alias, expr in self.query.annotation_select.items()
}
for expr, is_ref in self._order_by_pairs():
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator and self.select:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
if not is_ref and self.query.combinator and self.select:
src = resolved.expression
expr_src = expr.expression
for sel_expr, _, col_alias in self.select:
if col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL("%d" % (idx + 1), ())])
resolved.set_source_expressions(
[Ref(col_alias if col_alias else src.target.column, src)]
)
break
else:
if col_alias:
@ -464,7 +478,7 @@ class SQLCompiler:
q.add_annotation(expr_src, col_name)
self.query.add_select_col(resolved, col_name)
resolved.set_source_expressions([RawSQL(f"{order_by_idx}", ())])
sql, params = self.compile(resolved)
sql, params = self.compile(resolved.replace_expressions(replacements))
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
@ -525,8 +539,8 @@ class SQLCompiler:
if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
for compiler in compilers:
if compiler.query.is_sliced:
raise DatabaseError(
"LIMIT/OFFSET not allowed in subqueries of compound statements."
)
@ -534,6 +548,11 @@ class SQLCompiler:
raise DatabaseError(
"ORDER BY not allowed in subqueries of compound statements."
)
elif self.query.is_sliced and combinator == "union":
limit = (self.query.low_mark, self.query.high_mark)
for compiler in compilers:
if not compiler.query.is_sliced:
compiler.query.set_limits(*limit)
parts = ()
for compiler in compilers:
try:
@ -853,7 +872,11 @@ class SQLCompiler:
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append("ORDER BY %s" % ", ".join(ordering))
order_by_sql = "ORDER BY %s" % ", ".join(ordering)
if combinator and features.requires_compound_order_by_subquery:
result = ["SELECT * FROM (", *result, ")", order_by_sql]
else:
result.append(order_by_sql)
if with_limit_offset:
result.append(
@ -1001,12 +1024,14 @@ class SQLCompiler:
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified.
# shortcut or the attribute name of the field that is specified or
# there are transforms to process.
if (
field.is_relation
and opts.ordering
and getattr(field, "attname", None) != pieces[-1]
and name != "pk"
and not getattr(transform_function, "has_transforms", False)
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()

View File

@ -556,7 +556,7 @@ class Query(BaseExpression):
def has_filters(self):
    """Return ``self.where`` (the query's WHERE clause tree)."""
    # NOTE(review): in Django's Query class this is normally exposed as a
    # @property — the decorator is outside this diff fragment; confirm
    # before relying on call syntax.
    return self.where
def exists(self, using, limit=True):
def exists(self, limit=True):
q = self.clone()
if not (q.distinct and q.is_sliced):
if q.group_by is True:
@ -568,11 +568,8 @@ class Query(BaseExpression):
q.set_group_by(allow_aliases=False)
q.clear_select_clause()
if q.combined_queries and q.combinator == "union":
limit_combined = connections[
using
].features.supports_slicing_ordering_in_compound
q.combined_queries = tuple(
combined_query.exists(using, limit=limit_combined)
combined_query.exists(limit=False)
for combined_query in q.combined_queries
)
q.clear_ordering(force=True)
@ -1150,12 +1147,14 @@ class Query(BaseExpression):
if col.alias in self.external_aliases
]
def get_group_by_cols(self, alias=None):
if alias:
return [Ref(alias, self)]
def get_group_by_cols(self, wrapper=None):
# If wrapper is referenced by an alias for an explicit GROUP BY through
# values() a reference to this expression and not the self must be
# returned to ensure external column references are not grouped against
# as well.
external_cols = self.get_external_cols()
if any(col.possibly_multivalued for col in external_cols):
return [self]
return [wrapper or self]
return external_cols
def as_sql(self, compiler, connection):
@ -1819,6 +1818,7 @@ class Query(BaseExpression):
final_transformer = functools.partial(
transform, name=name, previous=final_transformer
)
final_transformer.has_transforms = True
# Then, add the path to the query's joins. Note that we can't trim
# joins at this stage - we will need the information about join type
# of the trimmed joins.
@ -2219,8 +2219,8 @@ class Query(BaseExpression):
primary key, and the query would be equivalent, the optimization
will be made automatically.
"""
# Column names from JOINs to check collisions with aliases.
if allow_aliases:
# Column names from JOINs to check collisions with aliases.
column_names = set()
seen_models = set()
for join in list(self.alias_map.values())[1:]: # Skip base table.
@ -2230,13 +2230,31 @@ class Query(BaseExpression):
{field.column for field in model._meta.local_concrete_fields}
)
seen_models.add(model)
if self.values_select:
# If grouping by aliases is allowed assign selected values
# aliases by moving them to annotations.
group_by_annotations = {}
values_select = {}
for alias, expr in zip(self.values_select, self.select):
if isinstance(expr, Col):
values_select[alias] = expr
else:
group_by_annotations[alias] = expr
self.annotations = {**group_by_annotations, **self.annotations}
self.append_annotation_mask(group_by_annotations)
self.select = tuple(values_select.values())
self.values_select = tuple(values_select)
group_by = list(self.select)
if self.annotation_select:
for alias, annotation in self.annotation_select.items():
if not allow_aliases or alias in column_names:
alias = None
group_by_cols = annotation.get_group_by_cols(alias=alias)
for alias, annotation in self.annotation_select.items():
if not (group_by_cols := annotation.get_group_by_cols()):
continue
if (
allow_aliases
and alias not in column_names
and not annotation.contains_aggregate
):
group_by.append(Ref(alias, annotation))
else:
group_by.extend(group_by_cols)
self.group_by = tuple(group_by)

View File

@ -178,7 +178,7 @@ class WhereNode(tree.Node):
sql_string = "(%s)" % sql_string
return sql_string, result_params
def get_group_by_cols(self, alias=None):
def get_group_by_cols(self):
cols = []
for child in self.children:
cols.extend(child.get_group_by_cols())

View File

@ -118,10 +118,11 @@ def mark_for_rollback_on_error(using=None):
"""
try:
yield
except Exception:
except Exception as exc:
connection = get_connection(using)
if connection.in_atomic_block:
connection.needs_rollback = True
connection.rollback_exc = exc
raise

View File

@ -17,6 +17,7 @@ from contextlib import contextmanager
from importlib import import_module
from io import StringIO
import django
from django.core.management import call_command
from django.db import connections
from django.test import SimpleTestCase, TestCase
@ -397,6 +398,7 @@ def _init_worker(
serialized_contents=None,
process_setup=None,
process_setup_args=None,
debug_mode=None,
):
"""
Switch to databases dedicated to this worker.
@ -418,7 +420,8 @@ def _init_worker(
if process_setup_args is None:
process_setup_args = ()
process_setup(*process_setup_args)
setup_test_environment()
django.setup()
setup_test_environment(debug=debug_mode)
for alias in connections:
connection = connections[alias]
@ -471,10 +474,13 @@ class ParallelTestSuite(unittest.TestSuite):
run_subsuite = _run_subsuite
runner_class = RemoteTestRunner
def __init__(self, subsuites, processes, failfast=False, buffer=False):
def __init__(
self, subsuites, processes, failfast=False, debug_mode=False, buffer=False
):
self.subsuites = subsuites
self.processes = processes
self.failfast = failfast
self.debug_mode = debug_mode
self.buffer = buffer
self.initial_settings = None
self.serialized_contents = None
@ -506,6 +512,7 @@ class ParallelTestSuite(unittest.TestSuite):
self.serialized_contents,
self.process_setup.__func__,
self.process_setup_args,
self.debug_mode,
],
)
args = [
@ -931,6 +938,7 @@ class DiscoverRunner:
subsuites,
processes,
self.failfast,
self.debug_mode,
self.buffer,
)
return suite

View File

@ -54,7 +54,7 @@ from django.test.utils import (
modify_settings,
override_settings,
)
from django.utils.deprecation import RemovedInDjango50Warning
from django.utils.deprecation import RemovedInDjango50Warning, RemovedInDjango51Warning
from django.utils.functional import classproperty
from django.utils.version import PY310
from django.views.static import serve
@ -206,18 +206,18 @@ class _AssertFormErrorDeprecationHelper:
)
@staticmethod
def assertFormsetError(
def assertFormSetError(
self, response, formset, form_index, field, errors, msg_prefix=""
):
"""
Search for a formset named "formset" in the "response" and dispatch to
the new assertFormsetError() using that instance. If the name is found
the new assertFormSetError() using that instance. If the name is found
in multiple contexts they're all checked in order and any failure will
abort the test.
"""
warning_msg = (
f"Passing response to assertFormsetError() is deprecated. Use the formset "
f"object directly: assertFormsetError(response.context[{formset!r}], "
f"Passing response to assertFormSetError() is deprecated. Use the formset "
f"object directly: assertFormSetError(response.context[{formset!r}], "
f"{form_index!r}, ...)"
)
warnings.warn(warning_msg, RemovedInDjango50Warning, stacklevel=2)
@ -234,7 +234,7 @@ class _AssertFormErrorDeprecationHelper:
if formset not in context or not hasattr(context[formset], "forms"):
continue
found_formset = True
self.assertFormsetError(
self.assertFormSetError(
context[formset], form_index, field, errors, msg_prefix
)
if not found_formset:
@ -737,10 +737,19 @@ class SimpleTestCase(unittest.TestCase):
errors = to_list(errors)
self._assert_form_error(form, field, errors, msg_prefix, f"form {form!r}")
# RemovedInDjango51Warning.
def assertFormsetError(self, *args, **kw):
warnings.warn(
"assertFormsetError() is deprecated in favor of assertFormSetError().",
category=RemovedInDjango51Warning,
stacklevel=2,
)
return self.assertFormSetError(*args, **kw)
# RemovedInDjango50Warning: When the deprecation ends, remove the
# decorator.
@_AssertFormErrorDeprecationHelper.patch_signature
def assertFormsetError(self, formset, form_index, field, errors, msg_prefix=""):
def assertFormSetError(self, formset, form_index, field, errors, msg_prefix=""):
"""
Similar to assertFormError() but for formsets.
@ -752,7 +761,7 @@ class SimpleTestCase(unittest.TestCase):
"""
if errors is None:
warnings.warn(
"Passing errors=None to assertFormsetError() is deprecated, "
"Passing errors=None to assertFormSetError() is deprecated, "
"use errors=[] instead.",
RemovedInDjango50Warning,
stacklevel=2,
@ -1313,7 +1322,16 @@ class TransactionTestCase(SimpleTestCase):
inhibit_post_migrate=inhibit_post_migrate,
)
def assertQuerysetEqual(self, qs, values, transform=None, ordered=True, msg=None):
# RemovedInDjango51Warning.
def assertQuerysetEqual(self, *args, **kw):
warnings.warn(
"assertQuerysetEqual() is deprecated in favor of assertQuerySetEqual().",
category=RemovedInDjango51Warning,
stacklevel=2,
)
return self.assertQuerySetEqual(*args, **kw)
def assertQuerySetEqual(self, qs, values, transform=None, ordered=True, msg=None):
values = list(values)
items = qs
if transform is not None:

View File

@ -346,7 +346,7 @@ class LocalePrefixPattern:
@property
def regex(self):
# This is only used by reverse() and cached in _reverse_dict.
return re.compile(self.language_prefix)
return re.compile(re.escape(self.language_prefix))
@property
def language_prefix(self):

View File

@ -148,7 +148,16 @@ class View:
request.path,
extra={"status_code": 405, "request": request},
)
return HttpResponseNotAllowed(self._allowed_methods())
response = HttpResponseNotAllowed(self._allowed_methods())
if self.view_is_async:
async def func():
return response
return func()
else:
return response
def options(self, request, *args, **kwargs):
"""Handle responding to requests for the OPTIONS HTTP verb."""

View File

@ -6,18 +6,18 @@ It's sometimes useful to prepopulate your database with hard-coded data when
you're first setting up an app. You can provide initial data with migrations or
fixtures.
Providing initial data with migrations
======================================
Provide initial data with migrations
====================================
If you want to automatically load initial data for an app, create a
To automatically load initial data for an app, create a
:ref:`data migration <data-migrations>`. Migrations are run when setting up the
test database, so the data will be available there, subject to :ref:`some
limitations <test-case-serialized-rollback>`.
.. _initial-data-via-fixtures:
Providing data with fixtures
============================
Provide data with fixtures
==========================
You can also provide data using fixtures; however, this data isn't loaded
automatically, except if you use :attr:`.TransactionTestCase.fixtures`.
@ -80,16 +80,29 @@ from the fixture and reloaded into the database. Note this means that if you
change one of the rows created by a fixture and then run :djadmin:`loaddata`
again, you'll wipe out any changes you've made.
Where Django finds fixture files
--------------------------------
Tell Django where to look for fixture files
-------------------------------------------
By default, Django looks in the ``fixtures`` directory inside each app for
fixtures. You can set the :setting:`FIXTURE_DIRS` setting to a list of
additional directories where Django should look.
By default, Django looks for fixtures in the ``fixtures`` directory inside each
app, so the command ``loaddata sample`` will find the file
``my_app/fixtures/sample.json``. This works with relative paths as well, so
``loaddata my_app/sample`` will find the file
``my_app/fixtures/my_app/sample.json``.
When running :djadmin:`manage.py loaddata <loaddata>`, you can also
specify a path to a fixture file, which overrides searching the usual
directories.
Django also looks for fixtures in the list of directories provided in the
:setting:`FIXTURE_DIRS` setting.
To completely prevent the default search from happening, use an absolute path
to specify the location of your fixture file, e.g. ``loaddata /path/to/sample``.
.. admonition:: Namespace your fixture files
Django will use the first fixture file it finds whose name matches, so if
you have fixture files with the same name in different applications, you
will be unable to distinguish between them in your ``loaddata`` commands.
The easiest way to avoid this problem is by *namespacing* your fixture
files. That is, by putting them inside a directory named for their
application, as in the relative path example above.
.. seealso::

View File

@ -35,6 +35,10 @@ details on these changes.
* The ``map_width`` and ``map_height`` attributes of ``BaseGeometryWidget``
will be removed.
* The ``SimpleTestCase.assertFormsetError()`` method will be removed.
* The ``TransactionTestCase.assertQuerysetEqual()`` method will be removed.
.. _deprecation-removed-in-5.0:
5.0

View File

@ -181,10 +181,10 @@ It worked!
.. console::
$ python manage.py runserver 0:8000
$ python manage.py runserver 0.0.0.0:8000
**0** is a shortcut for **0.0.0.0**. Full docs for the development server
can be found in the :djadmin:`runserver` reference.
Full docs for the development server can be found in the
:djadmin:`runserver` reference.
.. admonition:: Automatic reloading of :djadmin:`runserver`

View File

@ -491,7 +491,7 @@ class:
response = self.client.get(reverse('polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
self.assertQuerySetEqual(response.context['latest_question_list'], [])
def test_past_question(self):
"""
@ -500,7 +500,7 @@ class:
"""
question = create_question(question_text="Past question.", days=-30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
response.context['latest_question_list'],
[question],
)
@ -513,7 +513,7 @@ class:
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_question_list'], [])
self.assertQuerySetEqual(response.context['latest_question_list'], [])
def test_future_question_and_past_question(self):
"""
@ -523,7 +523,7 @@ class:
question = create_question(question_text="Past question.", days=-30)
create_question(question_text="Future question.", days=30)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
response.context['latest_question_list'],
[question],
)
@ -535,7 +535,7 @@ class:
question1 = create_question(question_text="Past question 1.", days=-30)
question2 = create_question(question_text="Past question 2.", days=-5)
response = self.client.get(reverse('polls:index'))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
response.context['latest_question_list'],
[question2, question1],
)
@ -551,7 +551,7 @@ repetition out of the process of creating questions.
Note that the :class:`django.test.TestCase` class provides some additional
assertion methods. In these examples, we use
:meth:`~django.test.SimpleTestCase.assertContains()` and
:meth:`~django.test.TransactionTestCase.assertQuerysetEqual()`.
:meth:`~django.test.TransactionTestCase.assertQuerySetEqual()`.
In ``test_past_question``, we create a question and verify that it appears in
the list.

View File

@ -553,6 +553,9 @@ configured:
:setting:`OPTIONS <TEMPLATES-OPTIONS>` must be a string but got: ``{value}``
(``{type}``).
* **templates.E003**:``<name>`` is used for multiple template tag modules:
``<module list>``. *This check was changed to* ``templates.W003`` *in Django
4.1.2*.
* **templates.W003**:``<name>`` is used for multiple template tag modules:
``<module list>``.
Translation

View File

@ -825,6 +825,11 @@ Generate migration files without Django version and timestamp header.
Makes ``makemigrations`` exit with a non-zero status when model changes without
migrations are detected.
.. versionchanged:: 4.2
In older versions, the missing migrations were also created when using the
``--check`` option.
.. django-admin-option:: --scriptable
.. versionadded:: 4.1
@ -1003,8 +1008,8 @@ separate ports by executing ``django-admin runserver`` more than once.
Note that the default IP address, ``127.0.0.1``, is not accessible from other
machines on your network. To make your development server viewable to other
machines on the network, use its own IP address (e.g. ``192.168.2.1``) or
``0.0.0.0`` or ``::`` (with IPv6 enabled).
machines on the network, use its own IP address (e.g. ``192.168.2.1``), ``0``
(shortcut for ``0.0.0.0``), ``0.0.0.0``, or ``::`` (with IPv6 enabled).
You can provide an IPv6 address surrounded by brackets
(e.g. ``[200a::1]:8000``). This will automatically enable IPv6 support.

View File

@ -495,6 +495,11 @@ Usage example::
``Now()`` uses ``STATEMENT_TIMESTAMP`` instead. If you need the transaction
timestamp, use :class:`django.contrib.postgres.functions.TransactionNow`.
.. versionchanged:: 4.2
Support for microsecond precision on MySQL and millisecond precision on
SQLite were added.
``Trunc``
---------

View File

@ -1036,13 +1036,16 @@ calling the appropriate methods on the wrapped expression.
``expression`` is the same as ``self``.
.. method:: get_group_by_cols(alias=None)
.. method:: get_group_by_cols()
Responsible for returning the list of columns referenced by
this expression. ``get_group_by_cols()`` should be called on any
nested expressions. ``F()`` objects, in particular, hold a reference
to a column. The ``alias`` parameter will be ``None`` unless the
expression has been annotated and is used for grouping.
to a column.
.. versionchanged:: 4.2
The ``alias=None`` keyword argument was removed.
.. method:: asc(nulls_first=None, nulls_last=None)

View File

@ -2012,13 +2012,14 @@ your resulting ``User`` model will have the following attributes::
A ``RelatedObjectDoesNotExist`` exception is raised when accessing the reverse
relationship if an entry in the related table doesn't exist. This is a subclass
of the target model's :exc:`Model.DoesNotExist
<django.db.models.Model.DoesNotExist>` exception. For example, if a user
doesn't have a supervisor designated by ``MySpecialUser``::
<django.db.models.Model.DoesNotExist>` exception and can be accessed as an
attribute of the reverse accessor. For example, if a user doesn't have a
supervisor designated by ``MySpecialUser``::
>>> user.supervisor_of
Traceback (most recent call last):
...
RelatedObjectDoesNotExist: User has no supervisor_of.
try:
user.supervisor_of
except User.supervisor_of.RelatedObjectDoesNotExist:
pass
.. _onetoone-arguments:

View File

@ -621,6 +621,14 @@ from the DB will get updated. In effect there is an automatic
``update_fields`` in this case. If you assign or change any deferred field
value, the field will be added to the updated fields.
.. admonition:: ``Field.pre_save()`` and ``update_fields``
If ``update_fields`` is passed in, only the
:meth:`~django.db.models.Field.pre_save` methods of the ``update_fields``
are called. For example, this means that date/time fields with
``auto_now=True`` will not be updated unless they are included in the
``update_fields``.
Deleting objects
================

View File

@ -162,7 +162,7 @@ Minor features
* The ``HttpOnly`` flag can be set on the CSRF cookie with
:setting:`CSRF_COOKIE_HTTPONLY`.
* The :meth:`~django.test.TransactionTestCase.assertQuerysetEqual` now checks
* The ``assertQuerysetEqual()`` now checks
for undefined order and raises :exc:`ValueError` if undefined
order is spotted. The order is seen as undefined if the given ``QuerySet``
isn't ordered and there is more than one ordered value to compare against.
@ -321,7 +321,7 @@ Minor features
* The :class:`~django.test.SimpleTestCase` class includes a new assertion
helper for testing formset errors:
:meth:`~django.test.SimpleTestCase.assertFormsetError`.
``django.test.SimpleTestCase.assertFormsetError()``.
* The list of related fields added to a
:class:`~django.db.models.query.QuerySet` by

13
docs/releases/3.2.16.txt Normal file
View File

@ -0,0 +1,13 @@
===========================
Django 3.2.16 release notes
===========================
*October 4, 2022*
Django 3.2.16 fixes a security issue with severity "medium" in 3.2.15.
CVE-2022-41323: Potential denial-of-service vulnerability in internationalized URLs
===================================================================================
Internationalized URLs were subject to potential denial of service attack via
the locale parameter.

View File

@ -539,7 +539,8 @@ Tests
<django.db.transaction.on_commit>` in a list. This allows you to test such
callbacks without using the slower :class:`.TransactionTestCase`.
* :meth:`.TransactionTestCase.assertQuerysetEqual` now supports direct
* :meth:`TransactionTestCase.assertQuerysetEqual()
<django.test.TransactionTestCase.assertQuerySetEqual>` now supports direct
comparison against another queryset rather than being restricted to
comparison against a list of string representations of objects when using the
default value for the ``transform`` argument.

View File

@ -50,8 +50,7 @@ Bugfixes
========
* Fixed a regression in Django 4.0 that caused a crash of
:meth:`~django.test.SimpleTestCase.assertFormsetError` on a formset named
``form`` (:ticket:`33346`).
``assertFormsetError()`` on a formset named ``form`` (:ticket:`33346`).
* Fixed a bug in Django 4.0 that caused a crash on booleans with the
``RedisCache`` backend (:ticket:`33361`).

13
docs/releases/4.0.8.txt Normal file
View File

@ -0,0 +1,13 @@
==========================
Django 4.0.8 release notes
==========================
*October 4, 2022*
Django 4.0.8 fixes a security issue with severity "medium" in 4.0.7.
CVE-2022-41323: Potential denial-of-service vulnerability in internationalized URLs
===================================================================================
Internationalized URLs were subject to potential denial of service attack via
the locale parameter.

View File

@ -2,9 +2,16 @@
Django 4.1.2 release notes
==========================
*Expected October 4, 2022*
*October 4, 2022*
Django 4.1.2 fixes several bugs in 4.1.1.
Django 4.1.2 fixes a security issue with severity "medium" and several bugs in
4.1.1.
CVE-2022-41323: Potential denial-of-service vulnerability in internationalized URLs
===================================================================================
Internationalized URLs were subject to potential denial of service attack via
the locale parameter.
Bugfixes
========
@ -22,3 +29,30 @@ Bugfixes
* Fixed a regression in Django 4.1 that caused a
``QuerySet.values()/values_list()`` crash on ``ArrayAgg()`` and
``JSONBAgg()`` (:ticket:`34016`).
* Fixed a bug in Django 4.1 that caused :attr:`.ModelAdmin.autocomplete_fields`
to be incorrectly selected after adding/changing related instances via popups
(:ticket:`34025`).
* Fixed a regression in Django 4.1 where the app registry was not populated
when running parallel tests with the ``multiprocessing`` start method
``spawn`` (:ticket:`34010`).
* Fixed a regression in Django 4.1 where the ``--debug-mode`` argument to
``test`` did not work when running parallel tests with the
``multiprocessing`` start method ``spawn`` (:ticket:`34010`).
* Fixed a regression in Django 4.1 that didn't alter a sequence type when
altering type of pre-Django 4.1 serial columns on PostgreSQL
(:ticket:`34058`).
* Fixed a regression in Django 4.1 that caused a crash for :class:`View`
subclasses with asynchronous handlers when handling non-allowed HTTP methods
(:ticket:`34062`).
* Reverted caching related managers for ``ForeignKey``, ``ManyToManyField``,
and ``GenericRelation`` that caused the incorrect refreshing of related
objects (:ticket:`33984`).
* Relaxed the system check added in Django 4.1 for the same name used for
multiple template tag modules to a warning (:ticket:`32987`).

12
docs/releases/4.1.3.txt Normal file
View File

@ -0,0 +1,12 @@
==========================
Django 4.1.3 release notes
==========================
*Expected November 1, 2022*
Django 4.1.3 fixes several bugs in 4.1.2.
Bugfixes
========
* ...

View File

@ -407,8 +407,8 @@ Tests
raises a ``RuntimeError``, the same as outside of tests.
* :meth:`.SimpleTestCase.assertFormError` and
:meth:`~.SimpleTestCase.assertFormsetError` now support passing a
form/formset object directly.
:meth:`assertFormsetError() <django.test.SimpleTestCase.assertFormSetError>`
now support passing a form/formset object directly.
URLs
~~~~
@ -530,7 +530,8 @@ Miscellaneous
* Related managers for :class:`~django.db.models.ForeignKey`,
:class:`~django.db.models.ManyToManyField`, and
:class:`~django.contrib.contenttypes.fields.GenericRelation` are now cached
on the :class:`~django.db.models.Model` instance to which they belong.
on the :class:`~django.db.models.Model` instance to which they belong. *This
change was reverted in Django 4.1.2.*
* The Django test runner now returns a non-zero error code for unexpected
successes from tests marked with :py:func:`unittest.expectedFailure`.
@ -670,8 +671,8 @@ Miscellaneous
* The undocumented ability to pass ``errors=None`` to
:meth:`.SimpleTestCase.assertFormError` and
:meth:`~.SimpleTestCase.assertFormsetError` is deprecated. Use ``errors=[]``
instead.
:meth:`assertFormsetError() <django.test.SimpleTestCase.assertFormSetError>`
is deprecated. Use ``errors=[]`` instead.
* ``django.contrib.sessions.serializers.PickleSerializer`` is deprecated due to
the risk of remote code execution.

View File

@ -48,6 +48,12 @@ Minor features
:attr:`~django.contrib.admin.ModelAdmin.filter_vertical` widgets are now
filterable.
* The ``admin/base.html`` template now has a new block ``nav-breadcrumbs``
which contains the navigation landmark and the ``breadcrumbs`` block.
* :attr:`.ModelAdmin.list_editable` now uses atomic transactions when making
edits.
:mod:`django.contrib.admindocs`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -220,6 +226,9 @@ Models
the text value of a key, index, or path transform of
:class:`~django.db.models.JSONField`.
* :class:`~django.db.models.functions.Now` now supports microsecond precision
on MySQL and millisecond precision on SQLite.
Requests and Responses
~~~~~~~~~~~~~~~~~~~~~~
@ -271,7 +280,8 @@ Utilities
Validators
~~~~~~~~~~
* ...
* The list of common passwords used by ``CommonPasswordValidator`` is updated
to the most recent version.
.. _backwards-incompatible-4.2:
@ -316,6 +326,14 @@ Miscellaneous
* :ttag:`{% blocktranslate asvar … %}<blocktranslate>` result is now marked as
safe for (HTML) output purposes.
* The ``autofocus`` HTML attribute in the admin search box is removed as it can
be confusing for screen readers.
* The :option:`makemigrations --check` option no longer creates missing
migration files.
* The ``alias`` argument for :meth:`.Expression.get_group_by_cols` is removed.
.. _deprecated-features-4.2:
Features deprecated in 4.2
@ -401,3 +419,9 @@ Miscellaneous
* The ``map_height`` and ``map_width`` attributes of ``BaseGeometryWidget`` are
deprecated, use CSS to size map widgets instead.
* ``SimpleTestCase.assertFormsetError()`` is deprecated in favor of
``assertFormSetError()``.
* ``TransactionTestCase.assertQuerysetEqual()`` is deprecated in favor of
``assertQuerySetEqual()``.

View File

@ -33,6 +33,7 @@ versions of the documentation contain the release notes for any later releases.
.. toctree::
:maxdepth: 1
4.1.3
4.1.2
4.1.1
4.1
@ -42,6 +43,7 @@ versions of the documentation contain the release notes for any later releases.
.. toctree::
:maxdepth: 1
4.0.8
4.0.7
4.0.6
4.0.5
@ -56,6 +58,7 @@ versions of the documentation contain the release notes for any later releases.
.. toctree::
:maxdepth: 1
3.2.16
3.2.15
3.2.14
3.2.13

View File

@ -36,6 +36,17 @@ Issues under Django's security process
All security issues have been handled under versions of Django's security
process. These are listed below.
October 4, 2022 - :cve:`2022-41323`
-----------------------------------
Potential denial-of-service vulnerability in internationalized URLs. `Full
description
<https://www.djangoproject.com/weblog/2022/oct/04/security-releases/>`__
* Django 4.1 :commit:`(patch) <9d656ea51d9ea7105c0c0785783ac29d426a7d25>`
* Django 4.0 :commit:`(patch) <23f0093125ac2e553da6c1b2f9988eb6a3dd2ea1>`
* Django 3.2 :commit:`(patch) <5b6b257fa7ec37ff27965358800c67e2dd11c924>`
August 3, 2022 - :cve:`2022-36359`
----------------------------------

View File

@ -1,3 +1,3 @@
pyenchant
Sphinx>=3.1.0
Sphinx>=4.5.0
sphinxcontrib-spelling

View File

@ -615,12 +615,17 @@ Django includes four validators:
Validates that the password is not a common password. This converts the
password to lowercase (to do a case-insensitive comparison) and checks it
against a list of 20,000 common passwords created by `Royce Williams
<https://gist.github.com/roycewilliams/281ce539915a947a23db17137d91aeb7>`_.
<https://gist.github.com/roycewilliams/226886fd01572964e1431ac8afc999ce>`_.
The ``password_list_path`` can be set to the path of a custom file of
common passwords. This file should contain one lowercase password per line
and may be plain text or gzipped.
.. versionchanged:: 4.2
The list of 20,000 common passwords was updated to the most recent
version.
.. class:: NumericPasswordValidator()
Validate that the password is not entirely numeric.

View File

@ -72,28 +72,27 @@ If you have multiple caches defined in :setting:`CACHES`, Django will use the
default cache. To use another cache, set :setting:`SESSION_CACHE_ALIAS` to the
name of that cache.
Once your cache is configured, you've got two choices for how to store data in
the cache:
Once your cache is configured, you have to choose between a database-backed
cache and a non-persistent cache.
* Set :setting:`SESSION_ENGINE` to
``"django.contrib.sessions.backends.cache"`` for a simple caching session
store. Session data will be stored directly in your cache. However, session
data may not be persistent: cached data can be evicted if the cache fills
up or if the cache server is restarted.
The cached database backend (``cached_db``) uses a write-through cache --
session writes are applied to both the cache and the database. Session reads
use the cache, or the database if the data has been evicted from the cache. To
use this backend, set :setting:`SESSION_ENGINE` to
``"django.contrib.sessions.backends.cached_db"``, and follow the configuration
instructions for the `using database-backed sessions`_.
* For persistent, cached data, set :setting:`SESSION_ENGINE` to
``"django.contrib.sessions.backends.cached_db"``. This uses a
write-through cache -- every write to the cache will also be written to
the database. Session reads only use the database if the data is not
already in the cache.
The cache backend (``cache``) stores session data only in your cache. This is
faster because it avoids database persistence, but you will have to consider
what happens when cache data is evicted. Eviction can occur if the cache fills
up or the cache server is restarted, and it will mean session data is lost,
including logging out users. To use this backend, set :setting:`SESSION_ENGINE`
to ``"django.contrib.sessions.backends.cache"``.
Both session stores are quite fast, but the simple cache is faster because it
disregards persistence. In most cases, the ``cached_db`` backend will be fast
enough, but if you need that last bit of performance, and are willing to let
session data be expunged from time to time, the ``cache`` backend is for you.
If you use the ``cached_db`` session backend, you also need to follow the
configuration instructions for the `using database-backed sessions`_.
The cache backend can be made persistent by using a persistent cache, such as
Redis with appropriate configuration. But unless your cache is definitely
configured for sufficient persistence, opt for the cached database backend.
This avoids edge cases caused by unreliable data storage in production.
Using file-based sessions
-------------------------

View File

@ -34,12 +34,15 @@ short:
* Use Django's test client to establish that the correct template is being
rendered and that the template is passed the correct context data.
* Use :class:`~django.test.RequestFactory` to test view functions directly,
bypassing the routing and middleware layers.
* Use in-browser frameworks like Selenium_ to test *rendered* HTML and the
*behavior* of web pages, namely JavaScript functionality. Django also
provides special support for those frameworks; see the section on
:class:`~django.test.LiveServerTestCase` for more details.
A comprehensive test suite should use a combination of both test types.
A comprehensive test suite should use a combination of all of these test types.
Overview and a quick example
----------------------------
@ -784,7 +787,7 @@ add some database-specific features:
* Database :attr:`~TransactionTestCase.fixtures`.
* Test :ref:`skipping based on database backend features <skipping-tests>`.
* The remaining specialized :meth:`assert*
<TransactionTestCase.assertQuerysetEqual>` methods.
<TransactionTestCase.assertQuerySetEqual>` methods.
Django's :class:`TestCase` class is a more commonly used subclass of
``TransactionTestCase`` that makes use of database transaction facilities
@ -1570,16 +1573,16 @@ your test suite.
``assertFormError()`` is deprecated and will be removed in Django 5.0.
Use the form instance directly instead.
.. method:: SimpleTestCase.assertFormsetError(formset, form_index, field, errors, msg_prefix='')
.. method:: SimpleTestCase.assertFormSetError(formset, form_index, field, errors, msg_prefix='')
Asserts that the ``formset`` raises the provided list of errors when
rendered.
``formset`` is a ``Formset`` instance. The formset must be bound but not
necessarily validated (``assertFormsetError()`` will automatically call the
``formset`` is a ``FormSet`` instance. The formset must be bound but not
necessarily validated (``assertFormSetError()`` will automatically call the
``full_clean()`` on the formset).
``form_index`` is the number of the form within the ``Formset`` (starting
``form_index`` is the number of the form within the ``FormSet`` (starting
from 0). Use ``form_index=None`` to check the formset's non-form errors,
i.e. the errors you get when calling ``formset.non_form_errors()``. In that
case you must also use ``field=None``.
@ -1590,9 +1593,14 @@ your test suite.
.. deprecated:: 4.1
Support for passing a response object and a formset name to
``assertFormsetError()`` is deprecated and will be removed in Django
``assertFormSetError()`` is deprecated and will be removed in Django
5.0. Use the formset instance directly instead.
.. deprecated:: 4.2
The ``assertFormsetError()`` assertion method is deprecated. Use
``assertFormSetError()`` instead.
.. method:: SimpleTestCase.assertContains(response, text, count=None, status_code=200, msg_prefix='', html=False)
Asserts that a :class:`response <django.http.HttpResponse>` produced the
@ -1769,7 +1777,7 @@ your test suite.
Output in case of error can be customized with the ``msg`` argument.
.. method:: TransactionTestCase.assertQuerysetEqual(qs, values, transform=None, ordered=True, msg=None)
.. method:: TransactionTestCase.assertQuerySetEqual(qs, values, transform=None, ordered=True, msg=None)
Asserts that a queryset ``qs`` matches a particular iterable of values
``values``.
@ -1786,6 +1794,11 @@ your test suite.
Output in case of error can be customized with the ``msg`` argument.
.. deprecated:: 4.2
The ``assertQuerysetEqual()`` assertion method is deprecated. Use
``assertQuerySetEqual()`` instead.
.. method:: TransactionTestCase.assertNumQueries(num, func, *args, **kwargs)
Asserts that when ``func`` is called with ``*args`` and ``**kwargs`` that

View File

@ -9,7 +9,7 @@
"npm": ">=1.3.0 <3.0.0"
},
"devDependencies": {
"eslint": "^8.23.0",
"eslint": "^8.24.0",
"puppeteer": "^14.1.1",
"grunt": "^1.5.3",
"grunt-cli": "^1.4.3",

View File

@ -1,4 +1,5 @@
import datetime
from unittest import mock
from django.contrib import admin
from django.contrib.admin.models import LogEntry
@ -16,12 +17,12 @@ from django.contrib.admin.views.main import (
from django.contrib.auth.models import User
from django.contrib.contenttypes.models import ContentType
from django.contrib.messages.storage.cookie import CookieStorage
from django.db import connection, models
from django.db import DatabaseError, connection, models
from django.db.models import F, Field, IntegerField
from django.db.models.functions import Upper
from django.db.models.lookups import Contains, Exact
from django.template import Context, Template, TemplateSyntaxError
from django.test import TestCase, override_settings
from django.test import TestCase, override_settings, skipUnlessDBFeature
from django.test.client import RequestFactory
from django.test.utils import CaptureQueriesContext, isolate_apps, register_lookup
from django.urls import reverse
@ -400,6 +401,53 @@ class ChangeListTests(TestCase):
with self.assertRaises(IncorrectLookupParameters):
m.get_changelist_instance(request)
@skipUnlessDBFeature("supports_transactions")
def test_list_editable_atomicity(self):
    """A failure while saving list_editable rows rolls back every change."""
    swallow_a = Swallow.objects.create(origin="Swallow A", load=4, speed=1)
    swallow_b = Swallow.objects.create(origin="Swallow B", load=2, speed=2)
    self.client.force_login(self.superuser)
    changelist_url = reverse("admin:admin_changelist_swallow_changelist")
    post_data = {
        "form-TOTAL_FORMS": "2",
        "form-INITIAL_FORMS": "2",
        "form-MIN_NUM_FORMS": "0",
        "form-MAX_NUM_FORMS": "1000",
        "form-0-uuid": str(swallow_a.pk),
        "form-1-uuid": str(swallow_b.pk),
        "form-0-load": "9.0",
        "form-0-speed": "3.0",
        "form-1-load": "5.0",
        "form-1-speed": "1.0",
        "_save": "Save",
    }

    def assert_save_rolls_back(side_effect):
        # Make ModelAdmin.log_change() raise (possibly only for the second
        # form) and verify that neither row was modified afterwards.
        with mock.patch(
            "django.contrib.admin.ModelAdmin.log_change",
            side_effect=side_effect,
        ):
            with self.assertRaises(DatabaseError):
                self.client.post(changelist_url, post_data)
        for swallow, load, speed in (
            (swallow_a, 4, 1),
            (swallow_b, 2, 2),
        ):
            swallow.refresh_from_db()
            self.assertEqual(swallow.load, load)
            self.assertEqual(swallow.speed, speed)

    # Failure while logging the first form.
    assert_save_rolls_back(DatabaseError)
    # Failure on the second form, after the first one saved successfully.
    assert_save_rolls_back([None, DatabaseError])
def test_custom_paginator(self):
new_parent = Parent.objects.create(name="parent")
for i in range(1, 201):
@ -1537,7 +1585,7 @@ class ChangeListTests(TestCase):
self.assertContains(
response,
'<input type="text" size="40" name="q" value="" id="searchbar" '
'autofocus aria-describedby="searchbar_helptext">',
'aria-describedby="searchbar_helptext">',
)

View File

@ -1587,6 +1587,21 @@ class ManageRunserver(SimpleTestCase):
call_command(self.cmd, addrport="7000")
self.assertServerSettings("127.0.0.1", "7000")
@mock.patch("django.core.management.commands.runserver.run")
@mock.patch("django.core.management.base.BaseCommand.check_migrations")
def test_zero_ip_addr(self, *mocked_objects):
    """The shorthand address ``0`` is reported as ``0.0.0.0`` at startup."""
    options = {
        "addrport": "0:8000",
        "use_reloader": False,
        "skip_checks": True,
        "stdout": self.output,
    }
    call_command("runserver", **options)
    startup_message = self.output.getvalue()
    self.assertIn(
        "Starting development server at http://0.0.0.0:8000/",
        startup_message,
    )
@unittest.skipUnless(socket.has_ipv6, "platform doesn't support IPv6")
def test_runner_addrport_ipv6(self):
call_command(self.cmd, addrport="", use_ipv6=True)

View File

@ -148,7 +148,7 @@ class LogEntryTests(TestCase):
)
response = self.client.post(change_url, post_data)
self.assertRedirects(response, reverse("admin:admin_utils_site_changelist"))
self.assertQuerysetEqual(Article.objects.filter(pk=a2.pk), [])
self.assertSequenceEqual(Article.objects.filter(pk=a2.pk), [])
logentry = LogEntry.objects.filter(content_type__model__iexact="site").latest(
"action_time"
)

View File

@ -1165,6 +1165,14 @@ class GetFormsetsArgumentCheckingAdmin(admin.ModelAdmin):
return super().get_formsets_with_inlines(request, obj)
class CountryAdmin(admin.ModelAdmin):
    # Searchable by name — required so this admin can serve the
    # autocomplete lookups used by TravelerAdmin.autocomplete_fields.
    search_fields = ["name"]
class TravelerAdmin(admin.ModelAdmin):
    # Render living_country as an autocomplete (select2) widget instead of a
    # plain <select>; lookups are served by CountryAdmin's search_fields.
    autocomplete_fields = ["living_country"]
site = admin.AdminSite(name="admin")
site.site_url = "/my-site-url/"
site.register(Article, ArticleAdmin)
@ -1286,8 +1294,8 @@ site.register(ExplicitlyProvidedPK, GetFormsetsArgumentCheckingAdmin)
site.register(ImplicitlyGeneratedPK, GetFormsetsArgumentCheckingAdmin)
site.register(UserProxy)
site.register(Box)
site.register(Country)
site.register(Traveler)
site.register(Country, CountryAdmin)
site.register(Traveler, TravelerAdmin)
# Register core models we need in our tests
site.register(User, UserAdmin)

View File

@ -0,0 +1,25 @@
from django.contrib.auth.models import User
from django.test import TestCase, override_settings
from django.urls import reverse
@override_settings(ROOT_URLCONF="admin_views.urls")
class AdminBreadcrumbsTests(TestCase):
    """The breadcrumbs <nav> is rendered on admin subpages, not the index."""

    # Markup emitted by the admin base template for the breadcrumbs block.
    BREADCRUMBS_NAV = '<nav aria-label="Breadcrumbs">'

    @classmethod
    def setUpTestData(cls):
        # One shared superuser is enough for every test in this class.
        cls.superuser = User.objects.create_superuser(
            username="super",
            password="secret",
            email="super@example.com",
        )

    def setUp(self):
        self.client.force_login(self.superuser)

    def test_breadcrumbs_absent(self):
        # The index page is the navigation root, so no breadcrumbs appear.
        resp = self.client.get(reverse("admin:index"))
        self.assertNotContains(resp, self.BREADCRUMBS_NAV)

    def test_breadcrumbs_present(self):
        resp = self.client.get(reverse("admin:auth_user_add"))
        self.assertContains(resp, self.BREADCRUMBS_NAV)

View File

@ -96,27 +96,6 @@ class SeleniumTests(AdminSeleniumTestCase):
self.selenium.switch_to.active_element, django_administration_title
)
def test_skip_link_is_skipped_when_there_is_searchbar(self):
    """On a changelist with a search bar, focus lands on the search box
    and the skip-to-content link stays hidden."""
    from selenium.webdriver.common.by import By

    self.admin_login(
        username="super",
        password="secret",
        login_url=reverse("admin:index"),
    )
    groups_link = self.selenium.find_element(By.LINK_TEXT, "Groups")
    with self.wait_page_loaded():
        groups_link.click()
    find = self.selenium.find_element
    # The skip link must not be displayed...
    self.assertFalse(
        find(By.CLASS_NAME, "skip-to-content-link").is_displayed()
    )
    # ...because the search bar takes the autofocus instead.
    self.assertEqual(
        self.selenium.switch_to.active_element,
        find(By.ID, "searchbar"),
    )
def test_skip_link_with_RTL_language_doesnt_create_horizontal_scrolling(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.common.keys import Keys

View File

@ -6317,18 +6317,24 @@ class SeleniumTests(AdminSeleniumTestCase):
finally:
self.selenium.set_window_size(current_size["width"], current_size["height"])
def test_updating_related_objects_updates_fk_selects(self):
def test_updating_related_objects_updates_fk_selects_except_autocompletes(self):
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import Select
born_country_select_id = "id_born_country"
living_country_select_id = "id_living_country"
living_country_select2_textbox_id = "select2-id_living_country-container"
favorite_country_to_vacation_select_id = "id_favorite_country_to_vacation"
continent_select_id = "id_continent"
def _get_HTML_inside_element_by_id(id_):
return self.selenium.find_element(By.ID, id_).get_attribute("innerHTML")
def _get_text_inside_element_by_selector(selector):
return self.selenium.find_element(By.CSS_SELECTOR, selector).get_attribute(
"innerText"
)
self.admin_login(
username="super", password="secret", login_url=reverse("admin:index")
)
@ -6353,12 +6359,16 @@ class SeleniumTests(AdminSeleniumTestCase):
<option value="1" selected="">Argentina</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
""",
# Argentina isn't added to the living_country select nor selected by
# the select2 widget.
self.assertEqual(
_get_text_inside_element_by_selector(f"#{living_country_select_id}"), ""
)
self.assertEqual(
_get_text_inside_element_by_selector(
f"#{living_country_select2_textbox_id}"
),
"",
)
# Argentina won't appear because favorite_country_to_vacation field has
# limit_choices_to.
@ -6386,13 +6396,18 @@ class SeleniumTests(AdminSeleniumTestCase):
<option value="2">Spain</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
<option value="2" selected="">Spain</option>
""",
# Spain is added to the living_country select and it's also selected by
# the select2 widget.
self.assertEqual(
_get_text_inside_element_by_selector(f"#{living_country_select_id} option"),
"Spain",
)
self.assertEqual(
_get_text_inside_element_by_selector(
f"#{living_country_select2_textbox_id}"
),
"Spain",
)
# Spain won't appear because favorite_country_to_vacation field has
# limit_choices_to.
@ -6422,13 +6437,17 @@ class SeleniumTests(AdminSeleniumTestCase):
<option value="2">Italy</option>
""",
)
self.assertHTMLEqual(
_get_HTML_inside_element_by_id(living_country_select_id),
"""
<option value="" selected="">---------</option>
<option value="1">Argentina</option>
<option value="2" selected="">Italy</option>
""",
# Italy is added to the living_country select and it's also selected by
# the select2 widget.
self.assertEqual(
_get_text_inside_element_by_selector(f"#{living_country_select_id} option"),
"Italy",
)
self.assertEqual(
_get_text_inside_element_by_selector(
f"#{living_country_select2_textbox_id}"
),
"Italy",
)
# favorite_country_to_vacation field has no options.
self.assertHTMLEqual(
@ -8091,7 +8110,7 @@ class AdminViewOnSiteTests(TestCase):
Issue #20522
Verifying that if the parent form fails validation, the inlines also
run validation even if validation is contingent on parent form data.
Also, assertFormError() and assertFormsetError() is usable for admin
Also, assertFormError() and assertFormSetError() is usable for admin
forms and formsets.
"""
# The form validation should fail because 'some_required_info' is
@ -8115,7 +8134,7 @@ class AdminViewOnSiteTests(TestCase):
["This field is required."],
)
self.assertFormError(response.context["adminform"], None, [])
self.assertFormsetError(
self.assertFormSetError(
response.context["inline_admin_formset"],
0,
None,
@ -8124,7 +8143,7 @@ class AdminViewOnSiteTests(TestCase):
"contrived test case"
],
)
self.assertFormsetError(
self.assertFormSetError(
response.context["inline_admin_formset"], None, None, []
)
@ -8160,7 +8179,7 @@ class AdminViewOnSiteTests(TestCase):
"some_required_info",
["This field is required."],
)
self.assertFormsetError(
self.assertFormSetError(
response.context["inline_admin_formset"],
0,
None,

View File

@ -240,7 +240,7 @@ class AggregateTestCase(TestCase):
self.assertEqual(vals, {"min_year": 1991})
def test_annotate_basic(self):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Book.objects.annotate().order_by("pk"),
[
"The Definitive Guide to Django: Web Development Done Right",
@ -276,7 +276,7 @@ class AggregateTestCase(TestCase):
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs.order_by("pk"), rows, lambda r: (r.id, r.isbn, r.page_sum, r.name)
)
@ -297,7 +297,7 @@ class AggregateTestCase(TestCase):
"The Definitive Guide to Django: Web Development Done Right",
)
]
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs.order_by("pk"),
rows,
lambda r: (r.id, r.isbn, r.page_sum, r.contact.name, r.name),
@ -309,7 +309,7 @@ class AggregateTestCase(TestCase):
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 51.5),
@ -321,7 +321,7 @@ class AggregateTestCase(TestCase):
)
books = Book.objects.annotate(num_authors=Count("authors")).order_by("name")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 2),
@ -344,7 +344,7 @@ class AggregateTestCase(TestCase):
.annotate(Avg("book__rating"))
.order_by("name")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 4.5),
@ -358,7 +358,7 @@ class AggregateTestCase(TestCase):
)
authors = Author.objects.annotate(num_books=Count("book")).order_by("name")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 1),
@ -376,7 +376,7 @@ class AggregateTestCase(TestCase):
def test_reverse_fkey_annotate(self):
books = Book.objects.annotate(Sum("publisher__num_awards")).order_by("name")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
books,
[
("Artificial Intelligence: A Modern Approach", 7),
@ -394,7 +394,7 @@ class AggregateTestCase(TestCase):
)
publishers = Publisher.objects.annotate(Sum("book__price")).order_by("name")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
publishers,
[
("Apress", Decimal("59.69")),
@ -515,7 +515,7 @@ class AggregateTestCase(TestCase):
)
authors = Author.objects.annotate(Avg("friends__age")).order_by("name")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 32.0),
@ -709,7 +709,7 @@ class AggregateTestCase(TestCase):
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
@ -718,7 +718,7 @@ class AggregateTestCase(TestCase):
publishers = Publisher.objects.filter(book__price__lt=Decimal("40.0")).order_by(
"pk"
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
publishers,
[
"Apress",
@ -735,7 +735,7 @@ class AggregateTestCase(TestCase):
.filter(num_books__gt=1, book__price__lt=Decimal("40.0"))
.order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
publishers,
["Apress", "Prentice Hall", "Expensive Publisher"],
lambda p: p.name,
@ -747,14 +747,14 @@ class AggregateTestCase(TestCase):
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name)
self.assertQuerySetEqual(publishers, ["Apress"], lambda p: p.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book"))
.filter(num_books__range=[1, 3])
.order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
publishers,
[
"Apress",
@ -771,7 +771,7 @@ class AggregateTestCase(TestCase):
.filter(num_books__range=[1, 2])
.order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
publishers,
["Apress", "Sams", "Prentice Hall", "Morgan Kaufmann"],
lambda p: p.name,
@ -782,7 +782,7 @@ class AggregateTestCase(TestCase):
.filter(num_books__in=[1, 3])
.order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
publishers,
["Sams", "Morgan Kaufmann", "Expensive Publisher"],
lambda p: p.name,
@ -802,7 +802,7 @@ class AggregateTestCase(TestCase):
.filter(num_authors__exact=2)
.order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
books,
[
"The Definitive Guide to Django: Web Development Done Right",
@ -816,14 +816,14 @@ class AggregateTestCase(TestCase):
.filter(num_friends=0)
.order_by("pk")
)
self.assertQuerysetEqual(authors, ["Brad Dayley"], lambda a: a.name)
self.assertQuerySetEqual(authors, ["Brad Dayley"], lambda a: a.name)
publishers = (
Publisher.objects.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
.order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
publishers, ["Apress", "Prentice Hall"], lambda p: p.name
)
@ -832,12 +832,12 @@ class AggregateTestCase(TestCase):
.annotate(num_books=Count("book__id"))
.filter(num_books__gt=1)
)
self.assertQuerysetEqual(publishers, ["Apress"], lambda p: p.name)
self.assertQuerySetEqual(publishers, ["Apress"], lambda p: p.name)
books = Book.objects.annotate(num_authors=Count("authors__id")).filter(
authors__name__contains="Norvig", num_authors__gt=1
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
books, ["Artificial Intelligence: A Modern Approach"], lambda b: b.name
)
@ -1002,7 +1002,7 @@ class AggregateTestCase(TestCase):
# Try to generate query tree
str(excluded_books.query)
self.assertQuerysetEqual(excluded_books, all_books, lambda x: x.pk)
self.assertQuerySetEqual(excluded_books, all_books, lambda x: x.pk)
# Check internal state
self.assertIsNone(annotated_books.query.alias_map["aggregation_book"].join_type)
@ -1086,7 +1086,7 @@ class AggregateTestCase(TestCase):
combined_ages=Sum("age") + Sum("friends__age")
).order_by("name")
for qs in (authors, authors2):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
("Adrian Holovaty", 132),
@ -1381,7 +1381,7 @@ class AggregateTestCase(TestCase):
.filter(price_or_median__gte=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerysetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards)
self.assertQuerySetEqual(qs, [1, 3, 7, 9], lambda v: v.num_awards)
qs2 = (
Publisher.objects.annotate(
@ -1392,7 +1392,7 @@ class AggregateTestCase(TestCase):
.filter(rating_or_num_awards__gt=F("num_awards"))
.order_by("num_awards")
)
self.assertQuerysetEqual(qs2, [1, 3], lambda v: v.num_awards)
self.assertQuerySetEqual(qs2, [1, 3], lambda v: v.num_awards)
def test_arguments_must_be_expressions(self):
msg = "QuerySet.aggregate() received non-expression(s): %s."
@ -1440,9 +1440,7 @@ class AggregateTestCase(TestCase):
.annotate(cnt=Count("isbn"))
.filter(cnt__gt=1)
)
query = publishers_having_more_than_one_book_qs.query.exists(
using=connection.alias
)
query = publishers_having_more_than_one_book_qs.query.exists()
_, _, group_by = query.get_compiler(connection=connection).pre_sql_setup()
self.assertEqual(len(group_by), 1)
@ -1641,12 +1639,12 @@ class AggregateTestCase(TestCase):
)
.annotate(count=Count("authors"))
)
self.assertSequenceEqual(books_qs, [book])
# FIXME: GROUP BY doesn't need to include a subquery with
# non-multivalued JOINs, see Col.possibly_multivalued (refs #31150):
# with self.assertNumQueries(1) as ctx:
# self.assertSequenceEqual(books_qs, [book])
# self.assertEqual(ctx[0]['sql'].count('SELECT'), 2)
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(books_qs, [book])
# Outerquery SELECT, annotation SELECT, and WHERE SELECT but GROUP BY
# selected alias, if allowed.
if connection.features.allows_group_by_refs:
self.assertEqual(ctx[0]["sql"].count("SELECT"), 3)
@skipUnlessDBFeature("supports_subqueries_in_group_by")
def test_aggregation_nested_subquery_outerref(self):
@ -1689,12 +1687,12 @@ class AggregateTestCase(TestCase):
authors = Author.objects.annotate(
Count("book"),
).filter(Q(book__count__gt=0) | Q(pk__in=Book.objects.values("authors")))
self.assertQuerysetEqual(authors, Author.objects.all(), ordered=False)
self.assertCountEqual(authors, Author.objects.all())
def test_aggregation_random_ordering(self):
"""Random() is not included in the GROUP BY when used for ordering."""
authors = Author.objects.annotate(contact_count=Count("book")).order_by("?")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
("Adrian Holovaty", 1),

View File

@ -178,10 +178,19 @@ class AggregationTests(TestCase):
)
.annotate(sum_discount=Sum("discount_price"))
)
self.assertSequenceEqual(
values,
[{"discount_price": Decimal("59.38"), "sum_discount": Decimal("59.38")}],
)
with self.assertNumQueries(1) as ctx:
self.assertSequenceEqual(
values,
[
{
"discount_price": Decimal("59.38"),
"sum_discount": Decimal("59.38"),
}
],
)
if connection.features.allows_group_by_refs:
alias = connection.ops.quote_name("discount_price")
self.assertIn(f"GROUP BY {alias}", ctx[0]["sql"])
def test_aggregates_in_where_clause(self):
"""
@ -750,10 +759,10 @@ class AggregationTests(TestCase):
qs = Clues.objects.values("EntryID__Entry").annotate(
Appearances=Count("EntryID"), Distinct_Clues=Count("Clue", distinct=True)
)
self.assertQuerysetEqual(qs, [])
self.assertSequenceEqual(qs, [])
qs = Entries.objects.annotate(clue_count=Count("clues__ID"))
self.assertQuerysetEqual(qs, [])
self.assertSequenceEqual(qs, [])
def test_boolean_conversion(self):
# Aggregates mixed up ordering of columns for backend's convert_values
@ -819,7 +828,7 @@ class AggregationTests(TestCase):
# Regression for #10113 - Fields mentioned in order_by() must be
# included in the GROUP BY. This only becomes a problem when the
# order_by introduces a new join.
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Book.objects.annotate(num_authors=Count("authors")).order_by(
"publisher__name", "name"
),
@ -842,7 +851,7 @@ class AggregationTests(TestCase):
.annotate(Avg("authors__age"))
.order_by("name")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
(
@ -905,7 +914,7 @@ class AggregationTests(TestCase):
.filter(n_authors__gt=2)
.order_by("n_authors")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Book.objects.filter(id__in=ids),
[
"Python Web Development with Django",
@ -1014,7 +1023,7 @@ class AggregationTests(TestCase):
# the original query can still be used
books = Book.objects.all()
books.aggregate(Avg("authors__age"))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
books.all(),
[
"Artificial Intelligence: A Modern Approach",
@ -1050,7 +1059,7 @@ class AggregationTests(TestCase):
.order_by("sheets")
.values("sheets")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs, [150, 175, 224, 264, 473, 566], lambda b: int(b["sheets"])
)
@ -1075,7 +1084,7 @@ class AggregationTests(TestCase):
self.assertEqual(sorted(p.name for p in publishers), ["Apress", "Sams"])
books = Book.objects.filter(publisher__in=publishers)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
books,
[
"Practical Django Projects",
@ -1181,7 +1190,7 @@ class AggregationTests(TestCase):
.filter(pages__lt=F("n_authors") * 200)
.values_list("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Book.objects.filter(pk__in=qs),
["Python Web Development with Django"],
attrgetter("name"),
@ -1230,7 +1239,7 @@ class AggregationTests(TestCase):
.filter(Q(n_authors=2) | Q(name="Python Web Development with Django"))
.order_by("name")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
"Artificial Intelligence: A Modern Approach",
@ -1249,7 +1258,7 @@ class AggregationTests(TestCase):
)
)
).order_by("name")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
"The Definitive Guide to Django: Web Development Done Right",
@ -1264,7 +1273,7 @@ class AggregationTests(TestCase):
.filter(Q(rating_sum__gt=5.5) | Q(rating_sum__isnull=True))
.order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
"Apress",
@ -1281,7 +1290,7 @@ class AggregationTests(TestCase):
.filter(Q(rating_sum__gt=F("book_count")) | Q(rating_sum=None))
.order_by("num_awards")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
"Jonno's House of Books",
@ -1299,7 +1308,7 @@ class AggregationTests(TestCase):
.annotate(authorCount=Count("authors"))
.order_by("authorCount")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
("Python Web Development with Django", 3),
@ -1378,14 +1387,14 @@ class AggregationTests(TestCase):
.filter(book_cnt=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# Neither in this case
qs = (
Author.objects.annotate(book_count=Count("book"))
.filter(book_count=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# This case used to fail because the ORM couldn't resolve the
# automatically generated annotation name `book__count`
qs = (
@ -1393,7 +1402,7 @@ class AggregationTests(TestCase):
.filter(book__count=2)
.order_by("name")
)
self.assertQuerysetEqual(qs, ["Peter Norvig"], lambda b: b.name)
self.assertQuerySetEqual(qs, ["Peter Norvig"], lambda b: b.name)
# Referencing the auto-generated name in an aggregate() also works.
self.assertEqual(
Author.objects.annotate(Count("book")).aggregate(Max("book__count")),
@ -1561,7 +1570,7 @@ class AggregationTests(TestCase):
self.assertEqual(len(grouping), 2)
self.assertIn("id", grouping[0][0])
self.assertIn("id", grouping[1][0])
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs.order_by("name"),
[
("Artificial Intelligence: A Modern Approach", 2),
@ -1650,7 +1659,7 @@ class AggregationTests(TestCase):
.exclude(Q(book_cnt=2), Q(book_cnt=2))
.order_by("name")
)
self.assertQuerysetEqual(qs, expected_results, lambda b: b.name)
self.assertQuerySetEqual(qs, expected_results, lambda b: b.name)
expected_results = Author.objects.exclude(
pk__in=Author.objects.annotate(book_cnt=Count("book")).filter(book_cnt=2)
).order_by("name")
@ -1660,7 +1669,7 @@ class AggregationTests(TestCase):
.exclude(Q(book_cnt=2) | Q(book_cnt=2))
.order_by("name")
)
self.assertQuerysetEqual(qs, expected_results, lambda b: b.name)
self.assertQuerySetEqual(qs, expected_results, lambda b: b.name)
def test_name_filters(self):
qs = (
@ -1668,7 +1677,7 @@ class AggregationTests(TestCase):
.filter(Q(book__count__exact=2) | Q(name="Adrian Holovaty"))
.order_by("name")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
)
@ -1681,7 +1690,7 @@ class AggregationTests(TestCase):
.filter(Q(name="Peter Norvig") | Q(age=F("book__count") + 33))
.order_by("name")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs, ["Adrian Holovaty", "Peter Norvig"], lambda b: b.name
)
@ -1689,7 +1698,7 @@ class AggregationTests(TestCase):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors")).filter(q1 | q2).order_by("pk")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
query,
[self.b1.pk, self.b4.pk, self.b5.pk, self.b6.pk],
attrgetter("pk"),
@ -1701,7 +1710,7 @@ class AggregationTests(TestCase):
query = (
Book.objects.annotate(Count("authors")).filter(~(q1 & q2)).order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
query,
[self.b1.pk, self.b2.pk, self.b3.pk, self.b4.pk, self.b6.pk],
attrgetter("pk"),
@ -1711,7 +1720,7 @@ class AggregationTests(TestCase):
q1 = Q(price__gt=50)
q2 = Q(authors__count__gt=1)
query = Book.objects.annotate(Count("authors")).filter(q1 ^ q2).order_by("pk")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
query,
[self.b1.pk, self.b4.pk, self.b6.pk],
attrgetter("pk"),
@ -1723,7 +1732,7 @@ class AggregationTests(TestCase):
query = (
Book.objects.annotate(Count("authors")).filter(~(q1 ^ q2)).order_by("pk")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
query,
[self.b2.pk, self.b3.pk, self.b5.pk],
attrgetter("pk"),
@ -1747,7 +1756,7 @@ class AggregationTests(TestCase):
qs = Book.objects.annotate(account=Count("authors")).filter(
account=F("publisher__num_awards")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs, ["Sams Teach Yourself Django in 24 Hours"], lambda b: b.name
)
@ -1864,7 +1873,7 @@ class SelfReferentialFKTests(TestCase):
t1 = SelfRefFK.objects.create(name="t1")
SelfRefFK.objects.create(name="t2", parent=t1)
SelfRefFK.objects.create(name="t3", parent=t1)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
SelfRefFK.objects.annotate(num_children=Count("children")).order_by("name"),
[("t1", 2), ("t2", 0), ("t3", 0)],
lambda x: (x.name, x.num_children),

View File

@ -517,7 +517,7 @@ class NonAggregateAnnotationTestCase(TestCase):
.order_by("store_name")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
books,
["Amazon.com", "Books.com", "Mamma and Pappa's Books"],
lambda b: b.store_name,
@ -609,7 +609,7 @@ class NonAggregateAnnotationTestCase(TestCase):
.filter(chain="Westfield")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
("Angus & Robinson", "Westfield", True, "155860191"),
@ -629,7 +629,7 @@ class NonAggregateAnnotationTestCase(TestCase):
def test_order_by_annotation(self):
authors = Author.objects.annotate(other_age=F("age")).order_by("other_age")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
25,
@ -651,7 +651,7 @@ class NonAggregateAnnotationTestCase(TestCase):
.annotate(age_count=Count("age"))
.order_by("age_count", "age")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
(25, 1),
@ -735,7 +735,7 @@ class NonAggregateAnnotationTestCase(TestCase):
(2, "Buffy", False, 42, "Summers", 18, Decimal(40000.00), store.name, 17),
]
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs.order_by("id"),
rows,
lambda e: (
@ -786,7 +786,7 @@ class NonAggregateAnnotationTestCase(TestCase):
]
# and we respect deferred columns!
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs.defer("age").order_by("id"),
rows,
lambda e: (
@ -835,7 +835,7 @@ class NonAggregateAnnotationTestCase(TestCase):
)
).order_by("name")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
("Apple", "APPL"),
@ -891,7 +891,7 @@ class NonAggregateAnnotationTestCase(TestCase):
# LOWER function supported by:
# oracle, postgres, mysql, sqlite, sqlserver
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
("Apple", "APPL".lower()),
@ -1333,7 +1333,7 @@ class AliasTests(TestCase):
def test_order_by_alias(self):
qs = Author.objects.alias(other_age=F("age")).order_by("other_age")
self.assertIs(hasattr(qs.first(), "other_age"), False)
self.assertQuerysetEqual(qs, [34, 34, 35, 46, 57], lambda a: a.age)
self.assertQuerySetEqual(qs, [34, 34, 35, 46, 57], lambda a: a.age)
def test_order_by_alias_aggregate(self):
qs = (
@ -1342,7 +1342,7 @@ class AliasTests(TestCase):
.order_by("age_count", "age")
)
self.assertIs(hasattr(qs.first(), "age_count"), False)
self.assertQuerysetEqual(qs, [35, 46, 57, 34], lambda a: a["age"])
self.assertQuerySetEqual(qs, [35, 46, 57, 34], lambda a: a["age"])
def test_dates_alias(self):
qs = Book.objects.alias(

View File

@ -6,8 +6,8 @@ from asgiref.sync import async_to_sync
from django.core.cache import DEFAULT_CACHE_ALIAS, caches
from django.core.exceptions import ImproperlyConfigured, SynchronousOnlyOperation
from django.http import HttpResponse
from django.test import SimpleTestCase
from django.http import HttpResponse, HttpResponseNotAllowed
from django.test import RequestFactory, SimpleTestCase
from django.utils.asyncio import async_unsafe
from django.views.generic.base import View
@ -119,6 +119,25 @@ class ViewTests(SimpleTestCase):
self.assertIsInstance(response, HttpResponse)
def test_http_method_not_allowed_responds_correctly(self):
request_factory = RequestFactory()
tests = [
(SyncView, False),
(AsyncView, True),
]
for view_cls, is_coroutine in tests:
with self.subTest(view_cls=view_cls, is_coroutine=is_coroutine):
instance = view_cls()
response = instance.http_method_not_allowed(request_factory.post("/"))
self.assertIs(
asyncio.iscoroutine(response),
is_coroutine,
)
if is_coroutine:
response = asyncio.run(response)
self.assertIsInstance(response, HttpResponseNotAllowed)
def test_base_view_class_is_sync(self):
"""
View and by extension any subclasses that don't define handlers are

View File

@ -312,6 +312,19 @@ class CreatesuperuserManagementCommandTestCase(TestCase):
# created password should be unusable
self.assertFalse(u.has_usable_password())
def test_validate_username(self):
msg = (
"Enter a valid username. This value may contain only letters, numbers, "
"and @/./+/-/_ characters."
)
with self.assertRaisesMessage(CommandError, msg):
call_command(
"createsuperuser",
interactive=False,
username="🤠",
email="joe@somewhere.org",
)
def test_non_ascii_verbose_name(self):
@mock_inputs(
{

View File

@ -199,7 +199,7 @@ class ModelTest(TestCase):
some_pub_date = datetime(2014, 5, 16, 12, 1)
for headline in headlines:
Article(headline=headline, pub_date=some_pub_date).save()
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Article.objects.order_by("headline"),
sorted(headlines),
transform=lambda a: a.headline,
@ -805,8 +805,9 @@ class SelectOnSaveTests(TestCase):
"An error occurred in the current transaction. You can't "
"execute queries until the end of the 'atomic' block."
)
with self.assertRaisesMessage(DatabaseError, msg):
with self.assertRaisesMessage(DatabaseError, msg) as cm:
asos.save(update_fields=["pub_date"])
self.assertIsInstance(cm.exception.__cause__, DatabaseError)
finally:
Article._base_manager._queryset_class = orig_class

View File

@ -49,7 +49,7 @@ class BulkCreateTests(TestCase):
def test_simple(self):
created = Country.objects.bulk_create(self.data)
self.assertEqual(created, self.data)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Country.objects.order_by("-name"),
[
"United States of America",
@ -119,7 +119,7 @@ class BulkCreateTests(TestCase):
Country(name="Tortall", iso_two_letter="TA"),
]
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
ProxyCountry.objects.all(),
{"Qwghlm", "Tortall"},
attrgetter("name"),
@ -131,7 +131,7 @@ class BulkCreateTests(TestCase):
ProxyProxyCountry(name="Netherlands", iso_two_letter="NT"),
]
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
ProxyProxyCountry.objects.all(),
{
"Qwghlm",
@ -146,7 +146,7 @@ class BulkCreateTests(TestCase):
State.objects.bulk_create(
[State(two_letter_code=s) for s in ["IL", "NY", "CA", "ME"]]
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
State.objects.order_by("two_letter_code"),
[
"CA",
@ -163,7 +163,7 @@ class BulkCreateTests(TestCase):
State.objects.bulk_create(
[State(two_letter_code=s) for s in ["IL", "NY", "CA", "ME"]]
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
State.objects.order_by("two_letter_code"),
[
"CA",

View File

@ -1,10 +1,10 @@
from copy import copy, deepcopy
from django.core.checks import Error
from django.core.checks import Warning
from django.core.checks.templates import (
E001,
E002,
E003,
W003,
check_for_template_tags_with_the_same_name,
check_setting_app_dirs_loaders,
check_string_if_invalid_is_string,
@ -108,15 +108,15 @@ class CheckTemplateTagLibrariesWithSameName(SimpleTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.error_same_tags = Error(
E003.msg.format(
cls.warning_same_tags = Warning(
W003.msg.format(
"'same_tags'",
"'check_framework.template_test_apps.same_tags_app_1."
"templatetags.same_tags', "
"'check_framework.template_test_apps.same_tags_app_2."
"templatetags.same_tags'",
),
id=E003.id,
id=W003.id,
)
@staticmethod
@ -139,7 +139,7 @@ class CheckTemplateTagLibrariesWithSameName(SimpleTestCase):
def test_template_tags_with_same_name(self):
self.assertEqual(
check_for_template_tags_with_the_same_name(None),
[self.error_same_tags],
[self.warning_same_tags],
)
def test_template_tags_with_same_library_name(self):
@ -155,7 +155,7 @@ class CheckTemplateTagLibrariesWithSameName(SimpleTestCase):
):
self.assertEqual(
check_for_template_tags_with_the_same_name(None),
[self.error_same_tags],
[self.warning_same_tags],
)
@override_settings(
@ -186,15 +186,15 @@ class CheckTemplateTagLibrariesWithSameName(SimpleTestCase):
self.assertEqual(
check_for_template_tags_with_the_same_name(None),
[
Error(
E003.msg.format(
Warning(
W003.msg.format(
"'same_tags'",
"'check_framework.template_test_apps.different_tags_app."
"templatetags.different_tags', "
"'check_framework.template_test_apps.same_tags_app_1."
"templatetags.same_tags'",
),
id=E003.id,
id=W003.id,
)
],
)

View File

@ -17,14 +17,7 @@ class CustomColumnsTests(TestCase):
cls.article.authors.set(cls.authors)
def test_query_all_available_authors(self):
self.assertQuerysetEqual(
Author.objects.all(),
[
"Peter Jones",
"John Smith",
],
str,
)
self.assertSequenceEqual(Author.objects.all(), [self.a2, self.a1])
def test_get_first_name(self):
self.assertEqual(
@ -33,12 +26,9 @@ class CustomColumnsTests(TestCase):
)
def test_filter_first_name(self):
self.assertQuerysetEqual(
self.assertSequenceEqual(
Author.objects.filter(first_name__exact="John"),
[
"John Smith",
],
str,
[self.a1],
)
def test_field_error(self):
@ -57,17 +47,10 @@ class CustomColumnsTests(TestCase):
self.a1.last
def test_get_all_authors_for_an_article(self):
self.assertQuerysetEqual(
self.article.authors.all(),
[
"Peter Jones",
"John Smith",
],
str,
)
self.assertSequenceEqual(self.article.authors.all(), [self.a2, self.a1])
def test_get_all_articles_for_an_author(self):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.a1.article_set.all(),
[
"Django lets you build web apps easily",
@ -76,8 +59,8 @@ class CustomColumnsTests(TestCase):
)
def test_get_author_m2m_relation(self):
self.assertQuerysetEqual(
self.article.authors.filter(last_name="Jones"), ["Peter Jones"], str
self.assertSequenceEqual(
self.article.authors.filter(last_name="Jones"), [self.a2]
)
def test_author_querying(self):

View File

@ -373,7 +373,7 @@ class BilateralTransformTests(TestCase):
Author(name="Ray"),
]
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Author.objects.filter(name__upper__in=["foo", "bar", "doe"]).order_by(
"name"
),

View File

@ -164,22 +164,6 @@ class Book(models.Model):
base_manager_name = "annotated_objects"
class ConfusedBook(models.Model):
title = models.CharField(max_length=50)
author = models.CharField(max_length=30)
favorite_things = GenericRelation(
Person,
content_type_field="favorite_thing_type",
object_id_field="favorite_thing_id",
)
less_favorite_things = GenericRelation(
FunPerson,
content_type_field="favorite_thing_type",
object_id_field="favorite_thing_id",
related_query_name="favorite_things",
)
class FastCarManager(models.Manager):
def get_queryset(self):
return super().get_queryset().filter(top_speed__gt=150)

View File

@ -4,7 +4,6 @@ from django.test import TestCase
from .models import (
Book,
Car,
ConfusedBook,
CustomManager,
CustomQuerySet,
DeconstructibleCustomManager,
@ -45,7 +44,7 @@ class CustomManagerTests(TestCase):
"""
Test a custom Manager method.
"""
self.assertQuerysetEqual(Person.objects.get_fun_people(), ["Bugs Bunny"], str)
self.assertQuerySetEqual(Person.objects.get_fun_people(), ["Bugs Bunny"], str)
def test_queryset_copied_to_default(self):
"""
@ -84,7 +83,7 @@ class CustomManagerTests(TestCase):
with self.subTest(manager_name=manager_name):
manager = getattr(Person, manager_name)
queryset = manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], str)
self.assertQuerySetEqual(queryset, ["Bugs Bunny"], str)
self.assertIs(queryset._filter_CustomQuerySet, True)
# Specialized querysets inherit from our custom queryset.
@ -117,7 +116,7 @@ class CustomManagerTests(TestCase):
Queryset method doesn't override the custom manager method.
"""
queryset = Person.custom_queryset_custom_manager.filter()
self.assertQuerysetEqual(queryset, ["Bugs Bunny"], str)
self.assertQuerySetEqual(queryset, ["Bugs Bunny"], str)
self.assertIs(queryset._filter_CustomManager, True)
def test_related_manager(self):
@ -140,7 +139,7 @@ class CustomManagerTests(TestCase):
"""
Custom managers respond to usual filtering methods
"""
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Book.published_objects.all(),
[
"How to program",
@ -162,7 +161,7 @@ class CustomManagerTests(TestCase):
first_name="Droopy", last_name="Dog", fun=False, favorite_book=self.b1
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_books.order_by("first_name").all(),
[
"Bugs",
@ -171,7 +170,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.fun_people_favorite_books.all(),
[
"Bugs",
@ -179,7 +178,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_books(manager="boring_people").all(),
[
"Droopy",
@ -187,7 +186,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_books(manager="fun_people").all(),
[
"Bugs",
@ -196,10 +195,6 @@ class CustomManagerTests(TestCase):
ordered=False,
)
def test_fk_related_manager_reused(self):
self.assertIs(self.b1.favorite_books, self.b1.favorite_books)
self.assertIn("favorite_books", self.b1._state.related_managers_cache)
def test_gfk_related_manager(self):
Person.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_thing=self.b1
@ -214,7 +209,7 @@ class CustomManagerTests(TestCase):
first_name="Droopy", last_name="Dog", fun=False, favorite_thing=self.b1
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_things.all(),
[
"Bugs",
@ -223,7 +218,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.fun_people_favorite_things.all(),
[
"Bugs",
@ -231,7 +226,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_things(manager="boring_people").all(),
[
"Droopy",
@ -239,7 +234,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_things(manager="fun_people").all(),
[
"Bugs",
@ -248,67 +243,6 @@ class CustomManagerTests(TestCase):
ordered=False,
)
def test_gfk_related_manager_reused(self):
self.assertIs(
self.b1.fun_people_favorite_things,
self.b1.fun_people_favorite_things,
)
self.assertIn(
"fun_people_favorite_things",
self.b1._state.related_managers_cache,
)
def test_gfk_related_manager_not_reused_when_alternate(self):
self.assertIsNot(
self.b1.favorite_things(manager="fun_people"),
self.b1.favorite_things(manager="fun_people"),
)
def test_gfk_related_manager_no_overlap_when_not_hidden(self):
"""
If a GenericRelation defines a related_query_name (and thus the
related_name) which shadows another GenericRelation, it should not
cause those separate managers to clash.
"""
book = ConfusedBook.objects.create(
title="How to program",
author="Rodney Dangerfield",
)
person = Person.objects.create(
first_name="Bugs",
last_name="Bunny",
fun=True,
favorite_thing=book,
)
fun_person = FunPerson.objects.create(
first_name="Droopy",
last_name="Dog",
fun=False,
favorite_thing=book,
)
# The managers don't collide in the internal cache.
self.assertIsNot(book.favorite_things, book.less_favorite_things)
self.assertIs(book.favorite_things, book.favorite_things)
self.assertIs(book.less_favorite_things, book.less_favorite_things)
# Both managers are cached separately despite the collision in names.
self.assertIn("favorite_things", book._state.related_managers_cache)
self.assertIn("less_favorite_things", book._state.related_managers_cache)
# "less_favorite_things" isn't available as a reverse related manager,
# so never ends up in the cache.
self.assertQuerysetEqual(fun_person.favorite_things.all(), [book])
with self.assertRaises(AttributeError):
fun_person.less_favorite_things
self.assertIn("favorite_things", fun_person._state.related_managers_cache)
self.assertNotIn(
"less_favorite_things",
fun_person._state.related_managers_cache,
)
# The GenericRelation doesn't exist for Person, only FunPerson, so the
# exception prevents the cache from being polluted.
with self.assertRaises(AttributeError):
person.favorite_things
self.assertNotIn("favorite_things", person._state.related_managers_cache)
def test_m2m_related_manager(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny", fun=True)
self.b1.authors.add(bugs)
@ -321,7 +255,7 @@ class CustomManagerTests(TestCase):
)
self.b1.fun_authors.add(droopy)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.authors.order_by("first_name").all(),
[
"Bugs",
@ -330,7 +264,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.fun_authors.order_by("first_name").all(),
[
"Bugs",
@ -338,7 +272,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.authors(manager="boring_people").all(),
[
"Droopy",
@ -346,7 +280,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.authors(manager="fun_people").all(),
[
"Bugs",
@ -355,16 +289,6 @@ class CustomManagerTests(TestCase):
ordered=False,
)
def test_m2m_related_forward_manager_reused(self):
self.assertIs(self.b1.authors, self.b1.authors)
self.assertIn("authors", self.b1._state.related_managers_cache)
def test_m2m_related_revers_manager_reused(self):
bugs = Person.objects.create(first_name="Bugs", last_name="Bunny")
self.b1.authors.add(bugs)
self.assertIs(bugs.books, bugs.books)
self.assertIn("books", bugs._state.related_managers_cache)
def test_removal_through_default_fk_related_manager(self, bulk=True):
bugs = FunPerson.objects.create(
first_name="Bugs", last_name="Bunny", fun=True, favorite_book=self.b1
@ -374,7 +298,7 @@ class CustomManagerTests(TestCase):
)
self.b1.fun_people_favorite_books.remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1),
[
"Bugs",
@ -385,7 +309,7 @@ class CustomManagerTests(TestCase):
)
self.b1.fun_people_favorite_books.remove(bugs, bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1),
[
"Droopy",
@ -397,7 +321,7 @@ class CustomManagerTests(TestCase):
bugs.save()
self.b1.fun_people_favorite_books.clear(bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FunPerson._base_manager.filter(favorite_book=self.b1),
[
"Droopy",
@ -419,7 +343,7 @@ class CustomManagerTests(TestCase):
# The fun manager DOESN'T remove boring people.
self.b1.favorite_books(manager="fun_people").remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_books(manager="boring_people").all(),
[
"Droopy",
@ -429,7 +353,7 @@ class CustomManagerTests(TestCase):
)
# The boring manager DOES remove boring people.
self.b1.favorite_books(manager="boring_people").remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_books(manager="boring_people").all(),
[],
lambda c: c.first_name,
@ -440,7 +364,7 @@ class CustomManagerTests(TestCase):
# The fun manager ONLY clears fun people.
self.b1.favorite_books(manager="fun_people").clear(bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_books(manager="boring_people").all(),
[
"Droopy",
@ -448,7 +372,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_books(manager="fun_people").all(),
[],
lambda c: c.first_name,
@ -467,7 +391,7 @@ class CustomManagerTests(TestCase):
)
self.b1.fun_people_favorite_things.remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FunPerson._base_manager.order_by("first_name").filter(
favorite_thing_id=self.b1.pk
),
@ -480,7 +404,7 @@ class CustomManagerTests(TestCase):
)
self.b1.fun_people_favorite_things.remove(bugs, bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FunPerson._base_manager.order_by("first_name").filter(
favorite_thing_id=self.b1.pk
),
@ -494,7 +418,7 @@ class CustomManagerTests(TestCase):
bugs.save()
self.b1.fun_people_favorite_things.clear(bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FunPerson._base_manager.order_by("first_name").filter(
favorite_thing_id=self.b1.pk
),
@ -518,7 +442,7 @@ class CustomManagerTests(TestCase):
# The fun manager DOESN'T remove boring people.
self.b1.favorite_things(manager="fun_people").remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_things(manager="boring_people").all(),
[
"Droopy",
@ -529,7 +453,7 @@ class CustomManagerTests(TestCase):
# The boring manager DOES remove boring people.
self.b1.favorite_things(manager="boring_people").remove(droopy, bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_things(manager="boring_people").all(),
[],
lambda c: c.first_name,
@ -540,7 +464,7 @@ class CustomManagerTests(TestCase):
# The fun manager ONLY clears fun people.
self.b1.favorite_things(manager="fun_people").clear(bulk=bulk)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_things(manager="boring_people").all(),
[
"Droopy",
@ -548,7 +472,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.favorite_things(manager="fun_people").all(),
[],
lambda c: c.first_name,
@ -567,7 +491,7 @@ class CustomManagerTests(TestCase):
self.b1.fun_authors.add(droopy)
self.b1.fun_authors.remove(droopy)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.fun_authors.through._default_manager.all(),
[
"Bugs",
@ -578,7 +502,7 @@ class CustomManagerTests(TestCase):
)
self.b1.fun_authors.remove(bugs)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.fun_authors.through._default_manager.all(),
[
"Droopy",
@ -589,7 +513,7 @@ class CustomManagerTests(TestCase):
self.b1.fun_authors.add(bugs)
self.b1.fun_authors.clear()
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.fun_authors.through._default_manager.all(),
[
"Droopy",
@ -606,7 +530,7 @@ class CustomManagerTests(TestCase):
# The fun manager DOESN'T remove boring people.
self.b1.authors(manager="fun_people").remove(droopy)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.authors(manager="boring_people").all(),
[
"Droopy",
@ -617,7 +541,7 @@ class CustomManagerTests(TestCase):
# The boring manager DOES remove boring people.
self.b1.authors(manager="boring_people").remove(droopy)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.authors(manager="boring_people").all(),
[],
lambda c: c.first_name,
@ -627,7 +551,7 @@ class CustomManagerTests(TestCase):
# The fun manager ONLY clears fun people.
self.b1.authors(manager="fun_people").clear()
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.authors(manager="boring_people").all(),
[
"Droopy",
@ -635,7 +559,7 @@ class CustomManagerTests(TestCase):
lambda c: c.first_name,
ordered=False,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.b1.authors(manager="fun_people").all(),
[],
lambda c: c.first_name,
@ -704,7 +628,7 @@ class CustomManagerTests(TestCase):
It will be inherited by the abstract model's children.
"""
PersonFromAbstract.abstract_persons.create(objects="Test")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
PersonFromAbstract.abstract_persons.all(),
["Test"],
lambda c: c.objects,
@ -718,7 +642,7 @@ class TestCars(TestCase):
Car.cars.create(name="Corvette", mileage=21, top_speed=180)
Car.cars.create(name="Neon", mileage=31, top_speed=100)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Car._default_manager.order_by("name"),
[
"Corvette",
@ -726,7 +650,7 @@ class TestCars(TestCase):
],
lambda c: c.name,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Car.cars.order_by("name"),
[
"Corvette",
@ -735,7 +659,7 @@ class TestCars(TestCase):
lambda c: c.name,
)
# alternate manager
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Car.fast_cars.all(),
[
"Corvette",
@ -743,7 +667,7 @@ class TestCars(TestCase):
lambda c: c.name,
)
# explicit default manager
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FastCarAsDefault.cars.order_by("name"),
[
"Corvette",
@ -751,7 +675,7 @@ class TestCars(TestCase):
],
lambda c: c.name,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FastCarAsDefault._default_manager.all(),
[
"Corvette",
@ -759,7 +683,7 @@ class TestCars(TestCase):
lambda c: c.name,
)
# explicit base manager
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FastCarAsBase.cars.order_by("name"),
[
"Corvette",
@ -767,7 +691,7 @@ class TestCars(TestCase):
],
lambda c: c.name,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
FastCarAsBase._base_manager.all(),
[
"Corvette",
@ -855,4 +779,4 @@ class CustomManagersRegressTestCase(TestCase):
"""
qs_custom = Person.custom_init_queryset_manager.all()
qs_default = Person.objects.all()
self.assertQuerysetEqual(qs_custom, qs_default)
self.assertQuerySetEqual(qs_custom, qs_default)

View File

@ -15,14 +15,14 @@ class MethodsTests(TestCase):
)
self.assertFalse(a.was_published_today())
self.assertQuerysetEqual(
self.assertQuerySetEqual(
a.articles_from_same_day_1(),
[
"Beatles reunite",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
a.articles_from_same_day_2(),
[
"Beatles reunite",
@ -30,14 +30,14 @@ class MethodsTests(TestCase):
lambda a: a.headline,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
b.articles_from_same_day_1(),
[
"Parrot programs in Python",
],
lambda a: a.headline,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
b.articles_from_same_day_2(),
[
"Parrot programs in Python",

View File

@ -25,44 +25,18 @@ class BasicCustomPKTests(TestCase):
"""
Both pk and custom attribute_name can be used in filter and friends
"""
self.assertQuerysetEqual(
Employee.objects.filter(pk=123),
[
"Dan Jones",
],
str,
)
self.assertQuerysetEqual(
Employee.objects.filter(employee_code=123),
[
"Dan Jones",
],
str,
)
self.assertQuerysetEqual(
self.assertSequenceEqual(Employee.objects.filter(pk=123), [self.dan])
self.assertSequenceEqual(Employee.objects.filter(employee_code=123), [self.dan])
self.assertSequenceEqual(
Employee.objects.filter(pk__in=[123, 456]),
[
"Fran Bones",
"Dan Jones",
],
str,
[self.fran, self.dan],
)
self.assertSequenceEqual(Employee.objects.all(), [self.fran, self.dan])
self.assertQuerysetEqual(
Employee.objects.all(),
[
"Fran Bones",
"Dan Jones",
],
str,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Business.objects.filter(name="Sears"), ["Sears"], lambda b: b.name
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Business.objects.filter(pk="Sears"),
[
"Sears",
@ -74,15 +48,11 @@ class BasicCustomPKTests(TestCase):
"""
Custom pk doesn't affect related_name based lookups
"""
self.assertQuerysetEqual(
self.assertSequenceEqual(
self.business.employees.all(),
[
"Fran Bones",
"Dan Jones",
],
str,
[self.fran, self.dan],
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
self.fran.business_set.all(),
[
"Sears",
@ -94,31 +64,23 @@ class BasicCustomPKTests(TestCase):
"""
Queries across tables, involving primary key
"""
self.assertQuerysetEqual(
self.assertSequenceEqual(
Employee.objects.filter(business__name="Sears"),
[
"Fran Bones",
"Dan Jones",
],
str,
[self.fran, self.dan],
)
self.assertQuerysetEqual(
self.assertSequenceEqual(
Employee.objects.filter(business__pk="Sears"),
[
"Fran Bones",
"Dan Jones",
],
str,
[self.fran, self.dan],
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Business.objects.filter(employees__employee_code=123),
[
"Sears",
],
lambda b: b.name,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Business.objects.filter(employees__pk=123),
[
"Sears",
@ -126,7 +88,7 @@ class BasicCustomPKTests(TestCase):
lambda b: b.name,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Business.objects.filter(employees__first_name__startswith="Fran"),
[
"Sears",
@ -186,13 +148,9 @@ class BasicCustomPKTests(TestCase):
fran.last_name = "Jones"
fran.save()
self.assertQuerysetEqual(
self.assertSequenceEqual(
Employee.objects.filter(last_name="Jones"),
[
"Dan Jones",
"Fran Jones",
],
str,
[self.dan, fran],
)

View File

@ -80,7 +80,7 @@ class DatesTests(TestCase):
datetime.date(2010, 7, 28),
],
)
self.assertQuerysetEqual(
self.assertSequenceEqual(
Article.objects.dates("comments__approval_date", "day"), []
)
self.assertSequenceEqual(

View File

@ -85,7 +85,7 @@ class DateTimesTests(TestCase):
datetime.datetime(2010, 7, 28),
],
)
self.assertQuerysetEqual(
self.assertSequenceEqual(
Article.objects.datetimes("comments__approval_date", "day"), []
)
self.assertSequenceEqual(

View File

@ -15,7 +15,7 @@ class CoalesceTests(TestCase):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.annotate(display_name=Coalesce("alias", "name"))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"), ["smithj", "Rhonda"], lambda a: a.display_name
)
@ -39,7 +39,7 @@ class CoalesceTests(TestCase):
article = Article.objects.annotate(
headline=Coalesce("summary", "text", output_field=TextField()),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
article.order_by("title"), [lorem_ipsum], lambda a: a.headline
)
# mixed Text and Char wrapped
@ -48,7 +48,7 @@ class CoalesceTests(TestCase):
Lower("summary"), Lower("text"), output_field=TextField()
),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
article.order_by("title"), [lorem_ipsum.lower()], lambda a: a.headline
)
@ -56,11 +56,11 @@ class CoalesceTests(TestCase):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.order_by(Coalesce("alias", "name"))
self.assertQuerysetEqual(authors, ["Rhonda", "John Smith"], lambda a: a.name)
self.assertQuerySetEqual(authors, ["Rhonda", "John Smith"], lambda a: a.name)
authors = Author.objects.order_by(Coalesce("alias", "name").asc())
self.assertQuerysetEqual(authors, ["Rhonda", "John Smith"], lambda a: a.name)
self.assertQuerySetEqual(authors, ["Rhonda", "John Smith"], lambda a: a.name)
authors = Author.objects.order_by(Coalesce("alias", "name").desc())
self.assertQuerysetEqual(authors, ["John Smith", "Rhonda"], lambda a: a.name)
self.assertQuerySetEqual(authors, ["John Smith", "Rhonda"], lambda a: a.name)
def test_empty_queryset(self):
Author.objects.create(name="John Smith")

View File

@ -277,21 +277,21 @@ class DateFunctionTests(TestCase):
):
list(DTModel.objects.annotate(extracted=Extract("start_date", "second")))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "year")
).order_by("start_datetime"),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "quarter")
).order_by("start_datetime"),
[(start_datetime, 2), (end_datetime, 2)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "month")
).order_by("start_datetime"),
@ -301,21 +301,21 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "day")
).order_by("start_datetime"),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "week")
).order_by("start_datetime"),
[(start_datetime, 25), (end_datetime, 24)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "week_day")
).order_by("start_datetime"),
@ -325,7 +325,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "iso_week_day"),
).order_by("start_datetime"),
@ -335,14 +335,14 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "hour")
).order_by("start_datetime"),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "minute")
).order_by("start_datetime"),
@ -352,7 +352,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=Extract("start_datetime", "second")
).order_by("start_datetime"),
@ -417,7 +417,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=Extract("duration", "second")).order_by(
"start_datetime"
),
@ -465,14 +465,14 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractYear("start_datetime")).order_by(
"start_datetime"
),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractYear("start_date")).order_by(
"start_datetime"
),
@ -494,14 +494,14 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=ExtractIsoYear("start_datetime")
).order_by("start_datetime"),
[(start_datetime, start_datetime.year), (end_datetime, end_datetime.year)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractIsoYear("start_date")).order_by(
"start_datetime"
),
@ -538,7 +538,7 @@ class DateFunctionTests(TestCase):
)
.order_by("start_datetime")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
(week_52_day_2014, 2014),
@ -569,7 +569,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractMonth("start_datetime")).order_by(
"start_datetime"
),
@ -579,7 +579,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractMonth("start_date")).order_by(
"start_datetime"
),
@ -604,14 +604,14 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractDay("start_datetime")).order_by(
"start_datetime"
),
[(start_datetime, start_datetime.day), (end_datetime, end_datetime.day)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractDay("start_date")).order_by(
"start_datetime"
),
@ -633,14 +633,14 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractWeek("start_datetime")).order_by(
"start_datetime"
),
[(start_datetime, 25), (end_datetime, 24)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractWeek("start_date")).order_by(
"start_datetime"
),
@ -663,14 +663,14 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=ExtractQuarter("start_datetime")
).order_by("start_datetime"),
[(start_datetime, 2), (end_datetime, 3)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractQuarter("start_date")).order_by(
"start_datetime"
),
@ -704,7 +704,7 @@ class DateFunctionTests(TestCase):
)
.order_by("start_datetime")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
(last_quarter_2014, 4),
@ -737,7 +737,7 @@ class DateFunctionTests(TestCase):
)
.order_by("start_datetime")
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
qs,
[
(week_52_day_2014, 52),
@ -755,7 +755,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=ExtractWeekDay("start_datetime")
).order_by("start_datetime"),
@ -765,7 +765,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractWeekDay("start_date")).order_by(
"start_datetime"
),
@ -790,7 +790,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=ExtractIsoWeekDay("start_datetime"),
).order_by("start_datetime"),
@ -800,7 +800,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=ExtractIsoWeekDay("start_date"),
).order_by("start_datetime"),
@ -825,14 +825,14 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractHour("start_datetime")).order_by(
"start_datetime"
),
[(start_datetime, start_datetime.hour), (end_datetime, end_datetime.hour)],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractHour("start_time")).order_by(
"start_datetime"
),
@ -854,7 +854,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=ExtractMinute("start_datetime")
).order_by("start_datetime"),
@ -864,7 +864,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractMinute("start_time")).order_by(
"start_datetime"
),
@ -889,7 +889,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
extracted=ExtractSecond("start_datetime")
).order_by("start_datetime"),
@ -899,7 +899,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=ExtractSecond("start_time")).order_by(
"start_datetime"
),
@ -963,7 +963,7 @@ class DateFunctionTests(TestCase):
self.create_model(end_datetime, start_datetime)
def test_datetime_kind(kind):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc(
"start_datetime", kind, output_field=DateTimeField()
@ -977,7 +977,7 @@ class DateFunctionTests(TestCase):
)
def test_date_kind(kind):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc("start_date", kind, output_field=DateField())
).order_by("start_datetime"),
@ -989,7 +989,7 @@ class DateFunctionTests(TestCase):
)
def test_time_kind(kind):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc("start_time", kind, output_field=TimeField())
).order_by("start_datetime"),
@ -1001,7 +1001,7 @@ class DateFunctionTests(TestCase):
)
def test_datetime_to_time_kind(kind):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc("start_datetime", kind, output_field=TimeField()),
).order_by("start_datetime"),
@ -1044,7 +1044,7 @@ class DateFunctionTests(TestCase):
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc("start_datetime", "week", output_field=DateTimeField())
).order_by("start_datetime"),
@ -1054,7 +1054,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.truncated),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc("start_date", "week", output_field=DateField())
).order_by("start_datetime"),
@ -1137,7 +1137,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncYear("start_datetime")).order_by(
"start_datetime"
),
@ -1147,7 +1147,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncYear("start_date")).order_by(
"start_datetime"
),
@ -1194,7 +1194,7 @@ class DateFunctionTests(TestCase):
self.create_model(start_datetime=end_datetime, end_datetime=start_datetime)
self.create_model(start_datetime=last_quarter_2015, end_datetime=end_datetime)
self.create_model(start_datetime=first_quarter_2016, end_datetime=end_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncQuarter("start_date")).order_by(
"start_datetime"
),
@ -1206,7 +1206,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncQuarter("start_datetime")).order_by(
"start_datetime"
),
@ -1241,7 +1241,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncMonth("start_datetime")).order_by(
"start_datetime"
),
@ -1251,7 +1251,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncMonth("start_date")).order_by(
"start_datetime"
),
@ -1288,7 +1288,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncWeek("start_datetime")).order_by(
"start_datetime"
),
@ -1325,7 +1325,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncDate("start_datetime")).order_by(
"start_datetime"
),
@ -1372,7 +1372,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncTime("start_datetime")).order_by(
"start_datetime"
),
@ -1446,7 +1446,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncDay("start_datetime")).order_by(
"start_datetime"
),
@ -1482,7 +1482,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncHour("start_datetime")).order_by(
"start_datetime"
),
@ -1492,7 +1492,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncHour("start_time")).order_by(
"start_datetime"
),
@ -1529,7 +1529,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncMinute("start_datetime")).order_by(
"start_datetime"
),
@ -1539,7 +1539,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncMinute("start_time")).order_by(
"start_datetime"
),
@ -1578,7 +1578,7 @@ class DateFunctionTests(TestCase):
end_datetime = timezone.make_aware(end_datetime)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncSecond("start_datetime")).order_by(
"start_datetime"
),
@ -1588,7 +1588,7 @@ class DateFunctionTests(TestCase):
],
lambda m: (m.start_datetime, m.extracted),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(extracted=TruncSecond("start_time")).order_by(
"start_datetime"
),
@ -1909,7 +1909,7 @@ class DateFunctionWithTimeZoneTests(DateFunctionTests):
with self.subTest(repr(melb)):
def test_datetime_kind(kind):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc(
"start_datetime",
@ -1934,7 +1934,7 @@ class DateFunctionWithTimeZoneTests(DateFunctionTests):
)
def test_datetime_to_date_kind(kind):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc(
"start_datetime",
@ -1959,7 +1959,7 @@ class DateFunctionWithTimeZoneTests(DateFunctionTests):
)
def test_datetime_to_time_kind(kind):
self.assertQuerysetEqual(
self.assertQuerySetEqual(
DTModel.objects.annotate(
truncated=Trunc(
"start_datetime",

View File

@ -1,6 +1,8 @@
from datetime import datetime, timedelta
from django.db.models.functions import Now
from django.db import connection
from django.db.models import TextField
from django.db.models.functions import Cast, Now
from django.test import TestCase
from django.utils import timezone
@ -37,13 +39,27 @@ class NowTests(TestCase):
a2.save()
a2.refresh_from_db()
self.assertIsInstance(a2.published, datetime)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Article.objects.filter(published__lte=Now()),
["How to Django"],
lambda a: a.title,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Article.objects.filter(published__gt=Now()),
["How to Time Travel"],
lambda a: a.title,
)
def test_microseconds(self):
Article.objects.create(
title="How to Django",
text=lorem_ipsum,
written=timezone.now(),
)
now_string = (
Article.objects.annotate(now_string=Cast(Now(), TextField()))
.get()
.now_string
)
precision = connection.features.time_cast_precision
self.assertRegex(now_string, rf"^.*\.\d{{1,{precision}}}")

View File

@ -17,7 +17,7 @@ class FunctionTests(TestCase):
Author.objects.create(name="Rhonda Simpson", alias="ronny")
authors = Author.objects.order_by(Length(Coalesce("alias", "name")))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
"Rhonda Simpson",
@ -27,7 +27,7 @@ class FunctionTests(TestCase):
)
authors = Author.objects.order_by(Length(Coalesce("alias", "name")).desc())
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
"John Smith",
@ -41,7 +41,7 @@ class FunctionTests(TestCase):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__upper__exact="john smith")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"),
[
"John Smith",
@ -54,7 +54,7 @@ class FunctionTests(TestCase):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__upper__in=["john smith", "rhonda"])
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"),
[
"John Smith",
@ -66,12 +66,12 @@ class FunctionTests(TestCase):
def test_function_as_filter(self):
Author.objects.create(name="John Smith", alias="SMITHJ")
Author.objects.create(name="Rhonda")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Author.objects.filter(alias=Upper(V("smithj"))),
["John Smith"],
lambda x: x.name,
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
Author.objects.exclude(alias=Upper(V("smithj"))),
["Rhonda"],
lambda x: x.name,

View File

@ -21,7 +21,7 @@ class ConcatTests(TestCase):
Author.objects.create(name="Margaret", goes_by="Maggie")
Author.objects.create(name="Rhonda", alias="adnohR")
authors = Author.objects.annotate(joined=Concat("alias", "goes_by"))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"),
[
"",
@ -46,7 +46,7 @@ class ConcatTests(TestCase):
authors = Author.objects.annotate(
joined=Concat("name", V(" ("), "goes_by", V(")"), output_field=CharField()),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"),
[
"Jayden ()",

View File

@ -13,12 +13,12 @@ class LeftTests(TestCase):
def test_basic(self):
authors = Author.objects.annotate(name_part=Left("name", 5))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"), ["John ", "Rhond"], lambda a: a.name_part
)
# If alias is null, set it to the first 2 lower characters of the name.
Author.objects.filter(alias__isnull=True).update(alias=Lower(Left("name", 2)))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"), ["smithj", "rh"], lambda a: a.alias
)
@ -30,6 +30,6 @@ class LeftTests(TestCase):
authors = Author.objects.annotate(
name_part=Left("name", Value(3, output_field=IntegerField()))
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"), ["Joh", "Rho"], lambda a: a.name_part
)

View File

@ -14,7 +14,7 @@ class LengthTests(TestCase):
name_length=Length("name"),
alias_length=Length("alias"),
)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"),
[(10, 6), (6, None)],
lambda a: (a.name_length, a.alias_length),
@ -26,7 +26,7 @@ class LengthTests(TestCase):
Author.objects.create(name="John Smith", alias="smithj1")
Author.objects.create(name="Rhonda", alias="ronny")
authors = Author.objects.order_by(Length("name"), Length("alias"))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors,
[
("Rhonda", "ronny"),
@ -41,6 +41,6 @@ class LengthTests(TestCase):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__length__gt=7)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"), ["John Smith"], lambda a: a.name
)

View File

@ -11,11 +11,11 @@ class LowerTests(TestCase):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.annotate(lower_name=Lower("name"))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"), ["john smith", "rhonda"], lambda a: a.lower_name
)
Author.objects.update(name=Lower("name"))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"),
[
("john smith", "john smith"),
@ -35,6 +35,6 @@ class LowerTests(TestCase):
Author.objects.create(name="John Smith", alias="smithj")
Author.objects.create(name="Rhonda")
authors = Author.objects.filter(name__lower__exact="john smith")
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("name"), ["John Smith"], lambda a: a.name
)

View File

@ -35,7 +35,7 @@ class PadTests(TestCase):
for function, padded_name in tests:
with self.subTest(function=function):
authors = Author.objects.annotate(padded_name=function)
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors, [padded_name], lambda a: a.padded_name, ordered=False
)
@ -51,7 +51,7 @@ class PadTests(TestCase):
Author.objects.create(name="Rhonda", alias="john_smith")
Author.objects.create(name="♥♣♠", alias="bytes")
authors = Author.objects.annotate(filled=LPad("name", Length("alias")))
self.assertQuerysetEqual(
self.assertQuerySetEqual(
authors.order_by("alias"),
[" ♥♣♠", " Rhonda"],
lambda a: a.filled,

Some files were not shown because too many files have changed in this diff Show More