Refs #27656 -- Updated django.db docstring verbs according to PEP 257.

Anton Samarchyan 2017-01-24 18:04:12 -05:00 committed by Tim Graham
parent d6e26e5b7c
commit 60e52a047e
63 changed files with 934 additions and 1266 deletions

View File

@ -22,9 +22,7 @@ NO_DB_ALIAS = '__no_db__'
class BaseDatabaseWrapper:
"""
Represents a database connection.
"""
"""Represent a database connection."""
# Mapping of Field objects to their column types.
data_types = {}
# Mapping of Field objects to their SQL suffix such as AUTOINCREMENT.
@ -112,7 +110,7 @@ class BaseDatabaseWrapper:
"""
Time zone for datetimes stored as naive values in the database.
Returns a tzinfo object or None.
Return a tzinfo object or None.
This is only needed when time zone support is enabled and the database
doesn't support time zones. (When the database supports time zones,
@ -154,25 +152,25 @@ class BaseDatabaseWrapper:
# ##### Backend-specific methods for creating connections and cursors #####
def get_connection_params(self):
"""Returns a dict of parameters suitable for get_new_connection."""
"""Return a dict of parameters suitable for get_new_connection."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_connection_params() method')
def get_new_connection(self, conn_params):
"""Opens a connection to the database."""
"""Open a connection to the database."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a get_new_connection() method')
def init_connection_state(self):
"""Initializes the database connection settings."""
"""Initialize the database connection settings."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require an init_connection_state() method')
def create_cursor(self, name=None):
"""Creates a cursor. Assumes that a connection is established."""
"""Create a cursor. Assume that a connection is established."""
raise NotImplementedError('subclasses of BaseDatabaseWrapper may require a create_cursor() method')
# ##### Backend-specific methods for creating connections #####
def connect(self):
"""Connects to the database. Assumes that the connection is closed."""
"""Connect to the database. Assume that the connection is closed."""
# Check for invalid configurations.
self.check_settings()
# In case the previous connection was closed while in an atomic block
@ -205,9 +203,7 @@ class BaseDatabaseWrapper:
"handles time zones conversions natively." % self.alias)
def ensure_connection(self):
"""
Guarantees that a connection to the database is established.
"""
"""Guarantee that a connection to the database is established."""
if self.connection is None:
with self.wrap_database_errors:
self.connect()
@ -248,15 +244,11 @@ class BaseDatabaseWrapper:
# ##### Generic wrappers for PEP-249 connection methods #####
def cursor(self):
"""
Creates a cursor, opening a connection if necessary.
"""
"""Create a cursor, opening a connection if necessary."""
return self._cursor()
def commit(self):
"""
Commits a transaction and resets the dirty flag.
"""
"""Commit a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._commit()
@ -265,9 +257,7 @@ class BaseDatabaseWrapper:
self.run_commit_hooks_on_set_autocommit_on = True
def rollback(self):
"""
Rolls back a transaction and resets the dirty flag.
"""
"""Roll back a transaction and reset the dirty flag."""
self.validate_thread_sharing()
self.validate_no_atomic_block()
self._rollback()
@ -277,9 +267,7 @@ class BaseDatabaseWrapper:
self.run_on_commit = []
def close(self):
"""
Closes the connection to the database.
"""
"""Close the connection to the database."""
self.validate_thread_sharing()
self.run_on_commit = []
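
For illustration, a minimal sketch of driving these PEP-249 wrappers by hand, assuming autocommit is switched off first; app_item is a hypothetical table, and most application code would use transaction.atomic() instead:

from django.db import connection

connection.set_autocommit(False)
try:
    with connection.cursor() as cursor:
        # Hypothetical table; any DML statement works here.
        cursor.execute("UPDATE app_item SET active = %s", [True])
    connection.commit()
except Exception:
    connection.rollback()
finally:
    connection.set_autocommit(True)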
@ -319,9 +307,9 @@ class BaseDatabaseWrapper:
def savepoint(self):
"""
Creates a savepoint inside the current transaction. Returns an
Create a savepoint inside the current transaction. Return an
identifier for the savepoint that will be used for the subsequent
rollback or commit. Does nothing if savepoints are not supported.
rollback or commit. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
@ -339,7 +327,7 @@ class BaseDatabaseWrapper:
def savepoint_rollback(self, sid):
"""
Rolls back to a savepoint. Does nothing if savepoints are not supported.
Roll back to a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
@ -354,7 +342,7 @@ class BaseDatabaseWrapper:
def savepoint_commit(self, sid):
"""
Releases a savepoint. Does nothing if savepoints are not supported.
Release a savepoint. Do nothing if savepoints are not supported.
"""
if not self._savepoint_allowed():
return
@ -364,7 +352,7 @@ class BaseDatabaseWrapper:
def clean_savepoints(self):
"""
Resets the counter used to generate unique savepoint ids in this thread.
Reset the counter used to generate unique savepoint ids in this thread.
"""
self.savepoint_state = 0
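
As a usage sketch, the savepoint methods above are normally reached through django.db.transaction rather than called on the connection directly; do_risky_update() is a hypothetical helper:

from django.db import IntegrityError, transaction

with transaction.atomic():
    sid = transaction.savepoint()
    try:
        do_risky_update()  # hypothetical helper
    except IntegrityError:
        # Undo only the work done since the savepoint.
        transaction.savepoint_rollback(sid)
    else:
        transaction.savepoint_commit(sid)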
@ -379,9 +367,7 @@ class BaseDatabaseWrapper:
# ##### Generic transaction management methods #####
def get_autocommit(self):
"""
Check the autocommit state.
"""
"""Get the autocommit state."""
self.ensure_connection()
return self.autocommit
@ -417,9 +403,7 @@ class BaseDatabaseWrapper:
self.run_commit_hooks_on_set_autocommit_on = False
def get_rollback(self):
"""
Get the "needs rollback" flag -- for *advanced use* only.
"""
"""Get the "needs rollback" flag -- for *advanced use* only."""
if not self.in_atomic_block:
raise TransactionManagementError(
"The rollback flag doesn't work outside of an 'atomic' block.")
@ -435,9 +419,7 @@ class BaseDatabaseWrapper:
self.needs_rollback = rollback
def validate_no_atomic_block(self):
"""
Raise an error if an atomic block is active.
"""
"""Raise an error if an atomic block is active."""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
@ -453,7 +435,7 @@ class BaseDatabaseWrapper:
@contextmanager
def constraint_checks_disabled(self):
"""
Context manager that disables foreign key constraint checking.
Disable foreign key constraint checking.
"""
disabled = self.disable_constraint_checking()
try:
@ -489,9 +471,9 @@ class BaseDatabaseWrapper:
def is_usable(self):
"""
Tests if the database connection is usable.
Test if the database connection is usable.
This function may assume that self.connection is not None.
This method may assume that self.connection is not None.
Actual implementations should take care not to raise exceptions
as that may prevent Django from recycling unusable connections.
@ -501,7 +483,7 @@ class BaseDatabaseWrapper:
def close_if_unusable_or_obsolete(self):
"""
Closes the current connection if unrecoverable errors have occurred,
Close the current connection if unrecoverable errors have occurred
or if it outlived its maximum age.
"""
if self.connection is not None:
@ -528,10 +510,10 @@ class BaseDatabaseWrapper:
def validate_thread_sharing(self):
"""
Validates that the connection isn't accessed by another thread than the
Validate that the connection isn't accessed by another thread than the
one which originally created it, unless the connection was explicitly
authorized to be shared between threads (via the `allow_thread_sharing`
property). Raises an exception if the validation fails.
property). Raise an exception if the validation fails.
"""
if not (self.allow_thread_sharing or self._thread_ident == _thread.get_ident()):
raise DatabaseError(
@ -567,15 +549,11 @@ class BaseDatabaseWrapper:
return self.cursor()
def make_debug_cursor(self, cursor):
"""
Creates a cursor that logs all queries in self.queries_log.
"""
"""Create a cursor that logs all queries in self.queries_log."""
return utils.CursorDebugWrapper(cursor, self)
def make_cursor(self, cursor):
"""
Creates a cursor without debug logging.
"""
"""Create a cursor without debug logging."""
return utils.CursorWrapper(cursor, self)
@contextmanager
@ -585,7 +563,7 @@ class BaseDatabaseWrapper:
if it opened one, closes it to avoid leaving a dangling connection.
This is useful for operations outside of the request-response cycle.
Provides a cursor: with self.temporary_connection() as cursor: ...
Provide a cursor: with self.temporary_connection() as cursor: ...
"""
must_close = self.connection is None
cursor = self.cursor()
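
A short sketch of the pattern the docstring describes, e.g. from a script running outside the request-response cycle:

from django.db import connection

with connection.temporary_connection() as cursor:
    cursor.execute("SELECT 1")
    print(cursor.fetchone())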
@ -599,8 +577,8 @@ class BaseDatabaseWrapper:
@property
def _nodb_connection(self):
"""
Return an alternative connection to be used when there is no need to access
the main database, specifically for test db creation/deletion.
Return an alternative connection to be used when there is no need to
access the main database, specifically for test db creation/deletion.
This also prevents the production database from being exposed to
potential child threads while (or after) the test database is destroyed.
Refs #10868, #17786, #16969.
@ -624,7 +602,7 @@ class BaseDatabaseWrapper:
def schema_editor(self, *args, **kwargs):
"""
Returns a new instance of this backend's SchemaEditor.
Return a new instance of this backend's SchemaEditor.
"""
if self.SchemaEditorClass is None:
raise NotImplementedError(
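
For example, a schema editor is typically used as a context manager so that deferred SQL runs on exit; Tag is a hypothetical model:

from django.db import connection
from myapp.models import Tag  # hypothetical model

with connection.schema_editor() as editor:
    editor.create_model(Tag)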

View File

@ -1,8 +1,5 @@
class BaseDatabaseClient:
"""
This class encapsulates all backend-specific methods for opening a
client shell.
"""
"""Encapsulate backend-specific methods for opening a client shell."""
# This should be a string representing the name of the executable
# (e.g., "psql"). Subclasses must override this.
executable_name = None
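
A minimal sketch of a subclass, assuming a hypothetical examplesql shell binary; real backends also forward user, host, and port settings as command-line arguments:

import subprocess

from django.db.backends.base.client import BaseDatabaseClient

class ExampleDatabaseClient(BaseDatabaseClient):
    executable_name = 'examplesql'  # hypothetical binary

    def runshell(self):
        # Launch the shell against the configured database name.
        args = [self.executable_name, self.connection.settings_dict['NAME']]
        subprocess.check_call(args)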

View File

@ -13,8 +13,8 @@ TEST_DATABASE_PREFIX = 'test_'
class BaseDatabaseCreation:
"""
This class encapsulates all backend-specific differences that pertain to
creation and destruction of the test database.
Encapsulate backend-specific differences pertaining to creation and
destruction of the test database.
"""
def __init__(self, connection):
self.connection = connection
@ -28,8 +28,8 @@ class BaseDatabaseCreation:
def create_test_db(self, verbosity=1, autoclobber=False, serialize=True, keepdb=False):
"""
Creates a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
Create a test database, prompting the user for confirmation if the
database already exists. Return the name of the test database created.
"""
# Don't import django.core.management if it isn't needed.
from django.core.management import call_command
@ -84,14 +84,14 @@ class BaseDatabaseCreation:
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary database
whose settings are given
Set this database up to be used in testing as a mirror of a primary
database whose settings are given.
"""
self.connection.settings_dict['NAME'] = primary_settings_dict['NAME']
def serialize_db_to_string(self):
"""
Serializes all data in the database into a JSON string.
Serialize all data in the database into a JSON string.
Designed only for test runner usage; will not handle large
amounts of data.
"""
@ -121,8 +121,8 @@ class BaseDatabaseCreation:
def deserialize_db_from_string(self, data):
"""
Reloads the database with data from a string generated by
the serialize_db_to_string method.
Reload the database with data from a string generated by
the serialize_db_to_string() method.
"""
data = StringIO(data)
for obj in serializers.deserialize("json", data, using=self.connection.alias):
@ -139,7 +139,7 @@ class BaseDatabaseCreation:
def _get_test_db_name(self):
"""
Internal implementation - returns the name of the test DB that will be
Internal implementation - return the name of the test DB that will be
created. Only useful when called from create_test_db() and
_create_test_db() and when no external munging is done with the 'NAME'
settings.
@ -150,7 +150,7 @@ class BaseDatabaseCreation:
def _create_test_db(self, verbosity, autoclobber, keepdb=False):
"""
Internal implementation - creates the test db tables.
Internal implementation - create the test db tables.
"""
suffix = self.sql_table_creation_suffix()
@ -285,7 +285,7 @@ class BaseDatabaseCreation:
def test_db_signature(self):
"""
Returns a tuple with elements of self.connection.settings_dict (a
Return a tuple with elements of self.connection.settings_dict (a
DATABASES setting value) that uniquely identify a database
accordingly to the RDBMS particularities.
"""

View File

@ -8,24 +8,25 @@ FieldInfo = namedtuple('FieldInfo', 'name type_code display_size internal_size p
class BaseDatabaseIntrospection:
"""
This class encapsulates all backend-specific introspection utilities
"""
"""Encapsulate backend-specific introspection utilities."""
data_types_reverse = {}
def __init__(self, connection):
self.connection = connection
def get_field_type(self, data_type, description):
"""Hook for a database backend to use the cursor description to
"""
Hook for a database backend to use the cursor description to
match a Django field type to a database column.
For Oracle, the column data_type on its own is insufficient to
distinguish between a FloatField and IntegerField, for example."""
distinguish between a FloatField and IntegerField, for example.
"""
return self.data_types_reverse[data_type]
def table_name_converter(self, name):
"""Apply a conversion to the name for the purposes of comparison.
"""
Apply a conversion to the name for the purposes of comparison.
The default table name converter is for case sensitive comparison.
"""
@ -35,16 +36,16 @@ class BaseDatabaseIntrospection:
"""
Apply a conversion to the column name for the purposes of comparison.
Uses table_name_converter() by default.
Use table_name_converter() by default.
"""
return self.table_name_converter(name)
def table_names(self, cursor=None, include_views=False):
"""
Returns a list of names of all tables that exist in the database.
The returned table list is sorted by Python's default sorting. We
do NOT use database's ORDER BY here to avoid subtle differences
in sorting order between databases.
Return a list of names of all tables that exist in the database.
Sort the returned table list by Python's default sorting. Do NOT use
the database's ORDER BY here to avoid subtle differences in sorting
order between databases.
"""
def get_names(cursor):
return sorted(ti.name for ti in self.get_table_list(cursor)
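
As a usage sketch of the introspection API above (the output naturally varies by project):

from django.db import connection

with connection.cursor() as cursor:
    tables = connection.introspection.table_names(cursor, include_views=True)
print(tables)  # e.g. ['auth_user', 'django_migrations', ...]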
@ -56,18 +57,17 @@ class BaseDatabaseIntrospection:
def get_table_list(self, cursor):
"""
Returns an unsorted list of TableInfo named tuples of all tables and
Return an unsorted list of TableInfo named tuples of all tables and
views that exist in the database.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_table_list() method')
def django_table_names(self, only_existing=False, include_views=True):
"""
Returns a list of all table names that have associated Django models and
Return a list of all table names that have associated Django models and
are in INSTALLED_APPS.
If only_existing is True, the resulting list will only include the tables
that actually exist in the database.
If only_existing is True, include only the tables that actually exist in
the database.
"""
from django.apps import apps
from django.db import router
@ -92,7 +92,10 @@ class BaseDatabaseIntrospection:
return tables
def installed_models(self, tables):
"Returns a set of all models represented by the provided list of table names."
"""
Return a set of all models represented by the provided list of table
names.
"""
from django.apps import apps
from django.db import router
all_models = []
@ -105,7 +108,10 @@ class BaseDatabaseIntrospection:
}
def sequence_list(self):
"Returns a list of information about all DB sequences for all models in all apps."
"""
Return a list of information about all DB sequences for all models in
all apps.
"""
from django.apps import apps
from django.db import models, router
@ -132,14 +138,15 @@ class BaseDatabaseIntrospection:
def get_key_columns(self, cursor, table_name):
"""
Backends can override this to return a list of (column_name, referenced_table_name,
referenced_column_name) for all key columns in given table.
Backends can override this to return a list of:
(column_name, referenced_table_name, referenced_column_name)
for all key columns in the given table.
"""
raise NotImplementedError('subclasses of BaseDatabaseIntrospection may require a get_key_columns() method')
def get_primary_key_column(self, cursor, table_name):
"""
Returns the name of the primary key column for the given table.
Return the name of the primary key column for the given table.
"""
for constraint in self.get_constraints(cursor, table_name).values():
if constraint['primary_key']:
@ -149,7 +156,7 @@ class BaseDatabaseIntrospection:
def get_indexes(self, cursor, table_name):
"""
Deprecated in Django 1.11, use get_constraints instead.
Returns a dictionary of indexed fieldname -> infodict for the given
Return a dictionary of indexed fieldname -> infodict for the given
table, where each infodict is in the format:
{'primary_key': boolean representing whether it's the primary key,
'unique': boolean representing whether it's a unique index}
@ -160,10 +167,10 @@ class BaseDatabaseIntrospection:
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index)
Retrieve any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Returns a dict mapping constraint names to their attributes,
Return a dict mapping constraint names to their attributes,
where attributes is a dict with keys:
* columns: List of columns this covers
* primary_key: True if primary key, False otherwise
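
For instance, a sketch that prints the primary-key constraint of Django's built-in auth_user table:

from django.db import connection

with connection.cursor() as cursor:
    constraints = connection.introspection.get_constraints(cursor, 'auth_user')
for name, info in constraints.items():
    if info['primary_key']:
        print(name, info['columns'])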

View File

@ -12,9 +12,8 @@ from django.utils.encoding import force_text
class BaseDatabaseOperations:
"""
This class encapsulates all backend-specific differences, such as the way
a backend performs ordering or calculates the ID of a recently-inserted
row.
Encapsulate backend-specific differences, such as the way a backend
performs ordering or calculates the ID of a recently-inserted row.
"""
compiler_module = "django.db.models.sql.compiler"
@ -39,7 +38,7 @@ class BaseDatabaseOperations:
def autoinc_sql(self, table, column):
"""
Returns any SQL needed to support auto-incrementing primary keys, or
Return any SQL needed to support auto-incrementing primary keys, or
None if no SQL is necessary.
This SQL is executed when a table is created.
@ -48,7 +47,7 @@ class BaseDatabaseOperations:
def bulk_batch_size(self, fields, objs):
"""
Returns the maximum allowed batch size for the backend. The fields
Return the maximum allowed batch size for the backend. The fields
are the fields going to be inserted in the batch, the objs contains
all the objects to be inserted.
"""
@ -56,7 +55,7 @@ class BaseDatabaseOperations:
def cache_key_culling_sql(self):
"""
Returns an SQL query that retrieves the first cache key greater than the
Return an SQL query that retrieves the first cache key greater than the
n smallest.
This is used by the 'db' cache backend to determine where to start
@ -66,28 +65,28 @@ class BaseDatabaseOperations:
def unification_cast_sql(self, output_field):
"""
Given a field instance, returns the SQL necessary to cast the result of
a union to that type. Note that the resulting string should contain a
'%s' placeholder for the expression being cast.
Given a field instance, return the SQL that casts the result of a union
to that type. The resulting string should contain a '%s' placeholder
for the expression being cast.
"""
return '%s'
def date_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
extracts a value from the given date field field_name.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_extract_sql() method')
def date_interval_sql(self, timedelta):
"""
Implements the date interval functionality for expressions
Implement the date interval functionality for expressions.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a date_interval_sql() method')
def date_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'year', 'month' or 'day', returns the SQL that
Given a lookup_type of 'year', 'month', or 'day', return the SQL that
truncates the given date field field_name to a date object with only
the given specificity.
"""
@ -95,13 +94,13 @@ class BaseDatabaseOperations:
def datetime_cast_date_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to date value.
Return the SQL to cast a datetime value to date value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_date() method')
def datetime_cast_time_sql(self, field_name, tzname):
"""
Returns the SQL necessary to cast a datetime value to time value.
Return the SQL to cast a datetime value to time value.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a datetime_cast_time_sql() method')
@ -123,7 +122,7 @@ class BaseDatabaseOperations:
def time_trunc_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
Given a lookup_type of 'hour', 'minute' or 'second', return the SQL
that truncates the given time field field_name to a time object with
only the given specificity.
"""
@ -131,23 +130,23 @@ class BaseDatabaseOperations:
def time_extract_sql(self, lookup_type, field_name):
"""
Given a lookup_type of 'hour', 'minute' or 'second', returns the SQL
Given a lookup_type of 'hour', 'minute', or 'second', return the SQL
that extracts a value from the given time field field_name.
"""
return self.date_extract_sql(lookup_type, field_name)
def deferrable_sql(self):
"""
Returns the SQL necessary to make a constraint "initially deferred"
during a CREATE TABLE statement.
Return the SQL to make a constraint "initially deferred" during a
CREATE TABLE statement.
"""
return ''
def distinct_sql(self, fields):
"""
Returns an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only the given fields are being
checked for duplicates.
Return an SQL DISTINCT clause which removes duplicate rows from the
result set. If any fields are given, only check the given fields for
duplicates.
"""
if fields:
raise NotImplementedError('DISTINCT ON fields is not supported by this database backend')
@ -157,31 +156,30 @@ class BaseDatabaseOperations:
def fetch_returned_insert_id(self, cursor):
"""
Given a cursor object that has just performed an INSERT...RETURNING
statement into a table that has an auto-incrementing ID, returns the
statement into a table that has an auto-incrementing ID, return the
newly created ID.
"""
return cursor.fetchone()[0]
def field_cast_sql(self, db_type, internal_type):
"""
Given a column type (e.g. 'BLOB', 'VARCHAR'), and an internal type
(e.g. 'GenericIPAddressField'), returns the SQL necessary to cast it
before using it in a WHERE statement. Note that the resulting string
should contain a '%s' placeholder for the column being searched against.
Given a column type (e.g. 'BLOB', 'VARCHAR') and an internal type
(e.g. 'GenericIPAddressField'), return the SQL to cast it before using
it in a WHERE statement. The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return '%s'
def force_no_ordering(self):
"""
Returns a list used in the "ORDER BY" clause to force no ordering at
all. Returning an empty list means that nothing will be included in the
ordering.
Return a list used in the "ORDER BY" clause to force no ordering at
all. Return an empty list to include nothing in the ordering.
"""
return []
def for_update_sql(self, nowait=False, skip_locked=False):
"""
Returns the FOR UPDATE SQL clause to lock rows for an update operation.
Return the FOR UPDATE SQL clause to lock rows for an update operation.
"""
if nowait:
return 'FOR UPDATE NOWAIT'
@ -192,10 +190,10 @@ class BaseDatabaseOperations:
def last_executed_query(self, cursor, sql, params):
"""
Returns a string of the query last executed by the given cursor, with
Return a string of the query last executed by the given cursor, with
placeholders replaced with actual values.
`sql` is the raw query containing placeholders, and `params` is the
`sql` is the raw query containing placeholders and `params` is the
sequence of parameters. These are used by default, but this method
exists for database backends to provide a better implementation
according to their own quoting schemes.
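
A brief sketch of the default behavior, querying Django's built-in auth_user table:

from django.db import connection

sql = 'SELECT * FROM auth_user WHERE id = %s'
with connection.cursor() as cursor:
    cursor.execute(sql, [1])
    print(connection.ops.last_executed_query(cursor, sql, [1]))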
@ -215,52 +213,51 @@ class BaseDatabaseOperations:
def last_insert_id(self, cursor, table_name, pk_name):
"""
Given a cursor object that has just performed an INSERT statement into
a table that has an auto-incrementing ID, returns the newly created ID.
a table that has an auto-incrementing ID, return the newly created ID.
This method also receives the table name and the name of the primary-key
column.
`pk_name` is the name of the primary-key column.
"""
return cursor.lastrowid
def lookup_cast(self, lookup_type, internal_type=None):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc.). The resulting string should contain a '%s'
placeholder for the column being searched against.
Return the string to use in a query when performing lookups
("contains", "like", etc.). It should contain a '%s' placeholder for
the column being searched against.
"""
return "%s"
def max_in_list_size(self):
"""
Returns the maximum number of items that can be passed in a single 'IN'
Return the maximum number of items that can be passed in a single 'IN'
list condition, or None if the backend does not impose a limit.
"""
return None
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
Return the maximum length of table and column names, or None if there
is no limit.
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we are wanting "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
Return the value to use for the LIMIT when we are wanting "LIMIT
infinity". Return None if the limit clause can be omitted in this case.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a no_limit_value() method')
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
Return the value to use during an INSERT statement to specify that
the field should use its default value.
"""
return 'DEFAULT'
def prepare_sql_script(self, sql):
"""
Takes an SQL script that may contain multiple lines and returns a list
Take an SQL script that may contain multiple lines and return a list
of statements to feed to successive cursor.execute() calls.
Since few databases are able to process raw SQL scripts in a single
@ -280,23 +277,23 @@ class BaseDatabaseOperations:
def process_clob(self, value):
"""
Returns the value of a CLOB column, for backends that return a locator
Return the value of a CLOB column, for backends that return a locator
object that requires additional processing.
"""
return value
def return_insert_id(self):
"""
For backends that support returning the last insert ID as part
of an insert query, this method returns the SQL and params to
append to the INSERT query. The returned fragment should
contain a format string to hold the appropriate column.
For backends that support returning the last insert ID as part of an
insert query, return the SQL and params to append to the INSERT query.
The returned fragment should contain a format string to hold the
appropriate column.
"""
pass
def compiler(self, compiler_name):
"""
Returns the SQLCompiler class corresponding to the given name,
Return the SQLCompiler class corresponding to the given name,
in the namespace corresponding to the `compiler_module` attribute
on this backend.
"""
@ -306,31 +303,29 @@ class BaseDatabaseOperations:
def quote_name(self, name):
"""
Returns a quoted version of the given table, index or column name. Does
Return a quoted version of the given table, index, or column name. Do
not quote the given name if it's already been quoted.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a quote_name() method')
def random_function_sql(self):
"""
Returns an SQL expression that returns a random value.
"""
"""Return an SQL expression that returns a random value."""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
Return the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). It should contain a '%s'
placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
If the feature is not supported (or part of it is not supported), raise
NotImplementedError.
"""
raise NotImplementedError('subclasses of BaseDatabaseOperations may require a regex_lookup() method')
def savepoint_create_sql(self, sid):
"""
Returns the SQL for starting a new savepoint. Only required if the
Return the SQL for starting a new savepoint. Only required if the
"uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id.
"""
@ -338,32 +333,30 @@ class BaseDatabaseOperations:
def savepoint_commit_sql(self, sid):
"""
Returns the SQL for committing the given savepoint.
Return the SQL for committing the given savepoint.
"""
return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid):
"""
Returns the SQL for rolling back the given savepoint.
Return the SQL for rolling back the given savepoint.
"""
return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self):
"""
Returns the SQL that will set the connection's time zone.
Return the SQL that will set the connection's time zone.
Returns '' if the backend doesn't support time zones.
Return '' if the backend doesn't support time zones.
"""
return ''
def sql_flush(self, style, tables, sequences, allow_cascade=False):
"""
Returns a list of SQL statements required to remove all data from
Return a list of SQL statements required to remove all data from
the given database tables (without actually removing the tables
themselves).
The returned value also includes SQL statements required to reset DB
sequences passed in :param sequences:.
themselves) and the SQL statements required to reset the sequences
passed in `sequences`.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
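
As a sketch, the flush management command assembles its statements roughly like this:

from django.core.management.color import no_style
from django.db import connection

statements = connection.ops.sql_flush(
    no_style(),
    connection.introspection.django_table_names(only_existing=True),
    connection.introspection.sequence_list(),
)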
@ -376,8 +369,8 @@ class BaseDatabaseOperations:
def sequence_reset_by_name_sql(self, style, sequences):
"""
Returns a list of the SQL statements required to reset sequences
passed in :param sequences:.
Return a list of the SQL statements required to reset sequences
passed in `sequences`.
The `style` argument is a Style object as returned by either
color_style() or no_style() in django.core.management.color.
@ -386,7 +379,7 @@ class BaseDatabaseOperations:
def sequence_reset_sql(self, style, model_list):
"""
Returns a list of the SQL statements required to reset sequences for
Return a list of the SQL statements required to reset sequences for
the given models.
The `style` argument is a Style object as returned by either
@ -395,32 +388,28 @@ class BaseDatabaseOperations:
return [] # No sequence reset required by default.
def start_transaction_sql(self):
"""
Returns the SQL statement required to start a transaction.
"""
"""Return the SQL statement required to start a transaction."""
return "BEGIN;"
def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
"""Return the SQL statement required to end a transaction."""
if not success:
return "ROLLBACK;"
return "COMMIT;"
def tablespace_sql(self, tablespace, inline=False):
"""
Returns the SQL that will be used in a query to define the tablespace.
Return the SQL that will be used in a query to define the tablespace.
Returns '' if the backend doesn't support tablespaces.
Return '' if the backend doesn't support tablespaces.
If inline is True, the SQL is appended to a row; otherwise it's appended
to the entire CREATE TABLE or CREATE INDEX statement.
If `inline` is True, append the SQL to a row; otherwise append it to
the entire CREATE TABLE or CREATE INDEX statement.
"""
return ''
def prep_for_like_query(self, x):
"""Prepares a value for use in a LIKE query."""
"""Prepare a value for use in a LIKE query."""
return force_text(x).replace("\\", "\\\\").replace("%", r"\%").replace("_", r"\_")
# Same as prep_for_like_query(), but called for "iexact" matches, which
@ -430,14 +419,14 @@ class BaseDatabaseOperations:
def validate_autopk_value(self, value):
"""
Certain backends do not accept some values for "serial" fields
(for example zero in MySQL). This method will raise a ValueError
if the value is invalid, otherwise returns validated value.
(for example zero in MySQL). Raise a ValueError if the value is
invalid, otherwise return the validated value.
"""
return value
def adapt_unknown_value(self, value):
"""
Transforms a value to something compatible with the backend driver.
Transform a value to something compatible with the backend driver.
This method only depends on the type of the value. It's designed for
cases where the target type isn't known, such as .raw() SQL queries.
@ -456,7 +445,7 @@ class BaseDatabaseOperations:
def adapt_datefield_value(self, value):
"""
Transforms a date value to an object compatible with what is expected
Transform a date value to an object compatible with what is expected
by the backend driver for date columns.
"""
if value is None:
@ -465,7 +454,7 @@ class BaseDatabaseOperations:
def adapt_datetimefield_value(self, value):
"""
Transforms a datetime value to an object compatible with what is expected
Transform a datetime value to an object compatible with what is expected
by the backend driver for datetime columns.
"""
if value is None:
@ -474,7 +463,7 @@ class BaseDatabaseOperations:
def adapt_timefield_value(self, value):
"""
Transforms a time value to an object compatible with what is expected
Transform a time value to an object compatible with what is expected
by the backend driver for time columns.
"""
if value is None:
@ -485,21 +474,21 @@ class BaseDatabaseOperations:
def adapt_decimalfield_value(self, value, max_digits=None, decimal_places=None):
"""
Transforms a decimal.Decimal value to an object compatible with what is
Transform a decimal.Decimal value to an object compatible with what is
expected by the backend driver for decimal (numeric) columns.
"""
return utils.format_number(value, max_digits, decimal_places)
def adapt_ipaddressfield_value(self, value):
"""
Transforms a string representation of an IP address into the expected
Transform a string representation of an IP address into the expected
type for the backend driver.
"""
return value or None
def year_lookup_bounds_for_date_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
Return a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateField value using a year
lookup.
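
For example, with the default implementation (the exact representation is backend-dependent):

from django.db import connection

print(connection.ops.year_lookup_bounds_for_date_field(2017))
# e.g. ['2017-01-01', '2017-12-31']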
@ -513,7 +502,7 @@ class BaseDatabaseOperations:
def year_lookup_bounds_for_datetime_field(self, value):
"""
Returns a two-elements list with the lower and upper bound to be used
Return a two-element list with the lower and upper bound to be used
with a BETWEEN operator to query a DateTimeField value using a year
lookup.
@ -531,7 +520,7 @@ class BaseDatabaseOperations:
def get_db_converters(self, expression):
"""
Get a list of functions needed to convert field data.
Return a list of functions needed to convert field data.
Some field types on some backends do not provide data in the correct
format, this is the hook for converter functions.
@ -556,10 +545,11 @@ class BaseDatabaseOperations:
pass
def combine_expression(self, connector, sub_expressions):
"""Combine a list of subexpressions into a single expression, using
"""
Combine a list of subexpressions into a single expression, using
the provided connecting operator. This is required because operators
can vary between backends (e.g., Oracle with %% and &) and between
subexpression types (e.g., date expressions)
subexpression types (e.g., date expressions).
"""
conn = ' %s ' % connector
return conn.join(sub_expressions)
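
A quick sketch of that default behavior:

from django.db import connection

print(connection.ops.combine_expression('+', ['"price"', '"tax"']))
# '"price" + "tax"' with the default implementation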
@ -575,7 +565,8 @@ class BaseDatabaseOperations:
return '%s'
def modify_insert_params(self, placeholder, params):
"""Allow modification of insert parameters. Needed for Oracle Spatial
"""
Allow modification of insert parameters. Needed for Oracle Spatial
backend due to #10888.
"""
return params
@ -583,7 +574,7 @@ class BaseDatabaseOperations:
def integer_field_range(self, internal_type):
"""
Given an integer field internal type (e.g. 'PositiveIntegerField'),
returns a tuple of the (min_value, max_value) form representing the
return a tuple of the (min_value, max_value) form representing the
range of the column type bound to the field.
"""
return self.integer_field_ranges[internal_type]
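
For example, with the default ranges (individual backends may override them):

from django.db import connection

min_value, max_value = connection.ops.integer_field_range('PositiveSmallIntegerField')
print(min_value, max_value)  # 0 32767 with the default ranges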

View File

@ -11,8 +11,8 @@ logger = logging.getLogger('django.db.backends.schema')
def _related_non_m2m_objects(old_field, new_field):
# Filters out m2m objects from reverse relations.
# Returns (old_relation, new_relation) tuples.
# Filter out m2m objects from reverse relations.
# Return (old_relation, new_relation) tuples.
return zip(
(obj for obj in old_field.model._meta.related_objects if not obj.field.many_to_many),
(obj for obj in new_field.model._meta.related_objects if not obj.field.many_to_many)
@ -21,17 +21,9 @@ def _related_non_m2m_objects(old_field, new_field):
class BaseDatabaseSchemaEditor:
"""
This class (and its subclasses) are responsible for emitting schema-changing
This class and its subclasses are responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
This class should be used by creating an instance for each set of schema
changes (e.g. a migration file), and by first calling start(),
then the relevant actions, and then commit(). This is necessary to allow
things like circular foreign key references - FKs will only be created once
commit() is called.
"""
# Overrideable SQL templates
@ -96,9 +88,7 @@ class BaseDatabaseSchemaEditor:
# Core utility functions
def execute(self, sql, params=()):
"""
Executes the given SQL statement, with optional parameters.
"""
"""Execute the given SQL statement, with optional parameters."""
# Don't perform the transactional DDL check if SQL is being collected
# as it's not going to be executed anyway.
if not self.collect_sql and self.connection.in_atomic_block and not self.connection.features.can_rollback_ddl:
@ -124,7 +114,7 @@ class BaseDatabaseSchemaEditor:
@classmethod
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
Generate a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
"""
h = hashlib.md5()
@ -136,8 +126,8 @@ class BaseDatabaseSchemaEditor:
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
Take a field and return its column definition.
The field must already have had set_attributes_from_name() called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
@ -199,9 +189,7 @@ class BaseDatabaseSchemaEditor:
)
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
"""Return a field's effective database default value."""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
@ -230,7 +218,7 @@ class BaseDatabaseSchemaEditor:
def quote_value(self, value):
"""
Returns a quoted version of the value so it's safe to use in an SQL
Return a quoted version of the value so it's safe to use in an SQL
string. This is not safe against injection from user code; it is
intended only for use in making SQL scripts or preparing default values
for particularly tricky backends (defaults are not user-defined, though,
@ -242,8 +230,8 @@ class BaseDatabaseSchemaEditor:
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
Create a table and any accompanying indexes or unique constraints for
the given `model`.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
@ -310,9 +298,7 @@ class BaseDatabaseSchemaEditor:
self.create_model(field.remote_field.through)
def delete_model(self, model):
"""
Deletes a model from the database.
"""
"""Delete a model from the database."""
# Handle auto-created intermediary models
for field in model._meta.local_many_to_many:
if field.remote_field.through._meta.auto_created:
@ -324,22 +310,18 @@ class BaseDatabaseSchemaEditor:
})
def add_index(self, model, index):
"""
Add an index on a model.
"""
"""Add an index on a model."""
self.execute(index.create_sql(model, self))
def remove_index(self, model, index):
"""
Remove an index from a model.
"""
"""Remove an index from a model."""
self.execute(index.remove_sql(model, self))
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
Deal with a model changing its unique_together. The input
unique_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
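
As a sketch of the add_index()/remove_index() methods above; Article is a hypothetical model:

from django.db import connection
from django.db.models import Index
from myapp.models import Article  # hypothetical model

index = Index(fields=['title'], name='article_title_idx')
with connection.schema_editor() as editor:
    editor.add_index(Article, index)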
@ -353,9 +335,9 @@ class BaseDatabaseSchemaEditor:
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
Deal with a model changing its index_together. The input
index_togethers must be doubly-nested, not the single-nested
["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
@ -379,9 +361,7 @@ class BaseDatabaseSchemaEditor:
self.execute(self._delete_constraint_sql(sql, model, constraint_names[0]))
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
"""Rename the table a model points to."""
if (old_db_table == new_db_table or
(self.connection.features.ignores_table_name_case and
old_db_table.lower() == new_db_table.lower())):
@ -392,9 +372,7 @@ class BaseDatabaseSchemaEditor:
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
"""Move a model's table between tablespaces."""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
@ -403,9 +381,8 @@ class BaseDatabaseSchemaEditor:
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
@ -447,7 +424,7 @@ class BaseDatabaseSchemaEditor:
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
@ -473,11 +450,11 @@ class BaseDatabaseSchemaEditor:
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
Allow a field's type, uniqueness, nullability, default, column,
constraints, etc. to be modified.
`old_field` is required to compute the necessary changes.
If `strict` is True, raise errors if the old column does not match
`old_field` precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
@ -514,8 +491,7 @@ class BaseDatabaseSchemaEditor:
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
"""Perform a "physical" (non-ManyToMany) field update."""
# Drop any FK constraints, we'll remake them later
fks_dropped = set()
if old_field.remote_field and old_field.db_constraint:
@ -797,9 +773,9 @@ class BaseDatabaseSchemaEditor:
for cases when a creation type is different to an alteration type
(e.g. SERIAL in PostgreSQL, PostGIS fields).
Should return two things; an SQL fragment of (sql, params) to insert
into an ALTER TABLE statement, and a list of extra (sql, params) tuples
to run once the field is altered.
Return a two-tuple of: an SQL fragment of (sql, params) to insert into
an ALTER TABLE statement and a list of extra (sql, params) tuples to
run once the field is altered.
"""
return (
(
@ -813,9 +789,7 @@ class BaseDatabaseSchemaEditor:
)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
"""Alter M2Ms to repoint their to= endpoints."""
# Rename the through table
if old_field.remote_field.through._meta.db_table != new_field.remote_field.through._meta.db_table:
self.alter_db_table(old_field.remote_field.through, old_field.remote_field.through._meta.db_table,
@ -837,7 +811,7 @@ class BaseDatabaseSchemaEditor:
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
Generate a unique name for an index/unique constraint.
The name is divided into 3 parts: the table name, the column names,
and a unique digest and suffix.
@ -895,8 +869,8 @@ class BaseDatabaseSchemaEditor:
def _model_indexes_sql(self, model):
"""
Return all index SQL statements (field indexes, index_together,
Meta.indexes) for the specified model, as a list.
Return a list of all index SQL statements (field indexes,
index_together, Meta.indexes) for the specified model.
"""
if not model._meta.managed or model._meta.proxy or model._meta.swapped:
return []
@ -967,9 +941,7 @@ class BaseDatabaseSchemaEditor:
def _constraint_names(self, model, column_names=None, unique=None,
primary_key=None, index=None, foreign_key=None,
check=None):
"""
Returns all constraint names matching the columns and conditions
"""
"""Return all constraint names matching the columns and conditions."""
if column_names is not None:
column_names = [
self.connection.introspection.column_name_converter(name)

View File

@ -1,7 +1,5 @@
class BaseDatabaseValidation:
"""
This class encapsulates all backend-specific validation.
"""
"""Encapsulate backend-specific validation."""
def __init__(self, connection):
self.connection = connection

View File

@ -3,7 +3,7 @@ Dummy database backend for Django.
Django uses this if the database ENGINE setting is empty (None or empty string).
Each of these API functions, except connection.close(), raises
Each of these API functions, except connection.close(), raise
ImproperlyConfigured.
"""

View File

@ -51,10 +51,10 @@ server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
class CursorWrapper:
"""
A thin wrapper around MySQLdb's normal cursor class so that we can catch
particular exception instances and reraise them with the right types.
A thin wrapper around MySQLdb's normal cursor class that catches particular
exception instances and reraises them with the correct types.
Implemented as a wrapper, rather than a subclass, so that we aren't stuck
Implemented as a wrapper, rather than a subclass, so that it isn't stuck
to the particular underlying representation returned by Connection.cursor().
"""
codes_for_integrityerror = (1048,)
@ -269,8 +269,9 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def disable_constraint_checking(self):
"""
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,
to indicate constraint checks need to be re-enabled.
Disable foreign key checks, primarily for use in adding rows with
forward references. Always return True to indicate constraint checks
need to be re-enabled.
"""
self.cursor().execute('SET foreign_key_checks=0')
return True
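
A usage sketch pairing this with check_constraints(), as the loaddata command does; load_fixture_with_forward_refs() is a hypothetical loader:

from django.db import connection

with connection.constraint_checks_disabled():
    load_fixture_with_forward_refs()  # hypothetical loader
connection.check_constraints()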
@ -289,14 +290,14 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
Raise an IntegrityError on the first invalid foreign key reference
encountered (if any) and provide detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply

View File

@ -50,16 +50,15 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
"""Return a list of table and view names in the current database."""
cursor.execute("SHOW FULL TABLES")
return [TableInfo(row[0], {'BASE TABLE': 't', 'VIEW': 'v'}.get(row[1]))
for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"""
Returns a description of the table, with the DB-API cursor.description interface."
Return a description of the table with the DB-API cursor.description
interface.
"""
# information_schema database gives more accurate results for some figures:
# - varchar length returned by cursor.description is an internal length,
@ -99,7 +98,7 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
constraints = self.get_key_columns(cursor, table_name)
@ -110,8 +109,8 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in the given table.
"""
key_columns = []
cursor.execute("""
@ -153,7 +152,7 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_storage_engine(self, cursor, table_name):
"""
Retrieves the storage engine for a given table. Returns the default
Retrieve the storage engine for a given table. Return the default
storage engine if the table doesn't exist.
"""
cursor.execute(
@ -167,7 +166,8 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns

View File

@ -277,8 +277,8 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def check_constraints(self, table_names=None):
"""
To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they
are returned to deferred.
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')

View File

@ -4,7 +4,7 @@ from django.db.models.sql import compiler
class SQLCompiler(compiler.SQLCompiler):
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list
Create the SQL for this query. Return the SQL string and list
of parameters. This is overridden from the original Query class
to handle the additional SQL Oracle requires to emulate LIMIT
and OFFSET.

View File

@ -103,11 +103,12 @@ class DatabaseCreation(BaseDatabaseCreation):
def _switch_to_test_user(self, parameters):
"""
Oracle doesn't have the concept of separate databases under the same user.
Thus, we use a separate user (see _create_test_db). This method is used
to switch to that user. We will need the main user again for clean-up when
we end testing, so we keep its credentials in SAVED_USER/SAVED_PASSWORD
entries in the settings dict.
Switch to the user that's used for creating the test database.
Oracle doesn't have the concept of separate databases under the same
user, so a separate user is used; see _create_test_db(). The main user
is also needed for cleanup when testing is completed, so save its
credentials in the SAVED_USER/SAVED_PASSWORD key in the settings dict.
"""
real_settings = settings.DATABASES[self.connection.alias]
real_settings['SAVED_USER'] = self.connection.settings_dict['SAVED_USER'] = \
@ -122,8 +123,8 @@ class DatabaseCreation(BaseDatabaseCreation):
def set_as_test_mirror(self, primary_settings_dict):
"""
Set this database up to be used in testing as a mirror of a primary database
whose settings are given
Set this database up to be used in testing as a mirror of a primary
database whose settings are given.
"""
self.connection.settings_dict['USER'] = primary_settings_dict['USER']
self.connection.settings_dict['PASSWORD'] = primary_settings_dict['PASSWORD']
@ -166,7 +167,7 @@ class DatabaseCreation(BaseDatabaseCreation):
def _destroy_test_db(self, test_database_name, verbosity=1):
"""
Destroy a test database, prompting the user for confirmation if the
database already exists. Returns the name of the test database created.
database already exists. Return the name of the test database created.
"""
self.connection.settings_dict['USER'] = self.connection.settings_dict['SAVED_USER']
self.connection.settings_dict['PASSWORD'] = self.connection.settings_dict['SAVED_PASSWORD']
@ -292,9 +293,8 @@ class DatabaseCreation(BaseDatabaseCreation):
def _test_settings_get(self, key, default=None, prefixed=None):
"""
Return a value from the test settings dict,
or a given default,
or a prefixed entry from the main settings dict
Return a value from the test settings dict, or a given default, or a
prefixed entry from the main settings dict.
"""
settings_dict = self.connection.settings_dict
val = settings_dict['TEST'].get(key, default)
@ -345,9 +345,9 @@ class DatabaseCreation(BaseDatabaseCreation):
def _get_test_db_name(self):
"""
We need to return the 'production' DB name to get the test DB creation
machinery to work. This isn't a great deal in this case because DB
names as handled by Django haven't real counterparts in Oracle.
Return the 'production' DB name to get the test DB creation machinery
to work. This isn't a great deal in this case because DB names as
handled by Django don't have real counterparts in Oracle.
"""
return self.connection.settings_dict['NAME']

View File

@ -44,15 +44,16 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
return super().get_field_type(data_type, description)
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
"""Return a list of table and view names in the current database."""
cursor.execute("SELECT TABLE_NAME, 't' FROM USER_TABLES UNION ALL "
"SELECT VIEW_NAME, 'v' FROM USER_VIEWS")
return [TableInfo(row[0].lower(), row[1]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
# user_tab_columns gives data default for columns
cursor.execute("""
SELECT
@ -81,19 +82,19 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
return description
def table_name_converter(self, name):
"Table name comparison is case insensitive under Oracle"
"""Table name comparison is case insensitive under Oracle."""
return name.lower()
def _name_to_index(self, cursor, table_name):
"""
Returns a dictionary of {field_name: field_index} for the given table.
Return a dictionary of {field_name: field_index} for the given table.
Indexes are 0-based.
"""
return {d[0]: i for i, d in enumerate(self.get_table_description(cursor, table_name))}
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
table_name = table_name.upper()
@ -164,7 +165,8 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Loop over the constraints, getting PKs, uniques, and checks

View File

@ -105,7 +105,7 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by
quote_name(), but without the actual quotes.
quote_name() but without the quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
@ -113,9 +113,7 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
return nn
def _generate_temp_name(self, for_name):
"""
Generates temporary names for workarounds that need temp columns
"""
"""Generate temporary names for workarounds that need temp columns."""
suffix = hex(hash(for_name)).upper()[1:]
return self.normalize_name(for_name + "_" + suffix)
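
A minimal sketch of the naming scheme this method produces. normalize_name here is a hypothetical stand-in that only uppercases and truncates to Oracle's 30-character identifier limit, not the real schema editor method:

def normalize_name(name, max_length=30):
    # Hypothetical simplification of DatabaseSchemaEditor.normalize_name().
    return name.upper()[:max_length]

def generate_temp_name(for_name):
    # Mirror hex(hash(for_name)).upper()[1:] above: a hash-derived,
    # per-process suffix keeps temp column names unique per source name.
    suffix = hex(hash(for_name)).upper()[1:]
    return normalize_name(for_name + "_" + suffix)

print(generate_temp_name("author_id"))  # e.g. AUTHOR_ID_X7A2B3C4D5E6F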

View File

@ -230,8 +230,8 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def check_constraints(self, table_names=None):
"""
To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they
are returned to deferred.
Check constraints by setting them to immediate. Return them to deferred
afterward.
"""
self.cursor().execute('SET CONSTRAINTS ALL IMMEDIATE')
self.cursor().execute('SET CONSTRAINTS ALL DEFERRED')

View File

@ -53,9 +53,7 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
return field_type
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
"""Return a list of table and view names in the current database."""
cursor.execute("""
SELECT c.relname, c.relkind
FROM pg_catalog.pg_class c
@ -68,7 +66,10 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
if row[0] not in self.ignored_tables]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
# As cursor.description does not return reliably the nullable property,
# we have to query the information_schema (#7783)
cursor.execute("""
@ -87,7 +88,7 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_relations(self, cursor, table_name):
"""
Returns a dictionary of {field_name: (field_name_other_table, other_table)}
Return a dictionary of {field_name: (field_name_other_table, other_table)}
representing all relationships to the given table.
"""
cursor.execute("""

View File

@ -202,16 +202,15 @@ class DatabaseOperations(BaseDatabaseOperations):
def max_name_length(self):
"""
Returns the maximum length of an identifier.
Return the maximum length of an identifier.
Note that the maximum length of an identifier is 63 by default, but can
be changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h .
The maximum length of an identifier is 63 by default, but can be
changed by recompiling PostgreSQL after editing the NAMEDATALEN
macro in src/include/pg_config_manual.h.
This implementation simply returns 63, but can easily be overridden by a
custom database backend that inherits most of its behavior from this one.
This implementation returns 63, but can be overridden by a custom
database backend that inherits most of its behavior from this one.
"""
return 63
def distinct_sql(self, fields):

View File

@ -52,9 +52,7 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
return None
def _alter_column_type_sql(self, table, old_field, new_field, new_type):
"""
Makes ALTER TYPE with SERIAL make sense.
"""
"""Make ALTER TYPE with SERIAL make sense."""
if new_type.lower() in ("serial", "bigserial"):
column = new_field.column
sequence_name = "%s_%s_seq" % (table, column)

View File

@ -1,5 +1,5 @@
"""
Extracts the version of the PostgreSQL server.
Extract the version of the PostgreSQL server.
"""
import re
@ -24,7 +24,7 @@ def _parse_version(text):
def get_version(connection):
"""
Returns an integer representing the major, minor and revision number of the
Return an integer representing the major, minor, and revision number of the
server. Format is the one used for the return value of libpq
PQServerVersion()/``server_version`` connection attribute (available in
newer psycopg2 versions.)
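
As a worked example of the packing the docstring refers to, a minimal sketch for 9.x-style three-part versions (the real module's regex also tolerates suffixes such as beta releases):

def parse_version_sketch(text):
    # "9.4.1" -> 9 * 10000 + 4 * 100 + 1 == 90401.
    major, minor, revision = (list(map(int, text.split('.'))) + [0, 0])[:3]
    return major * 10000 + minor * 100 + revision

assert parse_version_sketch('9.4.1') == 90401
assert parse_version_sketch('9.6') == 90600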

View File

@ -27,9 +27,8 @@ from .schema import DatabaseSchemaEditor # isort:skip
def decoder(conv_func):
""" The Python sqlite3 interface returns always byte strings.
This function converts the received value to a regular string before
passing it to the receiver function.
"""
Convert bytestrings from Python's sqlite3 interface to a regular string.
"""
return lambda s: conv_func(s.decode())
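
A quick usage sketch of decoder(): wrapping a str-based converter so it accepts the bytestrings sqlite3 hands to registered converters (the converter itself is illustrative):

parse_flag = decoder(lambda s: s == 'true')
assert parse_flag(b'true') is True
assert parse_flag(b'false') is False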
@ -215,14 +214,14 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def check_constraints(self, table_names=None):
"""
Checks each table name in `table_names` for rows with invalid foreign
Check each table name in `table_names` for rows with invalid foreign
key references. This method is intended to be used in conjunction with
`disable_constraint_checking()` and `enable_constraint_checking()`, to
determine if rows with invalid references were entered while constraint
checks were off.
Raises an IntegrityError on the first invalid foreign key reference
encountered (if any) and provides detailed information about the
Raise an IntegrityError on the first invalid foreign key reference
encountered (if any) and provide detailed information about the
invalid reference in the error message.
Backends can override this method if they can more directly apply
@ -409,9 +408,9 @@ def _sqlite_time_extract(lookup_type, dt):
def _sqlite_format_dtdelta(conn, lhs, rhs):
"""
LHS and RHS can be either:
- An integer number of microseconds
- A string representing a timedelta object
- A string representing a datetime
- An integer number of microseconds
- A string representing a timedelta object
- A string representing a datetime
"""
try:
if isinstance(lhs, int):
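
For illustration, one operand in the three documented forms could be normalized as in this sketch using django.utils.dateparse; this is an aside, not the function's actual body:

import datetime
from django.utils.dateparse import parse_datetime, parse_duration

def normalize_operand(value):
    # Int -> microseconds; otherwise try a timedelta string first,
    # then fall back to parsing a datetime string.
    if isinstance(value, int):
        return datetime.timedelta(microseconds=value)
    return parse_duration(value) or parse_datetime(value)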

View File

@ -99,7 +99,7 @@ class DatabaseCreation(BaseDatabaseCreation):
def test_db_signature(self):
"""
Returns a tuple that uniquely identifies a test database.
Return a tuple that uniquely identifies a test database.
This takes into account the special cases of ":memory:" and "" for
SQLite since the databases will be distinct despite having the same

View File

@ -53,12 +53,12 @@ class DatabaseFeatures(BaseDatabaseFeatures):
@cached_property
def supports_stddev(self):
"""Confirm support for STDDEV and related stats functions
"""
Confirm support for STDDEV and related stats functions.
SQLite supports STDDEV as an extension package; so
connection.ops.check_expression_support() can't unilaterally
rule out support for STDDEV. We need to manually check
whether the call works.
rule out support for STDDEV. Manually check whether the call works.
"""
with self.connection.cursor() as cursor:
cursor.execute('CREATE TABLE STDDEV_TEST (X INT)')

View File

@ -58,9 +58,7 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
data_types_reverse = FlexibleFieldLookupDict()
def get_table_list(self, cursor):
"""
Returns a list of table and view names in the current database.
"""
"""Return a list of table and view names in the current database."""
# Skip the sqlite_sequence system table used for autoincrement key
# generation.
cursor.execute("""
@ -70,7 +68,10 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
return [TableInfo(row[0], row[1][0]) for row in cursor.fetchall()]
def get_table_description(self, cursor, table_name):
"Returns a description of the table, with the DB-API cursor.description interface."
"""
Return a description of the table with the DB-API cursor.description
interface.
"""
return [
FieldInfo(
info['name'],
@ -156,8 +157,8 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_key_columns(self, cursor, table_name):
"""
Returns a list of (column_name, referenced_table_name, referenced_column_name) for all
key columns in given table.
Return a list of (column_name, referenced_table_name, referenced_column_name)
for all key columns in given table.
"""
key_columns = []
@ -207,9 +208,7 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
return indexes
def get_primary_key_column(self, cursor, table_name):
"""
Get the column name of the primary key for the given table.
"""
"""Return the column name of the primary key for the given table."""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
row = cursor.fetchone()
@ -238,7 +237,8 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
Retrieve any constraints or keys (unique, pk, fk, check, index) across
one or more columns.
"""
constraints = {}
# Get the index info

View File

@ -43,31 +43,24 @@ class DatabaseOperations(BaseDatabaseOperations):
pass
def date_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_date_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
"""
Support EXTRACT with a user-defined function django_date_extract()
that's registered in connect(). Use single quotes because this is a
string and could otherwise cause a collision with a field name.
"""
return "django_date_extract('%s', %s)" % (lookup_type.lower(), field_name)
def date_interval_sql(self, timedelta):
return "'%s'" % duration_string(timedelta), []
def format_for_duration_arithmetic(self, sql):
"""Do nothing here, we will handle it in the custom function."""
"""Do nothing since formatting is handled in the custom function."""
return sql
def date_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_date_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def time_trunc_sql(self, lookup_type, field_name):
# sqlite doesn't support DATE_TRUNC, so we fake it with a user-defined
# function django_date_trunc that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_time_trunc('%s', %s)" % (lookup_type.lower(), field_name)
def _convert_tzname_to_sql(self, tzname):
@ -84,22 +77,16 @@ class DatabaseOperations(BaseDatabaseOperations):
)
def datetime_extract_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_extract_sql.
return "django_datetime_extract('%s', %s, %s)" % (
lookup_type.lower(), field_name, self._convert_tzname_to_sql(tzname),
)
def datetime_trunc_sql(self, lookup_type, field_name, tzname):
# Same comment as in date_trunc_sql.
return "django_datetime_trunc('%s', %s, %s)" % (
lookup_type.lower(), field_name, self._convert_tzname_to_sql(tzname),
)
def time_extract_sql(self, lookup_type, field_name):
# sqlite doesn't support extract, so we fake it with the user-defined
# function django_time_extract that's registered in connect(). Note that
# single quotes are used because this is a string (and could otherwise
# cause a collision with a field name).
return "django_time_extract('%s', %s)" % (lookup_type.lower(), field_name)
def pk_default_value(self):
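
For context on the pattern these docstrings describe - SQL functions registered on the connection - a minimal stand-alone sketch using the stdlib sqlite3 module; the extraction logic is a toy stand-in, not Django's actual connect() code:

import sqlite3

def date_extract(lookup_type, date_str):
    # Toy version of django_date_extract(): split an ISO date and
    # return the requested component.
    year, month, day = map(int, date_str.split('-'))
    return {'year': year, 'month': month, 'day': day}[lookup_type]

conn = sqlite3.connect(':memory:')
conn.create_function('django_date_extract', 2, date_extract)
row = conn.execute("SELECT django_date_extract('year', '2017-01-24')").fetchone()
assert row[0] == 2017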

View File

@ -19,7 +19,7 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
# Some SQLite schema alterations need foreign key constraints to be
# disabled. This is the default in SQLite but can be changed with a
# build flag and might change in future, so can't be relied upon.
# We enforce it here for the duration of the transaction.
# Enforce it here for the duration of the transaction.
c.execute('PRAGMA foreign_keys')
self._initial_pragma_fk = c.fetchone()[0]
c.execute('PRAGMA foreign_keys = 0')
@ -225,9 +225,8 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
Create a field on a model. Usually involves adding a column, but may
involve adding a table instead (for M2M fields).
"""
# Special-case implicit M2M tables
if field.many_to_many and field.remote_field.through._meta.auto_created:
@ -236,7 +235,7 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
Remove a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# M2M fields are a special case
@ -254,14 +253,12 @@ class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
def _alter_field(self, model, old_field, new_field, old_type, new_type,
old_db_params, new_db_params, strict=False):
"""Actually perform a "physical" (non-ManyToMany) field update."""
"""Perform a "physical" (non-ManyToMany) field update."""
# Alter by remaking table
self._remake_table(model, alter_field=(old_field, new_field))
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
"""Alter M2Ms to repoint their to= endpoints."""
if old_field.remote_field.through._meta.db_table == new_field.remote_field.through._meta.db_table:
# The field name didn't change, but some options did; we have to propagate this altering.
self._remake_table(

View File

@ -114,7 +114,7 @@ class CursorDebugWrapper(CursorWrapper):
###############################################
def typecast_date(s):
return datetime.date(*map(int, s.split('-'))) if s else None # returns None if s is null
return datetime.date(*map(int, s.split('-'))) if s else None # return None if s is null
def typecast_time(s): # does NOT store time zone information
@ -136,8 +136,7 @@ def typecast_timestamp(s): # does NOT store time zone information
if ' ' not in s:
return typecast_date(s)
d, t = s.split()
# Extract timezone information, if it exists. Currently we just throw
# it away, but in the future we may make use of it.
# Extract timezone information, if it exists. Currently it's ignored.
if '-' in t:
t, tz = t.split('-', 1)
tz = '-' + tz
@ -195,7 +194,7 @@ def truncate_name(name, length=None, hash_len=4):
def format_number(value, max_digits, decimal_places):
"""
Formats a number into a string with the requisite number of digits and
Format a number into a string with the requisite number of digits and
decimal places.
"""
if value is None:
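
A sketch of the rounding format_number() describes, using the decimal module; simplified, but quantizing within a precision-capped context matches the documented behavior:

import decimal

def format_number_sketch(value, max_digits, decimal_places):
    # Cap total precision at max_digits, then round to decimal_places.
    context = decimal.Context(prec=max_digits)
    exponent = decimal.Decimal(1).scaleb(-decimal_places)
    return str(value.quantize(exponent, context=context))

assert format_number_sketch(decimal.Decimal('3.14159'), 5, 2) == '3.14'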

View File

@ -18,9 +18,9 @@ from .topological_sort import stable_topological_sort
class MigrationAutodetector:
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Take a pair of ProjectStates and compare them to see what the first would
need doing to make it match the second (the second usually being the
project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
@ -38,7 +38,7 @@ class MigrationAutodetector:
def changes(self, graph, trim_to_apps=None, convert_apps=None, migration_name=None):
"""
Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
Take a graph to base names on and an optional set of apps
to try to restrict to (restriction is not guaranteed).
"""
changes = self._detect_changes(convert_apps, graph)
@ -90,9 +90,8 @@ class MigrationAutodetector:
def only_relation_agnostic_fields(self, fields):
"""
Return a definition of the fields that ignores field names and
what related fields actually relate to.
Used for detecting renames (as, of course, the related fields
change during renames)
what related fields actually relate to. Used for detecting renames (as,
of course, the related fields change during renames).
"""
fields_def = []
for name, field in sorted(fields):
@ -104,7 +103,7 @@ class MigrationAutodetector:
def _detect_changes(self, convert_apps=None, graph=None):
"""
Returns a dict of migration plans which will achieve the
Return a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
@ -117,11 +116,10 @@ class MigrationAutodetector:
graph is an optional argument that, if provided, can help improve
dependency generation and avoid potential circular dependencies.
"""
# The first phase is generating all the operations for each app
# and gathering them into a big per-app list.
# We'll then go through that list later and order it and split
# into migrations to resolve dependencies caused by M2Ms and FKs.
# Then go through that list, order it, and split into migrations to
# resolve dependencies caused by M2Ms and FKs.
self.generated_operations = {}
self.altered_indexes = {}
@ -198,9 +196,9 @@ class MigrationAutodetector:
def _prepare_field_lists(self):
"""
Prepare field lists, and prepare a list of the fields that used
through models in the old state so we can make dependencies
from the through model deletion to the field that uses it.
Prepare field lists and a list of the fields that used through models
in the old state so dependencies can be made from the through model
deletion to the field that uses it.
"""
self.kept_model_keys = set(self.old_model_keys).intersection(self.new_model_keys)
self.kept_proxy_keys = set(self.old_proxy_keys).intersection(self.new_proxy_keys)
@ -216,9 +214,7 @@ class MigrationAutodetector:
self.new_field_keys.update((app_label, model_name, x) for x, y in new_model_state.fields)
def _generate_through_model_map(self):
"""
Through model map generation
"""
"""Through model map generation."""
for app_label, model_name in sorted(self.old_model_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
old_model_state = self.from_state.models[app_label, old_model_name]
@ -234,13 +230,13 @@ class MigrationAutodetector:
def _build_migration_list(self, graph=None):
"""
We need to chop the lists of operations up into migrations with
dependencies on each other. We do this by stepping up an app's list of
operations until we find one that has an outgoing dependency that isn't
in another app's migration yet (hasn't been chopped off its list). We
then chop off the operations before it into a migration and move onto
the next app. If we loop back around without doing anything, there's a
circular dependency (which _should_ be impossible as the operations are
Chop the lists of operations up into migrations with dependencies on
each other. Do this by going through an app's list of operations until
one is found that has an outgoing dependency that isn't in another
app's migration yet (hasn't been chopped off its list). Then chop off
the operations before it into a migration and move on to the next app.
If the loop completes without doing anything, there's a circular
dependency (which _should_ be impossible as the operations are
all split at this point so they can't depend and be depended on).
"""
self.migrations = {}
@ -325,9 +321,8 @@ class MigrationAutodetector:
def _sort_migrations(self):
"""
Reorder to make things possible. The order we have already isn't bad,
but we need to pull a few things around so FKs work nicely inside the
same app
Reorder to make things possible. Reordering may be needed so FKs work
nicely inside the same app.
"""
for app_label, ops in sorted(self.generated_operations.items()):
# construct a dependency graph for intra-app dependencies
@ -360,8 +355,8 @@ class MigrationAutodetector:
def check_dependency(self, operation, dependency):
"""
Returns ``True`` if the given operation depends on the given dependency,
``False`` otherwise.
Return True if the given operation depends on the given dependency,
False otherwise.
"""
# Created model
if dependency[2] is None and dependency[3] is True:
@ -431,8 +426,8 @@ class MigrationAutodetector:
def swappable_first_key(self, item):
"""
Sorting key function that places potential swappable models first in
lists of created models (only real way to solve #22783)
Place potential swappable models first in lists of created models (the
only real way to solve #22783).
"""
try:
model = self.new_apps.get_model(item[0], item[1])
@ -451,9 +446,9 @@ class MigrationAutodetector:
def generate_renamed_models(self):
"""
Finds any renamed models, and generates the operations for them,
and removes the old entry from the model lists.
Must be run before other model-level generation.
Find any renamed models, generate the operations for them, and remove
the old entry from the model lists. Must be run before other
model-level generation.
"""
self.renamed_models = {}
self.renamed_models_rel = {}
@ -490,11 +485,11 @@ class MigrationAutodetector:
"""
Find all new models (both managed and unmanaged) and make create
operations for them as well as separate operations to create any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
foreign key or M2M relationships (these are optimized later, if
possible).
We also defer any model options that refer to collections of fields
that might be deferred (e.g. unique_together, index_together).
Defer any model options that refer to collections of fields that might
be deferred (e.g. unique_together, index_together).
"""
old_keys = set(self.old_model_keys).union(self.old_unmanaged_keys)
added_models = set(self.new_model_keys) - old_keys
@ -643,10 +638,10 @@ class MigrationAutodetector:
def generate_created_proxies(self):
"""
Makes CreateModel statements for proxy models.
We use the same statements as that way there's less code duplication,
but of course for proxy models we can skip all that pointless field
stuff and just chuck out an operation.
Make CreateModel statements for proxy models. Use the same statements
as for concrete models since that means less code duplication, but for proxy
models it's safe to skip all the pointless field stuff and just chuck
out an operation.
"""
added = set(self.new_proxy_keys) - set(self.old_proxy_keys)
for app_label, model_name in sorted(added):
@ -679,10 +674,10 @@ class MigrationAutodetector:
"""
Find all deleted models (managed and unmanaged) and make delete
operations for them as well as separate operations to delete any
foreign key or M2M relationships (we'll optimize these back in later
if we can).
foreign key or M2M relationships (these are optimized later, if
possible).
We also bring forward removal of any model options that refer to
Also bring forward removal of any model options that refer to
collections of fields - the inverse of generate_created_models().
"""
new_keys = set(self.new_model_keys).union(self.new_unmanaged_keys)
@ -769,9 +764,7 @@ class MigrationAutodetector:
)
def generate_deleted_proxies(self):
"""
Makes DeleteModel statements for proxy models.
"""
"""Make DeleteModel options for proxy models."""
deleted = set(self.old_proxy_keys) - set(self.new_proxy_keys)
for app_label, model_name in sorted(deleted):
model_state = self.from_state.models[app_label, model_name]
@ -784,9 +777,7 @@ class MigrationAutodetector:
)
def generate_renamed_fields(self):
"""
Works out renamed fields
"""
"""Work out renamed fields."""
self.renamed_fields = {}
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
old_model_name = self.renamed_models.get((app_label, model_name), model_name)
@ -817,9 +808,7 @@ class MigrationAutodetector:
break
def generate_added_fields(self):
"""
Fields that have been added
"""
"""Make AddField operations."""
for app_label, model_name, field_name in sorted(self.new_field_keys - self.old_field_keys):
self._generate_added_field(app_label, model_name, field_name)
@ -855,9 +844,7 @@ class MigrationAutodetector:
)
def generate_removed_fields(self):
"""
Fields that have been removed.
"""
"""Make RemoveField operations."""
for app_label, model_name, field_name in sorted(self.old_field_keys - self.new_field_keys):
self._generate_removed_field(app_label, model_name, field_name)
@ -879,7 +866,8 @@ class MigrationAutodetector:
def generate_altered_fields(self):
"""
Fields that have been altered.
Make AlterField operations, or possibly RemoveField/AddField if alter
isn't possible.
"""
for app_label, model_name, field_name in sorted(self.old_field_keys.intersection(self.new_field_keys)):
# Did the field change?
@ -1057,9 +1045,9 @@ class MigrationAutodetector:
def generate_altered_options(self):
"""
Works out if any non-schema-affecting options have changed and
makes an operation to represent them in state changes (in case Python
code in migrations needs them)
Work out if any non-schema-affecting options have changed and make an
operation to represent them in state changes (in case Python code in
migrations needs them).
"""
models_to_check = self.kept_model_keys.union(
self.kept_proxy_keys
@ -1137,9 +1125,9 @@ class MigrationAutodetector:
def arrange_for_graph(self, changes, graph, migration_name=None):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
Take a result from changes() and a MigrationGraph, and fix the names
and dependencies of the changes so they extend the graph from the leaf
nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
@ -1186,11 +1174,10 @@ class MigrationAutodetector:
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
Take changes from arrange_for_graph() and a set of app labels, and return
a modified set of changes which trims out as many migrations that are
not in app_labels as possible. Note that some other migrations may
still be present as they may be required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
@ -1214,10 +1201,9 @@ class MigrationAutodetector:
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique,
but we put some effort in to the fallback name to avoid VCS conflicts
if we can.
Given a set of operations, suggest a name for the migration they might
represent. Names are not guaranteed to be unique, but put some effort
into the fallback name to avoid VCS conflicts if possible.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
@ -1236,8 +1222,8 @@ class MigrationAutodetector:
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
Given a migration name, try to extract a number from the beginning of
it. If no number is found, return None.
"""
match = re.match(r'^\d+', name)
if match:
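
A quick illustration of the extraction described above, anchored at the start of the name:

import re

def parse_number_sketch(name):
    # "0001_initial" -> 1; "custom_name" -> None.
    match = re.match(r'^\d+', name)
    return int(match.group()) if match else None

assert parse_number_sketch('0001_initial') == 1
assert parse_number_sketch('custom_name') is None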

View File

@ -2,51 +2,37 @@ from django.db.utils import DatabaseError
class AmbiguityError(Exception):
"""
Raised when more than one migration matches a name prefix.
"""
"""More than one migration matches a name prefix."""
pass
class BadMigrationError(Exception):
"""
Raised when there's a bad migration (unreadable/bad format/etc.).
"""
"""There's a bad migration (unreadable/bad format/etc.)."""
pass
class CircularDependencyError(Exception):
"""
Raised when there's an impossible-to-resolve circular dependency.
"""
"""There's an impossible-to-resolve circular dependency."""
pass
class InconsistentMigrationHistory(Exception):
"""
Raised when an applied migration has some of its dependencies not applied.
"""
"""An applied migration has some of its dependencies not applied."""
pass
class InvalidBasesError(ValueError):
"""
Raised when a model's base classes can't be resolved.
"""
"""A model's base classes can't be resolved."""
pass
class IrreversibleError(RuntimeError):
"""
Raised when a irreversible migration is about to be reversed.
"""
"""An irreversible migration is about to be reversed."""
pass
class NodeNotFoundError(LookupError):
"""
Raised when an attempt on a node is made that is not available in the graph.
"""
"""An attempt on a node is made that is not available in the graph."""
def __init__(self, message, node, origin=None):
self.message = message

View File

@ -9,8 +9,8 @@ from .state import ProjectState
class MigrationExecutor:
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
End-to-end migration execution - load migrations and run them up or down
to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
@ -21,7 +21,7 @@ class MigrationExecutor:
def migration_plan(self, targets, clean_start=False):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
Given a set of targets, return a list of (Migration instance, backwards?).
"""
plan = []
if clean_start:
@ -81,7 +81,7 @@ class MigrationExecutor:
def migrate(self, targets, plan=None, state=None, fake=False, fake_initial=False):
"""
Migrates the database up to the given targets.
Migrate the database up to the given targets.
Django first needs to create all project states before a migration is
(un)applied and in a second step run all the database operations.
@ -208,8 +208,8 @@ class MigrationExecutor:
def collect_sql(self, plan):
"""
Takes a migration plan and returns a list of collected SQL
statements that represent the best-efforts version of that plan.
Take a migration plan and return a list of collected SQL statements
that represent the best-efforts version of that plan.
"""
statements = []
state = None
@ -225,9 +225,7 @@ class MigrationExecutor:
return statements
def apply_migration(self, state, migration, fake=False, fake_initial=False):
"""
Runs a migration forwards.
"""
"""Run a migration forwards."""
if self.progress_callback:
self.progress_callback("apply_start", migration, fake)
if not fake:
@ -252,9 +250,7 @@ class MigrationExecutor:
return state
def unapply_migration(self, state, migration, fake=False):
"""
Runs a migration backwards.
"""
"""Run a migration backwards."""
if self.progress_callback:
self.progress_callback("unapply_start", migration, fake)
if not fake:
@ -275,12 +271,12 @@ class MigrationExecutor:
"""
Mark replacement migrations applied if their replaced set all are.
We do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, so as to correctly handle the case
Do this unconditionally on every migrate, rather than just when
migrations are applied or unapplied, to correctly handle the case
when a new squash migration is pushed to a deployment that already had
all its replaced migrations applied. In this case no new migration will
be applied, but we still want to correctly maintain the applied state
of the squash migration.
be applied, but the applied state of the squashed migration must be
maintained.
"""
applied = self.recorder.applied_migrations()
for key, migration in self.loader.replacements.items():
@ -290,7 +286,7 @@ class MigrationExecutor:
def detect_soft_applied(self, project_state, migration):
"""
Tests whether a migration has been implicitly applied - that the
Test whether a migration has been implicitly applied - that the
tables or columns it would create exist. This is intended only for use
on initial migrations (as it only looks for CreateModel and AddField).
"""

View File

@ -97,7 +97,7 @@ class DummyNode(Node):
class MigrationGraph:
"""
Represents the digraph of all migrations in a project.
Represent the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
@ -142,8 +142,9 @@ class MigrationGraph:
def add_dependency(self, migration, child, parent, skip_validation=False):
"""
This may create dummy nodes if they don't yet exist.
If `skip_validation` is set, validate_consistency should be called afterwards.
This may create dummy nodes if they don't yet exist. If
`skip_validation=True`, validate_consistency() should be called
afterwards.
"""
if child not in self.nodes:
error_message = (
@ -165,7 +166,7 @@ class MigrationGraph:
def remove_replaced_nodes(self, replacement, replaced):
"""
Removes each of the `replaced` nodes (when they exist). Any
Remove each of the `replaced` nodes (when they exist). Any
dependencies that were referencing them are changed to reference the
`replacement` node instead.
"""
@ -201,10 +202,10 @@ class MigrationGraph:
def remove_replacement_node(self, replacement, replaced):
"""
The inverse operation to `remove_replaced_nodes`. Almost. Removes the
replacement node `replacement` and remaps its child nodes to
`replaced` - the list of nodes it would have replaced. Its parent
nodes are not remapped as they are expected to be correct already.
The inverse operation to `remove_replaced_nodes`. Almost. Remove the
replacement node `replacement` and remap its child nodes to `replaced`
- the list of nodes it would have replaced. Don't remap its parent
nodes as they are expected to be correct already.
"""
self.nodes.pop(replacement, None)
try:
@ -237,9 +238,7 @@ class MigrationGraph:
self.clear_cache()
def validate_consistency(self):
"""
Ensure there are no dummy nodes remaining in the graph.
"""
"""Ensure there are no dummy nodes remaining in the graph."""
[n.raise_error() for n in self.node_map.values() if isinstance(n, DummyNode)]
def clear_cache(self):
@ -251,10 +250,9 @@ class MigrationGraph:
def forwards_plan(self, target):
"""
Given a node, returns a list of which previous nodes (dependencies)
must be applied, ending with the node itself.
This is the list you would follow if applying the migrations to
a database.
Given a node, return a list of which previous nodes (dependencies) must
be applied, ending with the node itself. This is the list you would
follow if applying the migrations to a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
@ -271,10 +269,9 @@ class MigrationGraph:
def backwards_plan(self, target):
"""
Given a node, returns a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself.
This is the list you would follow if removing the migrations from
a database.
Given a node, return a list of which dependent nodes (dependencies)
must be unapplied, ending with the node itself. This is the list you
would follow if removing the migrations from a database.
"""
if target not in self.nodes:
raise NodeNotFoundError("Node %r not a valid node" % (target, ), target)
@ -290,9 +287,7 @@ class MigrationGraph:
return self.iterative_dfs(node, forwards=False)
def iterative_dfs(self, start, forwards=True):
"""
Iterative depth first search, for finding dependencies.
"""
"""Iterative depth-first search for finding dependencies."""
visited = deque()
visited.append(start)
if forwards:
@ -314,7 +309,7 @@ class MigrationGraph:
def root_nodes(self, app=None):
"""
Returns all root nodes - that is, nodes with no dependencies inside
Return all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
@ -325,7 +320,7 @@ class MigrationGraph:
def leaf_nodes(self, app=None):
"""
Returns all leaf nodes - that is, nodes with no dependents in their app.
Return all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
@ -369,9 +364,9 @@ class MigrationGraph:
def make_state(self, nodes=None, at_end=True, real_apps=None):
"""
Given a migration node or nodes, returns a complete ProjectState for it.
If at_end is False, returns the state before the migration has run.
If nodes is not provided, returns the overall most current project state.
Given a migration node or nodes, return a complete ProjectState for it.
If at_end is False, return the state before the migration has run.
If nodes is not provided, return the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
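
The plan methods above boil down to an iterative depth-first walk over each node's dependencies; a generic stand-alone sketch (parents is a hypothetical dict, not the class's node objects):

from collections import deque

def forwards_plan_sketch(target, parents):
    # Walk dependencies depth-first, prepending each visit; keeping only
    # the first (leftmost) occurrence puts every dependency before its
    # dependents, ending with the target itself.
    visited = deque([target])
    stack = deque(parents.get(target, ()))
    while stack:
        node = stack.popleft()
        visited.appendleft(node)
        stack.extendleft(parents.get(node, ()))
    seen, plan = set(), []
    for node in visited:
        if node not in seen:
            seen.add(node)
            plan.append(node)
    return plan

deps = {'0002_b': ['0001_a'], '0003_c': ['0002_b']}
assert forwards_plan_sketch('0003_c', deps) == ['0001_a', '0002_b', '0003_c']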

View File

@ -17,7 +17,7 @@ MIGRATIONS_MODULE_NAME = 'migrations'
class MigrationLoader:
"""
Loads migration files from disk, and their status from the database.
Load migration files from disk and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
@ -62,9 +62,7 @@ class MigrationLoader:
return '%s.%s' % (app_package_name, MIGRATIONS_MODULE_NAME), False
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
"""Load the migrations from all INSTALLED_APPS from disk."""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
@ -119,11 +117,13 @@ class MigrationLoader:
)
def get_migration(self, app_label, name_prefix):
"Gets the migration exactly named, or raises `graph.NodeNotFoundError`"
"""Return the named migration or raise NodeNotFoundError."""
return self.graph.nodes[app_label, name_prefix]
def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
"""
Return the migration(s) which match the given app label and name_prefix.
"""
# Do the search
results = []
for migration_app_label, migration_name in self.disk_migrations:
@ -192,7 +192,7 @@ class MigrationLoader:
def build_graph(self):
"""
Builds a migration dependency graph using both the disk and database.
Build a migration dependency graph using both the disk and database.
You'll need to rebuild the graph if you apply migrations. This isn't
usually a problem as generally migration stuff runs in a one-shot process.
"""
@ -294,8 +294,8 @@ class MigrationLoader:
def detect_conflicts(self):
"""
Looks through the loaded graph and detects any conflicts - apps
with more than one leaf migration. Returns a dict of the app labels
Look through the loaded graph and detect any conflicts - apps
with more than one leaf migration. Return a dict mapping each
conflicting app label to the names of its conflicting migrations.
"""
seen_apps = {}
@ -308,9 +308,9 @@ class MigrationLoader:
def project_state(self, nodes=None, at_end=True):
"""
Returns a ProjectState object representing the most recent state
that the migrations we loaded represent.
Return a ProjectState object representing the most recent state
that the loaded migrations represent.
See graph.make_state for the meaning of "nodes" and "at_end"
See graph.make_state() for the meaning of "nodes" and "at_end".
"""
return self.graph.make_state(nodes=nodes, at_end=at_end, real_apps=list(self.unmigrated_apps))
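
A hedged usage sketch of the loader API described here; the connection alias is illustrative:

from django.db import connections
from django.db.migrations.loader import MigrationLoader

loader = MigrationLoader(connections['default'])
leaves = loader.graph.leaf_nodes()     # most current migration per app
conflicts = loader.detect_conflicts()  # apps with more than one leaf
state = loader.project_state()         # ProjectState at the leaves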

View File

@ -73,9 +73,9 @@ class Migration:
def mutate_state(self, project_state, preserve=True):
"""
Takes a ProjectState and returns a new one with the migration's
operations applied to it. Preserves the original object state by
default and will return a mutated state from a copy.
Take a ProjectState and return a new one with the migration's
operations applied to it. Preserve the original object state by
default and return a mutated state from a copy.
"""
new_state = project_state
if preserve:
@ -87,11 +87,11 @@ class Migration:
def apply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
Take a project_state representing all migrations prior to this one
and a schema_editor for a live database and apply the migration
in a forwards order.
Returns the resulting project state for efficient re-use by following
Return the resulting project state for efficient reuse by following
Migrations.
"""
for operation in self.operations:
@ -124,8 +124,8 @@ class Migration:
def unapply(self, project_state, schema_editor, collect_sql=False):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
Take a project_state representing all migrations prior to this one
and a schema_editor for a live database and apply the migration
in a reverse order.
The backwards migration process consists of two phases:
@ -185,7 +185,5 @@ class SwappableTuple(tuple):
def swappable_dependency(value):
"""
Turns a setting value into a dependency.
"""
"""Turn a setting value into a dependency."""
return SwappableTuple((value.split(".", 1)[0], "__first__"), value)
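
A quick usage sketch: the helper yields an (app_label, "__first__") dependency that also remembers the originating setting value on the SwappableTuple:

dep = swappable_dependency('auth.User')
assert tuple(dep) == ('auth', '__first__')
assert dep.setting == 'auth.User'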

View File

@ -41,7 +41,7 @@ class Operation:
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
Return a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
@ -53,21 +53,21 @@ class Operation:
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
Take the state from the previous migration, and mutate it
so that it matches what this migration would perform.
"""
raise NotImplementedError('subclasses of Operation must provide a state_forwards() method')
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
Perform the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError('subclasses of Operation must provide a database_forwards() method')
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
Perform the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
@ -75,13 +75,13 @@ class Operation:
def describe(self):
"""
Outputs a brief summary of what the action does.
Output a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)
def references_model(self, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
Return True if there is a chance this operation references the given
model name (as a string), with an optional app label for accuracy.
Used for optimization. If in doubt, return True;
@ -93,7 +93,7 @@ class Operation:
def references_field(self, model_name, name, app_label=None):
"""
Returns True if there is a chance this operation references the given
Return True if there is a chance this operation references the given
field name, with an optional app label for accuracy.
Used for optimization. If in doubt, return True.
@ -102,7 +102,7 @@ class Operation:
def allow_migrate_model(self, connection_alias, model):
"""
Returns if we're allowed to migrate the model.
Return whether or not a model may be migrated.
This is a thin wrapper around router.allow_migrate_model() that
preemptively rejects any proxy, swapped out, or unmanaged model.

View File

@ -37,9 +37,7 @@ class FieldOperation(Operation):
class AddField(FieldOperation):
"""
Adds a field to a model.
"""
"""Add a field to a model."""
def __init__(self, model_name, name, field, preserve_default=True):
self.field = field
@ -118,9 +116,7 @@ class AddField(FieldOperation):
class RemoveField(FieldOperation):
"""
Removes a field from a model.
"""
"""Remove a field from a model."""
def deconstruct(self):
kwargs = {
@ -163,7 +159,8 @@ class RemoveField(FieldOperation):
class AlterField(FieldOperation):
"""
Alters a field's database column (e.g. null, max_length) to the provided new field
Alter a field's database column (e.g. null, max_length) to the provided
new field.
"""
def __init__(self, model_name, name, field, preserve_default=True):
@ -236,9 +233,7 @@ class AlterField(FieldOperation):
class RenameField(FieldOperation):
"""
Renames a field on the model. Might affect db_column too.
"""
"""Rename a field on the model. Might affect db_column too."""
def __init__(self, model_name, old_name, new_name):
self.old_name = old_name
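
A hedged sketch of these field operations inside a migration file; the app, model, and field names are illustrative:

from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [('library', '0001_initial')]  # hypothetical app
    operations = [
        migrations.AddField('book', 'subtitle',
                            models.CharField(max_length=100, null=True)),
        migrations.RenameField('book', 'name', 'title'),
        migrations.RemoveField('book', 'legacy_code'),
    ]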

View File

@ -39,9 +39,7 @@ class ModelOperation(Operation):
class CreateModel(ModelOperation):
"""
Create a model's table.
"""
"""Create a model's table."""
serialization_expand_args = ['fields', 'options', 'managers']
@ -227,9 +225,7 @@ class CreateModel(ModelOperation):
class DeleteModel(ModelOperation):
"""
Drops a model's table.
"""
"""Drop a model's table."""
def deconstruct(self):
kwargs = {
@ -259,9 +255,7 @@ class DeleteModel(ModelOperation):
class RenameModel(ModelOperation):
"""
Renames a model.
"""
"""Rename a model."""
def __init__(self, old_name, new_name):
self.old_name = old_name
@ -423,9 +417,7 @@ class RenameModel(ModelOperation):
class AlterModelTable(ModelOperation):
"""
Renames a model's table
"""
"""Rename a model's table."""
def __init__(self, name, table):
self.table = table
@ -497,7 +489,7 @@ class FieldRelatedOptionOperation(ModelOptionOperation):
class AlterUniqueTogether(FieldRelatedOptionOperation):
"""
Changes the value of unique_together to the target one.
Change the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
option_name = "unique_together"
@ -551,7 +543,7 @@ class AlterUniqueTogether(FieldRelatedOptionOperation):
class AlterIndexTogether(FieldRelatedOptionOperation):
"""
Changes the value of index_together to the target one.
Change the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
option_name = "index_together"
@ -604,9 +596,7 @@ class AlterIndexTogether(FieldRelatedOptionOperation):
class AlterOrderWithRespectTo(FieldRelatedOptionOperation):
"""
Represents a change with the order_with_respect_to option.
"""
"""Represent a change with the order_with_respect_to option."""
def __init__(self, name, order_with_respect_to):
self.order_with_respect_to = order_with_respect_to
@ -664,7 +654,7 @@ class AlterOrderWithRespectTo(FieldRelatedOptionOperation):
class AlterModelOptions(ModelOptionOperation):
"""
Sets new model options that don't directly affect the database schema
Set new model options that don't directly affect the database schema
(like verbose_name, permissions, ordering). Python code in migrations
may still need them.
"""
@ -718,9 +708,7 @@ class AlterModelOptions(ModelOptionOperation):
class AlterModelManagers(ModelOptionOperation):
"""
Alters the model's managers
"""
"""Alter the model's managers."""
serialization_expand_args = ['managers']
@ -759,9 +747,7 @@ class IndexOperation(Operation):
class AddIndex(IndexOperation):
"""
Add an index on a model.
"""
"""Add an index on a model."""
def __init__(self, model_name, index):
self.model_name = model_name
@ -806,9 +792,7 @@ class AddIndex(IndexOperation):
class RemoveIndex(IndexOperation):
"""
Remove an index from a model.
"""
"""Remove an index from a model."""
def __init__(self, model_name, name):
self.model_name = model_name

View File

@ -5,7 +5,7 @@ from .base import Operation
class SeparateDatabaseAndState(Operation):
"""
Takes two lists of operations - ones that will be used for the database,
Take two lists of operations - ones that will be used for the database,
and ones that will be used for the state change. This allows operations
that don't support state change to have it applied, or have operations
that affect the state or not the database, or so on.
@ -62,9 +62,9 @@ class SeparateDatabaseAndState(Operation):
class RunSQL(Operation):
"""
Runs some raw SQL. A reverse SQL statement may be provided.
Run some raw SQL. A reverse SQL statement may be provided.
Also accepts a list of operations that represent the state change effected
Also accept a list of operations that represent the state change effected
by this SQL change, in case it's custom column/table creation/deletion.
"""
noop = ''
@ -132,7 +132,7 @@ class RunSQL(Operation):
class RunPython(Operation):
"""
Runs Python code in a context suitable for doing versioned ORM operations.
Run Python code in a context suitable for doing versioned ORM operations.
"""
reduces_to_sql = False
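
A hedged usage sketch of the two escape-hatch operations; the SQL, app, and model names are illustrative:

from django.db import migrations

def forwards(apps, schema_editor):
    # Use the historical model from the migration state, not an import.
    Book = apps.get_model('library', 'Book')  # hypothetical model
    Book.objects.filter(title='').update(title='Untitled')

operations = [
    migrations.RunSQL(
        sql="CREATE INDEX idx_book_title ON library_book (title);",
        reverse_sql="DROP INDEX idx_book_title;",
    ),
    migrations.RunPython(forwards, migrations.RunPython.noop),
]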

View File

@ -1,6 +1,6 @@
class MigrationOptimizer:
"""
Powers the optimization process, where you provide a list of Operations
Power the optimization process, where you provide a list of Operations
and get back a list of equal or shorter length - operations
are merged into one if possible.
@ -39,9 +39,7 @@ class MigrationOptimizer:
operations = result
def optimize_inner(self, operations, app_label=None):
"""
Inner optimization loop.
"""
"""Inner optimization loop."""
new_operations = []
for i, operation in enumerate(operations):
# Compare it to each operation after it
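
A sketch of the kind of contraction the optimizer performs: creating and then deleting the same model collapses to nothing (model and app names illustrative):

from django.db import migrations, models
from django.db.migrations.optimizer import MigrationOptimizer

ops = [
    migrations.CreateModel('Temp', [('id', models.AutoField(primary_key=True))]),
    migrations.DeleteModel('Temp'),
]
assert MigrationOptimizer().optimize(ops, app_label='library') == []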

View File

@ -11,7 +11,7 @@ from .loader import MigrationLoader
class MigrationQuestioner:
"""
Gives the autodetector responses to questions it might have.
Give the autodetector responses to questions it might have.
This base class has a built-in noninteractive mode, but the
interactive subclass is what the command-line arguments will use.
"""
@ -22,7 +22,7 @@ class MigrationQuestioner:
self.dry_run = dry_run
def ask_initial(self, app_label):
"Should we create an initial migration for the app?"
"""Should we create an initial migration for the app?"""
# If it was specified on the command line, definitely true
if app_label in self.specified_apps:
return True
@ -52,29 +52,29 @@ class MigrationQuestioner:
return not any(x.endswith(".py") for x in filenames if x != "__init__.py")
def ask_not_null_addition(self, field_name, model_name):
"Adding a NOT NULL field to a model"
"""Adding a NOT NULL field to a model."""
# None means quit
return None
def ask_not_null_alteration(self, field_name, model_name):
"Changing a NULL field to NOT NULL"
"""Changing a NULL field to NOT NULL."""
# None means quit
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"Was this field really renamed?"
"""Was this field really renamed?"""
return self.defaults.get("ask_rename", False)
def ask_rename_model(self, old_model_state, new_model_state):
"Was this model really renamed?"
"""Was this model really renamed?"""
return self.defaults.get("ask_rename_model", False)
def ask_merge(self, app_label):
"Do you really want to merge these migrations?"
"""Do you really want to merge these migrations?"""
return self.defaults.get("ask_merge", False)
def ask_auto_now_add_addition(self, field_name, model_name):
"Adding an auto_now_add field to a model"
"""Adding an auto_now_add field to a model."""
# None means quit
return None
@ -138,7 +138,7 @@ class InteractiveMigrationQuestioner(MigrationQuestioner):
print("Invalid input: %s" % e)
def ask_not_null_addition(self, field_name, model_name):
"Adding a NOT NULL field to a model"
"""Adding a NOT NULL field to a model."""
if not self.dry_run:
choice = self._choice_input(
"You are trying to add a non-nullable field '%s' to %s without a default; "
@ -157,7 +157,7 @@ class InteractiveMigrationQuestioner(MigrationQuestioner):
return None
def ask_not_null_alteration(self, field_name, model_name):
"Changing a NULL field to NOT NULL"
"""Changing a NULL field to NOT NULL."""
if not self.dry_run:
choice = self._choice_input(
"You are trying to change the nullable field '%s' on %s to non-nullable "
@ -182,13 +182,13 @@ class InteractiveMigrationQuestioner(MigrationQuestioner):
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"Was this field really renamed?"
"""Was this field really renamed?"""
msg = "Did you rename %s.%s to %s.%s (a %s)? [y/N]"
return self._boolean_input(msg % (model_name, old_name, model_name, new_name,
field_instance.__class__.__name__), False)
def ask_rename_model(self, old_model_state, new_model_state):
"Was this model really renamed?"
"""Was this model really renamed?"""
msg = "Did you rename the %s.%s model to %s? [y/N]"
return self._boolean_input(msg % (old_model_state.app_label, old_model_state.name,
new_model_state.name), False)
@ -202,7 +202,7 @@ class InteractiveMigrationQuestioner(MigrationQuestioner):
)
def ask_auto_now_add_addition(self, field_name, model_name):
"Adding an auto_now_add field to a model"
"""Adding an auto_now_add field to a model."""
if not self.dry_run:
choice = self._choice_input(
"You are trying to add the field '{}' with 'auto_now_add=True' "

View File

@ -8,7 +8,7 @@ from .exceptions import MigrationSchemaMissing
class MigrationRecorder:
"""
Deals with storing migration records in the database.
Deal with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via migrations.
@ -40,9 +40,7 @@ class MigrationRecorder:
return self.Migration.objects.using(self.connection.alias)
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
"""Ensure the table exists and has the correct schema."""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.table_names(self.connection.cursor()):
@ -55,28 +53,20 @@ class MigrationRecorder:
raise MigrationSchemaMissing("Unable to create the django_migrations table (%s)" % exc)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
"""Return a set of (app, name) of applied migrations."""
self.ensure_schema()
return set(tuple(x) for x in self.migration_qs.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
"""Record that a migration was applied."""
self.ensure_schema()
self.migration_qs.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
"""Record that a migration was unapplied."""
self.ensure_schema()
self.migration_qs.filter(app=app, name=name).delete()
def flush(self):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
"""Delete all migration records. Useful for testing migrations."""
self.migration_qs.all().delete()
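
A hedged usage sketch of the recorder API above; app and migration names are illustrative:

from django.db import connections
from django.db.migrations.recorder import MigrationRecorder

recorder = MigrationRecorder(connections['default'])
recorder.record_applied('library', '0001_initial')
assert ('library', '0001_initial') in recorder.applied_migrations()
recorder.record_unapplied('library', '0001_initial')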

View File

@ -27,9 +27,7 @@ def _get_app_label_and_model_name(model, app_label=''):
def _get_related_models(m):
"""
Return all models that have a direct relationship to the given model.
"""
"""Return all models that have a direct relationship to the given model."""
related_models = [
subclass for subclass in m.__subclasses__()
if issubclass(subclass, models.Model)
@ -82,9 +80,9 @@ def get_related_models_recursive(model):
class ProjectState:
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
Represent the entire project's overall state. This is the item that is
passed around - state is kept here rather than at the app level so that
cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None, real_apps=None):
@ -194,7 +192,7 @@ class ProjectState:
self.apps.render_multiple(states_to_be_rendered)
def clone(self):
"Returns an exact copy of this ProjectState"
"""Return an exact copy of this ProjectState."""
new_state = ProjectState(
models={k: v.clone() for k, v in self.models.items()},
real_apps=self.real_apps,
@ -219,7 +217,7 @@ class ProjectState:
@classmethod
def from_apps(cls, apps):
"Takes in an Apps and returns a ProjectState matching it"
"""Take an Apps and return a ProjectState matching it."""
app_models = {}
for model in apps.get_models(include_swapped=True):
model_state = ModelState.from_model(model)
@ -235,9 +233,7 @@ class ProjectState:
class AppConfigStub(AppConfig):
"""
Stubs a Django AppConfig. Only provides a label, and a dict of models.
"""
"""Stub of an AppConfig. Only provides a label and a dict of models."""
# Not used, but required by AppConfig.__init__
path = ''
@ -325,9 +321,7 @@ class StateApps(Apps):
unrendered_models = new_unrendered_models
def clone(self):
"""
Return a clone of this registry, mainly used by the migration framework.
"""
"""Return a clone of this registry."""
clone = StateApps([], {})
clone.all_models = copy.deepcopy(self.all_models)
clone.app_configs = copy.deepcopy(self.app_configs)
@ -358,9 +352,9 @@ class StateApps(Apps):
class ModelState:
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Represent a Django Model. Don't use the actual Model class as it's not
designed to have its options changed - instead, mutate this one and then
render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
@ -409,9 +403,7 @@ class ModelState:
@classmethod
def from_model(cls, model, exclude_rels=False):
"""
Feed me a model, get a ModelState representing it out.
"""
"""Given a model, return a ModelState representing it."""
# Deconstruct the fields
fields = []
for field in model._meta.local_fields:
@ -532,7 +524,7 @@ class ModelState:
)
def construct_managers(self):
"Deep-clone the managers using deconstruction"
"""Deep-clone the managers using deconstruction."""
# Sort all managers by their creation counter
sorted_managers = sorted(self.managers, key=lambda v: v[1].creation_counter)
for mgr_name, manager in sorted_managers:
@ -546,7 +538,7 @@ class ModelState:
yield mgr_name, manager_class(*args, **kwargs)
def clone(self):
"Returns an exact copy of this ModelState"
"""Return an exact copy of this ModelState."""
return self.__class__(
app_label=self.app_label,
name=self.name,
@ -557,7 +549,7 @@ class ModelState:
)
def render(self, apps):
"Creates a Model object from our current state into the given apps"
"""Create a Model object from our current state into the given apps."""
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "apps": apps}
meta_contents.update(self.options)
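To show how ProjectState, ModelState, and StateApps fit together, a hedged
sketch of snapshotting and cloning project state:

from django.apps import apps
from django.db.migrations.state import ProjectState

state = ProjectState.from_apps(apps)  # a ModelState per installed model
working = state.clone()              # mutate the clone; the original is untouched
registry = working.apps              # StateApps: models rendered on demand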


@ -1,9 +1,10 @@
def topological_sort_as_sets(dependency_graph):
"""Variation of Kahn's algorithm (1962) that returns sets.
"""
Variation of Kahn's algorithm (1962) that returns sets.
Takes a dependency graph as a dictionary of node => dependencies.
Take a dependency graph as a dictionary of node => dependencies.
Yields sets of items in topological order, where the first set contains
Yield sets of items in topological order, where the first set contains
all nodes without dependencies, and each following set contains all
nodes that may depend on the nodes only in the previously yielded sets.
"""


@ -129,7 +129,7 @@ class OperationWriter:
class MigrationWriter:
"""
Takes a Migration instance and is able to produce the contents
Take a Migration instance and produce the contents
of the migration file from it.
"""
@ -138,9 +138,7 @@ class MigrationWriter:
self.needs_manual_porting = False
def as_string(self):
"""
Returns a string of the file contents.
"""
"""Return a string of the file contents."""
items = {
"replaces_str": "",
"initial_str": "",


@ -69,9 +69,7 @@ def subclass_exception(name, parents, module, attached_to=None):
class ModelBase(type):
"""
Metaclass for all models.
"""
"""Metaclass for all models."""
def __new__(cls, name, bases, attrs):
super_new = super().__new__
@ -322,9 +320,7 @@ class ModelBase(type):
setattr(cls, name, value)
def _prepare(cls):
"""
Creates some methods once self._meta has been populated.
"""
"""Create some methods once self._meta has been populated."""
opts = cls._meta
opts._prepare(cls)
@ -372,9 +368,7 @@ class ModelBase(type):
class ModelState:
"""
A class for storing instance state
"""
"""Store model instance state."""
def __init__(self, db=None):
self.db = db
# If true, uniqueness validation checks will consider this a new, as-yet-unsaved object.
@ -561,7 +555,7 @@ class Model(metaclass=ModelBase):
def get_deferred_fields(self):
"""
Returns a set containing names of deferred fields for this instance.
Return a set containing names of deferred fields for this instance.
"""
return {
f.attname for f in self._meta.concrete_fields
@ -570,7 +564,7 @@ class Model(metaclass=ModelBase):
def refresh_from_db(self, using=None, fields=None):
"""
Reloads field values from the database.
Reload field values from the database.
By default, the reloading happens from the database this instance was
loaded from, or by the read router if this instance wasn't loaded from
@ -622,10 +616,10 @@ class Model(metaclass=ModelBase):
def serializable_value(self, field_name):
"""
Returns the value of the field name for this instance. If the field is
a foreign key, returns the id value, instead of the object. If there's
no Field object with this name on the model, the model attribute's
value is returned directly.
Return the value of the field name for this instance. If the field is
a foreign key, return the id value instead of the object. If there's
no Field object with this name on the model, return the model
attribute's value.
Used to serialize a field's value (in the serializer, or form output,
for example). Normally, you would just access the attribute directly
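A hedged illustration of the lookup order described above (entry and its
'blog' foreign key are hypothetical):

entry.serializable_value('blog')        # FK: the related pk, not the Blog object
entry.serializable_value('headline')    # regular field: the stored value
entry.serializable_value('word_count')  # no Field by that name: plain attribute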
@ -640,7 +634,7 @@ class Model(metaclass=ModelBase):
def save(self, force_insert=False, force_update=False, using=None,
update_fields=None):
"""
Saves the current instance. Override this in a subclass if you want to
Save the current instance. Override this in a subclass if you want to
control the saving process.
The 'force_insert' and 'force_update' parameters can be used to insist
@ -721,7 +715,7 @@ class Model(metaclass=ModelBase):
def save_base(self, raw=False, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Handles the parts of saving which should be done only once per save,
Handle the parts of saving which should be done only once per save,
yet need to be done in raw saves, too. This includes some sanity
checks and signal sending.
@ -761,9 +755,7 @@ class Model(metaclass=ModelBase):
save_base.alters_data = True
def _save_parents(self, cls, using, update_fields):
"""
Saves all the parents of cls using values from self.
"""
"""Save all the parents of cls using values from self."""
meta = cls._meta
for parent, field in meta.parents.items():
# Make sure the link fields are synced between parent and self.
@ -787,7 +779,7 @@ class Model(metaclass=ModelBase):
def _save_table(self, raw=False, cls=None, force_insert=False,
force_update=False, using=None, update_fields=None):
"""
Does the heavy-lifting involved in saving. Updates or inserts the data
Do the heavy-lifting involved in saving. Update or insert the data
for a single table.
"""
meta = cls._meta
@ -838,9 +830,8 @@ class Model(metaclass=ModelBase):
def _do_update(self, base_qs, using, pk_val, values, update_fields, forced_update):
"""
This method will try to update the model. If the model was updated (in
the sense that an update query was done and a matching row was found
from the DB) the method will return True.
Try to update the model. Return True if the model was updated (if an
update query was done and a matching row was found in the DB).
"""
filtered = base_qs.filter(pk=pk_val)
if not values:
@ -936,8 +927,8 @@ class Model(metaclass=ModelBase):
def validate_unique(self, exclude=None):
"""
Checks unique constraints on the model and raises ``ValidationError``
if any failed.
Check unique constraints on the model and raise ValidationError if any
failed.
"""
unique_checks, date_checks = self._get_unique_checks(exclude=exclude)
@ -952,12 +943,11 @@ class Model(metaclass=ModelBase):
def _get_unique_checks(self, exclude=None):
"""
Gather a list of checks to perform. Since validate_unique could be
Return a list of checks to perform. Since validate_unique() could be
called from a ModelForm, some fields may have been excluded; we can't
perform a unique check on a model that is missing fields involved
in that check.
Fields that did not validate should also be excluded, but they need
to be passed in via the exclude argument.
in that check. Fields that did not validate should also be excluded,
but they need to be passed in via the exclude argument.
"""
if exclude is None:
exclude = []
@ -1125,8 +1115,8 @@ class Model(metaclass=ModelBase):
def full_clean(self, exclude=None, validate_unique=True):
"""
Calls clean_fields, clean, and validate_unique, on the model,
and raises a ``ValidationError`` for any errors that occurred.
Call clean_fields(), clean(), and validate_unique() on the model.
Raise a ValidationError for any errors that occur.
"""
errors = {}
if exclude is None:
@ -1161,7 +1151,7 @@ class Model(metaclass=ModelBase):
def clean_fields(self, exclude=None):
"""
Cleans all fields and raises a ValidationError containing a dict
Clean all fields and raise a ValidationError containing a dict
of all validation errors if any occur.
"""
if exclude is None:
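As described above, full_clean() is the umbrella entry point for validation.
A hedged usage sketch with a hypothetical model instance:

from django.core.exceptions import ValidationError

try:
    # Runs clean_fields(), clean(), and validate_unique(), in that order.
    instance.full_clean(exclude=['slug'], validate_unique=True)
except ValidationError as e:
    print(e.message_dict)  # {field_name: [messages, ...], ...}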
@ -1212,8 +1202,7 @@ class Model(metaclass=ModelBase):
@classmethod
def _check_swappable(cls):
""" Check if the swapped model exists. """
"""Check if the swapped model exists."""
errors = []
if cls._meta.swapped:
try:
@ -1253,8 +1242,7 @@ class Model(metaclass=ModelBase):
@classmethod
def _check_managers(cls, **kwargs):
""" Perform all manager checks. """
"""Perform all manager checks."""
errors = []
for manager in cls._meta.managers:
errors.extend(manager.check(**kwargs))
@ -1262,8 +1250,7 @@ class Model(metaclass=ModelBase):
@classmethod
def _check_fields(cls, **kwargs):
""" Perform all field checks. """
"""Perform all field checks."""
errors = []
for field in cls._meta.local_fields:
errors.extend(field.check(**kwargs))
@ -1304,7 +1291,7 @@ class Model(metaclass=ModelBase):
@classmethod
def _check_id_field(cls):
""" Check if `id` field is a primary key. """
"""Check if `id` field is a primary key."""
fields = list(f for f in cls._meta.local_fields if f.name == 'id' and f != cls._meta.pk)
# fields is empty or consists of the invalid "id" field
if fields and not fields[0].primary_key and cls._meta.pk.name == 'id':
@ -1321,8 +1308,7 @@ class Model(metaclass=ModelBase):
@classmethod
def _check_field_name_clashes(cls):
""" Ref #17673. """
"""Forbid field shadowing in multi-table inheritance."""
errors = []
used_fields = {} # name or attname -> field
@ -1428,7 +1414,7 @@ class Model(metaclass=ModelBase):
@classmethod
def _check_index_together(cls):
""" Check the value of "index_together" option. """
"""Check the value of "index_together" option."""
if not isinstance(cls._meta.index_together, (tuple, list)):
return [
checks.Error(
@ -1455,7 +1441,7 @@ class Model(metaclass=ModelBase):
@classmethod
def _check_unique_together(cls):
""" Check the value of "unique_together" option. """
"""Check the value of "unique_together" option."""
if not isinstance(cls._meta.unique_together, (tuple, list)):
return [
checks.Error(
@ -1530,8 +1516,10 @@ class Model(metaclass=ModelBase):
@classmethod
def _check_ordering(cls):
""" Check "ordering" option -- is it a list of strings and do all fields
exist? """
"""
Check "ordering" option -- is it a list of strings and do all fields
exist?
"""
if cls._meta._ordering_clash:
return [
checks.Error(
@ -1710,9 +1698,7 @@ def make_foreign_order_accessors(model, related_model):
def model_unpickle(model_id):
"""
Used to unpickle Model subclasses with deferred fields.
"""
"""Used to unpickle Model subclasses with deferred fields."""
if isinstance(model_id, tuple):
model = apps.get_model(*model_id)
else:


@ -79,11 +79,11 @@ class Collector:
def add(self, objs, source=None, nullable=False, reverse_dependency=False):
"""
Adds 'objs' to the collection of objects to be deleted. If the call is
Add 'objs' to the collection of objects to be deleted. If the call is
the result of a cascade, 'source' should be the model that caused it,
and 'nullable' should be set to True if the relation can be null.
Returns a list of all objects that were not already collected.
Return a list of all objects that were not already collected.
"""
if not objs:
return []
@ -106,7 +106,7 @@ class Collector:
def add_field_update(self, field, value, objs):
"""
Schedules a field update. 'objs' must be a homogeneous iterable
Schedule a field update. 'objs' must be a homogeneous iterable
collection of model instances (e.g. a QuerySet).
"""
if not objs:
@ -118,12 +118,12 @@ class Collector:
def can_fast_delete(self, objs, from_field=None):
"""
Determines if the objects in the given queryset-like can be
Determine if the objects in the given queryset-like can be
fast-deleted. This can be done if there are no cascades, no
parents and no signal listeners for the object class.
The 'from_field' tells where we are coming from - we need this to
determine if the objects are in fact to be deleted. Allows also
determine if the objects are in fact to be deleted. Also allow
skipping parent -> child -> parent chain preventing fast delete of
the child.
"""
@ -154,7 +154,7 @@ class Collector:
def get_del_batches(self, objs, field):
"""
Returns the objs in suitably sized batches for the used connection.
Return the objs in suitably sized batches for the used connection.
"""
conn_batch_size = max(
connections[self.using].ops.bulk_batch_size([field.name], objs), 1)
@ -167,7 +167,7 @@ class Collector:
def collect(self, objs, source=None, nullable=False, collect_related=True,
source_attr=None, reverse_dependency=False, keep_parents=False):
"""
Adds 'objs' to the collection of objects to be deleted as well as all
Add 'objs' to the collection of objects to be deleted as well as all
parent instances. 'objs' must be a homogeneous iterable collection of
model instances (e.g. a QuerySet). If 'collect_related' is True,
related objects will be handled by their respective on_delete handler.
@ -228,7 +228,7 @@ class Collector:
def related_objects(self, related, objs):
"""
Gets a QuerySet of objects related to ``objs`` via the relation ``related``.
Get a QuerySet of objects related to `objs` via the relation `related`.
"""
return related.related_model._base_manager.using(self.using).filter(
**{"%s__in" % related.field.name: objs}


@ -11,7 +11,7 @@ from django.utils.functional import cached_property
class Combinable:
"""
Provides the ability to combine one or two objects with
Provide the ability to combine one or two objects with
some connector. For example F('foo') + F('bar').
"""
@ -120,9 +120,7 @@ class Combinable:
@deconstructible
class BaseExpression:
"""
Base class for all query expressions.
"""
"""Base class for all query expressions."""
# aggregate specific fields
is_summary = False
@ -170,7 +168,7 @@ class BaseExpression:
* connection: the database connection used for the current query.
Returns: (sql, params)
Return: (sql, params)
Where `sql` is a string containing ordered sql parameters to be
replaced with the elements of the list `params`.
"""
@ -192,7 +190,7 @@ class BaseExpression:
def resolve_expression(self, query=None, allow_joins=True, reuse=None, summarize=False, for_save=False):
"""
Provides the chance to do any preprocessing or validation before being
Provide the chance to do any preprocessing or validation before being
added to the query.
Arguments:
@ -203,7 +201,7 @@ class BaseExpression:
* summarize: a terminal aggregate clause
* for_save: whether this expression about to be used in a save or update
Returns: an Expression to be added to the query.
Return: an Expression to be added to the query.
"""
c = self.copy()
c.is_summary = summarize
@ -214,9 +212,7 @@ class BaseExpression:
return c
def _prepare(self, field):
"""
Hook used by Lookup.get_prep_lookup() to do custom preparation.
"""
"""Hook used by Lookup.get_prep_lookup() to do custom preparation."""
return self
@property
@ -225,9 +221,7 @@ class BaseExpression:
@cached_property
def output_field(self):
"""
Returns the output type of this expressions.
"""
"""Return the output type of this expressions."""
if self._output_field_or_none is None:
raise FieldError("Cannot resolve expression type, unknown output_field")
return self._output_field_or_none
@ -235,7 +229,7 @@ class BaseExpression:
@cached_property
def _output_field_or_none(self):
"""
Returns the output field of this expression, or None if no output type
Return the output field of this expression, or None if no output type
can be resolved. Note that the 'output_field' property will raise
FieldError if no type can be resolved, but this attribute allows for
None values.
@ -246,10 +240,9 @@ class BaseExpression:
def _resolve_output_field(self):
"""
Attempts to infer the output type of the expression. If the output
fields of all source fields match then we can simply infer the same
type here. This isn't always correct, but it makes sense most of the
time.
Attempt to infer the output type of the expression. If the output
fields of all source fields match, then simply infer the same type
here. This isn't always correct, but it makes sense most of the time.
Consider the difference between `2 + 2` and `2 / 3`. Inferring
the type here is a convenience for the common case. The user should
@ -316,10 +309,7 @@ class BaseExpression:
return cols
def get_source_fields(self):
"""
Returns the underlying field types used by this
aggregate.
"""
"""Return the underlying field types used by this aggregate."""
return [e._output_field_or_none for e in self.get_source_expressions()]
def asc(self, **kwargs):
@ -364,9 +354,7 @@ class BaseExpression:
class Expression(BaseExpression, Combinable):
"""
An expression that can be combined with other expressions.
"""
"""An expression that can be combined with other expressions."""
pass
@ -470,9 +458,7 @@ class TemporalSubtraction(CombinedExpression):
@deconstructible
class F(Combinable):
"""
An object capable of resolving references to existing query objects.
"""
"""An object capable of resolving references to existing query objects."""
def __init__(self, name):
"""
Arguments:
@ -527,9 +513,7 @@ class OuterRef(F):
class Func(Expression):
"""
An SQL function call.
"""
"""An SQL function call."""
function = None
template = '%(function)s(%(expressions)s)'
arg_joiner = ', '
@ -608,9 +592,7 @@ class Func(Expression):
class Value(Expression):
"""
Represents a wrapped value as a node within an expression
"""
"""Represent a wrapped value as a node within an expression."""
def __init__(self, value, output_field=None):
"""
Arguments:
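Because F resolves names inside the query rather than in Python, expressions
built from it run in the database. A short hedged illustration with a
hypothetical Entry model:

from django.db.models import F

Entry.objects.update(n_pingbacks=F('n_pingbacks') + 1)   # arithmetic in SQL
Entry.objects.filter(n_comments__gt=F('n_pingbacks'))    # compare two columns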


@ -190,9 +190,7 @@ class Field(RegisterLookupMixin):
return '%s.%s.%s' % (app, model._meta.object_name, self.name)
def __repr__(self):
"""
Displays the module, class and name of the field.
"""
"""Display the module, class, and name of the field."""
path = '%s.%s' % (self.__class__.__module__, self.__class__.__name__)
name = getattr(self, 'name', None)
if name is not None:
@ -210,9 +208,10 @@ class Field(RegisterLookupMixin):
return errors
def _check_field_name(self):
""" Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk". """
"""
Check if field name is valid, i.e. 1) does not end with an
underscore, 2) does not contain "__" and 3) is not "pk".
"""
if self.name.endswith('_'):
return [
checks.Error(
@ -348,37 +347,42 @@ class Field(RegisterLookupMixin):
def select_format(self, compiler, sql, params):
"""
Custom format for select clauses. For example, GIS columns need to be
selected as AsText(table.col) on MySQL as the table.col data can't be used
by Django.
selected as AsText(table.col) on MySQL as the table.col data can't be
used by Django.
"""
return sql, params
def deconstruct(self):
"""
Returns enough information to recreate the field as a 4-tuple:
Return enough information to recreate the field as a 4-tuple:
* The name of the field on the model, if contribute_to_class has been run
* The import path of the field, including the class: django.db.models.IntegerField
This should be the most portable version, so less specific may be better.
* A list of positional arguments
* A dict of keyword arguments
* The name of the field on the model, if contribute_to_class() has
been run.
* The import path of the field, including the class, e.g.
django.db.models.IntegerField. This should be the most portable
version, so less specific may be better.
* A list of positional arguments.
* A dict of keyword arguments.
Note that the positional or keyword arguments must contain values of the
following types (including inner values of collection types):
Note that the positional or keyword arguments must contain values of
the following types (including inner values of collection types):
* None, bool, str, int, float, complex, set, frozenset, list, tuple, dict
* None, bool, str, int, float, complex, set, frozenset, list, tuple,
dict
* UUID
* datetime.datetime (naive), datetime.date
* top-level classes, top-level functions - will be referenced by their full import path
* top-level classes, top-level functions - will be referenced by their
full import path
* Storage instances - these have their own deconstruct() method
This is because the values here must be serialized into a text format
(possibly new Python code, possibly JSON) and these are the only types
with encoding handlers defined.
There's no need to return the exact way the field was instantiated this time,
just ensure that the resulting field is the same - prefer keyword arguments
over positional ones, and omit parameters with their default values.
There's no need to return the exact way the field was instantiated this
time, just ensure that the resulting field is the same - prefer keyword
arguments over positional ones, and omit parameters with their default
values.
"""
# Short-form way of fetching all the default parameters
keywords = {}
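A hedged sketch of the contract spelled out above (the exact kwargs depend on
which options differ from the field's defaults):

from django.db import models

name, path, args, kwargs = models.CharField(max_length=100).deconstruct()
# name is None until contribute_to_class() has run;
# path == 'django.db.models.CharField', args == [], kwargs == {'max_length': 100}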
@ -486,7 +490,7 @@ class Field(RegisterLookupMixin):
def __reduce__(self):
"""
Pickling should return the model._meta.fields instance of the field,
not a new copy of that field. So, we use the app registry to load the
not a new copy of that field. So, use the app registry to load the
model and then the field back.
"""
if not hasattr(self, 'model'):
@ -512,9 +516,9 @@ class Field(RegisterLookupMixin):
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
Convert the input value into the expected Python data type, raising
django.core.exceptions.ValidationError if the data can't be converted.
Returns the converted value. Subclasses should override this.
Return the converted value. Subclasses should override this.
"""
return value
@ -544,8 +548,8 @@ class Field(RegisterLookupMixin):
def validate(self, value, model_instance):
"""
Validates value and throws ValidationError. Subclasses should override
this to provide validation logic.
Validate value and raise ValidationError if necessary. Subclasses
should override this to provide validation logic.
"""
if not self.editable:
# Skip validation for non-editable fields.
@ -576,8 +580,8 @@ class Field(RegisterLookupMixin):
def clean(self, value, model_instance):
"""
Convert the value's type and run validation. Validation errors
from to_python and validate are propagated. The correct value is
returned if no error is raised.
from to_python() and validate() are propagated. Return the correct
value if no error is raised.
"""
value = self.to_python(value)
self.validate(value, model_instance)
@ -632,9 +636,9 @@ class Field(RegisterLookupMixin):
def db_parameters(self, connection):
"""
Extension of db_type(), providing a range of different return
values (type, checks).
This will look at db_type(), allowing custom model fields to override it.
Extension of db_type(), providing a range of different return values
(type, checks). This will look at db_type(), allowing custom model
fields to override it.
"""
type_string = self.db_type(connection)
check_string = self.db_check(connection)
@ -667,9 +671,8 @@ class Field(RegisterLookupMixin):
"""
Register the field with the model class it belongs to.
If private_only is True, a separate instance of this field will be
created for every subclass of cls, even if cls is not an abstract
model.
If private_only is True, create a separate instance of this field
for every subclass of cls, even if cls is not an abstract model.
"""
self.set_attributes_from_name(name)
self.model = cls
@ -709,22 +712,18 @@ class Field(RegisterLookupMixin):
return self.__class__.__name__
def pre_save(self, model_instance, add):
"""
Returns field's value just before saving.
"""
"""Return field's value just before saving."""
return getattr(model_instance, self.attname)
def get_prep_value(self, value):
"""
Perform preliminary non-db specific value checks and conversions.
"""
"""Perform preliminary non-db specific value checks and conversions."""
if isinstance(value, Promise):
value = value._proxy____cast()
return value
def get_db_prep_value(self, value, connection, prepared=False):
"""Returns field's value prepared for interacting with the database
backend.
"""
Return field's value prepared for interacting with the database backend.
Used by the default implementations of get_db_prep_save().
"""
@ -733,22 +732,15 @@ class Field(RegisterLookupMixin):
return value
def get_db_prep_save(self, value, connection):
"""
Returns field's value prepared for saving into a database.
"""
return self.get_db_prep_value(value, connection=connection,
prepared=False)
"""Return field's value prepared for saving into a database."""
return self.get_db_prep_value(value, connection=connection, prepared=False)
def has_default(self):
"""
Returns a boolean of whether this field has a default value.
"""
"""Return a boolean of whether this field has a default value."""
return self.default is not NOT_PROVIDED
def get_default(self):
"""
Returns the default value for this field.
"""
"""Return the default value for this field."""
return self._get_default()
@cached_property
@ -760,11 +752,13 @@ class Field(RegisterLookupMixin):
if not self.empty_strings_allowed or self.null and not connection.features.interprets_empty_strings_as_nulls:
return return_None
return str # returns empty string
return str # return empty string
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH, limit_choices_to=None):
"""Returns choices with a default blank choices included, for use
as SelectField choices for this field."""
"""
Return choices with a default blank choice included, for use
as <select> choices for this field.
"""
blank_defined = False
choices = list(self.choices) if self.choices else []
named_groups = choices and isinstance(choices[0][1], (list, tuple))
@ -793,7 +787,7 @@ class Field(RegisterLookupMixin):
def value_to_string(self, obj):
"""
Returns a string value of this field from the passed obj.
Return a string value of this field from the passed obj.
This is used by the serialization framework.
"""
return force_text(self.value_from_object(obj))
@ -813,9 +807,7 @@ class Field(RegisterLookupMixin):
setattr(instance, self.name, data)
def formfield(self, form_class=None, choices_form_class=None, **kwargs):
"""
Returns a django.forms.Field instance for this database Field.
"""
"""Return a django.forms.Field instance for this field."""
defaults = {'required': not self.blank,
'label': capfirst(self.verbose_name),
'help_text': self.help_text}
@ -851,9 +843,7 @@ class Field(RegisterLookupMixin):
return form_class(**defaults)
def value_from_object(self, obj):
"""
Returns the value of this field in the given model instance.
"""
"""Return the value of this field in the given model instance."""
return getattr(obj, self.attname)
@ -1137,11 +1127,8 @@ class DateField(DateTimeCheckMixin, Field):
def _check_fix_default_value(self):
"""
Adds a warning to the checks framework stating, that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
@ -1279,11 +1266,8 @@ class DateTimeField(DateField):
def _check_fix_default_value(self):
"""
Adds a warning to the checks framework stating, that using an actual
date or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
Warn that using an actual date or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []
@ -1539,7 +1523,7 @@ class DecimalField(Field):
def format_number(self, value):
"""
Formats a number into a string with the requisite number of digits and
Format a number into a string with the requisite number of digits and
decimal places.
"""
# Method moved to django.db.backends.utils.
@ -1569,9 +1553,10 @@ class DecimalField(Field):
class DurationField(Field):
"""Stores timedelta objects.
"""
Store timedelta objects.
Uses interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
Use interval on PostgreSQL, INTERVAL DAY TO SECOND on Oracle, and bigint
of microseconds on other databases.
"""
empty_strings_allowed = False
@ -2123,11 +2108,8 @@ class TimeField(DateTimeCheckMixin, Field):
def _check_fix_default_value(self):
"""
Adds a warning to the checks framework stating, that using an actual
time or datetime value is probably wrong; it's only being evaluated on
server start-up.
For details see ticket #21905
Warn that using an actual time or datetime value is probably wrong;
it's only evaluated on server startup.
"""
if not self.has_default():
return []


@ -133,14 +133,14 @@ class FieldFile(File):
class FileDescriptor:
"""
The descriptor for the file attribute on the model instance. Returns a
FieldFile when accessed so you can do stuff like::
The descriptor for the file attribute on the model instance. Return a
FieldFile when accessed so you can write code like::
>>> from myapp.models import MyModel
>>> instance = MyModel.objects.get(pk=1)
>>> instance.file.size
Assigns a file object on assignment so you can do::
Assign a file object on assignment so you can do::
>>> with open('/path/to/hello.world', 'r') as f:
... instance.file = File(f)
@ -275,7 +275,6 @@ class FileField(Field):
return "FileField"
def get_prep_value(self, value):
"Returns field's value prepared for saving into a database."
value = super().get_prep_value(value)
# Need to convert File objects provided via a form to string for database insertion
if value is None:
@ -283,7 +282,6 @@ class FileField(Field):
return str(value)
def pre_save(self, model_instance, add):
"Returns field's value just before saving."
file = super().pre_save(model_instance, add)
if file and not file._committed:
# Commit the file to storage prior to saving the model
@ -406,7 +404,7 @@ class ImageField(FileField):
def update_dimension_fields(self, instance, force=False, *args, **kwargs):
"""
Updates field's width and height fields, if defined.
Update field's width and height fields, if defined.
This method is hooked up to model's post_init signal to update
dimensions after instantiating a model instance. However, dimensions


@ -80,9 +80,7 @@ def lazy_related_operation(function, model, *related_models, **kwargs):
class RelatedField(Field):
"""
Base class that all relational fields inherit from.
"""
"""Base class that all relational fields inherit from."""
# Field flags
one_to_many = False
@ -192,9 +190,7 @@ class RelatedField(Field):
return []
def _check_clashes(self):
"""
Check accessor and reverse query name clashes.
"""
"""Check accessor and reverse query name clashes."""
from django.db.models.base import ModelBase
errors = []
@ -424,7 +420,7 @@ class RelatedField(Field):
@property
def target_field(self):
"""
When filtering against this relation, returns the field on the remote
When filtering against this relation, return the field on the remote
model against which the filtering should happen.
"""
target_fields = self.get_path_info()[-1].target_fields
@ -436,7 +432,7 @@ class RelatedField(Field):
class ForeignObject(RelatedField):
"""
Abstraction of the ForeignKey relation, supports multi-column relations.
Abstraction of the ForeignKey relation to support multi-column relations.
"""
# Field flags
@ -693,17 +689,13 @@ class ForeignObject(RelatedField):
return None
def get_path_info(self):
"""
Get path from this field to the related model.
"""
"""Get path from this field to the related model."""
opts = self.remote_field.model._meta
from_opts = self.model._meta
return [PathInfo(from_opts, opts, self.foreign_related_fields, self, False, True)]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
"""Get path from the related model to this field's model."""
opts = self.model._meta
from_opts = self.remote_field.model._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.remote_field, not self.unique, False)]
@ -861,9 +853,7 @@ class ForeignKey(ForeignObject):
return self.foreign_related_fields[0]
def get_reverse_path_info(self):
"""
Get path from the related model to this field's model.
"""
"""Get path from the related model to this field's model."""
opts = self.model._meta
from_opts = self.remote_field.model._meta
pathinfos = [PathInfo(from_opts, opts, (opts.pk,), self.remote_field, not self.unique, False)]
@ -900,7 +890,7 @@ class ForeignKey(ForeignObject):
return attname, column
def get_default(self):
"Here we check if the default value is an object and return the to_field if so."
"""Return the to_field if the default value is an object."""
field_default = super().get_default()
if isinstance(field_default, self.remote_field.model):
return getattr(field_default, self.target_field.attname)
@ -1441,9 +1431,7 @@ class ManyToManyField(RelatedField):
return name, path, args, kwargs
def _get_path_info(self, direct=False):
"""
Called by both direct and indirect m2m traversal.
"""
"""Called by both direct and indirect m2m traversal."""
pathinfos = []
int_model = self.remote_field.through
linkfield1 = int_model._meta.get_field(self.m2m_field_name())
@ -1598,9 +1586,6 @@ class ManyToManyField(RelatedField):
pass
def value_from_object(self, obj):
"""
Return the value of this field in the given model instance.
"""
if obj.pk is None:
return self.related_model.objects.none()
return getattr(obj, self.attname).all()


@ -66,7 +66,7 @@ class ForeignObjectRel:
@property
def target_field(self):
"""
When filtering against this relation, returns the field on the remote
When filtering against this relation, return the field on the remote
model against which the filtering should happen.
"""
target_fields = self.get_path_info()[-1].target_fields
@ -116,8 +116,8 @@ class ForeignObjectRel:
def get_choices(self, include_blank=True, blank_choice=BLANK_CHOICE_DASH):
"""
Return choices with a default blank choices included, for use as
SelectField choices for this field.
Return choices with a default blank choice included, for use
as <select> choices for this field.
Analog of django.db.models.fields.Field.get_choices(), provided
initially for utilization by RelatedFieldListFilter.
@ -127,7 +127,7 @@ class ForeignObjectRel:
]
def is_hidden(self):
"Should the related object be hidden?"
"""Should the related object be hidden?"""
return bool(self.related_name) and self.related_name[-1] == '+'
def get_joining_columns(self):


@ -5,9 +5,7 @@ from django.db.models import Func, Transform, Value, fields
class Cast(Func):
"""
Coerce an expression to a new field type.
"""
"""Coerce an expression to a new field type."""
function = 'CAST'
template = '%(function)s(%(expressions)s AS %(db_type)s)'
@ -38,9 +36,7 @@ class Cast(Func):
class Coalesce(Func):
"""
Chooses, from left to right, the first non-null expression and returns it.
"""
"""Return, from left to right, the first non-null expression."""
function = 'COALESCE'
def __init__(self, *expressions, **extra):
@ -65,9 +61,8 @@ class Coalesce(Func):
class ConcatPair(Func):
"""
A helper class that concatenates two arguments together. This is used
by `Concat` because not all backend databases support more than two
arguments.
Concatenate two arguments together. This is used by `Concat` because not
all backend databases support more than two arguments.
"""
function = 'CONCAT'
@ -98,9 +93,9 @@ class ConcatPair(Func):
class Concat(Func):
"""
Concatenates text fields together. Backends that result in an entire
Concatenate text fields together. Backends that result in an entire
null expression when any arguments are null will wrap each argument in
coalesce functions to ensure we always get a non-null result.
coalesce functions to ensure a non-null result.
"""
function = None
template = "%(expressions)s"
@ -122,7 +117,7 @@ class Concat(Func):
class Greatest(Func):
"""
Chooses the maximum expression and returns it.
Return the maximum expression.
If any expression is null the return value is database-specific:
On Postgres, the maximum not-null expression is returned.
@ -142,11 +137,11 @@ class Greatest(Func):
class Least(Func):
"""
Chooses the minimum expression and returns it.
Return the minimum expression.
If any expression is null the return value is database-specific:
On Postgres, the minimum not-null expression is returned.
On MySQL, Oracle, and SQLite, if any expression is null, null is returned.
On Postgres, return the minimum not-null expression.
On MySQL, Oracle, and SQLite, if any expression is null, return null.
"""
function = 'LEAST'
@ -161,7 +156,7 @@ class Least(Func):
class Length(Transform):
"""Returns the number of characters in the expression"""
"""Return the number of characters in the expression."""
function = 'LENGTH'
lookup_name = 'length'
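A hedged usage sketch combining the database functions above (Author and its
fields are hypothetical):

from django.db.models import Value
from django.db.models.functions import Coalesce, Concat, Greatest, Length

Author.objects.annotate(
    display=Coalesce('nickname', 'name'),   # first non-null, left to right
    label=Concat('name', Value(' <'), 'email', Value('>')),
    latest=Greatest('created', 'updated'),  # null handling is database-specific
    name_len=Length('name'),
)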


@ -7,18 +7,18 @@ from django.db.models.query import QuerySet
class BaseManager:
# Tracks each time a Manager instance is created. Used to retain order.
# To retain order, track each time a Manager instance is created.
creation_counter = 0
# Set to True for the 'objects' managers that are automatically created.
auto_created = False
#: If set to True the manager will be serialized into migrations and will
#: thus be available in e.g. RunPython operations
#: thus be available in e.g. RunPython operations.
use_in_migrations = False
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
# Capture the arguments to make returning them trivial.
obj = super().__new__(cls)
obj._constructor_args = (args, kwargs)
return obj
@ -32,15 +32,15 @@ class BaseManager:
self._hints = {}
def __str__(self):
""" Return "app_label.model_label.manager_name". """
"""Return "app_label.model_label.manager_name"."""
return '%s.%s' % (self.model._meta.label, self.name)
def deconstruct(self):
"""
Returns a 5-tuple of the form (as_manager (True), manager_class,
Return a 5-tuple of the form (as_manager (True), manager_class,
queryset_class, args, kwargs).
Raises a ValueError if the manager is dynamically generated.
Raise a ValueError if the manager is dynamically generated.
"""
qs_class = self._queryset_class
if getattr(self, '_built_with_as_manager', False):
@ -118,7 +118,7 @@ class BaseManager:
def _set_creation_counter(self):
"""
Sets the creation counter value for this instance and increments the
Set the creation counter value for this instance and increment the
class-level copy.
"""
self.creation_counter = BaseManager.creation_counter
@ -140,8 +140,8 @@ class BaseManager:
def get_queryset(self):
"""
Returns a new QuerySet object. Subclasses can override this method to
easily customize the behavior of the Manager.
Return a new QuerySet object. Subclasses can override this method to
customize the behavior of the Manager.
"""
return self._queryset_class(model=self.model, using=self._db, hints=self._hints)


@ -284,7 +284,7 @@ class Options:
def setup_proxy(self, target):
"""
Does the internal setup so that the current model is a proxy for
Do the internal setup so that the current model is a proxy for
"target".
"""
self.pk = target._meta.pk
@ -315,11 +315,7 @@ class Options:
@property
def verbose_name_raw(self):
"""
There are a few places where the untranslated verbose name is needed
(so that we get the same value regardless of currently active
locale).
"""
"""Return the untranslated verbose name."""
with override(None):
return force_text(self.verbose_name)
@ -427,7 +423,7 @@ class Options:
@cached_property
def fields(self):
"""
Returns a list of all forward fields on the model and its parents,
Return a list of all forward fields on the model and its parents,
excluding ManyToManyFields.
Private API intended only to be used by Django itself; get_fields()
@ -461,7 +457,7 @@ class Options:
@cached_property
def concrete_fields(self):
"""
Returns a list of all concrete fields on the model and its parents.
Return a list of all concrete fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
@ -474,7 +470,7 @@ class Options:
@cached_property
def local_concrete_fields(self):
"""
Returns a list of all concrete fields on the model.
Return a list of all concrete fields on the model.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
@ -487,7 +483,7 @@ class Options:
@cached_property
def many_to_many(self):
"""
Returns a list of all many to many fields on the model and its parents.
Return a list of all many to many fields on the model and its parents.
Private API intended only to be used by Django itself; get_fields()
combined with filtering of field properties is the public API for
@ -501,7 +497,7 @@ class Options:
@cached_property
def related_objects(self):
"""
Returns all related objects pointing to the current model. The related
Return all related objects pointing to the current model. The related
objects can come from a one-to-one, one-to-many, or many-to-many field
relation type.
@ -589,7 +585,7 @@ class Options:
def get_parent_list(self):
"""
Returns all the ancestors of this model as a list ordered by MRO.
Return all the ancestors of this model as a list ordered by MRO.
Useful for determining if something is an ancestor, regardless of lineage.
"""
result = OrderedSet(self.parents)
@ -600,12 +596,12 @@ class Options:
def get_ancestor_link(self, ancestor):
"""
Returns the field on the current model which points to the given
Return the field on the current model which points to the given
"ancestor". This is possible an indirect link (a pointer to a parent
model, which points, eventually, to the ancestor). Used when
constructing table joins for model inheritance.
Returns None if the model isn't an ancestor of this one.
Return None if the model isn't an ancestor of this one.
"""
if ancestor in self.parents:
return self.parents[ancestor]
@ -717,7 +713,7 @@ class Options:
def get_fields(self, include_parents=True, include_hidden=False):
"""
Returns a list of fields associated to the model. By default, includes
Return a list of fields associated with the model. By default, include
forward and reverse fields, fields derived from inheritance, but not
hidden fields. The returned fields can be changed using the parameters:


@ -39,9 +39,7 @@ class BaseIterable:
class ModelIterable(BaseIterable):
"""
Iterable that yields a model instance for each row.
"""
"""Iterable that yields a model instance for each row."""
def __iter__(self):
queryset = self.queryset
@ -86,8 +84,7 @@ class ModelIterable(BaseIterable):
class ValuesIterable(BaseIterable):
"""
Iterable returned by QuerySet.values() that yields a dict
for each row.
Iterable returned by QuerySet.values() that yields a dict for each row.
"""
def __iter__(self):
@ -108,8 +105,8 @@ class ValuesIterable(BaseIterable):
class ValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=False)
that yields a tuple for each row.
Iterable returned by QuerySet.values_list(flat=False) that yields a tuple
for each row.
"""
def __iter__(self):
@ -141,8 +138,8 @@ class ValuesListIterable(BaseIterable):
class FlatValuesListIterable(BaseIterable):
"""
Iterable returned by QuerySet.values_list(flat=True) that
yields single values.
Iterable returned by QuerySet.values_list(flat=True) that yields single
values.
"""
def __iter__(self):
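For reference, the iterable classes above back the public values() and
values_list() calls. A hedged sketch with a hypothetical Entry model:

Entry.objects.values('headline')                  # ValuesIterable: dicts
Entry.objects.values_list('headline', 'pk')       # ValuesListIterable: tuples
Entry.objects.values_list('headline', flat=True)  # FlatValuesListIterable: values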
@ -153,9 +150,7 @@ class FlatValuesListIterable(BaseIterable):
class QuerySet:
"""
Represents a lazy database lookup for a set of objects.
"""
"""Represent a lazy database lookup for a set of objects."""
def __init__(self, model=None, query=None, using=None, hints=None):
self.model = model
@ -185,9 +180,7 @@ class QuerySet:
########################
def __deepcopy__(self, memo):
"""
Deep copy of a QuerySet doesn't populate the cache
"""
"""Don't populate the QuerySet's cache."""
obj = self.__class__()
for k, v in self.__dict__.items():
if k == '_result_cache':
@ -254,9 +247,7 @@ class QuerySet:
return bool(self._result_cache)
def __getitem__(self, k):
"""
Retrieves an item or slice from the set of results.
"""
"""Retrieve an item or slice from the set of results."""
if not isinstance(k, (int, slice)):
raise TypeError
assert ((not isinstance(k, slice) and (k >= 0)) or
@ -319,8 +310,8 @@ class QuerySet:
def aggregate(self, *args, **kwargs):
"""
Returns a dictionary containing the calculations (aggregation)
over the current queryset
Return a dictionary containing the calculations (aggregation)
over the current queryset.
If args is present the expression is passed as a kwarg using
the Aggregate object's default alias.
@ -347,11 +338,11 @@ class QuerySet:
def count(self):
"""
Performs a SELECT COUNT() and returns the number of records as an
Perform a SELECT COUNT() and return the number of records as an
integer.
If the QuerySet is already fully cached this simply returns the length
of the cached results set to avoid multiple SELECT COUNT(*) calls.
If the QuerySet is already fully cached, return the length of the
cached results set to avoid multiple SELECT COUNT(*) calls.
"""
if self._result_cache is not None:
return len(self._result_cache)
@ -360,7 +351,7 @@ class QuerySet:
def get(self, *args, **kwargs):
"""
Performs the query and returns a single object matching the given
Perform the query and return a single object matching the given
keyword arguments.
"""
clone = self.filter(*args, **kwargs)
@ -381,7 +372,7 @@ class QuerySet:
def create(self, **kwargs):
"""
Creates a new object with the given kwargs, saving it to the database
Create a new object with the given kwargs, saving it to the database
and returning the created object.
"""
obj = self.model(**kwargs)
@ -396,9 +387,9 @@ class QuerySet:
def bulk_create(self, objs, batch_size=None):
"""
Inserts each of the instances into the database. This does *not* call
save() on each of the instances, does not send any pre/post save
signals, and does not set the primary key attribute if it is an
Insert each of the instances into the database. Do *not* call
save() on each of the instances, do not send any pre/post_save
signals, and do not set the primary key attribute if it is an
autoincrement field (except if features.can_return_ids_from_bulk_insert=True).
Multi-table models are not supported.
"""
@ -447,8 +438,8 @@ class QuerySet:
def get_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, creating one if necessary.
Returns a tuple of (object, created), where created is a boolean
Look up an object with the given kwargs, creating one if necessary.
Return a tuple of (object, created), where created is a boolean
specifying whether an object was created.
"""
lookup, params = self._extract_model_params(defaults, **kwargs)
@ -462,9 +453,9 @@ class QuerySet:
def update_or_create(self, defaults=None, **kwargs):
"""
Looks up an object with the given kwargs, updating one with defaults
if it exists, otherwise creates a new one.
Returns a tuple (object, created), where created is a boolean
Look up an object with the given kwargs, updating one with defaults
if it exists, otherwise create a new one.
Return a tuple (object, created), where created is a boolean
specifying whether an object was created.
"""
defaults = defaults or {}
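Both methods share the (object, created) return convention. A hedged sketch
with a hypothetical Author model:

author, created = Author.objects.get_or_create(
    name='Ada', defaults={'age': 36},
)
author, created = Author.objects.update_or_create(
    name='Ada', defaults={'age': 37},  # update age if the row already exists
)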
@ -484,8 +475,8 @@ class QuerySet:
def _create_object_from_params(self, lookup, params):
"""
Tries to create an object using passed params.
Used by get_or_create and update_or_create
Try to create an object using passed params. Used by get_or_create()
and update_or_create().
"""
try:
with transaction.atomic(using=self.db):
@ -502,9 +493,9 @@ class QuerySet:
def _extract_model_params(self, defaults, **kwargs):
"""
Prepares `lookup` (kwargs that are valid model attributes), `params`
Prepare `lookup` (kwargs that are valid model attributes), `params`
(for creating a model instance) based on given kwargs; for use by
get_or_create and update_or_create.
get_or_create() and update_or_create().
"""
defaults = defaults or {}
lookup = kwargs.copy()
@ -530,7 +521,7 @@ class QuerySet:
def _earliest_or_latest(self, field_name=None, direction="-"):
"""
Returns the latest object, according to the model's
Return the latest object, according to the model's
'get_latest_by' option or the given field_name, if provided.
"""
order_by = field_name or getattr(self.model._meta, 'get_latest_by')
@ -551,18 +542,14 @@ class QuerySet:
return self._earliest_or_latest(field_name=field_name, direction="-")
def first(self):
"""
Returns the first object of a query, returns None if no match is found.
"""
"""Return the first object of a query or None if no match is found."""
objects = list((self if self.ordered else self.order_by('pk'))[:1])
if objects:
return objects[0]
return None
def last(self):
"""
Returns the last object of a query, returns None if no match is found.
"""
"""Return the last object of a query or None if no match is found."""
objects = list((self.reverse() if self.ordered else self.order_by('-pk'))[:1])
if objects:
return objects[0]
@ -570,8 +557,8 @@ class QuerySet:
def in_bulk(self, id_list=None):
"""
Returns a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, the entire QuerySet is evaluated.
Return a dictionary mapping each of the given IDs to the object with
that ID. If `id_list` isn't provided, evaluate the entire QuerySet.
"""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with in_bulk"
@ -584,9 +571,7 @@ class QuerySet:
return {obj._get_pk_val(): obj for obj in qs}
def delete(self):
"""
Deletes the records in the current QuerySet.
"""
"""Delete the records in the current QuerySet."""
assert self.query.can_filter(), \
"Cannot use 'limit' or 'offset' with delete."
@ -618,15 +603,15 @@ class QuerySet:
def _raw_delete(self, using):
"""
Deletes objects found from the given queryset in single direct SQL
query. No signals are sent, and there is no protection for cascades.
Delete objects found from the given queryset in a single direct SQL
query. No signals are sent and there is no protection for cascades.
"""
return sql.DeleteQuery(self.model).delete_qs(self, using)
_raw_delete.alters_data = True
def update(self, **kwargs):
"""
Updates all elements in the current QuerySet, setting all the given
Update all elements in the current QuerySet, setting all the given
fields to the appropriate values.
"""
assert self.query.can_filter(), \
@ -644,7 +629,7 @@ class QuerySet:
def _update(self, values):
"""
A version of update that accepts field objects instead of field names.
A version of update() that accepts field objects instead of field names.
Used primarily for model saving and not intended for use by general
code (it requires too much poking around at model internals to be
useful at that level).
@ -711,7 +696,7 @@ class QuerySet:
def dates(self, field_name, kind, order='ASC'):
"""
Returns a list of date objects representing all available dates for
Return a list of date objects representing all available dates for
the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day"), \
@ -727,7 +712,7 @@ class QuerySet:
def datetimes(self, field_name, kind, order='ASC', tzinfo=None):
"""
Returns a list of datetime objects representing all available
Return a list of datetime objects representing all available
datetimes for the given field_name, scoped to 'kind'.
"""
assert kind in ("year", "month", "day", "hour", "minute", "second"), \
@ -747,9 +732,7 @@ class QuerySet:
).distinct().filter(plain_field__isnull=False).order_by(('-' if order == 'DESC' else '') + 'datetimefield')
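A hedged sketch of the two calendar-style queries above (Entry and its fields
are hypothetical):

Entry.objects.dates('pub_date', 'month', order='DESC')  # datetime.date objects
Entry.objects.datetimes('created', 'hour')              # datetime.datetime objects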
def none(self):
"""
Returns an empty QuerySet.
"""
"""Return an empty QuerySet."""
clone = self._clone()
clone.query.set_empty()
return clone
@ -760,21 +743,21 @@ class QuerySet:
def all(self):
"""
Returns a new QuerySet that is a copy of the current one. This allows a
Return a new QuerySet that is a copy of the current one. This allows a
QuerySet to proxy for a model manager in some cases.
"""
return self._clone()
def filter(self, *args, **kwargs):
"""
Returns a new QuerySet instance with the args ANDed to the existing
Return a new QuerySet instance with the args ANDed to the existing
set.
"""
return self._filter_or_exclude(False, *args, **kwargs)
def exclude(self, *args, **kwargs):
"""
Returns a new QuerySet instance with NOT (args) ANDed to the existing
Return a new QuerySet instance with NOT (args) ANDed to the existing
set.
"""
return self._filter_or_exclude(True, *args, **kwargs)
@ -793,7 +776,7 @@ class QuerySet:
def complex_filter(self, filter_obj):
"""
Returns a new QuerySet instance with filter_obj added to the filters.
Return a new QuerySet instance with filter_obj added to the filters.
filter_obj can be a Q object (or anything with an add_to_query()
method) or a dictionary of keyword lookup arguments.
@ -830,7 +813,7 @@ class QuerySet:
def select_for_update(self, nowait=False, skip_locked=False):
"""
Returns a new QuerySet instance that will select objects with a
Return a new QuerySet instance that will select objects with a
FOR UPDATE lock.
"""
if nowait and skip_locked:
@ -844,12 +827,12 @@ class QuerySet:
def select_related(self, *fields):
"""
Returns a new QuerySet instance that will select related objects.
Return a new QuerySet instance that will select related objects.
If fields are specified, they must be ForeignKey fields and only those
related objects are included in the selection.
If select_related(None) is called, the list is cleared.
If select_related(None) is called, clear the list.
"""
if self._fields is not None:
@ -866,13 +849,12 @@ class QuerySet:
def prefetch_related(self, *lookups):
"""
Returns a new QuerySet instance that will prefetch the specified
Return a new QuerySet instance that will prefetch the specified
Many-To-One and Many-To-Many related objects when the QuerySet is
evaluated.
When prefetch_related() is called more than once, the list of lookups to
prefetch is appended to. If prefetch_related(None) is called, the list
is cleared.
When prefetch_related() is called more than once, append to the list of
prefetch lookups. If prefetch_related(None) is called, clear the list.
"""
clone = self._clone()
if lookups == (None,):
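A hedged sketch of the related-object options described above (Entry, 'blog',
and 'authors' are hypothetical):

Entry.objects.select_related('blog')               # follow the FK in one query
Entry.objects.prefetch_related('authors')          # extra query, joined in Python
Entry.objects.select_for_update(skip_locked=True)  # FOR UPDATE SKIP LOCKED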
@ -924,9 +906,7 @@ class QuerySet:
return clone
def order_by(self, *field_names):
"""
Returns a new QuerySet instance with the ordering changed.
"""
"""Return a new QuerySet instance with the ordering changed."""
assert self.query.can_filter(), \
"Cannot reorder a query once a slice has been taken."
obj = self._clone()
@ -936,7 +916,7 @@ class QuerySet:
def distinct(self, *field_names):
"""
Returns a new QuerySet instance that will select only distinct results.
Return a new QuerySet instance that will select only distinct results.
"""
assert self.query.can_filter(), \
"Cannot create distinct fields once a slice has been taken."
@ -946,9 +926,7 @@ class QuerySet:
def extra(self, select=None, where=None, params=None, tables=None,
order_by=None, select_params=None):
"""
Adds extra SQL fragments to the query.
"""
"""Add extra SQL fragments to the query."""
assert self.query.can_filter(), \
"Cannot change a query once a slice has been taken"
clone = self._clone()
@ -956,20 +934,17 @@ class QuerySet:
return clone
def reverse(self):
"""
Reverses the ordering of the QuerySet.
"""
"""Reverse the ordering of the QuerySet."""
clone = self._clone()
clone.query.standard_ordering = not clone.query.standard_ordering
return clone
def defer(self, *fields):
"""
Defers the loading of data for certain fields until they are accessed.
The set of fields to defer is added to any existing set of deferred
fields. The only exception to this is if None is passed in as the only
parameter, in which case all deferrals are removed (None acts as a
reset option).
Defer the loading of data for certain fields until they are accessed.
Add the set of deferred fields to any existing set of deferred fields.
The only exception to this is if None is passed in as the only
parameter, in which case remove all deferrals.
"""
if self._fields is not None:
raise TypeError("Cannot call defer() after .values() or .values_list()")
@ -982,7 +957,7 @@ class QuerySet:
def only(self, *fields):
"""
Essentially, the opposite of defer. Only the fields passed into this
Essentially, the opposite of defer(). Only the fields passed into this
method and that are not already specified as deferred are loaded
immediately when the queryset is evaluated.
"""
@ -997,9 +972,7 @@ class QuerySet:
return clone
def using(self, alias):
"""
Selects which database this QuerySet should execute its query against.
"""
"""Select which database this QuerySet should execute against."""
clone = self._clone()
clone._db = alias
return clone
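For example, assuming a 'replica' alias is configured in settings.DATABASES (Entry is a hypothetical model):

    qs = Entry.objects.using('replica')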
@ -1011,7 +984,7 @@ class QuerySet:
@property
def ordered(self):
"""
Returns True if the QuerySet is ordered -- i.e. has an order_by()
Return True if the QuerySet is ordered -- i.e. has an order_by()
clause or a default ordering on the model.
"""
if self.query.extra_order_by or self.query.order_by:
@ -1023,7 +996,7 @@ class QuerySet:
@property
def db(self):
"Return the database that will be used if this query is executed now"
"""Return the database used if this query is executed now."""
if self._for_write:
return self._db or router.db_for_write(self.model, **self._hints)
return self._db or router.db_for_read(self.model, **self._hints)
@ -1034,7 +1007,7 @@ class QuerySet:
def _insert(self, objs, fields, return_id=False, raw=False, using=None):
"""
Inserts a new record for the given model. This provides an interface to
Insert a new record for the given model. This provides an interface to
the InsertQuery class and is how Model.save() is implemented.
"""
self._for_write = True
@ -1048,9 +1021,9 @@ class QuerySet:
def _batched_insert(self, objs, fields, batch_size):
"""
A little helper method for bulk_insert to insert the bulk one batch
at a time. Inserts recursively a batch from the front of the bulk and
then _batched_insert() the remaining objects again.
A helper method for bulk_create() to insert the bulk one batch at a
time. Recursively insert a batch from the front of the bulk, then call
_batched_insert() on the remaining objects.
"""
if not objs:
return
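From the caller's side, the batching looks like this (Entry is a hypothetical model):

    # _batched_insert() splits this into ten INSERT statements of 100 rows each.
    Entry.objects.bulk_create(
        [Entry(headline='Entry %d' % i) for i in range(1000)],
        batch_size=100,
    )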
@ -1090,7 +1063,7 @@ class QuerySet:
def _next_is_sticky(self):
"""
Indicates that the next filter call and the one following that should
Indicate that the next filter call and the one following that should
be treated as a single filter. This is only important when it comes to
determining when to reuse tables for many-to-many filters. Required so
that we can filter naturally on the results of related managers.
@ -1103,9 +1076,7 @@ class QuerySet:
return self
def _merge_sanity_check(self, other):
"""
Checks that we are merging two comparable QuerySet classes.
"""
"""Check that two QuerySet classes may be merged."""
if self._fields is not None and (
set(self.query.values_select) != set(other.query.values_select) or
set(self.query.extra_select) != set(other.query.extra_select) or
@ -1136,17 +1107,16 @@ class QuerySet:
def _add_hints(self, **hints):
"""
Update hinting information for later use by Routers
Update hinting information for use by routers. Add new key/value pairs
or overwrite existing ones.
"""
# If there is any hinting information, add it to what we already know.
# If we have a new hint for an existing key, overwrite with the new value.
self._hints.update(hints)
def _has_filters(self):
"""
Checks if this QuerySet has any filtering going on. Note that this
isn't equivalent for checking if all objects are present in results,
for example qs[1:]._has_filters() -> False.
Check if this QuerySet has any filtering going on. This isn't
equivalent to checking if all objects are present in results; for
example, qs[1:]._has_filters() -> False.
"""
return self.query.has_filters()
@ -1158,7 +1128,7 @@ class InstanceCheckMeta(type):
class EmptyQuerySet(metaclass=InstanceCheckMeta):
"""
Marker class usable for checking if a queryset is empty by .none():
Marker class to check if a queryset is empty by .none():
isinstance(qs.none(), EmptyQuerySet) -> True
"""
@ -1168,7 +1138,7 @@ class EmptyQuerySet(metaclass=InstanceCheckMeta):
class RawQuerySet:
"""
Provides an iterator which converts the results of raw SQL queries into
Provide an iterator which converts the results of raw SQL queries into
annotated model instances.
"""
def __init__(self, raw_query, model=None, query=None, params=None,
@ -1182,9 +1152,7 @@ class RawQuerySet:
self.translations = translations or {}
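A RawQuerySet is normally obtained through Manager.raw(); a hedged example (Person and the myapp_person table are assumptions):

    for person in Person.objects.raw('SELECT * FROM myapp_person'):
        print(person.first_name)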
def resolve_model_init_order(self):
"""
Resolve the init field names and value positions
"""
"""Resolve the init field names and value positions."""
model_init_fields = [f for f in self.model._meta.fields if f.column in self.columns]
annotation_fields = [(column, pos) for pos, column in enumerate(self.columns)
if column not in self.model_fields]
@ -1240,13 +1208,11 @@ class RawQuerySet:
@property
def db(self):
"Return the database that will be used if this query is executed now"
"""Return the database used if this query is executed now."""
return self._db or router.db_for_read(self.model, **self._hints)
def using(self, alias):
"""
Selects which database this Raw QuerySet should execute its query against.
"""
"""Select the database this RawQuerySet should execute against."""
return RawQuerySet(
self.raw_query, model=self.model,
query=self.query.clone(using=alias),
@ -1273,9 +1239,7 @@ class RawQuerySet:
@cached_property
def model_fields(self):
"""
A dict mapping column names to model field names.
"""
"""A dict mapping column names to model field names."""
converter = connections[self.db].introspection.table_name_converter
model_fields = {}
for field in self.model._meta.fields:
@ -1336,9 +1300,7 @@ class Prefetch:
def normalize_prefetch_lookups(lookups, prefix=None):
"""
Helper function that normalize lookups into Prefetch objects.
"""
"""Normalize lookups into Prefetch objects."""
ret = []
for lookup in lookups:
if not isinstance(lookup, Prefetch):
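A sketch of the Prefetch objects this function produces or passes through (Author and Book are hypothetical models):

    from django.db.models import Prefetch

    Author.objects.prefetch_related(
        # A plain string lookup is normalized into Prefetch('books').
        Prefetch('books', queryset=Book.objects.filter(published=True),
                 to_attr='published_books'),
    )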
@ -1474,9 +1436,9 @@ def prefetch_related_objects(model_instances, *related_lookups):
def get_prefetcher(instance, through_attr, to_attr):
"""
For the attribute 'through_attr' on the given instance, finds
For the attribute 'through_attr' on the given instance, find
an object that has a get_prefetch_queryset().
Returns a 4 tuple containing:
Return a 4-tuple containing:
(the object with get_prefetch_queryset (or None),
the descriptor object representing this relationship (or None),
a boolean that is False if the attribute was not found at all,
@ -1521,14 +1483,13 @@ def get_prefetcher(instance, through_attr, to_attr):
def prefetch_one_level(instances, prefetcher, lookup, level):
"""
Helper function for prefetch_related_objects
Helper function for prefetch_related_objects().
Runs prefetches on all instances using the prefetcher object,
Run prefetches on all instances using the prefetcher object,
assigning results to relevant caches in the instances.
The prefetched objects are returned, along with any additional
prefetches that must be done due to prefetch_related lookups
found from default managers.
Return the prefetched objects along with any additional prefetches that
must be done due to prefetch_related lookups found from default managers.
"""
# prefetcher must have a method get_prefetch_queryset() which takes a list
# of instances, and returns a tuple:

View File

@ -19,9 +19,7 @@ PathInfo = namedtuple('PathInfo', 'from_opts to_opts target_fields join_field m2
class InvalidQuery(Exception):
"""
The query passed to raw isn't a safe query to use with raw.
"""
"""The query passed to raw() isn't a safe query to use with raw()."""
pass
@ -47,7 +45,7 @@ class QueryWrapper:
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
Encapsulate filters as objects that can then be combined logically (using
`&` and `|`).
"""
# Connection types
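For example (Entry with headline and pub_date fields is assumed):

    from django.db.models import Q

    Entry.objects.filter(
        Q(headline__startswith='Who') | Q(headline__startswith='What'),
        ~Q(pub_date__year=2016),
    )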
@ -112,8 +110,8 @@ class DeferredAttribute:
def __get__(self, instance, cls=None):
"""
Retrieves and caches the value from the datastore on the first lookup.
Returns the cached value.
Retrieve and cache the value from the datastore on the first lookup.
Return the cached value.
"""
if instance is None:
return self
@ -211,7 +209,7 @@ class RegisterLookupMixin:
def select_related_descend(field, restricted, requested, load_fields, reverse=False):
"""
Returns True if this field should be used to descend deeper for
Return True if this field should be used to descend deeper for
select_related() purposes. Used by both the query construction code
(sql.query.fill_related_selections()) and the model instance creation code
(query.get_klass_info()).
@ -247,10 +245,9 @@ def select_related_descend(field, restricted, requested, load_fields, reverse=Fa
def refs_expression(lookup_parts, annotations):
"""
A helper method to check if the lookup_parts contains references
to the given annotations set. Because the LOOKUP_SEP is contained in the
default annotation names we must check each prefix of the lookup_parts
for a match.
Check if the lookup_parts contains references to the given annotations set.
Because the LOOKUP_SEP is contained in the default annotation names, check
each prefix of the lookup_parts for a match.
"""
for n in range(len(lookup_parts) + 1):
level_n_lookup = LOOKUP_SEP.join(lookup_parts[0:n])

View File

@ -38,7 +38,7 @@ class SQLCompiler:
def pre_sql_setup(self):
"""
Does any necessary class setup immediately prior to producing SQL. This
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
@ -51,7 +51,7 @@ class SQLCompiler:
def get_group_by(self, select, order_by):
"""
Returns a list of 2-tuples of form (sql, params).
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
@ -163,7 +163,7 @@ class SQLCompiler:
def get_select(self):
"""
Returns three values:
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
@ -232,7 +232,7 @@ class SQLCompiler:
def get_order_by(self):
"""
Returns a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
@ -400,7 +400,7 @@ class SQLCompiler:
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list of
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
@ -513,14 +513,14 @@ class SQLCompiler:
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Computes the default columns for selecting every field in the base
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
Returns a list of strings, quoted appropriately for use in SQL
Return a list of strings, quoted appropriately for use in SQL
directly, as well as a set of aliases used in the select statement (if
'as_pairs' is True, returns a list of (alias, col_name) pairs instead
'as_pairs' is True, return a list of (alias, col_name) pairs instead
of strings as the first component and None as the second component).
"""
result = []
@ -558,10 +558,10 @@ class SQLCompiler:
def get_distinct(self):
"""
Returns a quoted list of fields to use in DISTINCT ON part of the query.
Return a quoted list of fields to use in DISTINCT ON part of the query.
Note that this method can alter the tables in the query, and thus it
must be called before get_from_clause().
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
qn = self.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
@ -582,7 +582,7 @@ class SQLCompiler:
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Returns the table alias (the name might be ambiguous, the alias will
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
@ -613,11 +613,11 @@ class SQLCompiler:
def _setup_joins(self, pieces, opts, alias):
"""
A helper method for get_order_by and get_distinct.
Helper method for get_order_by() and get_distinct().
Note that get_ordering and get_distinct must produce same target
columns on same input, as the prefixes of get_ordering and get_distinct
must match. Executing SQL where this is not true is an error.
get_ordering() and get_distinct() must produce the same target columns
on the same input, as the prefixes of get_ordering() and get_distinct()
must match. Executing SQL where this is not true is an error.
"""
if not alias:
alias = self.query.get_initial_alias()
@ -628,14 +628,14 @@ class SQLCompiler:
def get_from_clause(self):
"""
Returns a list of strings that are joined together to go after the
Return a list of strings that are joined together to go after the
"FROM" part of the query, as well as a list any extra parameters that
need to be included. Sub-classes, can override this to create a
need to be included. Subclasses can override this to create a
from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables we need. This means the select columns,
ordering and distinct must be done first.
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
@ -792,8 +792,8 @@ class SQLCompiler:
def deferred_to_columns(self):
"""
Converts the self.deferred_loading data structure to mapping of table
names to sets of column names which are to be loaded. Returns the
Convert the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
@ -820,9 +820,7 @@ class SQLCompiler:
return tuple(row)
def results_iter(self, results=None):
"""
Returns an iterator over the results from executing this query.
"""
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI)
fields = [s[0] for s in self.select[0:self.col_count]]
@ -845,7 +843,7 @@ class SQLCompiler:
def execute_sql(self, result_type=MULTI, chunked_fetch=False):
"""
Run the query against the database and returns the result(s). The
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
@ -933,10 +931,10 @@ class SQLInsertCompiler(SQLCompiler):
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Checks for raw values,
expressions and fields with get_placeholder() defined in that order.
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, the value is considered raw and is used as the
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
@ -994,9 +992,9 @@ class SQLInsertCompiler(SQLCompiler):
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values,
generate placeholder SQL and parameters for each field and value, and
return a pair containing:
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
@ -1105,7 +1103,7 @@ class SQLInsertCompiler(SQLCompiler):
class SQLDeleteCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
assert len([t for t in self.query.tables if self.query.alias_refcount[t] > 0]) == 1, \
@ -1121,7 +1119,7 @@ class SQLDeleteCompiler(SQLCompiler):
class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
@ -1176,7 +1174,7 @@ class SQLUpdateCompiler(SQLCompiler):
def execute_sql(self, result_type):
"""
Execute the specified update. Returns the number of rows affected by
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
@ -1197,13 +1195,12 @@ class SQLUpdateCompiler(SQLCompiler):
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
@ -1242,7 +1239,7 @@ class SQLUpdateCompiler(SQLCompiler):
class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
@ -1261,7 +1258,7 @@ class SQLAggregateCompiler(SQLCompiler):
def cursor_iter(cursor, sentinel, col_count):
"""
Yields blocks of rows from a cursor and ensures the cursor is closed when
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:

View File

@ -59,7 +59,7 @@ class Join:
def as_sql(self, compiler, connection):
"""
Generates the full
Generate the full
LEFT OUTER JOIN sometable ON sometable.somecol = othertable.othercol, params
clause for this join.
"""

View File

@ -44,9 +44,7 @@ def get_field_names_from_opts(opts):
class RawQuery:
"""
A single raw SQL query
"""
"""A single raw SQL query."""
def __init__(self, sql, using, params=None, context=None):
self.params = params or ()
@ -112,9 +110,7 @@ class RawQuery:
class Query:
"""
A single SQL query.
"""
"""A single SQL query."""
alias_prefix = 'T'
subq_aliases = frozenset([alias_prefix])
@ -221,7 +217,7 @@ class Query:
def __str__(self):
"""
Returns the query as a string of SQL with the parameter values
Return the query as a string of SQL with the parameter values
substituted in (use sql_with_params() to see the unsubstituted string).
Parameter values won't necessarily be quoted correctly, since that is
@ -232,7 +228,7 @@ class Query:
def sql_with_params(self):
"""
Returns the query as an SQL string and the parameters that will be
Return the query as an SQL string and the parameters that will be
substituted into the query.
"""
return self.get_compiler(DEFAULT_DB_ALIAS).as_sql()
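Both forms in a short sketch (Entry is a hypothetical model):

    qs = Entry.objects.filter(headline__contains='Django')
    print(qs.query)                           # SQL with parameter values substituted in
    sql, params = qs.query.sql_with_params()  # unsubstituted SQL plus its parameters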
@ -254,7 +250,7 @@ class Query:
def get_meta(self):
"""
Returns the Options instance (the model._meta) from which to start
Return the Options instance (the model._meta) from which to start
processing. Normally, this is self.model._meta, but it can be changed
by subclasses.
"""
@ -262,7 +258,7 @@ class Query:
def clone(self, klass=None, memo=None, **kwargs):
"""
Creates a copy of the current instance. The 'kwargs' parameter can be
Create a copy of the current instance. The 'kwargs' parameter can be
used by clients to update attributes after copying has taken place.
"""
obj = Empty()
@ -395,7 +391,7 @@ class Query:
def get_aggregation(self, using, added_aggregate_names):
"""
Returns the dictionary with the values of the existing aggregations.
Return the dictionary with the values of the existing aggregations.
"""
if not self.annotation_select:
return {}
@ -488,7 +484,7 @@ class Query:
def get_count(self, using):
"""
Performs a COUNT() query using the current filter constraints.
Perform a COUNT() query using the current filter constraints.
"""
obj = self.clone()
obj.add_annotation(Count('*'), alias='__count', is_summary=True)
@ -613,7 +609,7 @@ class Query:
def deferred_to_data(self, target, callback):
"""
Converts the self.deferred_loading data structure to an alternate data
Convert the self.deferred_loading data structure to an alternate data
structure, describing the field that *will* be loaded. This is used to
compute the columns to select from the database and also by the
QuerySet class to work out which fields are being initialized on each
@ -699,7 +695,7 @@ class Query:
def table_alias(self, table_name, create=False):
"""
Returns a table alias for the given table_name and whether this is a
Return a table alias for the given table_name and whether this is a
new alias or not.
If 'create' is true, a new alias is always created. Otherwise, the
@ -724,17 +720,17 @@ class Query:
return alias, True
def ref_alias(self, alias):
""" Increases the reference count for this alias. """
"""Increases the reference count for this alias."""
self.alias_refcount[alias] += 1
def unref_alias(self, alias, amount=1):
""" Decreases the reference count for this alias. """
"""Decreases the reference count for this alias."""
self.alias_refcount[alias] -= amount
def promote_joins(self, aliases):
"""
Promotes recursively the join type of given aliases and its children to
an outer join. If 'unconditional' is False, the join is only promoted if
Recursively promote the join type of the given aliases and their
children to an outer join. If 'unconditional' is False, only promote the join if
it is nullable or the parent join is an outer join.
The children promotion is done to avoid join chains that contain a LOUTER
@ -786,8 +782,8 @@ class Query:
def reset_refcounts(self, to_counts):
"""
This method will reset reference counts for aliases so that they match
the value passed in :param to_counts:.
Reset reference counts for aliases so that they match the value passed
in `to_counts`.
"""
for alias, cur_refcount in self.alias_refcount.copy().items():
unref_amount = cur_refcount - to_counts.get(alias, 0)
@ -795,7 +791,7 @@ class Query:
def change_aliases(self, change_map):
"""
Changes the aliases in change_map (which maps old-alias -> new-alias),
Change the aliases in change_map (which maps old-alias -> new-alias),
relabelling any references to them in select columns and the where
clause.
"""
@ -831,14 +827,14 @@ class Query:
def bump_prefix(self, outer_query):
"""
Changes the alias prefix to the next letter in the alphabet in a way
Change the alias prefix to the next letter in the alphabet in a way
that the outer query's aliases and this query's aliases will not
conflict. Even tables that previously had no alias will get an alias
after this call.
"""
def prefix_gen():
"""
Generates a sequence of characters in alphabetical order:
Generate a sequence of characters in alphabetical order:
-> 'A', 'B', 'C', ...
When the alphabet is finished, the sequence will continue with the
@ -878,7 +874,7 @@ class Query:
def get_initial_alias(self):
"""
Returns the first alias for this query, after increasing its reference
Return the first alias for this query, after increasing its reference
count.
"""
if self.tables:
@ -890,15 +886,15 @@ class Query:
def count_active_tables(self):
"""
Returns the number of tables in this query with a non-zero reference
count. Note that after execution, the reference counts are zeroed, so
tables added in compiler will not be seen by this method.
Return the number of tables in this query with a non-zero reference
count. After execution, the reference counts are zeroed, so tables
added in compiler will not be seen by this method.
"""
return len([1 for count in self.alias_refcount.values() if count])
def join(self, join, reuse=None):
"""
Returns an alias for the join in 'connection', either reusing an
Return an alias for the join in 'connection', either reusing an
existing alias for that join or creating a new one. 'connection' is a
tuple (lhs, table, join_cols) where 'lhs' is either an existing
table alias or a table name. 'join_cols' is a tuple of tuples containing
@ -940,7 +936,7 @@ class Query:
def join_parent_model(self, opts, model, alias, seen):
"""
Makes sure the given 'model' is joined in the query. If 'model' isn't
Make sure the given 'model' is joined in the query. If 'model' isn't
a parent of 'opts' or if it is None this method is a no-op.
The 'alias' is the root alias for starting the join, 'seen' is a dict
@ -973,9 +969,7 @@ class Query:
return alias or seen[None]
def add_annotation(self, annotation, alias, is_summary=False):
"""
Adds a single annotation expression to the Query
"""
"""Add a single annotation expression to the Query."""
annotation = annotation.resolve_expression(self, allow_joins=True, reuse=None,
summarize=is_summary)
self.append_annotation_mask([alias])
@ -1031,7 +1025,7 @@ class Query:
def solve_lookup_type(self, lookup):
"""
Solve the lookup type from the lookup (eg: 'foobar__id__icontains')
Solve the lookup type from the lookup (e.g.: 'foobar__id__icontains').
"""
lookup_splitted = lookup.split(LOOKUP_SEP)
if self._annotations:
@ -1051,8 +1045,8 @@ class Query:
def check_query_object_type(self, value, opts, field):
"""
Checks whether the object passed while querying is of the correct type.
If not, it raises a ValueError specifying the wrong object.
Check whether the object passed while querying is of the correct type.
If not, raise a ValueError specifying the wrong object.
"""
if hasattr(value, '_meta'):
if not check_rel_lookup_compatibility(value._meta.model, opts, field):
@ -1061,9 +1055,7 @@ class Query:
(value, opts.object_name))
def check_related_objects(self, field, value, opts):
"""
Checks the type of object passed to query relations.
"""
"""Check the type of object passed to query relations."""
if field.is_relation:
# Check that the field and the queryset use the same model in a
# query like .filter(author=Author.objects.all()). For example, the
@ -1087,7 +1079,7 @@ class Query:
def build_lookup(self, lookups, lhs, rhs):
"""
Tries to extract transforms and lookup from given lhs.
Try to extract transforms and lookup from given lhs.
The lhs value is something that works like SQLExpression.
The rhs value is what the lookup is going to compare against.
@ -1114,7 +1106,7 @@ class Query:
def try_transform(self, lhs, name, rest_of_lookups):
"""
Helper method for build_lookup. Tries to fetch and initialize
Helper method for build_lookup(). Try to fetch and initialize
a transform for the name parameter from lhs.
"""
transform_class = lhs.get_transform(name)
@ -1129,7 +1121,7 @@ class Query:
def build_filter(self, filter_expr, branch_negated=False, current_negated=False,
can_reuse=None, connector=AND, allow_joins=True, split_subq=True):
"""
Builds a WhereNode for a single filter clause, but doesn't add it
Build a WhereNode for a single filter clause but don't add it
to this Query. Query.add_q() will then add this filter to the where
Node.
@ -1140,7 +1132,7 @@ class Query:
negated or not and this will be used to determine if IS NULL filtering
is needed.
The difference between current_netageted and branch_negated is that
The difference between current_negated and branch_negated is that
branch_negated is set on first negation, but current_negated is
flipped for each negation.
@ -1263,9 +1255,7 @@ class Query:
def _add_q(self, q_object, used_aliases, branch_negated=False,
current_negated=False, allow_joins=True, split_subq=True):
"""
Adds a Q-object to the current filter.
"""
"""Add a Q-object to the current filter."""
connector = q_object.connector
current_negated = current_negated ^ q_object.negated
branch_negated = branch_negated or q_object.negated
@ -1292,20 +1282,18 @@ class Query:
def names_to_path(self, names, opts, allow_many=True, fail_on_missing=False):
"""
Walks the list of names and turns them into PathInfo tuples. Note that
a single name in 'names' can generate multiple PathInfos (m2m for
example).
Walk the list of names and turn them into PathInfo tuples. A single
name in 'names' can generate multiple PathInfos (m2m, for example).
'names' is the path of names to travel, 'opts' is the model Options we
start the name resolving from, 'allow_many' is as for setup_joins().
If fail_on_missing is set to True, then a name that can't be resolved
will generate a FieldError.
Returns a list of PathInfo tuples. In addition returns the final field
(the last used join field), and target (which is a field guaranteed to
contain the same value as the final field). Finally, the method returns
those names that weren't found (which are likely transforms and the
final lookup).
Return a list of PathInfo tuples. In addition return the final field
(the last used join field) and target (which is a field guaranteed to
contain the same value as the final field). Finally, return those names
that weren't found (which are likely transforms and the final lookup).
"""
path, names_with_path = [], []
for pos, name in enumerate(names):
@ -1397,7 +1385,7 @@ class Query:
If 'allow_many' is False, then any reverse foreign key seen will
generate a MultiJoin exception.
Returns the final field involved in the joins, the target field (used
Return the final field involved in the joins, the target field (used
for any 'where' constraint), the final 'opts' value, the joins and the
field path travelled to generate the joins.
@ -1433,13 +1421,12 @@ class Query:
is the full list of join aliases. The 'path' contain the PathInfos
used to create the joins.
Returns the final target field and table alias and the new active
Return the final target field and table alias and the new active
joins.
We will always trim any direct join if we have the target column
available already in the previous table. Reverse joins can't be
trimmed as we don't know if there is anything on the other side of
the join.
Always trim any direct join if the target column is already in the
previous table. Can't trim reverse joins as it's unknown if there's
anything on the other side of the join.
"""
joins = joins[:]
for pos, info in enumerate(reversed(path)):
@ -1555,13 +1542,12 @@ class Query:
def set_limits(self, low=None, high=None):
"""
Adjusts the limits on the rows retrieved. We use low/high to set these,
Adjust the limits on the rows retrieved. Use low/high to set these,
as it makes it more Pythonic to read and write. When the SQL query is
created, they are converted to the appropriate offset and limit values.
created, convert them to the appropriate offset and limit values.
Any limits passed in here are applied relative to the existing
constraints. So low is added to the current low value and both will be
clamped to any existing high value.
Apply any limits passed in here to the existing constraints. Add low
to the current low value and clamp both to any existing high value.
"""
if high is not None:
if self.high_mark is not None:
@ -1578,23 +1564,19 @@ class Query:
self.set_empty()
def clear_limits(self):
"""
Clears any existing limits.
"""
"""Clear any existing limits."""
self.low_mark, self.high_mark = 0, None
def can_filter(self):
"""
Returns True if adding filters to this instance is still possible.
Return True if adding filters to this instance is still possible.
Typically, this means no limits or offsets have been put on the results.
"""
return not self.low_mark and self.high_mark is None
def clear_select_clause(self):
"""
Removes all fields from SELECT clause.
"""
"""Remove all fields from SELECT clause."""
self.select = []
self.default_cols = False
self.select_related = False
@ -1603,7 +1585,7 @@ class Query:
def clear_select_fields(self):
"""
Clears the list of fields to select (but not extra_select columns).
Clear the list of fields to select (but not extra_select columns).
Some queryset types completely replace any existing list of select
columns.
"""
@ -1620,15 +1602,15 @@ class Query:
def add_distinct_fields(self, *field_names):
"""
Adds and resolves the given fields to the query's "distinct on" clause.
Add and resolve the given fields to the query's "distinct on" clause.
"""
self.distinct_fields = field_names
self.distinct = True
def add_fields(self, field_names, allow_m2m=True):
"""
Adds the given (model) fields to the select set. The field names are
added in the order specified.
Add the given (model) fields to the select set. Add the field names in
the order specified.
"""
alias = self.get_initial_alias()
opts = self.get_meta()
@ -1656,12 +1638,12 @@ class Query:
def add_ordering(self, *ordering):
"""
Adds items from the 'ordering' sequence to the query's "order by"
Add items from the 'ordering' sequence to the query's "order by"
clause. These items are either field names (not column names) --
possibly with a direction prefix ('-' or '?') -- or OrderBy
expressions.
If 'ordering' is empty, all ordering is cleared from the query.
If 'ordering' is empty, clear all ordering from the query.
"""
errors = []
for item in ordering:
@ -1681,7 +1663,7 @@ class Query:
def clear_ordering(self, force_empty):
"""
Removes any ordering settings. If 'force_empty' is True, there will be
Remove any ordering settings. If 'force_empty' is True, there will be
no ordering in the resulting query (not even the model's default).
"""
self.order_by = []
@ -1691,7 +1673,7 @@ class Query:
def set_group_by(self):
"""
Expands the GROUP BY clause required by the query.
Expand the GROUP BY clause required by the query.
This will usually be the set of all non-aggregate fields in the
return data. If the database backend supports grouping by the
@ -1710,7 +1692,7 @@ class Query:
def add_select_related(self, fields):
"""
Sets up the select_related data structure so that we only select
Set up the select_related data structure so that we only select
certain related models (as opposed to all models, when
self.select_related=True).
"""
@ -1726,7 +1708,7 @@ class Query:
def add_extra(self, select, select_params, where, params, tables, order_by):
"""
Adds data to the various extra_* attributes for user-created additions
Add data to the various extra_* attributes for user-created additions
to the query.
"""
if select:
@ -1758,16 +1740,14 @@ class Query:
self.extra_order_by = order_by
def clear_deferred_loading(self):
"""
Remove any fields from the deferred loading set.
"""
"""Remove any fields from the deferred loading set."""
self.deferred_loading = (set(), True)
def add_deferred_loading(self, field_names):
"""
Add the given list of model field names to the set of fields to
exclude from loading from the database when automatic column selection
is done. The new field names are added to any existing field names that
is done. Add the new field names to any existing field names that
are deferred (or remove them from any existing field names that are marked
as the only ones for immediate loading).
"""
@ -1788,8 +1768,8 @@ class Query:
Add the given list of model field names to the set of fields to
retrieve when the SQL is executed ("immediate loading" fields). The
field names replace any existing immediate loading field names. If
there are field names already specified for deferred loading, those
names are removed from the new field_names before storing the new names
there are field names already specified for deferred loading, remove
those names from the new field_names before storing the new names
for immediate loading. (That is, immediate loading overrides any
existing immediate values, but respects existing deferrals.)
"""
@ -1809,12 +1789,12 @@ class Query:
def get_loaded_field_names(self):
"""
If any fields are marked to be deferred, returns a dictionary mapping
If any fields are marked to be deferred, return a dictionary mapping
models to a set of names in those fields that will be loaded. If a
model is not in the returned dictionary, none of its fields are
deferred.
If no fields are marked for deferral, returns an empty dictionary.
If no fields are marked for deferral, return an empty dictionary.
"""
# We cache this because we call this function multiple times
# (compiler.fill_related_selections, query.iterator)
@ -1827,13 +1807,11 @@ class Query:
return collection
def get_loaded_field_names_cb(self, target, model, fields):
"""
Callback used by get_deferred_field_names().
"""
"""Callback used by get_deferred_field_names()."""
target[model] = {f.attname for f in fields}
def set_annotation_mask(self, names):
"Set the mask of annotations that will actually be returned by the SELECT"
"""Set the mask of annotations that will be returned by the SELECT."""
if names is None:
self.annotation_select_mask = None
else:
@ -1846,9 +1824,8 @@ class Query:
def set_extra_mask(self, names):
"""
Set the mask of extra select items that will be returned by SELECT,
we don't actually remove them from the Query since they might be used
later
Set the mask of extra select items that will be returned by SELECT.
Don't remove them from the Query since they might be used later.
"""
if names is None:
self.extra_select_mask = None
@ -1893,10 +1870,9 @@ class Query:
@property
def annotation_select(self):
"""The OrderedDict of aggregate columns that are not masked, and should
be used in the SELECT clause.
This result is cached for optimization purposes.
"""
Return the OrderedDict of aggregate columns that are not masked and
should be used in the SELECT clause. Cache this result for performance.
"""
if self._annotation_select_cache is not None:
return self._annotation_select_cache
@ -1928,16 +1904,16 @@ class Query:
def trim_start(self, names_with_path):
"""
Trims joins from the start of the join path. The candidates for trim
Trim joins from the start of the join path. The candidates for trim
are the PathInfos in names_with_path structure that are m2m joins.
Also sets the select column so the start matches the join.
Also set the select column so the start matches the join.
This method is meant to be used for generating the subquery joins &
cols in split_exclude().
Returns a lookup usable for doing outerq.filter(lookup=self). Returns
also if the joins in the prefix contain a LEFT OUTER join.
Return a lookup usable for doing outerq.filter(lookup=self) and a
boolean indicating if the joins in the prefix contain a LEFT OUTER join.
_"""
all_paths = []
for _, paths in names_with_path:
@ -1995,7 +1971,7 @@ class Query:
def is_nullable(self, field):
"""
A helper to check if the given field should be treated as nullable.
Check if the given field should be treated as nullable.
Some backends treat '' as null and Django treats such fields as
nullable for those backends. In such situations field.null can be
@ -2025,7 +2001,7 @@ class Query:
def get_order_dir(field, default='ASC'):
"""
Returns the field name and direction for an order specification. For
Return the field name and direction for an order specification. For
example, '-foo' is returned as ('foo', 'DESC').
The 'default' param is used to indicate which way no prefix (or a '+'
@ -2039,8 +2015,8 @@ def get_order_dir(field, default='ASC'):
def add_to_dict(data, key, value):
"""
A helper function to add "value" to the set of values for "key", whether or
not "key" already exists.
Add "value" to the set of values for "key", whether or not "key" already
exists.
"""
if key in data:
data[key].add(value)
@ -2050,8 +2026,8 @@ def add_to_dict(data, key, value):
def is_reverse_o2o(field):
"""
A little helper to check if the given field is reverse-o2o. The field is
expected to be some sort of relation field or related object.
Check if the given field is reverse-o2o. The field is expected to be some
sort of relation field or related object.
"""
return field.is_relation and field.one_to_one and not field.concrete

View File

@ -14,10 +14,7 @@ __all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'AggregateQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
"""A DELETE SQL query."""
compiler = 'SQLDeleteCompiler'
@ -81,9 +78,7 @@ class DeleteQuery(Query):
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
"""An UPDATE SQL query."""
compiler = 'SQLUpdateCompiler'
@ -93,7 +88,7 @@ class UpdateQuery(Query):
def _setup_query(self):
"""
Runs on initialization and after cloning. Any attributes that would
Run on initialization and after cloning. Any attributes that would
normally be set in __init__ should go here instead, so that they
are also set up after a clone() call.
"""
@ -148,15 +143,15 @@ class UpdateQuery(Query):
def add_related_update(self, model, field, value):
"""
Adds (name, value) to an update query for an ancestor model.
Add (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
Updates are coalesced so that only one update query per ancestor is run.
"""
self.related_updates.setdefault(model, []).append((field, None, value))
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
Return a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
@ -181,15 +176,6 @@ class InsertQuery(Query):
self.objs = []
def insert_values(self, fields, objs, raw=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
self.fields = fields
self.objs = objs
self.raw = raw
@ -197,8 +183,8 @@ class InsertQuery(Query):
class AggregateQuery(Query):
"""
An AggregateQuery takes another query as a parameter to the FROM
clause and only selects the elements in the provided list.
Take another query as a parameter to the FROM clause and only select the
elements in the provided list.
"""
compiler = 'SQLAggregateCompiler'

View File

@ -13,7 +13,7 @@ OR = 'OR'
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
An SQL WHERE clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
@ -29,7 +29,7 @@ class WhereNode(tree.Node):
def split_having(self, negated=False):
"""
Returns two possibly None nodes: one for those parts of self that
Return two possibly None nodes: one for those parts of self that
should be included in the WHERE clause and one for those parts of
self that must be included in the HAVING clause.
"""
@ -62,9 +62,9 @@ class WhereNode(tree.Node):
def as_sql(self, compiler, connection):
"""
Returns the SQL version of the where clause and the value to be
substituted in. Returns '', [] if this node matches everything,
None, [] if this node is empty, and raises EmptyResultSet if this
Return the SQL version of the where clause and the value to be
substituted in. Return '', [] if this node matches everything,
None, [] if this node is empty, and raise EmptyResultSet if this
node can't match anything.
"""
result = []
@ -127,7 +127,7 @@ class WhereNode(tree.Node):
def relabel_aliases(self, change_map):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
Relabel the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
for pos, child in enumerate(self.children):
@ -139,7 +139,7 @@ class WhereNode(tree.Node):
def clone(self):
"""
Creates a clone of the tree. Must only be called on root nodes (nodes
Create a clone of the tree. Must only be called on root nodes (nodes
with empty subtree_parents). Children must be either (Constraint, lookup,
value) tuples, or objects supporting .clone().
"""
@ -173,9 +173,7 @@ class WhereNode(tree.Node):
class NothingNode:
"""
A node that matches nothing.
"""
"""A node that matches nothing."""
contains_aggregate = False
def as_sql(self, compiler=None, connection=None):

View File

@ -1,8 +1,8 @@
def make_model_tuple(model):
"""
Takes a model or a string of the form "app_label.ModelName" and returns a
Take a model or a string of the form "app_label.ModelName" and return a
corresponding ("app_label", "modelname") tuple. If a tuple is passed in,
it's assumed to be a valid model tuple already and returned unchanged.
assume it's a valid model tuple already and return it unchanged.
"""
try:
if isinstance(model, tuple):
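For example, with django.contrib.auth installed:

    make_model_tuple('auth.User')       # ('auth', 'user')
    make_model_tuple(User)              # ('auth', 'user'), given the model class
    make_model_tuple(('auth', 'user'))  # returned unchanged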

View File

@ -6,9 +6,7 @@ from django.db import (
class TransactionManagementError(ProgrammingError):
"""
This exception is thrown when transaction management is used improperly.
"""
"""Transaction management is used improperly."""
pass
@ -23,37 +21,29 @@ def get_connection(using=None):
def get_autocommit(using=None):
"""
Get the autocommit status of the connection.
"""
"""Get the autocommit status of the connection."""
return get_connection(using).get_autocommit()
def set_autocommit(autocommit, using=None):
"""
Set the autocommit status of the connection.
"""
"""Set the autocommit status of the connection."""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None):
"""
Commits a transaction.
"""
"""Commit a transaction."""
get_connection(using).commit()
def rollback(using=None):
"""
Rolls back a transaction.
"""
"""Roll back a transaction."""
get_connection(using).rollback()
def savepoint(using=None):
"""
Creates a savepoint (if supported and required by the backend) inside the
current transaction. Returns an identifier for the savepoint that will be
Create a savepoint (if supported and required by the backend) inside the
current transaction. Return an identifier for the savepoint that will be
used for the subsequent rollback or commit.
"""
return get_connection(using).savepoint()
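A hedged sketch of the savepoint API in use (do_risky_insert() is a placeholder):

    from django.db import IntegrityError, transaction

    sid = transaction.savepoint()
    try:
        do_risky_insert()
    except IntegrityError:
        transaction.savepoint_rollback(sid)
    else:
        transaction.savepoint_commit(sid)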
@ -61,7 +51,7 @@ def savepoint(using=None):
def savepoint_rollback(sid, using=None):
"""
Rolls back the most recent savepoint (if one exists). Does nothing if
Roll back the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_rollback(sid)
@ -69,7 +59,7 @@ def savepoint_rollback(sid, using=None):
def savepoint_commit(sid, using=None):
"""
Commits the most recent savepoint (if one exists). Does nothing if
Commit the most recent savepoint (if one exists). Do nothing if
savepoints are not supported.
"""
get_connection(using).savepoint_commit(sid)
@ -77,29 +67,27 @@ def savepoint_commit(sid, using=None):
def clean_savepoints(using=None):
"""
Resets the counter used to generate unique savepoint ids in this thread.
Reset the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def get_rollback(using=None):
"""
Gets the "needs rollback" flag -- for *advanced use* only.
"""
"""Get the "needs rollback" flag -- for *advanced use* only."""
return get_connection(using).get_rollback()
def set_rollback(rollback, using=None):
"""
Sets or unsets the "needs rollback" flag -- for *advanced use* only.
Set or unset the "needs rollback" flag -- for *advanced use* only.
When `rollback` is `True`, it triggers a rollback when exiting the
innermost enclosing atomic block that has `savepoint=True` (that's the
default). Use this to force a rollback without raising an exception.
When `rollback` is `True`, trigger a rollback when exiting the innermost
enclosing atomic block that has `savepoint=True` (that's the default). Use
this to force a rollback without raising an exception.
When `rollback` is `False`, it prevents such a rollback. Use this only
after rolling back to a known-good state! Otherwise, you break the atomic
block and data corruption may occur.
When `rollback` is `False`, prevent such a rollback. Use this only after
rolling back to a known-good state! Otherwise, you break the atomic block
and data corruption may occur.
"""
return get_connection(using).set_rollback(rollback)
@ -118,7 +106,7 @@ def on_commit(func, using=None):
class Atomic(ContextDecorator):
"""
This class guarantees the atomic execution of a given block.
Guarantee the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
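Both usages in a short sketch (the function bodies are placeholders):

    from django.db import transaction

    @transaction.atomic
    def create_entries(request):
        ...  # runs inside a transaction

    def other_view(request):
        with transaction.atomic():  # as a context manager
            ...  # the block is rolled back on exception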

View File

@ -50,7 +50,7 @@ class NotSupportedError(DatabaseError):
class DatabaseErrorWrapper:
"""
Context manager and decorator that re-throws backend-specific database
Context manager and decorator that reraises backend-specific database
exceptions using Django's common wrappers.
"""
@ -161,7 +161,7 @@ class ConnectionHandler:
def ensure_defaults(self, alias):
"""
Puts the defaults into the settings dictionary for a given connection
Put the defaults into the settings dictionary for a given connection
where no settings are provided.
"""
try:
@ -182,7 +182,7 @@ class ConnectionHandler:
def prepare_test_settings(self, alias):
"""
Makes sure the test settings are available in the 'TEST' sub-dictionary.
Make sure the test settings are available in the 'TEST' sub-dictionary.
"""
try:
conn = self.databases[alias]
@ -229,7 +229,7 @@ class ConnectionHandler:
class ConnectionRouter:
def __init__(self, routers=None):
"""
If routers is not specified, will default to settings.DATABASE_ROUTERS.
If routers is not specified, default to settings.DATABASE_ROUTERS.
"""
self._routers = routers
@ -304,8 +304,6 @@ class ConnectionRouter:
)
def get_migratable_models(self, app_config, db, include_auto_created=False):
"""
Return app models allowed to be synchronized on provided db.
"""
"""Return app models allowed to be migrated on provided db."""
models = app_config.get_models(include_auto_created=include_auto_created)
return [model for model in models if self.allow_migrate_model(db, model)]