Merged the queryset-refactor branch into trunk.

This is a big internal change, but it is mostly backwards compatible with
existing code. It also adds a couple of new features.

Fixed #245, #1050, #1656, #1801, #2076, #2091, #2150, #2253, #2306, #2400, #2430, #2482, #2496, #2676, #2737, #2874, #2902, #2939, #3037, #3141, #3288, #3440, #3592, #3739, #4088, #4260, #4289, #4306, #4358, #4464, #4510, #4858, #5012, #5020, #5261, #5295, #5321, #5324, #5325, #5555, #5707, #5796, #5817, #5987, #6018, #6074, #6088, #6154, #6177, #6180, #6203, #6658


git-svn-id: http://code.djangoproject.com/svn/django/trunk@7477 bcc190cf-cafb-0310-a4f2-bffc1f526a37
Malcolm Tredinnick 2008-04-27 02:50:16 +00:00
parent c91a30f00f
commit 9c52d56f6f
57 changed files with 5717 additions and 1739 deletions
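Two of the user-visible additions from this branch show up further down in the diff: abstract model base classes (handled in ModelBase in django/db/models/base.py) and the public QuerySet.update(), which the reworked method_set_order() now uses. The following is a minimal, hypothetical sketch of how those features look from application code; the model and field names are invented and it assumes models defined in an installed Django app.

from django.db import models

class CommonInfo(models.Model):
    # Abstract base class: it gets no database table of its own; its
    # fields are copied onto each concrete subclass.
    name = models.CharField(max_length=100)

    class Meta:
        abstract = True

class Student(CommonInfo):
    year_in_school = models.CharField(max_length=2)

# Bulk update executed as a single SQL UPDATE, without loading objects:
Student.objects.filter(year_in_school='JR').update(year_in_school='SR')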

View File

@ -8,7 +8,7 @@ from django.core.exceptions import ImproperlyConfigured, ObjectDoesNotExist, Per
from django.core.paginator import QuerySetPaginator, InvalidPage
from django.shortcuts import get_object_or_404, render_to_response
from django.db import models
from django.db.models.query import handle_legacy_orderlist, QuerySet
from django.db.models.query import QuerySet
from django.http import Http404, HttpResponse, HttpResponseRedirect
from django.utils.html import escape
from django.utils.text import capfirst, get_text_list
@ -627,7 +627,7 @@ class ChangeList(object):
# Perform a slight optimization: Check to see whether any filters were
# given. If not, use paginator.hits to calculate the number of objects,
# because we've already done paginator.hits and the value is cached.
if isinstance(self.query_set._filters, models.Q) and not self.query_set._filters.kwargs:
if not self.query_set.query.where:
full_result_count = result_count
else:
full_result_count = self.manager.count()
@ -653,15 +653,12 @@ class ChangeList(object):
def get_ordering(self):
lookup_opts, params = self.lookup_opts, self.params
# For ordering, first check the "ordering" parameter in the admin options,
# then check the object's default ordering. If neither of those exist,
# order descending by ID by default. Finally, look for manually-specified
# ordering from the query string.
# For ordering, first check the "ordering" parameter in the admin
# options, then check the object's default ordering. If neither of
# those exist, order descending by ID by default. Finally, look for
# manually-specified ordering from the query string.
ordering = lookup_opts.admin.ordering or lookup_opts.ordering or ['-' + lookup_opts.pk.name]
# Normalize it to new-style ordering.
ordering = handle_legacy_orderlist(ordering)
if ordering[0].startswith('-'):
order_field, order_type = ordering[0][1:], 'desc'
else:
@ -753,8 +750,7 @@ class ChangeList(object):
for bit in self.query.split():
or_queries = [models.Q(**{construct_search(field_name): bit}) for field_name in self.lookup_opts.admin.search_fields]
other_qs = QuerySet(self.model)
if qs._select_related:
other_qs = other_qs.select_related()
other_qs.dup_select_related(qs)
other_qs = other_qs.filter(reduce(operator.or_, or_queries))
qs = qs & other_qs
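The hunk above ANDs one OR-group of Q objects per whitespace-separated search term. As a hedged, standalone sketch of the same pattern (model and field names are hypothetical, and a plain icontains lookup stands in for the admin's construct_search() prefix handling):

import operator

from django.db.models import Q

def search(queryset, search_fields, query):
    # AND together one OR-group of Q objects per whitespace-separated term.
    for bit in query.split():
        or_queries = [Q(**{'%s__icontains' % field_name: bit})
                      for field_name in search_fields]
        queryset = queryset.filter(reduce(operator.or_, or_queries))
    return queryset

# Usage (hypothetical model): search(Article.objects.all(), ['title', 'body'], 'some words')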

View File

@ -154,6 +154,11 @@ class GenericRelation(RelatedField, Field):
def get_internal_type(self):
return "ManyToManyField"
def db_type(self):
# Since we're simulating a ManyToManyField, in effect, best return the
# same db_type as well.
return None
class ReverseGenericRelatedObjectsDescriptor(object):
"""
This class provides the functionality that makes the related-object

View File

@ -27,3 +27,8 @@ class MiddlewareNotUsed(Exception):
class ImproperlyConfigured(Exception):
"Django is somehow improperly configured"
pass
class FieldError(Exception):
"""Some kind of problem with a model field."""
pass

View File

@ -26,7 +26,7 @@ def django_table_list(only_existing=False):
for app in models.get_apps():
for model in models.get_models(app):
tables.append(model._meta.db_table)
tables.extend([f.m2m_db_table() for f in model._meta.many_to_many])
tables.extend([f.m2m_db_table() for f in model._meta.local_many_to_many])
if only_existing:
existing = table_list()
tables = [t for t in tables if t in existing]
@ -54,12 +54,12 @@ def sequence_list():
for app in apps:
for model in models.get_models(app):
for f in model._meta.fields:
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
sequence_list.append({'table': model._meta.db_table, 'column': f.column})
break # Only one AutoField is allowed per model, so don't bother continuing.
for f in model._meta.many_to_many:
for f in model._meta.local_many_to_many:
sequence_list.append({'table': f.m2m_db_table(), 'column': None})
return sequence_list
@ -149,7 +149,7 @@ def sql_delete(app, style):
if cursor and table_name_converter(model._meta.db_table) in table_names:
# The table exists, so it needs to be dropped
opts = model._meta
for f in opts.fields:
for f in opts.local_fields:
if f.rel and f.rel.to not in to_delete:
references_to_delete.setdefault(f.rel.to, []).append( (model, f) )
@ -181,7 +181,7 @@ def sql_delete(app, style):
# Output DROP TABLE statements for many-to-many tables.
for model in app_models:
opts = model._meta
for f in opts.many_to_many:
for f in opts.local_many_to_many:
if isinstance(f.rel, generic.GenericRel):
continue
if cursor and table_name_converter(f.m2m_db_table()) in table_names:
@ -258,7 +258,7 @@ def sql_model_create(model, style, known_models=set()):
pending_references = {}
qn = connection.ops.quote_name
inline_references = connection.features.inline_fk_references
for f in opts.fields:
for f in opts.local_fields:
col_type = f.db_type()
tablespace = f.db_tablespace or opts.db_tablespace
if col_type is None:
@ -294,14 +294,8 @@ def sql_model_create(model, style, known_models=set()):
style.SQL_COLTYPE(models.IntegerField().db_type()) + ' ' + \
style.SQL_KEYWORD('NULL'))
for field_constraints in opts.unique_together:
constraint_output = [style.SQL_KEYWORD('UNIQUE')]
constraint_output.append('(%s)' % \
table_output.append(style.SQL_KEYWORD('UNIQUE') + ' (%s)' % \
", ".join([style.SQL_FIELD(qn(opts.get_field(f).column)) for f in field_constraints]))
if opts.db_tablespace and connection.features.supports_tablespaces \
and connection.features.autoindexes_primary_keys:
constraint_output.append(connection.ops.tablespace_sql(
opts.db_tablespace, inline=True))
table_output.append(' '.join(constraint_output))
full_statement = [style.SQL_KEYWORD('CREATE TABLE') + ' ' + style.SQL_TABLE(qn(opts.db_table)) + ' (']
for i, line in enumerate(table_output): # Combine and add commas.
@ -359,7 +353,7 @@ def many_to_many_sql_for_model(model, style):
final_output = []
qn = connection.ops.quote_name
inline_references = connection.features.inline_fk_references
for f in opts.many_to_many:
for f in opts.local_many_to_many:
if not isinstance(f.rel, generic.GenericRel):
tablespace = f.db_tablespace or opts.db_tablespace
if tablespace and connection.features.supports_tablespaces and connection.features.autoindexes_primary_keys:
@ -466,7 +460,7 @@ def sql_indexes_for_model(model, style):
output = []
qn = connection.ops.quote_name
for f in model._meta.fields:
for f in model._meta.local_fields:
if f.db_index and not ((f.primary_key or f.unique) and connection.features.autoindexes_primary_keys):
unique = f.unique and 'UNIQUE ' or ''
tablespace = f.db_tablespace or model._meta.db_tablespace

View File

@ -32,7 +32,7 @@ def get_validation_errors(outfile, app=None):
opts = cls._meta
# Do field-specific validation.
for f in opts.fields:
for f in opts.local_fields:
if f.name == 'id' and not f.primary_key and opts.pk.name == 'id':
e.add(opts, '"%s": You can\'t use "id" as a field name, because each model automatically gets an "id" field if none of the fields have primary_key=True. You need to either remove/rename your "id" field or add primary_key=True to a field.' % f.name)
if f.name.endswith('_'):
@ -69,8 +69,8 @@ def get_validation_errors(outfile, app=None):
if db_version < (5, 0, 3) and isinstance(f, (models.CharField, models.CommaSeparatedIntegerField, models.SlugField)) and f.max_length > 255:
e.add(opts, '"%s": %s cannot have a "max_length" greater than 255 when you are using a version of MySQL prior to 5.0.3 (you are using %s).' % (f.name, f.__class__.__name__, '.'.join([str(n) for n in db_version[:3]])))
# Check to see if the related field will clash with any
# existing fields, m2m fields, m2m related objects or related objects
# Check to see if the related field will clash with any existing
# fields, m2m fields, m2m related objects or related objects
if f.rel:
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has relation with model %s, which has not been installed" % (f.name, f.rel.to))
@ -87,7 +87,7 @@ def get_validation_errors(outfile, app=None):
e.add(opts, "Accessor for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.many_to_many:
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
@ -104,9 +104,10 @@ def get_validation_errors(outfile, app=None):
if r.get_accessor_name() == rel_query_name:
e.add(opts, "Reverse query name for field '%s' clashes with related field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.get_accessor_name(), f.name))
for i, f in enumerate(opts.many_to_many):
for i, f in enumerate(opts.local_many_to_many):
# Check to see if the related m2m field will clash with any
# existing fields, m2m fields, m2m related objects or related objects
# existing fields, m2m fields, m2m related objects or related
# objects
if f.rel.to not in models.get_models():
e.add(opts, "'%s' has m2m relation with model %s, which has not been installed" % (f.name, f.rel.to))
# it is a string and we could not find the model it refers to
@ -117,17 +118,17 @@ def get_validation_errors(outfile, app=None):
rel_opts = f.rel.to._meta
rel_name = RelatedObject(f.rel.to, cls, f).get_accessor_name()
rel_query_name = f.related_query_name()
# If rel_name is none, there is no reverse accessor.
# (This only occurs for symmetrical m2m relations to self).
# If this is the case, there are no clashes to check for this field, as
# there are no reverse descriptors for this field.
# If rel_name is none, there is no reverse accessor (this only
# occurs for symmetrical m2m relations to self). If this is the
# case, there are no clashes to check for this field, as there are
# no reverse descriptors for this field.
if rel_name is not None:
for r in rel_opts.fields:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
e.add(opts, "Reverse query name for m2m field '%s' clashes with field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
for r in rel_opts.many_to_many:
for r in rel_opts.local_many_to_many:
if r.name == rel_name:
e.add(opts, "Accessor for m2m field '%s' clashes with m2m field '%s.%s'. Add a related_name argument to the definition for '%s'." % (f.name, rel_opts.object_name, r.name, f.name))
if r.name == rel_query_name:
@ -200,7 +201,10 @@ def get_validation_errors(outfile, app=None):
field_name = field_name[1:]
if opts.order_with_respect_to and field_name == '_order':
continue
if '.' in field_name: continue # Skip ordering in the format 'table.field'.
# Skip ordering in the format field1__field2 (FIXME: checking
# this format would be nice, but it's a little fiddly).
if '_' in field_name:
continue
try:
opts.get_field(field_name, many_to_many=False)
except models.FieldDoesNotExist:
@ -228,5 +232,7 @@ def get_validation_errors(outfile, app=None):
else:
if isinstance(f.rel, models.ManyToManyRel):
e.add(opts, '"unique_together" refers to %s. ManyToManyFields are not supported in unique_together.' % f.name)
if f not in opts.local_fields:
e.add(opts, '"unique_together" refers to %s. This is not in the same model as the unique_together statement.' % f.name)
return len(e.errors)
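The accessor-clash checks above suggest adding a related_name whenever two relations to the same model would otherwise install the same reverse accessor. A hedged illustration with hypothetical models:

from django.db import models

class Person(models.Model):
    name = models.CharField(max_length=50)

class Flight(models.Model):
    # Without distinct related_name values, both foreign keys would try to
    # install the same reverse accessor (person.flight_set), which the
    # validation above reports as an accessor clash.
    pilot = models.ForeignKey(Person, related_name='flights_as_pilot')
    copilot = models.ForeignKey(Person, related_name='flights_as_copilot')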

View File

@ -165,7 +165,7 @@ class DeserializedObject(object):
# This ensures that the data that is deserialized is literally
# what came from the file, not post-processed by pre_save/save
# methods.
models.Model.save(self.object, raw=True)
models.Model.save_base(self.object, raw=True)
if self.m2m_data and save_m2m:
for accessor_name, object_list in self.m2m_data.items():
setattr(self.object, accessor_name, object_list)

View File

@ -11,16 +11,18 @@ if not settings.DATABASE_ENGINE:
settings.DATABASE_ENGINE = 'dummy'
try:
# Most of the time, the database backend will be one of the official
# Most of the time, the database backend will be one of the official
# backends that ships with Django, so look there first.
_import_path = 'django.db.backends.'
backend = __import__('%s%s.base' % (_import_path, settings.DATABASE_ENGINE), {}, {}, [''])
creation = __import__('%s%s.creation' % (_import_path, settings.DATABASE_ENGINE), {}, {}, [''])
except ImportError, e:
# If the import failed, we might be looking for a database backend
# If the import failed, we might be looking for a database backend
# distributed external to Django. So we'll try that next.
try:
_import_path = ''
backend = __import__('%s.base' % settings.DATABASE_ENGINE, {}, {}, [''])
creation = __import__('%s.creation' % settings.DATABASE_ENGINE, {}, {}, [''])
except ImportError, e_user:
# The database backend wasn't found. Display a helpful error message
# listing all possible (built-in) database backends.
@ -37,10 +39,12 @@ def _import_database_module(import_path='', module_name=''):
"""Lazily import a database module when requested."""
return __import__('%s%s.%s' % (import_path, settings.DATABASE_ENGINE, module_name), {}, {}, [''])
# We don't want to import the introspect/creation modules unless
# someone asks for 'em, so lazily load them on demand.
# We don't want to import the introspect module unless someone asks for it, so
# lazily load it on demand.
get_introspection_module = curry(_import_database_module, _import_path, 'introspection')
get_creation_module = curry(_import_database_module, _import_path, 'creation')
def get_creation_module():
return creation
# We want runshell() to work the same way, but we have to treat it a
# little differently (since it just runs instead of returning a module like

View File

@ -49,7 +49,8 @@ class BaseDatabaseFeatures(object):
supports_constraints = True
supports_tablespaces = False
uses_case_insensitive_names = False
uses_custom_queryset = False
uses_custom_query_class = False
empty_fetchmany_value = []
class BaseDatabaseOperations(object):
"""
@ -86,10 +87,9 @@ class BaseDatabaseOperations(object):
Returns the SQL necessary to cast a datetime value so that it will be
retrieved as a Python datetime object instead of a string.
This SQL should include a '%s' in place of the field's name. This
method should return None if no casting is necessary.
This SQL should include a '%s' in place of the field's name.
"""
return None
return "%s"
def deferrable_sql(self):
"""
@ -169,6 +169,14 @@ class BaseDatabaseOperations(object):
sql += " OFFSET %s" % offset
return sql
def lookup_cast(self, lookup_type):
"""
Returns the string to use in a query when performing lookups
("contains", "like", etc). The resulting string should contain a '%s'
placeholder for the column being searched against.
"""
return "%s"
def max_name_length(self):
"""
Returns the maximum length of table and column names, or None if there
@ -176,6 +184,14 @@ class BaseDatabaseOperations(object):
"""
return None
def no_limit_value(self):
"""
Returns the value to use for the LIMIT when we want "LIMIT
infinity". Returns None if the limit clause can be omitted in this case.
"""
# FIXME: API may need to change once Oracle backend is repaired.
raise NotImplementedError()
def pk_default_value(self):
"""
Returns the value to use during an INSERT statement to specify that
@ -183,11 +199,11 @@ class BaseDatabaseOperations(object):
"""
return 'DEFAULT'
def query_set_class(self, DefaultQuerySet):
def query_class(self, DefaultQueryClass):
"""
Given the default QuerySet class, returns a custom QuerySet class
to use for this backend. Returns None if a custom QuerySet isn't used.
See also BaseDatabaseFeatures.uses_custom_queryset, which regulates
See also BaseDatabaseFeatures.uses_custom_query_class, which regulates
whether this method is called at all.
"""
return None
@ -205,6 +221,17 @@ class BaseDatabaseOperations(object):
"""
return 'RANDOM()'
def regex_lookup(self, lookup_type):
"""
Returns the string to use in a query when performing regular expression
lookups (using "regex" or "iregex"). The resulting string should
contain a '%s' placeholder for the column being searched against.
If the feature is not supported (or part of it is not supported), a
NotImplementedError exception can be raised.
"""
raise NotImplementedError
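The new BaseDatabaseOperations hooks above (lookup_cast(), no_limit_value(), regex_lookup()) are meant to be overridden per backend, as the MySQL, Oracle, PostgreSQL and SQLite changes later in this diff do. Below is a rough sketch of what a third-party backend's overrides might look like; the class and the regex syntax are invented for illustration and are not part of this commit.

from django.db.backends import BaseDatabaseOperations

class MyDatabaseOperations(BaseDatabaseOperations):
    def lookup_cast(self, lookup_type):
        # Wrap the column for case-insensitive lookups; '%s' stands in for
        # the column reference, as the base class docstring describes.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            return 'UPPER(%s)'
        return '%s'

    def no_limit_value(self):
        # This backend can simply omit the LIMIT clause for "no limit".
        return None

    def regex_lookup(self, lookup_type):
        # 'regex' is case-sensitive, 'iregex' is not (operator syntax is
        # made up for illustration).
        if lookup_type == 'regex':
            return '%s REGEXP BINARY %s'
        return '%s REGEXP %s'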
def sql_flush(self, style, tables, sequences):
"""
Returns a list of SQL statements required to remove all data from

View File

@ -62,6 +62,7 @@ server_version_re = re.compile(r'(\d{1,2})\.(\d{1,2})\.(\d{1,2})')
class DatabaseFeatures(BaseDatabaseFeatures):
autoindexes_primary_keys = False
inline_fk_references = False
empty_fetchmany_value = ()
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
@ -94,6 +95,10 @@ class DatabaseOperations(BaseDatabaseOperations):
sql += "%s," % offset
return sql + str(limit)
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615L
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.

View File

@ -66,6 +66,7 @@ class MysqlDebugWrapper:
class DatabaseFeatures(BaseDatabaseFeatures):
autoindexes_primary_keys = False
inline_fk_references = False
empty_fetchmany_value = ()
class DatabaseOperations(BaseDatabaseOperations):
def date_extract_sql(self, lookup_type, field_name):
@ -98,6 +99,10 @@ class DatabaseOperations(BaseDatabaseOperations):
sql += "%s," % offset
return sql + str(limit)
def no_limit_value(self):
# 2**64 - 1, as recommended by the MySQL documentation
return 18446744073709551615L
def quote_name(self, name):
if name.startswith("`") and name.endswith("`"):
return name # Quoting once is enough.

View File

@ -4,11 +4,12 @@ Oracle database backend for Django.
Requires cx_Oracle: http://www.python.net/crew/atuining/cx_Oracle/
"""
import os
from django.db.backends import BaseDatabaseWrapper, BaseDatabaseFeatures, BaseDatabaseOperations, util
from django.db.backends.oracle import query
from django.utils.datastructures import SortedDict
from django.utils.encoding import smart_str, force_unicode
import datetime
import os
# Oracle takes client-side character set encoding from the environment.
os.environ['NLS_LANG'] = '.UTF8'
@ -24,11 +25,12 @@ IntegrityError = Database.IntegrityError
class DatabaseFeatures(BaseDatabaseFeatures):
allows_group_by_ordinal = False
allows_unique_and_pk = False # Suppress UNIQUE/PK for Oracle (ORA-02259)
empty_fetchmany_value = ()
needs_datetime_string_cast = False
needs_upper_for_iops = True
supports_tablespaces = True
uses_case_insensitive_names = True
uses_custom_queryset = True
uses_custom_query_class = True
class DatabaseOperations(BaseDatabaseOperations):
def autoinc_sql(self, table, column):
@ -89,243 +91,16 @@ class DatabaseOperations(BaseDatabaseOperations):
# Instead, they are handled in django/db/backends/oracle/query.py.
return ""
def lookup_cast(self, lookup_type):
if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
return "UPPER(%s)"
return "%s"
def max_name_length(self):
return 30
def query_set_class(self, DefaultQuerySet):
from django.db import connection
from django.db.models.query import EmptyResultSet, GET_ITERATOR_CHUNK_SIZE, quote_only_if_word
class OracleQuerySet(DefaultQuerySet):
def iterator(self):
"Performs the SELECT database lookup of this QuerySet."
from django.db.models.query import get_cached_row
# self._select is a dictionary, and dictionaries' key order is
# undefined, so we convert it to a list of tuples.
extra_select = self._select.items()
full_query = None
try:
try:
select, sql, params, full_query = self._get_sql_clause(get_full_query=True)
except TypeError:
select, sql, params = self._get_sql_clause()
except EmptyResultSet:
raise StopIteration
if not full_query:
full_query = "SELECT %s%s\n%s" % ((self._distinct and "DISTINCT " or ""), ', '.join(select), sql)
cursor = connection.cursor()
cursor.execute(full_query, params)
fill_cache = self._select_related
fields = self.model._meta.fields
index_end = len(fields)
# so here's the logic;
# 1. retrieve each row in turn
# 2. convert NCLOBs
while 1:
rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
if not rows:
raise StopIteration
for row in rows:
row = self.resolve_columns(row, fields)
if fill_cache:
obj, index_end = get_cached_row(klass=self.model, row=row,
index_start=0, max_depth=self._max_related_depth)
else:
obj = self.model(*row[:index_end])
for i, k in enumerate(extra_select):
setattr(obj, k[0], row[index_end+i])
yield obj
def _get_sql_clause(self, get_full_query=False):
from django.db.models.query import fill_table_cache, \
handle_legacy_orderlist, orderfield2column
opts = self.model._meta
qn = connection.ops.quote_name
# Construct the fundamental parts of the query: SELECT X FROM Y WHERE Z.
select = ["%s.%s" % (qn(opts.db_table), qn(f.column)) for f in opts.fields]
tables = [quote_only_if_word(t) for t in self._tables]
joins = SortedDict()
where = self._where[:]
params = self._params[:]
# Convert self._filters into SQL.
joins2, where2, params2 = self._filters.get_sql(opts)
joins.update(joins2)
where.extend(where2)
params.extend(params2)
# Add additional tables and WHERE clauses based on select_related.
if self._select_related:
fill_table_cache(opts, select, tables, where, opts.db_table, [opts.db_table])
# Add any additional SELECTs.
if self._select:
select.extend(['(%s) AS %s' % (quote_only_if_word(s[1]), qn(s[0])) for s in self._select.items()])
# Start composing the body of the SQL statement.
sql = [" FROM", qn(opts.db_table)]
# Compose the join dictionary into SQL describing the joins.
if joins:
sql.append(" ".join(["%s %s %s ON %s" % (join_type, table, alias, condition)
for (alias, (table, join_type, condition)) in joins.items()]))
# Compose the tables clause into SQL.
if tables:
sql.append(", " + ", ".join(tables))
# Compose the where clause into SQL.
if where:
sql.append(where and "WHERE " + " AND ".join(where))
# ORDER BY clause
order_by = []
if self._order_by is not None:
ordering_to_use = self._order_by
else:
ordering_to_use = opts.ordering
for f in handle_legacy_orderlist(ordering_to_use):
if f == '?': # Special case.
order_by.append(DatabaseOperations().random_function_sql())
else:
if f.startswith('-'):
col_name = f[1:]
order = "DESC"
else:
col_name = f
order = "ASC"
if "." in col_name:
table_prefix, col_name = col_name.split('.', 1)
table_prefix = qn(table_prefix) + '.'
else:
# Use the database table as a column prefix if it wasn't given,
# and if the requested column isn't a custom SELECT.
if "." not in col_name and col_name not in (self._select or ()):
table_prefix = qn(opts.db_table) + '.'
else:
table_prefix = ''
order_by.append('%s%s %s' % (table_prefix, qn(orderfield2column(col_name, opts)), order))
if order_by:
sql.append("ORDER BY " + ", ".join(order_by))
# Look for column name collisions in the select elements
# and fix them with an AS alias. This allows us to do a
# SELECT * later in the paging query.
cols = [clause.split('.')[-1] for clause in select]
for index, col in enumerate(cols):
if cols.count(col) > 1:
col = '%s%d' % (col.replace('"', ''), index)
cols[index] = col
select[index] = '%s AS %s' % (select[index], col)
# LIMIT and OFFSET clauses
# To support limits and offsets, Oracle requires some funky rewriting of an otherwise normal looking query.
select_clause = ",".join(select)
distinct = (self._distinct and "DISTINCT " or "")
if order_by:
order_by_clause = " OVER (ORDER BY %s )" % (", ".join(order_by))
else:
#Oracle's row_number() function always requires an order-by clause.
#So we need to define a default order-by, since none was provided.
order_by_clause = " OVER (ORDER BY %s.%s)" % \
(qn(opts.db_table), qn(opts.fields[0].db_column or opts.fields[0].column))
# limit_and_offset_clause
if self._limit is None:
assert self._offset is None, "'offset' is not allowed without 'limit'"
if self._offset is not None:
offset = int(self._offset)
else:
offset = 0
if self._limit is not None:
limit = int(self._limit)
else:
limit = None
limit_and_offset_clause = ''
if limit is not None:
limit_and_offset_clause = "WHERE rn > %s AND rn <= %s" % (offset, limit+offset)
elif offset:
limit_and_offset_clause = "WHERE rn > %s" % (offset)
if len(limit_and_offset_clause) > 0:
fmt = \
"""SELECT * FROM
(SELECT %s%s,
ROW_NUMBER()%s AS rn
%s)
%s"""
full_query = fmt % (distinct, select_clause,
order_by_clause, ' '.join(sql).strip(),
limit_and_offset_clause)
else:
full_query = None
if get_full_query:
return select, " ".join(sql), params, full_query
else:
return select, " ".join(sql), params
def resolve_columns(self, row, fields=()):
from django.db.models.fields import DateField, DateTimeField, \
TimeField, BooleanField, NullBooleanField, DecimalField, Field
values = []
for value, field in map(None, row, fields):
if isinstance(value, Database.LOB):
value = value.read()
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
if value is None and isinstance(field, Field) and field.empty_strings_allowed:
value = u''
# Convert 1 or 0 to True or False
elif value in (1, 0) and isinstance(field, (BooleanField, NullBooleanField)):
value = bool(value)
# Convert floats to decimals
elif value is not None and isinstance(field, DecimalField):
value = util.typecast_decimal(field.format_number(value))
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime. We use the type
# of the Field to determine which to cast to, but it's not
# always available.
# As a workaround, we cast to date if all the time-related
# values are 0, or to time if the date is 1/1/1900.
# This could be cleaned a bit by adding a method to the Field
# classes to normalize values from the database (the to_python
# method is used for validation and isn't what we want here).
elif isinstance(value, Database.Timestamp):
# In Python 2.3, the cx_Oracle driver returns its own
# Timestamp object that we must convert to a datetime class.
if not isinstance(value, datetime.datetime):
value = datetime.datetime(value.year, value.month, value.day, value.hour,
value.minute, value.second, value.fsecond)
if isinstance(field, DateTimeField):
pass # DateTimeField subclasses DateField so must be checked first.
elif isinstance(field, DateField):
value = value.date()
elif isinstance(field, TimeField) or (value.year == 1900 and value.month == value.day == 1):
value = value.time()
elif value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
values.append(value)
return values
return OracleQuerySet
def query_class(self, DefaultQueryClass):
return query.query_class(DefaultQueryClass, Database)
def quote_name(self, name):
# SQL92 requires delimited (quoted) names to be case-sensitive. When
@ -339,6 +114,23 @@ class DatabaseOperations(BaseDatabaseOperations):
def random_function_sql(self):
return "DBMS_RANDOM.RANDOM"
def regex_lookup_9(self, lookup_type):
raise NotImplementedError("Regexes are not supported in Oracle before version 10g.")
def regex_lookup_10(self, lookup_type):
if lookup_type == 'regex':
match_option = "'c'"
else:
match_option = "'i'"
return 'REGEXP_LIKE(%%s, %%s, %s)' % match_option
def regex_lookup(self, lookup_type):
# If regex_lookup is called before it's been initialized, then create
# a cursor to initialize it and recur.
from django.db import connection
connection.cursor()
return connection.ops.regex_lookup(lookup_type)
def sql_flush(self, style, tables, sequences):
# Return a list of 'TRUNCATE x;', 'TRUNCATE y;',
# 'TRUNCATE z;'... style SQL statements
@ -430,6 +222,14 @@ class DatabaseWrapper(BaseDatabaseWrapper):
"NLS_TIMESTAMP_FORMAT = 'YYYY-MM-DD HH24:MI:SS.FF'")
try:
self.oracle_version = int(self.connection.version.split('.')[0])
# There's no way for the DatabaseOperations class to know the
# currently active Oracle version, so we do some setup here.
# TODO: Multi-db support will need a better solution (a way to
# communicate the current version).
if self.oracle_version <= 9:
self.ops.regex_lookup = self.ops.regex_lookup_9
else:
self.ops.regex_lookup = self.ops.regex_lookup_10
except ValueError:
pass
try:

View File

@ -0,0 +1,151 @@
"""
Custom Query class for this backend (a derivative of
django.db.models.sql.query.Query).
"""
import datetime
from django.db.backends import util
# Cache. Maps default query class to new Oracle query class.
_classes = {}
def query_class(QueryClass, Database):
"""
Returns a custom django.db.models.sql.query.Query subclass that is
appropriate for Oracle.
The 'Database' module (cx_Oracle) is passed in here so that all the setup
required to import it only needs to be done by the calling module.
"""
global _classes
try:
return _classes[QueryClass]
except KeyError:
pass
class OracleQuery(QueryClass):
def resolve_columns(self, row, fields=()):
index_start = len(self.extra_select.keys())
values = [self.convert_values(v, None) for v in row[:index_start]]
for value, field in map(None, row[index_start:], fields):
values.append(self.convert_values(value, field))
return values
def convert_values(self, value, field):
from django.db.models.fields import DateField, DateTimeField, \
TimeField, BooleanField, NullBooleanField, DecimalField, Field
if isinstance(value, Database.LOB):
value = value.read()
# Oracle stores empty strings as null. We need to undo this in
# order to adhere to the Django convention of using the empty
# string instead of null, but only if the field accepts the
# empty string.
if value is None and isinstance(field, Field) and field.empty_strings_allowed:
value = u''
# Convert 1 or 0 to True or False
elif value in (1, 0) and isinstance(field, (BooleanField, NullBooleanField)):
value = bool(value)
# Convert floats to decimals
elif value is not None and isinstance(field, DecimalField):
value = util.typecast_decimal(field.format_number(value))
# cx_Oracle always returns datetime.datetime objects for
# DATE and TIMESTAMP columns, but Django wants to see a
# python datetime.date, .time, or .datetime. We use the type
# of the Field to determine which to cast to, but it's not
# always available.
# As a workaround, we cast to date if all the time-related
# values are 0, or to time if the date is 1/1/1900.
# This could be cleaned a bit by adding a method to the Field
# classes to normalize values from the database (the to_python
# method is used for validation and isn't what we want here).
elif isinstance(value, Database.Timestamp):
# In Python 2.3, the cx_Oracle driver returns its own
# Timestamp object that we must convert to a datetime class.
if not isinstance(value, datetime.datetime):
value = datetime.datetime(value.year, value.month,
value.day, value.hour, value.minute, value.second,
value.fsecond)
if isinstance(field, DateTimeField):
# DateTimeField subclasses DateField so must be checked
# first.
pass
elif isinstance(field, DateField):
value = value.date()
elif isinstance(field, TimeField) or (value.year == 1900 and value.month == value.day == 1):
value = value.time()
elif value.hour == value.minute == value.second == value.microsecond == 0:
value = value.date()
return value
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Creates the SQL for this query. Returns the SQL string and list
of parameters. This is overridden from the original Query class
to accommodate Oracle's limit/offset SQL.
If 'with_limits' is False, any limit/offset information is not
included in the query.
"""
# The `do_offset` flag indicates whether we need to construct
# the SQL needed to use limit/offset w/Oracle.
do_offset = with_limits and (self.high_mark or self.low_mark)
# If no offsets, just return the result of the base class
# `as_sql`.
if not do_offset:
return super(OracleQuery, self).as_sql(with_limits=False,
with_col_aliases=with_col_aliases)
# `get_columns` needs to be called before `get_ordering` to
# populate `_select_alias`.
self.pre_sql_setup()
out_cols = self.get_columns()
ordering = self.get_ordering()
# Getting the "ORDER BY" SQL for the ROW_NUMBER() result.
if ordering:
rn_orderby = ', '.join(ordering)
else:
# Oracle's ROW_NUMBER() function always requires an
# order-by clause. So we need to define a default
# order-by, since none was provided.
qn = self.quote_name_unless_alias
opts = self.model._meta
rn_orderby = '%s.%s' % (qn(opts.db_table), qn(opts.fields[0].db_column or opts.fields[0].column))
# Getting the selection SQL and the params, which has the `rn`
# extra selection SQL.
self.extra_select['rn'] = 'ROW_NUMBER() OVER (ORDER BY %s )' % rn_orderby
sql, params = super(OracleQuery, self).as_sql(with_limits=False,
with_col_aliases=True)
# Constructing the result SQL, using the initial select SQL
# obtained above.
result = ['SELECT * FROM (%s)' % sql]
# Place WHERE condition on `rn` for the desired range.
result.append('WHERE rn > %d' % self.low_mark)
if self.high_mark:
result.append('AND rn <= %d' % self.high_mark)
# Returning the SQL w/params.
return ' '.join(result), params
def set_limits(self, low=None, high=None):
super(OracleQuery, self).set_limits(low, high)
# We need to select the row number for the LIMIT/OFFSET sql.
# A placeholder is added to extra_select now, because as_sql is
# too late to be modifying extra_select. However, the actual sql
# depends on the ordering, so that is generated in as_sql.
self.extra_select['rn'] = '1'
def clear_limits(self):
super(OracleQuery, self).clear_limits()
if 'rn' in self.extra_select:
del self.extra_select['rn']
_classes[QueryClass] = OracleQuery
return OracleQuery
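For a sliced queryset, the as_sql() override above wraps the ordinary query in a ROW_NUMBER() subselect, since Oracle has no LIMIT/OFFSET clause. Roughly, a slice like qs[20:30] produces SQL shaped like the string below; the table, columns and ordering are invented and the aliasing details are simplified.

# Illustrative only: the rough shape of the SQL that as_sql() produces for
# a slice such as qs[20:30] (column list and ordering simplified).
ORACLE_SLICE_SQL = """
SELECT * FROM (
    SELECT some_table.col1, some_table.col2,
           ROW_NUMBER() OVER (ORDER BY some_table.id) AS rn
    FROM some_table
)
WHERE rn > 20 AND rn <= 30
"""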

View File

@ -44,6 +44,9 @@ class DatabaseOperations(BaseDatabaseOperations):
cursor.execute("SELECT CURRVAL('\"%s_%s_seq\"')" % (table_name, pk_name))
return cursor.fetchone()[0]
def no_limit_value(self):
return None
def quote_name(self, name):
if name.startswith('"') and name.endswith('"'):
return name # Quoting once is enough.

View File

@ -63,6 +63,9 @@ class DatabaseOperations(BaseDatabaseOperations):
return name # Quoting once is enough.
return '"%s"' % name
def no_limit_value(self):
return -1
def sql_flush(self, style, tables, sequences):
# NB: The generated SQL below is specific to SQLite
# Note: The DELETE FROM... SQL generated below works for SQLite databases

View File

@ -1,10 +1,16 @@
import django.db.models.manipulators
import django.db.models.manager
import copy
import types
import sys
import os
from itertools import izip
import django.db.models.manipulators # Imported to register signal handler.
import django.db.models.manager # Ditto.
from django.core import validators
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned
from django.core.exceptions import ObjectDoesNotExist, MultipleObjectsReturned, FieldError
from django.db.models.fields import AutoField, ImageField, FieldDoesNotExist
from django.db.models.fields.related import OneToOneRel, ManyToOneRel
from django.db.models.query import delete_objects
from django.db.models.fields.related import OneToOneRel, ManyToOneRel, OneToOneField
from django.db.models.query import delete_objects, Q
from django.db.models.options import Options, AdminOptions
from django.db import connection, transaction
from django.db.models import signals
@ -14,10 +20,11 @@ from django.utils.datastructures import SortedDict
from django.utils.functional import curry
from django.utils.encoding import smart_str, force_unicode, smart_unicode
from django.conf import settings
from itertools import izip
import types
import sys
import os
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
class ModelBase(type):
"Metaclass for all models"
@ -25,29 +32,45 @@ class ModelBase(type):
# If this isn't a subclass of Model, don't do anything special.
try:
parents = [b for b in bases if issubclass(b, Model)]
if not parents:
return super(ModelBase, cls).__new__(cls, name, bases, attrs)
except NameError:
# 'Model' isn't defined yet, meaning we're looking at Django's own
# Model class, defined below.
parents = []
if not parents:
return super(ModelBase, cls).__new__(cls, name, bases, attrs)
# Create the class.
new_class = type.__new__(cls, name, bases, {'__module__': attrs.pop('__module__')})
new_class.add_to_class('_meta', Options(attrs.pop('Meta', None)))
new_class.add_to_class('DoesNotExist', types.ClassType('DoesNotExist', (ObjectDoesNotExist,), {}))
new_class.add_to_class('MultipleObjectsReturned',
types.ClassType('MultipleObjectsReturned', (MultipleObjectsReturned, ), {}))
# Build complete list of parents
for base in parents:
# Things without _meta aren't functional models, so they're
# uninteresting parents.
if hasattr(base, '_meta'):
new_class._meta.parents.append(base)
new_class._meta.parents.extend(base._meta.parents)
module = attrs.pop('__module__')
new_class = type.__new__(cls, name, bases, {'__module__': module})
attr_meta = attrs.pop('Meta', None)
abstract = getattr(attr_meta, 'abstract', False)
if not attr_meta:
meta = getattr(new_class, 'Meta', None)
else:
meta = attr_meta
base_meta = getattr(new_class, '_meta', None)
new_class.add_to_class('_meta', Options(meta))
if not abstract:
new_class.add_to_class('DoesNotExist',
subclass_exception('DoesNotExist', ObjectDoesNotExist, module))
new_class.add_to_class('MultipleObjectsReturned',
subclass_exception('MultipleObjectsReturned', MultipleObjectsReturned, module))
if base_meta and not base_meta.abstract:
# Non-abstract child classes inherit some attributes from their
# non-abstract parent (unless an ABC comes before it in the
# method resolution order).
if not hasattr(meta, 'ordering'):
new_class._meta.ordering = base_meta.ordering
if not hasattr(meta, 'get_latest_by'):
new_class._meta.get_latest_by = base_meta.get_latest_by
old_default_mgr = None
if getattr(new_class, '_default_manager', None):
# We have a parent who set the default manager.
if new_class._default_manager.model._meta.abstract:
old_default_mgr = new_class._default_manager
new_class._default_manager = None
if getattr(new_class._meta, 'app_label', None) is None:
# Figure out the app_label by looking one level up.
# For 'django.contrib.sites.models', this would be 'sites'.
@ -63,21 +86,50 @@ class ModelBase(type):
for obj_name, obj in attrs.items():
new_class.add_to_class(obj_name, obj)
# Add Fields inherited from parents
for parent in new_class._meta.parents:
for field in parent._meta.fields:
# Only add parent fields if they aren't defined for this class.
try:
new_class._meta.get_field(field.name)
except FieldDoesNotExist:
field.contribute_to_class(new_class, field.name)
# Do the appropriate setup for any model parents.
o2o_map = dict([(f.rel.to, f) for f in new_class._meta.local_fields
if isinstance(f, OneToOneField)])
for base in parents:
if not hasattr(base, '_meta'):
# Things without _meta aren't functional models, so they're
# uninteresting parents.
continue
if not base._meta.abstract:
if base in o2o_map:
field = o2o_map[base]
field.primary_key = True
new_class._meta.setup_pk(field)
else:
attr_name = '%s_ptr' % base._meta.module_name
field = OneToOneField(base, name=attr_name,
auto_created=True, parent_link=True)
new_class.add_to_class(attr_name, field)
new_class._meta.parents[base] = field
else:
# The abstract base class case.
names = set([f.name for f in new_class._meta.local_fields + new_class._meta.many_to_many])
for field in base._meta.local_fields + base._meta.local_many_to_many:
if field.name in names:
raise FieldError('Local field %r in class %r clashes with field of similar name from abstract base class %r'
% (field.name, name, base.__name__))
new_class.add_to_class(field.name, copy.deepcopy(field))
if abstract:
# Abstract base models can't be instantiated and don't appear in
# the list of models for an app. We do the final setup for them a
# little differently from normal models.
attr_meta.abstract = False
new_class.Meta = attr_meta
return new_class
if old_default_mgr and not new_class._default_manager:
new_class._default_manager = old_default_mgr._copy_to_model(new_class)
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first class for this model to register with the framework. There
# should only be one class for each model, so we must always return the
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name, False)
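The parent-handling code above distinguishes abstract bases (whose local fields are deep-copied onto each child) from concrete bases (which are linked by an automatically created OneToOneField recorded in _meta.parents). A hedged sketch of the multi-table case, with hypothetical models:

from django.db import models

class Place(models.Model):
    # Concrete base: has its own table.
    name = models.CharField(max_length=50)

class Restaurant(Place):
    # Multi-table inheritance: Django adds a hidden parent link,
    # OneToOneField 'place_ptr', recorded in Restaurant._meta.parents.
    serves_pizza = models.BooleanField(default=False)

# Restaurant._meta.local_fields contains only the columns stored in the
# restaurant table (the parent link and serves_pizza), whereas
# Restaurant._meta.fields also includes the fields inherited from Place.
# That distinction is why the SQL-generation and validation code in this
# commit iterates over local_fields instead of fields.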
@ -113,31 +165,6 @@ class ModelBase(type):
class Model(object):
__metaclass__ = ModelBase
def _get_pk_val(self):
return getattr(self, self._meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def __repr__(self):
return smart_str(u'<%s: %s>' % (self.__class__.__name__, unicode(self)))
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def __init__(self, *args, **kwargs):
dispatcher.send(signal=signals.pre_init, sender=self.__class__, args=args, kwargs=kwargs)
@ -210,72 +237,133 @@ class Model(object):
raise TypeError, "'%s' is an invalid keyword argument for this function" % kwargs.keys()[0]
dispatcher.send(signal=signals.post_init, sender=self.__class__, instance=self)
def save(self, raw=False):
dispatcher.send(signal=signals.pre_save, sender=self.__class__,
instance=self, raw=raw)
def from_sequence(cls, values):
"""
An alternate class constructor, primarily for internal use.
non_pks = [f for f in self._meta.fields if not f.primary_key]
cursor = connection.cursor()
Creates a model instance from a sequence of values (which correspond to
all the non-many-to-many fields, in creation order). If there are more
fields than values, the remaining (final) fields are given their
default values.
qn = connection.ops.quote_name
ForeignKey fields can only be initialised using id values, not
instances, in this method.
"""
dispatcher.send(signal=signals.pre_init, sender=cls, args=values,
kwargs={})
obj = Empty()
obj.__class__ = cls
field_iter = iter(obj._meta.fields)
for val, field in izip(values, field_iter):
setattr(obj, field.attname, val)
for field in field_iter:
setattr(obj, field.attname, field.get_default())
dispatcher.send(signal=signals.post_init, sender=cls, instance=obj)
return obj
from_sequence = classmethod(from_sequence)
def __repr__(self):
return smart_str(u'<%s: %s>' % (self.__class__.__name__, unicode(self)))
def __str__(self):
if hasattr(self, '__unicode__'):
return force_unicode(self).encode('utf-8')
return '%s object' % self.__class__.__name__
def __eq__(self, other):
return isinstance(other, self.__class__) and self._get_pk_val() == other._get_pk_val()
def __ne__(self, other):
return not self.__eq__(other)
def __hash__(self):
return hash(self._get_pk_val())
def _get_pk_val(self, meta=None):
if not meta:
meta = self._meta
return getattr(self, meta.pk.attname)
def _set_pk_val(self, value):
return setattr(self, self._meta.pk.attname, value)
pk = property(_get_pk_val, _set_pk_val)
def save(self):
"""
Save the current instance. Override this in a subclass if you want to
control the saving process.
"""
self.save_base()
save.alters_data = True
def save_base(self, raw=False, cls=None):
"""
Does the heavy-lifting involved in saving. Subclasses shouldn't need to
override this method. It's separate from save() in order to hide the
need for overrides of save() to pass around internal-only parameters
('raw' and 'cls').
"""
if not cls:
cls = self.__class__
meta = self._meta
signal = True
dispatcher.send(signal=signals.pre_save, sender=self.__class__,
instance=self, raw=raw)
else:
meta = cls._meta
signal = False
for parent, field in meta.parents.items():
self.save_base(raw, parent)
setattr(self, field.attname, self._get_pk_val(parent._meta))
non_pks = [f for f in meta.local_fields if not f.primary_key]
# First, try an UPDATE. If that doesn't update anything, do an INSERT.
pk_val = self._get_pk_val()
pk_val = self._get_pk_val(meta)
# Note: the comparison with '' is required for compatibility with
# oldforms-style model creation.
pk_set = pk_val is not None and smart_unicode(pk_val) != u''
record_exists = True
manager = cls._default_manager
if pk_set:
# Determine whether a record with the primary key already exists.
cursor.execute("SELECT 1 FROM %s WHERE %s=%%s" % \
(qn(self._meta.db_table), qn(self._meta.pk.column)),
self._meta.pk.get_db_prep_lookup('exact', pk_val))
# If it does already exist, do an UPDATE.
if cursor.fetchone():
db_values = [f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, False)) for f in non_pks]
if db_values:
cursor.execute("UPDATE %s SET %s WHERE %s=%%s" % \
(qn(self._meta.db_table),
','.join(['%s=%%s' % qn(f.column) for f in non_pks]),
qn(self._meta.pk.column)),
db_values + self._meta.pk.get_db_prep_lookup('exact', pk_val))
if manager.filter(pk=pk_val).extra(select={'a': 1}).values('a').order_by():
# It does already exist, so do an UPDATE.
if non_pks:
values = [(f, None, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
manager.filter(pk=pk_val)._update(values)
else:
record_exists = False
if not pk_set or not record_exists:
field_names = [qn(f.column) for f in self._meta.fields if not isinstance(f, AutoField)]
db_values = [f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True)) for f in self._meta.fields if not isinstance(f, AutoField)]
# If the PK has been manually set, respect that.
if pk_set:
field_names += [f.column for f in self._meta.fields if isinstance(f, AutoField)]
db_values += [f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True)) for f in self._meta.fields if isinstance(f, AutoField)]
placeholders = ['%s'] * len(field_names)
if self._meta.order_with_respect_to:
field_names.append(qn('_order'))
placeholders.append('%s')
subsel = 'SELECT COUNT(*) FROM %s WHERE %s = %%s' % (
qn(self._meta.db_table),
qn(self._meta.order_with_respect_to.column))
cursor.execute(subsel, (getattr(self, self._meta.order_with_respect_to.attname),))
db_values.append(cursor.fetchone()[0])
if not pk_set:
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True))) for f in meta.local_fields if not isinstance(f, AutoField)]
else:
values = [(f, f.get_db_prep_save(raw and getattr(self, f.attname) or f.pre_save(self, True))) for f in meta.local_fields]
if meta.order_with_respect_to:
field = meta.order_with_respect_to
values.append((meta.get_field_by_name('_order')[0], manager.filter(**{field.name: getattr(self, field.attname)}).count()))
record_exists = False
if db_values:
cursor.execute("INSERT INTO %s (%s) VALUES (%s)" % \
(qn(self._meta.db_table), ','.join(field_names),
','.join(placeholders)), db_values)
update_pk = bool(meta.has_auto_field and not pk_set)
if values:
# Create a new record.
result = manager._insert(values, return_id=update_pk)
else:
# Create a new record with defaults for everything.
cursor.execute("INSERT INTO %s (%s) VALUES (%s)" %
(qn(self._meta.db_table), qn(self._meta.pk.column),
connection.ops.pk_default_value()))
if self._meta.has_auto_field and not pk_set:
setattr(self, self._meta.pk.attname, connection.ops.last_insert_id(cursor, self._meta.db_table, self._meta.pk.column))
result = manager._insert([(meta.pk, connection.ops.pk_default_value())], return_id=update_pk, raw_values=True)
if update_pk:
setattr(self, meta.pk.attname, result)
transaction.commit_unless_managed()
# Run any post-save hooks.
dispatcher.send(signal=signals.post_save, sender=self.__class__,
instance=self, created=(not record_exists), raw=raw)
save.alters_data = True
if signal:
dispatcher.send(signal=signals.post_save, sender=self.__class__,
instance=self, created=(not record_exists), raw=raw)
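save() is now a thin wrapper around save_base(), and its docstring invites overriding save() to customise the saving process. A hedged example of the intended override pattern (the model and the slug logic are hypothetical):

from django.db import models
from django.template.defaultfilters import slugify

class Article(models.Model):
    title = models.CharField(max_length=100)
    slug = models.SlugField(blank=True)

    def save(self):
        # Custom pre-save behaviour, then delegate to the normal
        # machinery (which ends up in save_base()).
        if not self.slug:
            self.slug = slugify(self.title)
        super(Article, self).save()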
def validate(self):
"""
@ -341,32 +429,31 @@ class Model(object):
return force_unicode(dict(field.choices).get(value, value), strings_only=True)
def _get_next_or_previous_by_FIELD(self, field, is_next, **kwargs):
qn = connection.ops.quote_name
op = is_next and '>' or '<'
where = '(%s %s %%s OR (%s = %%s AND %s.%s %s %%s))' % \
(qn(field.column), op, qn(field.column),
qn(self._meta.db_table), qn(self._meta.pk.column), op)
op = is_next and 'gt' or 'lt'
order = not is_next and '-' or ''
param = smart_str(getattr(self, field.attname))
q = self.__class__._default_manager.filter(**kwargs).order_by((not is_next and '-' or '') + field.name, (not is_next and '-' or '') + self._meta.pk.name)
q._where.append(where)
q._params.extend([param, param, getattr(self, self._meta.pk.attname)])
q = Q(**{'%s__%s' % (field.name, op): param})
q = q|Q(**{field.name: param, 'pk__%s' % op: self.pk})
qs = self.__class__._default_manager.filter(**kwargs).filter(q).order_by('%s%s' % (order, field.name), '%spk' % order)
try:
return q[0]
return qs[0]
except IndexError:
raise self.DoesNotExist, "%s matching query does not exist." % self.__class__._meta.object_name
def _get_next_or_previous_in_order(self, is_next):
qn = connection.ops.quote_name
cachename = "__%s_order_cache" % is_next
if not hasattr(self, cachename):
qn = connection.ops.quote_name
op = is_next and '>' or '<'
order = not is_next and '-_order' or '_order'
order_field = self._meta.order_with_respect_to
# FIXME: When querysets support nested queries, this can be turned
# into a pure queryset operation.
where = ['%s %s (SELECT %s FROM %s WHERE %s=%%s)' % \
(qn('_order'), op, qn('_order'),
qn(self._meta.db_table), qn(self._meta.pk.column)),
'%s=%%s' % qn(order_field.column)]
params = [self._get_pk_val(), getattr(self, order_field.attname)]
obj = self._default_manager.order_by('_order').extra(where=where, params=params)[:1].get()
qn(self._meta.db_table), qn(self._meta.pk.column))]
params = [self.pk]
obj = self._default_manager.filter(**{order_field.name: getattr(self, order_field.attname)}).extra(where=where, params=params).order_by(order)[:1].get()
setattr(self, cachename, obj)
return getattr(self, cachename)
@ -446,29 +533,20 @@ class Model(object):
# ORDERING METHODS #########################
def method_set_order(ordered_obj, self, id_list):
qn = connection.ops.quote_name
cursor = connection.cursor()
# Example: "UPDATE poll_choices SET _order = %s WHERE poll_id = %s AND id = %s"
sql = "UPDATE %s SET %s = %%s WHERE %s = %%s AND %s = %%s" % \
(qn(ordered_obj._meta.db_table), qn('_order'),
qn(ordered_obj._meta.order_with_respect_to.column),
qn(ordered_obj._meta.pk.column))
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
cursor.executemany(sql, [(i, rel_val, j) for i, j in enumerate(id_list)])
order_name = ordered_obj._meta.order_with_respect_to.name
# FIXME: It would be nice if there was an "update many" version of update
# for situations like this.
for i, j in enumerate(id_list):
ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
transaction.commit_unless_managed()
def method_get_order(ordered_obj, self):
qn = connection.ops.quote_name
cursor = connection.cursor()
# Example: "SELECT id FROM poll_choices WHERE poll_id = %s ORDER BY _order"
sql = "SELECT %s FROM %s WHERE %s = %%s ORDER BY %s" % \
(qn(ordered_obj._meta.pk.column),
qn(ordered_obj._meta.db_table),
qn(ordered_obj._meta.order_with_respect_to.column),
qn('_order'))
rel_val = getattr(self, ordered_obj._meta.order_with_respect_to.rel.field_name)
cursor.execute(sql, [rel_val])
return [r[0] for r in cursor.fetchall()]
order_name = ordered_obj._meta.order_with_respect_to.name
pk_name = ordered_obj._meta.pk.name
return [r[pk_name] for r in
ordered_obj.objects.filter(**{order_name: rel_val}).values(pk_name)]
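method_set_order() and method_get_order() above back the get_FOO_order()/set_FOO_order() methods that Meta.order_with_respect_to installs on the related model; set_FOO_order() is now implemented with the new QuerySet.update() instead of raw SQL. A hedged usage sketch with hypothetical models:

from django.db import models

class Question(models.Model):
    text = models.CharField(max_length=200)

class Answer(models.Model):
    question = models.ForeignKey(Question)
    text = models.CharField(max_length=200)

    class Meta:
        order_with_respect_to = 'question'

# For a Question instance `q` with related Answer rows:
#   q.get_answer_order()           # e.g. [1, 2, 3]
#   q.set_answer_order([3, 1, 2])  # rewrites the hidden _order column via update()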
##############################################
# HELPER FUNCTIONS (CURRIED MODEL FUNCTIONS) #
@ -476,3 +554,20 @@ def method_get_order(ordered_obj, self):
def get_absolute_url(opts, func, self, *args, **kwargs):
return settings.ABSOLUTE_URL_OVERRIDES.get('%s.%s' % (opts.app_label, opts.module_name), func)(self, *args, **kwargs)
########
# MISC #
########
class Empty(object):
pass
if sys.version_info < (2, 5):
# Prior to Python 2.5, Exception was an old-style class
def subclass_exception(name, parent, unused):
return types.ClassType(name, (parent,), {})
else:
def subclass_exception(name, parent, module):
return type(name, (parent,), {'__module__': module})

View File

@ -1,3 +1,4 @@
import copy
import datetime
import os
import time
@ -75,15 +76,19 @@ class Field(object):
# database level.
empty_strings_allowed = True
# Tracks each time a Field instance is created. Used to retain order.
# These track each time a Field instance is created. Used to retain order.
# The auto_creation_counter is used for fields that Django implicitly
# creates; creation_counter is used for all user-specified fields.
creation_counter = 0
auto_creation_counter = -1
def __init__(self, verbose_name=None, name=None, primary_key=False,
max_length=None, unique=False, blank=False, null=False, db_index=False,
core=False, rel=None, default=NOT_PROVIDED, editable=True, serialize=True,
prepopulate_from=None, unique_for_date=None, unique_for_month=None,
unique_for_year=None, validator_list=None, choices=None, radio_admin=None,
help_text='', db_column=None, db_tablespace=None):
max_length=None, unique=False, blank=False, null=False,
db_index=False, core=False, rel=None, default=NOT_PROVIDED,
editable=True, serialize=True, prepopulate_from=None,
unique_for_date=None, unique_for_month=None, unique_for_year=None,
validator_list=None, choices=None, radio_admin=None, help_text='',
db_column=None, db_tablespace=None, auto_created=False):
self.name = name
self.verbose_name = verbose_name
self.primary_key = primary_key
@ -109,14 +114,27 @@ class Field(object):
# Set db_index to True if the field has a relationship and doesn't explicitly set db_index.
self.db_index = db_index
# Increase the creation counter, and save our local copy.
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
# Adjust the appropriate creation counter, and save our local copy.
if auto_created:
self.creation_counter = Field.auto_creation_counter
Field.auto_creation_counter -= 1
else:
self.creation_counter = Field.creation_counter
Field.creation_counter += 1
def __cmp__(self, other):
# This is needed because bisect does not take a comparison function.
return cmp(self.creation_counter, other.creation_counter)
def __deepcopy__(self, memodict):
# We don't have to deepcopy very much here, since most things are not
# intended to be altered after initial creation.
obj = copy.copy(self)
if self.rel:
obj.rel = copy.copy(self.rel)
memodict[id(self)] = obj
return obj
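The counters above keep fields sorted in declaration order via __cmp__, while auto-created fields (those passed auto_created=True, such as the automatic parent link) get negative counters so they sort ahead of user-declared fields; __deepcopy__ is what lets abstract-base fields be copied onto each child in ModelBase. A small hedged sketch of the ordering behaviour (the model is hypothetical):

from django.db import models

class Book(models.Model):
    title = models.CharField(max_length=100)   # creation_counter n
    author = models.CharField(max_length=100)  # creation_counter n + 1

# Fields compare by creation_counter (__cmp__ above), so declaration order
# is preserved:
#   [f.name for f in Book._meta.fields]  ->  ['id', 'title', 'author']
# The implicit 'id' AutoField and automatic parent links are created with
# auto_created=True, giving them negative counters, which is why they sort
# ahead of the user-declared fields.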
def to_python(self, value):
"""
Converts the input value into the expected Python data type, raising
@ -145,11 +163,10 @@ class Field(object):
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
data_types = get_creation_module().DATA_TYPES
internal_type = self.get_internal_type()
if internal_type not in data_types:
try:
return get_creation_module().DATA_TYPES[self.get_internal_type()] % self.__dict__
except KeyError:
return None
return data_types[internal_type] % self.__dict__
def validate_full(self, field_data, all_data):
"""

View File

@ -0,0 +1,16 @@
"""
Field-like classes that aren't really fields. It's sometimes easier to use
objects that have the same attributes as fields (it avoids a lot of special
casing).
"""
from django.db.models import fields
class OrderWrt(fields.IntegerField):
"""
A proxy for the _order database field that is used when
Meta.order_with_respect_to is specified.
"""
name = '_order'
attname = '_order'
column = '_order'
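For context, a sketch of where OrderWrt comes into play; Question and Answer are hypothetical models, not part of this patch.

from django.db import models

class Question(models.Model):
    text = models.CharField(max_length=200)

class Answer(models.Model):
    question = models.ForeignKey(Question)
    text = models.CharField(max_length=200)

    class Meta:
        order_with_respect_to = 'question'

# question.get_answer_order() and question.set_answer_order([...]) read and
# write the hidden _order integer column that OrderWrt stands in for.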

View File

@ -2,6 +2,7 @@ from django.db import connection, transaction
from django.db.models import signals, get_model
from django.db.models.fields import AutoField, Field, IntegerField, PositiveIntegerField, PositiveSmallIntegerField, get_ul_class
from django.db.models.related import RelatedObject
from django.db.models.query_utils import QueryWrapper
from django.utils.text import capfirst
from django.utils.translation import ugettext_lazy, string_concat, ungettext, ugettext as _
from django.utils.functional import curry
@ -27,21 +28,21 @@ def add_lazy_relation(cls, field, relation):
"""
Adds a lookup on ``cls`` when a related field is defined using a string,
i.e.::
class MyModel(Model):
fk = ForeignKey("AnotherModel")
This string can be:
* RECURSIVE_RELATIONSHIP_CONSTANT (i.e. "self") to indicate a recursive
relation.
* The name of a model (i.e "AnotherModel") to indicate another model in
the same app.
* An app-label and model name (i.e. "someapp.AnotherModel") to indicate
another model in a different app.
If the other model hasn't yet been loaded -- almost a given if you're using
lazy relationships -- then the relation won't be set up until the
class_prepared signal fires at the end of model initialization.
@ -50,7 +51,7 @@ def add_lazy_relation(cls, field, relation):
if relation == RECURSIVE_RELATIONSHIP_CONSTANT:
app_label = cls._meta.app_label
model_name = cls.__name__
else:
# Look for an "app.Model" relation
try:
@ -59,10 +60,10 @@ def add_lazy_relation(cls, field, relation):
# If we can't split, assume a model in current app
app_label = cls._meta.app_label
model_name = relation
# Try to look up the related model, and if it's already loaded resolve the
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name, False)
if model:
@ -72,7 +73,7 @@ def add_lazy_relation(cls, field, relation):
key = (app_label, model_name)
value = (cls, field)
pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender):
"""
Handle any pending relations to the sending model. Sent from class_prepared.
@ -107,6 +108,8 @@ class RelatedField(object):
add_lazy_relation(cls, self, other)
else:
self.do_related_class(other, cls)
if not cls._meta.abstract and self.rel.related_name:
self.rel.related_name = self.rel.related_name % {'class': cls.__name__.lower()}
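A sketch of what this interpolation enables (the models are hypothetical and assume django.contrib.auth is installed): a single related_name on an abstract base would clash across subclasses, so '%(class)s' is substituted with each concrete subclass's lower-cased name.

from django.db import models

class Attachment(models.Model):
    owner = models.ForeignKey('auth.User', related_name='%(class)s_set')

    class Meta:
        abstract = True

class Image(Attachment):
    pass    # reverse accessor: user.image_set

class Document(Attachment):
    pass    # reverse accessor: user.document_set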
def set_attributes_from_rel(self):
self.name = self.name or (self.rel.to._meta.object_name.lower() + '_' + self.rel.to._meta.pk.name)
@ -136,6 +139,9 @@ class RelatedField(object):
pass
return v
if hasattr(value, 'as_sql'):
sql, params = value.as_sql()
return QueryWrapper(('(%s)' % sql), params)
if lookup_type == 'exact':
return [pk_trace(value)]
if lookup_type == 'in':
@ -145,9 +151,10 @@ class RelatedField(object):
raise TypeError, "Related Field has invalid lookup: %s" % lookup_type
def _get_related_query_name(self, opts):
# This method defines the name that can be used to identify this related object
# in a table-spanning query. It uses the lower-cased object_name by default,
# but this can be overridden with the "related_name" option.
# This method defines the name that can be used to identify this
# related object in a table-spanning query. It uses the lower-cased
# object_name by default, but this can be overridden with the
# "related_name" option.
return self.rel.related_name or opts.object_name.lower()
class SingleRelatedObjectDescriptor(object):
@ -158,14 +165,19 @@ class SingleRelatedObjectDescriptor(object):
# SingleRelatedObjectDescriptor instance.
def __init__(self, related):
self.related = related
self.cache_name = '_%s_cache' % related.field.name
def __get__(self, instance, instance_type=None):
if instance is None:
raise AttributeError, "%s must be accessed via instance" % self.related.opts.object_name
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
rel_obj = self.related.model._default_manager.get(**params)
return rel_obj
try:
return getattr(instance, self.cache_name)
except AttributeError:
params = {'%s__pk' % self.related.field.name: instance._get_pk_val()}
rel_obj = self.related.model._default_manager.get(**params)
setattr(instance, self.cache_name, rel_obj)
return rel_obj
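Behaviourally, with hypothetical models where Restaurant declares place = OneToOneField(Place), the reverse accessor is now cached per instance:

p = Place.objects.get(name="Bob's Diner")
p.restaurant    # one query; the result is stored on p as '_place_cache'
                # (cache_name is keyed on the originating field's name)
p.restaurant    # no query; served from the cached attribute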
def __set__(self, instance, value):
if instance is None:
@ -495,13 +507,77 @@ class ReverseManyRelatedObjectsDescriptor(object):
manager.clear()
manager.add(*value)
class ManyToOneRel(object):
def __init__(self, to, field_name, num_in_admin=3, min_num_in_admin=None,
max_num_in_admin=None, num_extra_on_change=1, edit_inline=False,
related_name=None, limit_choices_to=None, lookup_overrides=None,
raw_id_admin=False, parent_link=False):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.num_in_admin, self.edit_inline = num_in_admin, edit_inline
self.min_num_in_admin, self.max_num_in_admin = min_num_in_admin, max_num_in_admin
self.num_extra_on_change, self.related_name = num_extra_on_change, related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.lookup_overrides = lookup_overrides or {}
self.raw_id_admin = raw_id_admin
self.multiple = True
self.parent_link = parent_link
def get_related_field(self):
"""
Returns the Field in the 'to' object to which this relationship is
tied.
"""
data = self.to._meta.get_field_by_name(self.field_name)
if not data[2]:
raise FieldDoesNotExist("No related field named '%s'" %
self.field_name)
return data[0]
class OneToOneRel(ManyToOneRel):
def __init__(self, to, field_name, num_in_admin=0, min_num_in_admin=None,
max_num_in_admin=None, num_extra_on_change=None, edit_inline=False,
related_name=None, limit_choices_to=None, lookup_overrides=None,
raw_id_admin=False, parent_link=False):
# NOTE: *_num_in_admin and num_extra_on_change are intentionally
# ignored here. We accept them as parameters only to match the calling
# signature of ManyToOneRel.__init__().
super(OneToOneRel, self).__init__(to, field_name, num_in_admin,
edit_inline=edit_inline, related_name=related_name,
limit_choices_to=limit_choices_to,
lookup_overrides=lookup_overrides, raw_id_admin=raw_id_admin,
parent_link=parent_link)
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, num_in_admin=0, related_name=None,
filter_interface=None, limit_choices_to=None, raw_id_admin=False, symmetrical=True):
self.to = to
self.num_in_admin = num_in_admin
self.related_name = related_name
self.filter_interface = filter_interface
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.edit_inline = False
self.raw_id_admin = raw_id_admin
self.symmetrical = symmetrical
self.multiple = True
assert not (self.raw_id_admin and self.filter_interface), "ManyToManyRels may not use both raw_id_admin and filter_interface"
class ForeignKey(RelatedField, Field):
empty_strings_allowed = False
def __init__(self, to, to_field=None, **kwargs):
def __init__(self, to, to_field=None, rel_class=ManyToOneRel, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "ForeignKey(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (to, RECURSIVE_RELATIONSHIP_CONSTANT)
assert isinstance(to, basestring), "%s(%r) is invalid. First parameter to ForeignKey must be either a model, a model name, or the string %r" % (self.__class__.__name__, to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
to_field = to_field or to._meta.pk.name
kwargs['verbose_name'] = kwargs.get('verbose_name', '')
@ -511,7 +587,7 @@ class ForeignKey(RelatedField, Field):
warnings.warn("edit_inline_type is deprecated. Use edit_inline instead.", DeprecationWarning)
kwargs['edit_inline'] = kwargs.pop('edit_inline_type')
kwargs['rel'] = ManyToOneRel(to, to_field,
kwargs['rel'] = rel_class(to, to_field,
num_in_admin=kwargs.pop('num_in_admin', 3),
min_num_in_admin=kwargs.pop('min_num_in_admin', None),
max_num_in_admin=kwargs.pop('max_num_in_admin', None),
@ -520,7 +596,8 @@ class ForeignKey(RelatedField, Field):
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
lookup_overrides=kwargs.pop('lookup_overrides', None),
raw_id_admin=kwargs.pop('raw_id_admin', False))
raw_id_admin=kwargs.pop('raw_id_admin', False),
parent_link=kwargs.pop('parent_link', False))
Field.__init__(self, **kwargs)
self.db_index = True
@ -606,82 +683,25 @@ class ForeignKey(RelatedField, Field):
return IntegerField().db_type()
return rel_field.db_type()
class OneToOneField(RelatedField, IntegerField):
class OneToOneField(ForeignKey):
"""
A OneToOneField is essentially the same as a ForeignKey, with the exception
that it always carries a "unique" constraint with it and the reverse relation
always returns the object pointed to (since there will only ever be one),
rather than returning a list.
"""
def __init__(self, to, to_field=None, **kwargs):
try:
to_name = to._meta.object_name.lower()
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "OneToOneField(%r) is invalid. First parameter to OneToOneField must be either a model, a model name, or the string %r" % (to, RECURSIVE_RELATIONSHIP_CONSTANT)
else:
to_field = to_field or to._meta.pk.name
kwargs['verbose_name'] = kwargs.get('verbose_name', '')
if 'edit_inline_type' in kwargs:
import warnings
warnings.warn("edit_inline_type is deprecated. Use edit_inline instead.", DeprecationWarning)
kwargs['edit_inline'] = kwargs.pop('edit_inline_type')
kwargs['rel'] = OneToOneRel(to, to_field,
num_in_admin=kwargs.pop('num_in_admin', 0),
edit_inline=kwargs.pop('edit_inline', False),
related_name=kwargs.pop('related_name', None),
limit_choices_to=kwargs.pop('limit_choices_to', None),
lookup_overrides=kwargs.pop('lookup_overrides', None),
raw_id_admin=kwargs.pop('raw_id_admin', False))
kwargs['primary_key'] = True
IntegerField.__init__(self, **kwargs)
self.db_index = True
def get_attname(self):
return '%s_id' % self.name
def get_validator_unique_lookup_type(self):
return '%s__%s__exact' % (self.name, self.rel.get_related_field().name)
# TODO: Copied from ForeignKey... putting this in RelatedField adversely affects
# ManyToManyField. This works for now.
def prepare_field_objs_and_params(self, manipulator, name_prefix):
params = {'validator_list': self.validator_list[:], 'member_name': name_prefix + self.attname}
if self.rel.raw_id_admin:
field_objs = self.get_manipulator_field_objs()
params['validator_list'].append(curry(manipulator_valid_rel_key, self, manipulator))
else:
if self.radio_admin:
field_objs = [oldforms.RadioSelectField]
params['ul_class'] = get_ul_class(self.radio_admin)
else:
if self.null:
field_objs = [oldforms.NullSelectField]
else:
field_objs = [oldforms.SelectField]
params['choices'] = self.get_choices_default()
return field_objs, params
def contribute_to_class(self, cls, name):
super(OneToOneField, self).contribute_to_class(cls, name)
setattr(cls, self.name, ReverseSingleRelatedObjectDescriptor(self))
kwargs['unique'] = True
if 'num_in_admin' not in kwargs:
kwargs['num_in_admin'] = 0
super(OneToOneField, self).__init__(to, to_field, OneToOneRel, **kwargs)
def contribute_to_related_class(self, cls, related):
setattr(cls, related.get_accessor_name(), SingleRelatedObjectDescriptor(related))
setattr(cls, related.get_accessor_name(),
SingleRelatedObjectDescriptor(related))
if not cls._meta.one_to_one_field:
cls._meta.one_to_one_field = self
def formfield(self, **kwargs):
defaults = {'form_class': forms.ModelChoiceField, 'queryset': self.rel.to._default_manager.all()}
defaults.update(kwargs)
return super(OneToOneField, self).formfield(**defaults)
def db_type(self):
# The database column type of a OneToOneField is the column type
# of the field to which it points. An exception is if the OneToOneField
# points to an AutoField/PositiveIntegerField/PositiveSmallIntegerField,
# in which case the column type is simply that of an IntegerField.
rel_field = self.rel.get_related_field()
if isinstance(rel_field, (AutoField, PositiveIntegerField, PositiveSmallIntegerField)):
return IntegerField().db_type()
return rel_field.db_type()
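OneToOneField is now a thin ForeignKey subclass, and the new parent_link option ties into model inheritance. A sketch with hypothetical models (the implicit '<parent>_ptr' field name is my reading of the inheritance support, not spelled out in this hunk):

from django.db import models

class Place(models.Model):
    name = models.CharField(max_length=50)

class Restaurant(Place):
    # Roughly equivalent to declaring:
    #   place_ptr = models.OneToOneField(Place, parent_link=True)
    serves_pizza = models.BooleanField(default=False)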
class ManyToManyField(RelatedField, Field):
def __init__(self, to, **kwargs):
kwargs['verbose_name'] = kwargs.get('verbose_name', None)
@ -798,7 +818,7 @@ class ManyToManyField(RelatedField, Field):
def save_form_data(self, instance, data):
setattr(instance, self.attname, data)
def formfield(self, **kwargs):
defaults = {'form_class': forms.ModelMultipleChoiceField, 'queryset': self.rel.to._default_manager.all()}
defaults.update(kwargs)
@ -813,56 +833,3 @@ class ManyToManyField(RelatedField, Field):
# so return None.
return None
class ManyToOneRel(object):
def __init__(self, to, field_name, num_in_admin=3, min_num_in_admin=None,
max_num_in_admin=None, num_extra_on_change=1, edit_inline=False,
related_name=None, limit_choices_to=None, lookup_overrides=None, raw_id_admin=False):
try:
to._meta
except AttributeError: # to._meta doesn't exist, so it must be RECURSIVE_RELATIONSHIP_CONSTANT
assert isinstance(to, basestring), "'to' must be either a model, a model name or the string %r" % RECURSIVE_RELATIONSHIP_CONSTANT
self.to, self.field_name = to, field_name
self.num_in_admin, self.edit_inline = num_in_admin, edit_inline
self.min_num_in_admin, self.max_num_in_admin = min_num_in_admin, max_num_in_admin
self.num_extra_on_change, self.related_name = num_extra_on_change, related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.lookup_overrides = lookup_overrides or {}
self.raw_id_admin = raw_id_admin
self.multiple = True
def get_related_field(self):
"Returns the Field in the 'to' object to which this relationship is tied."
return self.to._meta.get_field(self.field_name)
class OneToOneRel(ManyToOneRel):
def __init__(self, to, field_name, num_in_admin=0, edit_inline=False,
related_name=None, limit_choices_to=None, lookup_overrides=None,
raw_id_admin=False):
self.to, self.field_name = to, field_name
self.num_in_admin, self.edit_inline = num_in_admin, edit_inline
self.related_name = related_name
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.lookup_overrides = lookup_overrides or {}
self.raw_id_admin = raw_id_admin
self.multiple = False
class ManyToManyRel(object):
def __init__(self, to, num_in_admin=0, related_name=None,
filter_interface=None, limit_choices_to=None, raw_id_admin=False, symmetrical=True):
self.to = to
self.num_in_admin = num_in_admin
self.related_name = related_name
self.filter_interface = filter_interface
if limit_choices_to is None:
limit_choices_to = {}
self.limit_choices_to = limit_choices_to
self.edit_inline = False
self.raw_id_admin = raw_id_admin
self.symmetrical = symmetrical
self.multiple = True
assert not (self.raw_id_admin and self.filter_interface), "ManyToManyRels may not use both raw_id_admin and filter_interface"

View File

@ -1,11 +1,13 @@
from django.db.models.query import QuerySet, EmptyQuerySet
import copy
from django.db.models.query import QuerySet, EmptyQuerySet, insert_query
from django.dispatch import dispatcher
from django.db.models import signals
from django.db.models.fields import FieldDoesNotExist
def ensure_default_manager(sender):
cls = sender
if not hasattr(cls, '_default_manager'):
if not getattr(cls, '_default_manager', None) and not cls._meta.abstract:
# Create the default manager, if needed.
try:
cls._meta.get_field('objects')
@ -31,13 +33,24 @@ class Manager(object):
# TODO: Use weakref because of possible memory leak / circular reference.
self.model = model
setattr(model, name, ManagerDescriptor(self))
if not hasattr(model, '_default_manager') or self.creation_counter < model._default_manager.creation_counter:
if not getattr(model, '_default_manager', None) or self.creation_counter < model._default_manager.creation_counter:
model._default_manager = self
def _copy_to_model(self, model):
"""
Makes a copy of the manager and assigns it to 'model', which should be
a child of the existing model (used when inheriting a manager from an
abstract base class).
"""
assert issubclass(model, self.model)
mgr = copy.copy(self)
mgr.model = model
return mgr
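A sketch of the situation _copy_to_model() exists for (hypothetical models; 'published' is an invented field): a manager declared on an abstract base class is copied onto each concrete subclass.

from django.db import models

class PublishedManager(models.Manager):
    def get_query_set(self):
        return super(PublishedManager, self).get_query_set().filter(published=True)

class BaseArticle(models.Model):
    published = models.BooleanField(default=False)
    objects = PublishedManager()

    class Meta:
        abstract = True

class NewsArticle(BaseArticle):
    pass    # NewsArticle.objects is a copy of PublishedManager bound to NewsArticle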
#######################
# PROXIES TO QUERYSET #
#######################
def get_empty_query_set(self):
return EmptyQuerySet(self.model)
@ -46,7 +59,7 @@ class Manager(object):
to easily customize the behavior of the Manager.
"""
return QuerySet(self.model)
def none(self):
return self.get_empty_query_set()
@ -70,7 +83,7 @@ class Manager(object):
def get_or_create(self, **kwargs):
return self.get_query_set().get_or_create(**kwargs)
def create(self, **kwargs):
return self.get_query_set().create(**kwargs)
@ -101,6 +114,21 @@ class Manager(object):
def values(self, *args, **kwargs):
return self.get_query_set().values(*args, **kwargs)
def values_list(self, *args, **kwargs):
return self.get_query_set().values_list(*args, **kwargs)
def update(self, *args, **kwargs):
return self.get_query_set().update(*args, **kwargs)
def reverse(self, *args, **kwargs):
return self.get_query_set().reverse(*args, **kwargs)
def _insert(self, values, **kwargs):
return insert_query(self.model, values, **kwargs)
def _update(self, values, **kwargs):
return self.get_query_set()._update(values, **kwargs)
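From the manager side, a sketch of the newly proxied methods; Entry and its fields follow the documentation examples, and 'rating' is an invented field.

Entry.objects.values_list('id', 'headline')               # sequence of tuples
Entry.objects.filter(pub_date__year=2007).update(rating=0)
Entry.objects.order_by('headline').reverse()[:5]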
class ManagerDescriptor(object):
# This class ensures managers aren't accessible via model instances.
# For example, Poll.objects works, but poll_obj.objects raises AttributeError.

View File

@ -1,25 +1,32 @@
import re
from bisect import bisect
try:
set
except NameError:
from sets import Set as set # Python 2.3 fallback
from django.conf import settings
from django.db.models.related import RelatedObject
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.db.models.query import orderlist2sql
from django.db.models import Manager
from django.utils.translation import activate, deactivate_all, get_language, string_concat
from django.utils.encoding import force_unicode, smart_str
from bisect import bisect
import re
from django.utils.datastructures import SortedDict
# Calculate the verbose_name by converting from InitialCaps to "lowercase with spaces".
get_verbose_name = lambda class_name: re.sub('(((?<=[a-z])[A-Z])|([A-Z](?![A-Z]|$)))', ' \\1', class_name).lower().strip()
DEFAULT_NAMES = ('verbose_name', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace')
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract')
class Options(object):
def __init__(self, meta):
self.fields, self.many_to_many = [], []
self.local_fields, self.local_many_to_many = [], []
self.module_name, self.verbose_name = None, None
self.verbose_name_plural = None
self.db_table = ''
@ -35,7 +42,8 @@ class Options(object):
self.pk = None
self.has_auto_field, self.auto_field = False, None
self.one_to_one_field = None
self.parents = []
self.abstract = False
self.parents = SortedDict()
def contribute_to_class(self, cls, name):
cls._meta = self
@ -47,11 +55,14 @@ class Options(object):
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__
meta_attrs = self.meta.__dict__.copy()
del meta_attrs['__module__']
del meta_attrs['__doc__']
for attr_name in DEFAULT_NAMES:
setattr(self, attr_name, meta_attrs.pop(attr_name, getattr(self, attr_name)))
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
# unique_together can be either a tuple of tuples, or a single
# tuple of two strings. Normalize it to a tuple of tuples, so that
@ -82,9 +93,16 @@ class Options(object):
self.order_with_respect_to = None
if self.pk is None:
auto = AutoField(verbose_name='ID', primary_key=True)
auto.creation_counter = -1
model.add_to_class('id', auto)
if self.parents:
# Promote the first parent link in lieu of adding yet another
# field.
field = self.parents.value_for_index(0)
field.primary_key = True
self.pk = field
else:
auto = AutoField(verbose_name='ID', primary_key=True,
auto_created=True)
model.add_to_class('id', auto)
# If the db_table wasn't provided, use the app_label + module_name.
if not self.db_table:
@ -94,14 +112,26 @@ class Options(object):
def add_field(self, field):
# Insert the given field in the order in which it was created, using
# the "creation_counter" attribute of the field.
# Move many-to-many related fields from self.fields into self.many_to_many.
# Move many-to-many related fields from self.fields into
# self.many_to_many.
if field.rel and isinstance(field.rel, ManyToManyRel):
self.many_to_many.insert(bisect(self.many_to_many, field), field)
self.local_many_to_many.insert(bisect(self.local_many_to_many, field), field)
if hasattr(self, '_m2m_cache'):
del self._m2m_cache
else:
self.fields.insert(bisect(self.fields, field), field)
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
self.local_fields.insert(bisect(self.local_fields, field), field)
self.setup_pk(field)
if hasattr(self, '_field_cache'):
del self._field_cache
del self._field_name_cache
if hasattr(self, '_name_map'):
del self._name_map
def setup_pk(self, field):
if not self.pk and field.primary_key:
self.pk = field
field.serialize = False
def __repr__(self):
return '<Options for %s>' % self.object_name
@ -122,19 +152,137 @@ class Options(object):
return raw
verbose_name_raw = property(verbose_name_raw)
def _fields(self):
"""
The getter for self.fields. This returns the list of field objects
available to this model (including through parent models).
Callers are not permitted to modify this list, since it's a reference
to this instance (not a copy).
"""
try:
self._field_name_cache
except AttributeError:
self._fill_fields_cache()
return self._field_name_cache
fields = property(_fields)
def get_fields_with_model(self):
"""
Returns a sequence of (field, model) pairs for all fields. The "model"
element is None for fields on the current model. Mostly of use when
constructing queries so that we know which model a field belongs to.
"""
try:
self._field_cache
except AttributeError:
self._fill_fields_cache()
return self._field_cache
def _fill_fields_cache(self):
cache = []
for parent in self.parents:
for field, model in parent._meta.get_fields_with_model():
if model:
cache.append((field, model))
else:
cache.append((field, parent))
cache.extend([(f, None) for f in self.local_fields])
self._field_cache = tuple(cache)
self._field_name_cache = [x for x, _ in cache]
def _many_to_many(self):
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return self._m2m_cache.keys()
many_to_many = property(_many_to_many)
def get_m2m_with_model(self):
"""
The many-to-many version of get_fields_with_model().
"""
try:
self._m2m_cache
except AttributeError:
self._fill_m2m_cache()
return self._m2m_cache.items()
def _fill_m2m_cache(self):
cache = SortedDict()
for parent in self.parents:
for field, model in parent._meta.get_m2m_with_model():
if model:
cache[field] = model
else:
cache[field] = parent
for field in self.local_many_to_many:
cache[field] = None
self._m2m_cache = cache
def get_field(self, name, many_to_many=True):
"Returns the requested field by name. Raises FieldDoesNotExist on error."
"""
Returns the requested field by name. Raises FieldDoesNotExist on error.
"""
to_search = many_to_many and (self.fields + self.many_to_many) or self.fields
for f in to_search:
if f.name == name:
return f
raise FieldDoesNotExist, '%s has no field named %r' % (self.object_name, name)
def get_order_sql(self, table_prefix=''):
"Returns the full 'ORDER BY' clause for this object, according to self.ordering."
if not self.ordering: return ''
pre = table_prefix and (table_prefix + '.') or ''
return 'ORDER BY ' + orderlist2sql(self.ordering, self, pre)
def get_field_by_name(self, name):
"""
Returns the (field_object, model, direct, m2m), where field_object is
the Field instance for the given name, model is the model containing
this field (None for local fields), direct is True if the field exists
on this model, and m2m is True for many-to-many relations. When
'direct' is False, 'field_object' is the corresponding RelatedObject
for this field (since the field doesn't have an instance associated
with it).
Uses a cache internally, so after the first access, this is very fast.
"""
try:
try:
return self._name_map[name]
except AttributeError:
cache = self.init_name_map()
return self._name_map[name]
except KeyError:
raise FieldDoesNotExist('%s has no field named %r'
% (self.object_name, name))
def get_all_field_names(self):
"""
Returns a list of all field names that are possible for this model
(including reverse relation names).
"""
try:
cache = self._name_map
except AttributeError:
cache = self.init_name_map()
names = cache.keys()
names.sort()
return names
def init_name_map(self):
"""
Initialises the field name -> field object mapping.
"""
cache = dict([(f.name, (f, m, True, False)) for f, m in
self.get_fields_with_model()])
for f, model in self.get_m2m_with_model():
cache[f.name] = (f, model, True, True)
for f, model in self.get_all_related_m2m_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, True)
for f, model in self.get_all_related_objects_with_model():
cache[f.field.related_query_name()] = (f, model, False, False)
if self.order_with_respect_to:
cache['_order'] = OrderWrt(), None, True, False
if app_cache_ready():
self._name_map = cache
return cache
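For example, with the hypothetical Entry model from the documentation:

field, model, direct, m2m = Entry._meta.get_field_by_name('blog')
# field  -> the ForeignKey instance
# model  -> None, because 'blog' is defined on Entry itself
# direct -> True, m2m -> False

Entry._meta.get_all_field_names()   # includes reverse relation names as well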
def get_add_permission(self):
return 'add_%s' % self.object_name.lower()
@ -145,17 +293,81 @@ class Options(object):
def get_delete_permission(self):
return 'delete_%s' % self.object_name.lower()
def get_all_related_objects(self):
try: # Try the cache first.
return self._all_related_objects
def get_all_related_objects(self, local_only=False):
try:
self._related_objects_cache
except AttributeError:
rel_objs = []
for klass in get_models():
for f in klass._meta.fields:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
rel_objs.append(RelatedObject(f.rel.to, klass, f))
self._all_related_objects = rel_objs
return rel_objs
self._fill_related_objects_cache()
if local_only:
return [k for k, v in self._related_objects_cache.items() if not v]
return self._related_objects_cache.keys()
def get_all_related_objects_with_model(self):
"""
Returns a list of (related-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
self._related_objects_cache
except AttributeError:
self._fill_related_objects_cache()
return self._related_objects_cache.items()
def _fill_related_objects_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_objects_with_model():
if (obj.field.creation_counter < 0 or obj.field.rel.parent_link) and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
self._related_objects_cache = cache
def get_all_related_many_to_many_objects(self, local_only=False):
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
if local_only:
return [k for k, v in cache.items() if not v]
return cache.keys()
def get_all_related_m2m_objects_with_model(self):
"""
Returns a list of (related-m2m-object, model) pairs. Similar to
get_fields_with_model().
"""
try:
cache = self._related_many_to_many_cache
except AttributeError:
cache = self._fill_related_many_to_many_cache()
return cache.items()
def _fill_related_many_to_many_cache(self):
cache = SortedDict()
parent_list = self.get_parent_list()
for parent in self.parents:
for obj, model in parent._meta.get_all_related_m2m_objects_with_model():
if obj.field.creation_counter < 0 and obj.model not in parent_list:
continue
if not model:
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models():
for f in klass._meta.local_many_to_many:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
cache[RelatedObject(f.rel.to, klass, f)] = None
if app_cache_ready():
self._related_many_to_many_cache = cache
return cache
def get_followed_related_objects(self, follow=None):
if follow == None:
@ -179,18 +391,34 @@ class Options(object):
follow[f.name] = fol
return follow
def get_all_related_many_to_many_objects(self):
try: # Try the cache first.
return self._all_related_many_to_many_objects
except AttributeError:
rel_objs = []
for klass in get_models():
for f in klass._meta.many_to_many:
if f.rel and not isinstance(f.rel.to, str) and self == f.rel.to._meta:
rel_objs.append(RelatedObject(f.rel.to, klass, f))
if app_cache_ready():
self._all_related_many_to_many_objects = rel_objs
return rel_objs
def get_base_chain(self, model):
"""
Returns a list of parent classes leading to 'model' (ordered from closest
to most distant ancestor). This has to handle the case where 'model' is a
grandparent or an even more distant ancestor.
"""
if not self.parents:
return
if model in self.parents:
return [model]
for parent in self.parents:
res = parent._meta.get_base_chain(model)
if res:
res.insert(0, parent)
return res
raise TypeError('%r is not an ancestor of this model'
% model._meta.module_name)
def get_parent_list(self):
"""
Returns all the ancestors of this model as a set. Useful for determining
whether something is an ancestor, regardless of lineage.
"""
result = set()
for parent in self.parents:
result.add(parent)
result.update(parent._meta.get_parent_list())
return result
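With the hypothetical Place/Restaurant models from the earlier sketch:

Restaurant._meta.get_parent_list()        # set([<class 'Place'>])
Restaurant._meta.get_base_chain(Place)    # [<class 'Place'>]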
def get_ordered_objects(self):
"Returns a list of Options objects that are ordered with respect to this object."

File diff suppressed because it is too large

View File

@ -0,0 +1,50 @@
"""
Various data structures used in query construction.
Factored out from django.db.models.query so that they can also be used by other
modules without getting into circular import difficulties.
"""
from copy import deepcopy
from django.utils import tree
class QueryWrapper(object):
"""
A type that indicates the contents are an SQL fragment and the associated
parameters. Can be used to pass opaque data to a where-clause, for example.
"""
def __init__(self, sql, params):
self.data = sql, params
class Q(tree.Node):
"""
Encapsulates filters as objects that can then be combined logically (using
& and |).
"""
# Connection types
AND = 'AND'
OR = 'OR'
default = AND
def __init__(self, *args, **kwargs):
super(Q, self).__init__(children=list(args) + kwargs.items())
def _combine(self, other, conn):
if not isinstance(other, Q):
raise TypeError(other)
obj = deepcopy(self)
obj.add(other, conn)
return obj
def __or__(self, other):
return self._combine(other, self.OR)
def __and__(self, other):
return self._combine(other, self.AND)
def __invert__(self):
obj = deepcopy(self)
obj.negate()
return obj
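Typical usage, with the Entry model from the documentation:

from django.db.models import Q

q = (Q(headline__startswith='What') | Q(headline__startswith='Who')) & \
    ~Q(pub_date__year=2005)
Entry.objects.filter(q)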

View File

@ -0,0 +1,7 @@
from query import *
from subqueries import *
from where import AND, OR
from datastructures import EmptyResultSet
__all__ = ['Query', 'AND', 'OR', 'EmptyResultSet']

View File

@ -0,0 +1,36 @@
import re
# Valid query types (a dictionary is used for speedy lookups).
QUERY_TERMS = dict([(x, None) for x in (
'exact', 'iexact', 'contains', 'icontains', 'gt', 'gte', 'lt', 'lte', 'in',
'startswith', 'istartswith', 'endswith', 'iendswith', 'range', 'year',
'month', 'day', 'isnull', 'search', 'regex', 'iregex',
)])
# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100
# Separator used to split filter strings apart.
LOOKUP_SEP = '__'
# Constants to make looking up tuple values clearer.
# Join lists
TABLE_NAME = 0
RHS_ALIAS = 1
JOIN_TYPE = 2
LHS_ALIAS = 3
LHS_JOIN_COL = 4
RHS_JOIN_COL = 5
NULLABLE = 6
# How many results to expect from a cursor.execute call
MULTI = 'multi'
SINGLE = 'single'
ORDER_PATTERN = re.compile(r'\?|[-+]?\w+$')
ORDER_DIR = {
'ASC': ('ASC', 'DESC'),
'DESC': ('DESC', 'ASC')}

View File

@ -0,0 +1,103 @@
"""
Useful auxiliary data structures for query construction. Not useful outside
the SQL domain.
"""
class EmptyResultSet(Exception):
pass
class FullResultSet(Exception):
pass
class MultiJoin(Exception):
"""
Used by join construction code to indicate the point at which a
multi-valued join was attempted (if the caller wants to treat that
exceptionally).
"""
def __init__(self, level):
self.level = level
class Empty(object):
pass
class RawValue(object):
def __init__(self, value):
self.value = value
class Aggregate(object):
"""
Base class for all aggregate-related classes (min, max, avg, count, sum).
"""
def relabel_aliases(self, change_map):
"""
Relabel the column alias, if necessary. Must be implemented by
subclasses.
"""
raise NotImplementedError
def as_sql(self, quote_func=None):
"""
Returns the SQL string fragment for this object.
The quote_func function is used to quote the column components. If
None, it defaults to doing nothing.
Must be implemented by subclasses.
"""
raise NotImplementedError
class Count(Aggregate):
"""
Perform a count on the given column.
"""
def __init__(self, col='*', distinct=False):
"""
Set the column to count on (defaults to '*') and set whether the count
should be distinct or not.
"""
self.col = col
self.distinct = distinct
def relabel_aliases(self, change_map):
c = self.col
if isinstance(c, (list, tuple)):
self.col = (change_map.get(c[0], c[0]), c[1])
def as_sql(self, quote_func=None):
if not quote_func:
quote_func = lambda x: x
if isinstance(self.col, (list, tuple)):
col = ('%s.%s' % tuple([quote_func(c) for c in self.col]))
elif hasattr(self.col, 'as_sql'):
col = self.col.as_sql(quote_func)
else:
col = self.col
if self.distinct:
return 'COUNT(DISTINCT %s)' % col
else:
return 'COUNT(%s)' % col
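A quick sketch of the internal Count helper (this is not the public API; the module location is as introduced in this changeset):

from django.db.models.sql.datastructures import Count

Count().as_sql()                               # 'COUNT(*)'
Count(('T1', 'id'), distinct=True).as_sql()    # 'COUNT(DISTINCT T1.id)'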
class Date(object):
"""
Add a date selection column.
"""
def __init__(self, col, lookup_type, date_sql_func):
self.col = col
self.lookup_type = lookup_type
self.date_sql_func = date_sql_func
def relabel_aliases(self, change_map):
c = self.col
if isinstance(c, (list, tuple)):
self.col = (change_map.get(c[0], c[0]), c[1])
def as_sql(self, quote_func=None):
if not quote_func:
quote_func = lambda x: x
if isinstance(self.col, (list, tuple)):
col = '%s.%s' % tuple([quote_func(c) for c in self.col])
else:
col = self.col
return self.date_sql_func(self.lookup_type, col)

File diff suppressed because it is too large

View File

@ -0,0 +1,367 @@
"""
Query subclasses which provide extra functionality beyond simple data retrieval.
"""
from django.contrib.contenttypes import generic
from django.core.exceptions import FieldError
from django.db.models.sql.constants import *
from django.db.models.sql.datastructures import RawValue, Date
from django.db.models.sql.query import Query
from django.db.models.sql.where import AND
__all__ = ['DeleteQuery', 'UpdateQuery', 'InsertQuery', 'DateQuery',
'CountQuery']
class DeleteQuery(Query):
"""
Delete queries are done through this class, since they are more constrained
than general queries.
"""
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
assert len(self.tables) == 1, \
"Can only delete from one table at a time."
result = ['DELETE FROM %s' % self.quote_name_unless_alias(self.tables[0])]
where, params = self.where.as_sql()
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
def do_query(self, table, where):
self.tables = [table]
self.where = where
self.execute_sql(None)
def delete_batch_related(self, pk_list):
"""
Set up and execute delete queries for all the objects related to the
primary key values in pk_list. To delete the objects themselves, use
the delete_batch() method.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
cls = self.model
for related in cls._meta.get_all_related_many_to_many_objects():
if not isinstance(related.field, generic.GenericRelation):
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
where = self.where_class()
where.add((None, related.field.m2m_reverse_name(),
related.field, 'in',
pk_list[offset : offset+GET_ITERATOR_CHUNK_SIZE]),
AND)
self.do_query(related.field.m2m_db_table(), where)
for f in cls._meta.many_to_many:
w1 = self.where_class()
if isinstance(f, generic.GenericRelation):
from django.contrib.contenttypes.models import ContentType
field = f.rel.to._meta.get_field(f.content_type_field_name)
w1.add((None, field.column, field, 'exact',
ContentType.objects.get_for_model(cls).id), AND)
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
where = self.where_class()
where.add((None, f.m2m_column_name(), f, 'in',
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
AND)
if w1:
where.add(w1, AND)
self.do_query(f.m2m_db_table(), where)
def delete_batch(self, pk_list):
"""
Set up and execute delete queries for all the objects in pk_list. This
should be called after delete_batch_related(), if necessary.
More than one physical query may be executed if there are a
lot of values in pk_list.
"""
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
where = self.where_class()
field = self.model._meta.pk
where.add((None, field.column, field, 'in',
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]), AND)
self.do_query(self.model._meta.db_table, where)
class UpdateQuery(Query):
"""
Represents an "update" SQL query.
"""
def __init__(self, *args, **kwargs):
super(UpdateQuery, self).__init__(*args, **kwargs)
self._setup_query()
def _setup_query(self):
"""
Runs on initialisation and after cloning. Any attributes that would
normally be set in __init__ should go in here, instead, so that they
are also set up after a clone() call.
"""
self.values = []
self.related_ids = None
if not hasattr(self, 'related_updates'):
self.related_updates = {}
def clone(self, klass=None, **kwargs):
return super(UpdateQuery, self).clone(klass,
related_updates=self.related_updates.copy(), **kwargs)
def execute_sql(self, result_type=None):
super(UpdateQuery, self).execute_sql(result_type)
for query in self.get_related_updates():
query.execute_sql(result_type)
def as_sql(self):
"""
Creates the SQL for this query. Returns the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.values:
return '', ()
table = self.tables[0]
qn = self.quote_name_unless_alias
result = ['UPDATE %s' % qn(table)]
result.append('SET')
values, update_params = [], []
for name, val, placeholder in self.values:
if val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
result.append(', '.join(values))
where, params = self.where.as_sql()
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def pre_sql_setup(self):
"""
If the update depends on results from other tables, we need to do some
munging of the "where" conditions to match the format required for
(portable) SQL updates. That is done here.
Further, if we are going to be running multiple updates, we pull out
the id values to update at this point so that they don't change as a
result of the progressive updates.
"""
self.select_related = False
self.clear_ordering(True)
super(UpdateQuery, self).pre_sql_setup()
count = self.count_active_tables()
if not self.related_updates and count == 1:
return
# We need to use a sub-select in the where clause to filter on things
# from other tables.
query = self.clone(klass=Query)
query.bump_prefix()
query.select = []
query.extra_select = {}
query.add_fields([query.model._meta.pk.name])
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.where = self.where_class()
if self.related_updates:
idents = []
for rows in query.execute_sql(MULTI):
idents.extend([r[0] for r in rows])
self.add_filter(('pk__in', idents))
self.related_ids = idents
else:
self.add_filter(('pk__in', query))
for alias in self.tables[1:]:
self.alias_refcount[alias] = 0
def clear_related(self, related_field, pk_list):
"""
Set up and execute an update query that clears related entries for the
keys in pk_list.
This is used by the QuerySet.delete_objects() method.
"""
for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
self.where = self.where_class()
f = self.model._meta.pk
self.where.add((None, f.column, f, 'in',
pk_list[offset : offset + GET_ITERATOR_CHUNK_SIZE]),
AND)
self.values = [(related_field.column, None, '%s')]
self.execute_sql(None)
def add_update_values(self, values):
"""
Convert a dictionary of field name to value mappings into an update
query. This is the entry point for the public update() method on
querysets.
"""
values_seq = []
for name, val in values.iteritems():
field, model, direct, m2m = self.model._meta.get_field_by_name(name)
if not direct or m2m:
raise FieldError('Cannot update model field %r (only non-relations and foreign keys permitted).' % field)
values_seq.append((field, model, val))
return self.add_update_fields(values_seq)
def add_update_fields(self, values_seq):
"""
Turn a sequence of (field, model, value) triples into an update query.
Used by add_update_values() as well as the "fast" update path when
saving models.
"""
from django.db.models.base import Model
for field, model, val in values_seq:
# FIXME: Some sort of db_prep_* is probably more appropriate here.
if field.rel and isinstance(val, Model):
val = val.pk
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val)
else:
placeholder = '%s'
if model:
self.add_related_update(model, field.column, val, placeholder)
else:
self.values.append((field.column, val, placeholder))
def add_related_update(self, model, column, value, placeholder):
"""
Adds (name, value) to an update query for an ancestor model.
Updates are coalesced so that we only run one update query per ancestor.
"""
try:
self.related_updates[model].append((column, value, placeholder))
except KeyError:
self.related_updates[model] = [(column, value, placeholder)]
def get_related_updates(self):
"""
Returns a list of query objects: one for each update required to an
ancestor model. Each query will have the same filtering conditions as
the current query but will only update a single table.
"""
if not self.related_updates:
return []
result = []
for model, values in self.related_updates.iteritems():
query = UpdateQuery(model, self.connection)
query.values = values
if self.related_ids:
query.add_filter(('pk__in', self.related_ids))
result.append(query)
return result
class InsertQuery(Query):
def __init__(self, *args, **kwargs):
super(InsertQuery, self).__init__(*args, **kwargs)
self.columns = []
self.values = []
self.params = ()
def clone(self, klass=None, **kwargs):
extras = {'columns': self.columns[:], 'values': self.values[:],
'params': self.params}
return super(InsertQuery, self).clone(klass, **extras)
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
result = ['INSERT INTO %s' % qn(self.model._meta.db_table)]
result.append('(%s)' % ', '.join([qn(c) for c in self.columns]))
result.append('VALUES (%s)' % ', '.join(self.values))
return ' '.join(result), self.params
def execute_sql(self, return_id=False):
cursor = super(InsertQuery, self).execute_sql(None)
if return_id:
return self.connection.ops.last_insert_id(cursor,
self.model._meta.db_table, self.model._meta.pk.column)
def insert_values(self, insert_values, raw_values=False):
"""
Set up the insert query from the 'insert_values' dictionary. The
dictionary gives the model field names and their target values.
If 'raw_values' is True, the values in the 'insert_values' dictionary
are inserted directly into the query, rather than passed as SQL
parameters. This provides a way to insert NULL and DEFAULT keywords
into the query, for example.
"""
placeholders, values = [], []
for field, val in insert_values:
if hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
placeholders.append(field.get_placeholder(val))
else:
placeholders.append('%s')
self.columns.append(field.column)
values.append(val)
if raw_values:
self.values.extend(values)
else:
self.params += tuple(values)
self.values.extend(placeholders)
class DateQuery(Query):
"""
A DateQuery is a normal query, except that it specifically selects a single
date field. This requires some special handling when converting the results
back to Python objects, so we put it in a separate class.
"""
def results_iter(self):
"""
Returns an iterator over the results from executing this query.
"""
resolve_columns = hasattr(self, 'resolve_columns')
if resolve_columns:
from django.db.models.fields import DateTimeField
fields = [DateTimeField()]
else:
from django.db.backends.util import typecast_timestamp
needs_string_cast = self.connection.features.needs_datetime_string_cast
offset = len(self.extra_select)
for rows in self.execute_sql(MULTI):
for row in rows:
date = row[offset]
if resolve_columns:
date = self.resolve_columns([date], fields)[0]
elif needs_string_cast:
date = typecast_timestamp(str(date))
yield date
def add_date_select(self, column, lookup_type, order='ASC'):
"""
Converts the query into a date extraction query.
"""
alias = self.join((None, self.model._meta.db_table, None, None))
select = Date((alias, column), lookup_type,
self.connection.ops.date_trunc_sql)
self.select = [select]
self.select_fields = [None]
self.distinct = True
self.order_by = order == 'ASC' and [1] or [-1]
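This is the machinery behind QuerySet.dates(); for example, with the hypothetical Entry model from the documentation:

Entry.objects.dates('pub_date', 'year')                  # distinct years, ascending
Entry.objects.dates('pub_date', 'month', order='DESC')   # distinct months, descending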
class CountQuery(Query):
"""
A CountQuery knows how to take a normal query which would select over
multiple distinct columns and turn it into SQL that can be used on a
variety of backends (it requires a select in the FROM clause).
"""
def get_from_clause(self):
result, params = self._query.as_sql()
return ['(%s) A1' % result], params
def get_ordering(self):
return ()

View File

@ -0,0 +1,171 @@
"""
Code to manage the creation and SQL rendering of 'where' constraints.
"""
import datetime
from django.utils import tree
from django.db import connection
from django.db.models.fields import Field
from django.db.models.query_utils import QueryWrapper
from datastructures import EmptyResultSet, FullResultSet
# Connection types
AND = 'AND'
OR = 'OR'
class WhereNode(tree.Node):
"""
Used to represent the SQL where-clause.
The class is tied to the Query class that created it (in order to create
the correct SQL).
The children in this tree are usually either Q-like objects or lists of
[table_alias, field_name, field_class, lookup_type, value]. However, a
child could also be any class with as_sql() and relabel_aliases() methods.
"""
default = AND
def as_sql(self, node=None, qn=None):
"""
Returns the SQL version of the where clause and the value to be
substituted in. Returns None, None if this node is empty.
If 'node' is provided, that is the root of the SQL generation
(generally not needed except by the internal implementation for
recursion).
"""
if node is None:
node = self
if not qn:
qn = connection.ops.quote_name
if not node.children:
return None, []
result = []
result_params = []
empty = True
for child in node.children:
try:
if hasattr(child, 'as_sql'):
sql, params = child.as_sql(qn=qn)
format = '(%s)'
elif isinstance(child, tree.Node):
sql, params = self.as_sql(child, qn)
if len(child.children) == 1:
format = '%s'
else:
format = '(%s)'
if child.negated:
format = 'NOT %s' % format
else:
sql, params = self.make_atom(child, qn)
format = '%s'
except EmptyResultSet:
if node.connector == AND and not node.negated:
# We can bail out early in this particular case (only).
raise
elif node.negated:
empty = False
continue
except FullResultSet:
if self.connector == OR:
if node.negated:
empty = True
break
# We match everything. No need for any constraints.
return '', []
if node.negated:
empty = True
continue
empty = False
if sql:
result.append(format % sql)
result_params.extend(params)
if empty:
raise EmptyResultSet
conn = ' %s ' % node.connector
return conn.join(result), result_params
def make_atom(self, child, qn):
"""
Turn a tuple (table_alias, field_name, field_class, lookup_type, value)
into valid SQL.
Returns the string for the SQL fragment and the parameters to use for
it.
"""
table_alias, name, field, lookup_type, value = child
if table_alias:
lhs = '%s.%s' % (qn(table_alias), qn(name))
else:
lhs = qn(name)
db_type = field and field.db_type() or None
field_sql = connection.ops.field_cast_sql(db_type) % lhs
if isinstance(value, datetime.datetime):
cast_sql = connection.ops.datetime_cast_sql()
else:
cast_sql = '%s'
if field:
params = field.get_db_prep_lookup(lookup_type, value)
else:
params = Field().get_db_prep_lookup(lookup_type, value)
if isinstance(params, QueryWrapper):
extra, params = params.data
else:
extra = ''
if lookup_type in connection.operators:
format = "%s %%s %s" % (connection.ops.lookup_cast(lookup_type),
extra)
return (format % (field_sql,
connection.operators[lookup_type] % cast_sql), params)
if lookup_type == 'in':
if not value:
raise EmptyResultSet
if extra:
return ('%s IN %s' % (field_sql, extra), params)
return ('%s IN (%s)' % (field_sql, ', '.join(['%s'] * len(value))),
params)
elif lookup_type in ('range', 'year'):
return ('%s BETWEEN %%s and %%s' % field_sql, params)
elif lookup_type in ('month', 'day'):
return ('%s = %%s' % connection.ops.date_extract_sql(lookup_type,
field_sql), params)
elif lookup_type == 'isnull':
return ('%s IS %sNULL' % (field_sql, (not value and 'NOT ' or '')),
params)
elif lookup_type == 'search':
return (connection.ops.fulltext_search_sql(field_sql), params)
elif lookup_type in ('regex', 'iregex'):
return connection.ops.regex_lookup(lookup_type) % (field_sql, cast_sql), params
raise TypeError('Invalid lookup_type: %r' % lookup_type)
def relabel_aliases(self, change_map, node=None):
"""
Relabels the alias values of any children. 'change_map' is a dictionary
mapping old (current) alias values to the new values.
"""
if not node:
node = self
for pos, child in enumerate(node.children):
if hasattr(child, 'relabel_aliases'):
child.relabel_aliases(change_map)
elif isinstance(child, tree.Node):
self.relabel_aliases(change_map, child)
else:
if child[0] in change_map:
node.children[pos] = (change_map[child[0]],) + child[1:]
class EverythingNode(object):
"""
A node that matches everything.
"""
def as_sql(self, qn=None):
raise FullResultSet
def relabel_aliases(self, change_map, node=None):
return

django/utils/tree.py (new file, 134 lines)
View File

@ -0,0 +1,134 @@
"""
A class for storing a tree graph. Primarily used for filter constructs in the
ORM.
"""
from copy import deepcopy
class Node(object):
"""
A single internal node in the tree graph. A Node should be viewed as a
connection (the root) with the children being either leaf nodes or other
Node instances.
"""
# Standard connector type. Clients usually won't use this at all and
# subclasses will usually override the value.
default = 'DEFAULT'
def __init__(self, children=None, connector=None, negated=False):
"""
Constructs a new Node. If no connector is given, the default will be
used.
Warning: You probably don't want to pass in the 'negated' parameter. It
is NOT the same as constructing a node and calling negate() on the
result.
"""
self.children = children and children[:] or []
self.connector = connector or self.default
self.subtree_parents = []
self.negated = negated
def __str__(self):
if self.negated:
return '(NOT (%s: %s))' % (self.connector, ', '.join([str(c) for c
in self.children]))
return '(%s: %s)' % (self.connector, ', '.join([str(c) for c in
self.children]))
def __deepcopy__(self, memodict):
"""
Utility method used by copy.deepcopy().
"""
obj = Node(connector=self.connector, negated=self.negated)
obj.__class__ = self.__class__
obj.children = deepcopy(self.children, memodict)
obj.subtree_parents = deepcopy(self.subtree_parents, memodict)
return obj
def __len__(self):
"""
The size of a node is the number of children it has.
"""
return len(self.children)
def __nonzero__(self):
"""
For truth value testing.
"""
return bool(self.children)
def __contains__(self, other):
"""
Returns True if 'other' is a direct child of this instance.
"""
return other in self.children
def add(self, node, conn_type):
"""
Adds a new node to the tree. If the conn_type is the same as the root's
current connector type, the node is added to the first level.
Otherwise, the whole tree is pushed down one level and a new root
connector is created, connecting the existing tree and the new node.
"""
if node in self.children:
return
if len(self.children) < 2:
self.connector = conn_type
if self.connector == conn_type:
if isinstance(node, Node) and (node.connector == conn_type or
len(node) == 1):
self.children.extend(node.children)
else:
self.children.append(node)
else:
obj = Node(self.children, self.connector, self.negated)
self.connector = conn_type
self.children = [obj, node]
def negate(self):
"""
Negate the sense of the root connector. This reorganises the children
so that the current node has a single child: a negated node containing
all the previous children. This slightly odd construction makes adding
new children behave more intuitively.
Interpreting the meaning of this negate is up to client code. This
method is useful for implementing "not" arrangements.
"""
self.children = [Node(self.children, self.connector, not self.negated)]
self.connector = self.default
def start_subtree(self, conn_type):
"""
Sets up internal state so that new nodes are added to a subtree of the
current node. The conn_type specifies how the sub-tree is joined to the
existing children.
"""
if len(self.children) == 1:
self.connector = conn_type
elif self.connector != conn_type:
self.children = [Node(self.children, self.connector, self.negated)]
self.connector = conn_type
self.negated = False
self.subtree_parents.append(Node(self.children, self.connector,
self.negated))
self.connector = self.default
self.negated = False
self.children = []
def end_subtree(self):
"""
Closes off the most recently unmatched start_subtree() call.
This puts the current state into a node of the parent tree and returns
the current instance's state to be the parent.
"""
obj = self.subtree_parents.pop()
node = Node(self.children, self.connector)
self.connector = obj.connector
self.negated = obj.negated
self.children = obj.children
self.children.append(node)
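A self-contained sketch of the Node mechanics that Q builds on; the output in the comments comes from the __str__ method above.

from django.utils.tree import Node

n = Node(['a', 'b'])    # default connector
n.add('c', 'OR')        # different connector: the tree is pushed down a level
print n                 # (OR: (DEFAULT: a, b), c)
n.negate()
print n                 # (DEFAULT: (NOT (OR: (DEFAULT: a, b), c)))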

View File

@ -160,37 +160,6 @@ When you save an object, Django performs the following steps:
is used to provide notification that an object has been successfully
saved. (These signals are not yet documented.)
Raw saves
~~~~~~~~~
**New in Django development version**
The pre-processing step (#2 in the previous section) is useful, but it modifies
the data stored in a field. This can cause problems if you're relying upon the
data you provide being used as-is.
For example, if you're setting up conditions for a test, you'll want the test
conditions to be repeatable. If pre-processing is performed, the data used
to specify test conditions may be modified, changing the conditions for the
test each time the test is run.
In cases such as this, you need to prevent pre-processing from being performed
when you save an object. To do this, you can invoke a **raw save** by passing
``raw=True`` as an argument to the ``save()`` method::
b4.save(raw=True) # Save object, but do no pre-processing
A raw save skips the usual data pre-processing that is performed during the
save. All other steps in the save (pre-save signal, data preparation, data
insertion, and post-save signal) are performed as normal.
.. admonition:: When to use a raw save
Generally speaking, you shouldn't need to use a raw save. Disabling field
pre-processing is an extraordinary measure that should only be required
in extraordinary circumstances, such as setting up reliable test
conditions.
Saving changes to objects
=========================
@ -211,11 +180,11 @@ Saving ForeignKey and ManyToManyField fields
--------------------------------------------
Updating ``ForeignKey`` fields works exactly the same way as saving a normal
field; simply assign an object of the right type to the field in question::
cheese_blog = Blog.objects.get(name="Cheddar Talk")
entry.blog = cheese_blog
entry.save()
Updating a ``ManyToManyField`` works a little differently; use the ``add()``
method on the field to add a record to the relation::
@ -422,6 +391,14 @@ This returns the sixth through tenth objects (``OFFSET 5 LIMIT 5``)::
Entry.objects.all()[5:10]
You can also slice from item *N* to the end of the queryset. For
example, to return everything from the fifth item onwards::
Entry.objects.all()[5:]
How this last example is implemented in SQL varies depending upon the database
used, but it is supported in all cases.
Generally, slicing a ``QuerySet`` returns a new ``QuerySet`` -- it doesn't
evaluate the query. An exception is if you use the "step" parameter of Python
slice syntax. For example, this would actually execute the query in order to
@ -514,15 +491,70 @@ like so::
Note: ``order_by('?')`` queries may be expensive and slow, depending on the
database backend you're using.
To order by a field in a different model, use the same syntax as when you are
querying across model relations. That is, the name of the field, followed by a
double underscore (``__``), followed by the name of the field in the new model,
and so on for as many models as you want to join. For example::
Entry.objects.order_by('blog__name', 'headline')
If you try to order by a field that is a relation to another model, Django will
use the default ordering on the related model (or order by the related model's
primary key if there is no ``Meta.ordering`` specified). For example::
Entry.objects.order_by('blog')
...is identical to::
Entry.objects.order_by('blog__id')
...since the ``Blog`` model has no default ordering specified.
Be cautious when ordering by fields in related models if you are also using
``distinct()``. See the note in the `distinct()`_ section for an explanation
of how related model ordering can change the expected results.
It is permissible to specify a multi-valued field to order the results by (for
example, a ``ManyToManyField``). Normally this won't be a sensible thing to do
and it's really an advanced usage feature. However, if you know that your
queryset's filtering or available data implies that there will only be a single
ordering value for each of the main items you are selecting, the ordering may
well be exactly what you want. Use ordering on multi-valued fields with care
and make sure the results are what you expect.
**New in Django development version:** If you don't want any ordering to be
applied to a query, not even the default ordering, call ``order_by()`` with no
parameters.
**New in Django development version:** The syntax for ordering across related
models has changed. See the `Django 0.96 documentation`_ for the old behaviour.
.. _Django 0.96 documentation: http://www.djangoproject.com/documentation/0.96/model-api/#floatfield
There's no way to specify whether ordering should be case sensitive. With
respect to case-sensitivity, Django will order results however your database
backend normally orders them.
``reverse()``
~~~~~~~~~~~~~
**New in Django development version**
If you want to reverse the order in which a queryset's elements are returned,
you can use the ``reverse()`` method. Calling ``reverse()`` a second time
restores the ordering back to the normal direction.
To retrieve the *last* five items in a queryset, you could do this::
my_queryset.reverse()[:5]
Note that this is not quite the same as slicing from the end of a sequence in
Python. The above example will return the last item first, then the
penultimate item and so on. If we had a Python sequence and looked at
``seq[-5:]``, we would see the fifth-last item first. Django doesn't support
that mode of access (slicing from the end), since it is not possible to do it
efficiently in SQL.
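For illustration, here is a small sketch (using the ``Entry`` model from
earlier, not an official example) comparing the queryset version with doing
the slicing in Python::

    Entry.objects.order_by('pub_date').reverse()[:5]   # newest of the five first
    list(Entry.objects.order_by('pub_date'))[-5:]      # oldest of the five first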
``distinct()``
~~~~~~~~~~~~~~
@ -531,10 +563,28 @@ eliminates duplicate rows from the query results.
By default, a ``QuerySet`` will not eliminate duplicate rows. In practice, this
is rarely a problem, because simple queries such as ``Blog.objects.all()``
don't introduce the possibility of duplicate result rows. However, if your
query spans multiple tables, it's possible to get duplicate results when a
``QuerySet`` is evaluated. That's when you'd use ``distinct()``.
.. note::
Any fields used in an ``order_by()`` call are included in the SQL
``SELECT`` columns. This can sometimes lead to unexpected results when
used in conjunction with ``distinct()``. If you order by fields from a
related model, those fields will be added to the selected columns and they
may make otherwise duplicate rows appear to be distinct. Since the extra
columns don't appear in the returned results (they are only there to
support ordering), it sometimes looks like non-distinct results are being
returned.
Similarly, if you use a ``values()`` query to restrict the columns
selected, the columns used in any ``order_by()`` (or default model
ordering) will still be involved and may affect uniqueness of the results.
The moral here is that if you are using ``distinct()`` be careful about
ordering by related models. Similarly, when using ``distinct()`` and
``values()`` together, be careful when ordering by fields not in the
``values()`` call.
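As a concrete sketch of that advice (using the ``Blog`` model from earlier),
clearing or restricting the ordering keeps related columns out of the
``SELECT``::

    Blog.objects.order_by('entry__headline').distinct()   # may appear to contain duplicates
    Blog.objects.order_by().distinct()                     # ordering cleared; rows are truly distinct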
``values(*fields)``
~~~~~~~~~~~~~~~~~~~
@ -569,6 +619,43 @@ Example::
>>> Blog.objects.values('id', 'name')
[{'id': 1, 'name': 'Beatles Blog'}]
You can also retrieve values from across ``ForeignKey`` relations by using
double underscores to separate the field names, just as when calling the
``filter()`` command. For example::
>>> Entry.objects.values('blog__name').distinct()
[{'name': 'Beatles Blog'}]
A couple of subtleties that are worth mentioning:
* The ``values()`` method does not return anything for ``ManyToManyField``
attributes and will raise an error if you try to pass in this type of
field to it.
* If you have a field called ``foo`` that is a ``ForeignKey``, the default
``values()`` call will return a dictionary key called ``foo_id``, since
this is the name of the hidden model attribute that stores the actual
value (the ``foo`` attribute refers to the related model). When you are
calling ``values()`` and passing in field names, you can pass in either
``foo`` or ``foo_id`` and you will get back the same thing (the
dictionary key will match the field name you passed in).
For example::
>>> Entry.objects.values()
[{'blog_id': 1, 'headline': u'First Entry', ...}, ...]
>>> Entry.objects.values('blog')
[{'blog': 1}, ...]
>>> Entry.objects.values('blog_id')
[{'blog_id': 1}, ...]
* When using ``values()`` together with ``distinct()``, be aware that
ordering can affect the results. See the note in the `distinct()`_
section, above, for details.
**New in Django development version:** Previously, it was not possible to pass
``blog_id`` to ``values()`` in the above example, only ``blog``.
A ``ValuesQuerySet`` is useful when you know you're only going to need values
from a small number of the available fields and you won't need the
functionality of a model instance object. It's more efficient to select only
@ -586,6 +673,34 @@ followed (optionally) by any output-affecting methods (such as ``values()``),
but it doesn't really matter. This is your chance to really flaunt your
individualism.
``values_list(*fields)``
~~~~~~~~~~~~~~~~~~~~~~~~
**New in Django development version**
This is similar to ``values()`` except that instead of returning a list of
dictionaries, it returns a list of tuples. Each tuple contains the value from
the respective field passed into the ``values_list()`` call -- so the first
item is the first field, etc. For example::
>>> Entry.objects.values_list('id', 'headline')
[(1, u'First entry'), ...]
If you only pass in a single field, you can also pass in the ``flat``
parameter. If ``True``, this will mean the returned results are single values,
rather than one-tuples. An example should make the difference clearer::
>>> Entry.objects.values_list('id').order_by('id')
[(1,), (2,), (3,), ...]
>>> Entry.objects.values_list('id', flat=True).order_by('id')
[1, 2, 3, ...]
It is an error to pass in ``flat`` when there is more than one field.
If you don't pass any values to ``values_list()``, it will return all the
fields in the model, in the order they were declared.
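Because the results are plain tuples (or plain values with ``flat=True``),
they can be fed straight to Python builtins. A small usage sketch, again with
the ``Entry`` model::

    headlines = set(Entry.objects.values_list('headline', flat=True))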
``dates(field, kind, order='ASC')``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -635,6 +750,17 @@ Examples::
>>> Entry.objects.none()
[]
``all()``
~~~~~~~~~~
**New in Django development version**
Returns a *copy* of the current ``QuerySet`` (or ``QuerySet`` subclass you
pass in). This can be useful in some situations where you might want to pass
in either a model manager or a ``QuerySet`` and do further filtering on the
result. You can safely call ``all()`` on either object and then you'll
definitely have a ``QuerySet`` to work with.
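For example, a hypothetical helper function (its name and filter are invented
for illustration) can accept either form::

    def published_in(source, year):
        # ``source`` may be a manager (Entry.objects) or an existing QuerySet;
        # calling all() first gives us a QuerySet either way.
        return source.all().filter(pub_date__year=year)

    published_in(Entry.objects, 2008)
    published_in(Entry.objects.filter(headline__contains='Lennon'), 2008)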
``select_related()``
~~~~~~~~~~~~~~~~~~~~
@ -687,8 +813,8 @@ related ``Person`` *and* the related ``City``::
p = b.author # Hits the database.
c = p.hometown # Hits the database.
Note that, by default, ``select_related()`` does not follow foreign keys that
have ``null=True``.
Usually, using ``select_related()`` can vastly improve performance because your
app can avoid many database calls. However, in situations with deeply nested
@ -705,8 +831,43 @@ follow::
The ``depth`` argument is new in the Django development version.
**New in Django development version:** Sometimes you only need to access
specific models that are related to your root model, not all of the related
models. In these cases, you can pass the related field names to
``select_related()`` and it will only follow those relations. You can even do
this for models that are more than one relation away by separating the field
names with double underscores, just as for filters. For example, if we have
these models::
class Room(models.Model):
# ...
building = models.ForeignKey(...)
class Group(models.Model):
# ...
teacher = models.ForeignKey(...)
room = models.ForeignKey(Room)
subject = models.ForeignKey(...)
...and we only needed to work with the ``room`` and ``subject`` attributes, we
could write this::
g = Group.objects.select_related('room', 'subject')
This is also valid::
g = Group.objects.select_related('room__building', 'subject')
...and would also pull in the ``building`` relation.
You can only refer to ``ForeignKey`` relations in the list of fields passed to
``select_related``. You *can* refer to foreign keys that have ``null=True``
(unlike the default ``select_related()`` call). It's an error to use both a
list of fields and the ``depth`` parameter in the same ``select_related()``
call, since they are conflicting options.
``extra(select=None, where=None, params=None, tables=None, order_by=None, select_params=None)``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Sometimes, the Django query syntax by itself can't easily express a complex
``WHERE`` clause. For these edge cases, Django provides the ``extra()``
@ -763,6 +924,21 @@ of the arguments is required, but you should use at least one of them.
some database backends, such as some MySQL versions, don't support
subqueries.
**New in Django development version**
In some rare cases, you might wish to pass parameters to the SQL fragments
in ``extra(select=...)``. For this purpose, use the ``select_params``
parameter. Since ``select_params`` is a sequence and the ``select``
attribute is a dictionary, some care is required so that the parameters
are matched up correctly with the extra select pieces. In this situation,
you should use a ``django.utils.datastructures.SortedDict`` for the
``select`` value, not just a normal Python dictionary.
This will work, for example::
Blog.objects.extra(
select=SortedDict([('a', '%s'), ('b', '%s')]),
select_params=('one', 'two'))
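The ``SortedDict`` class used in that snippet comes from
``django.utils.datastructures``, so an import along these lines is assumed::

    from django.utils.datastructures import SortedDict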
``where`` / ``tables``
You can define explicit SQL ``WHERE`` clauses -- perhaps to perform
non-explicit joins -- by using ``where``. You can manually add tables to
@ -779,20 +955,61 @@ of the arguments is required, but you should use at least one of them.
SELECT * FROM blog_entry WHERE id IN (3, 4, 5, 20);
Be careful when using the ``tables`` parameter if you're specifying
tables that are already used in the query. When you add extra tables
via the ``tables`` parameter, Django assumes you want that table included
an extra time, if it is already included. That creates a problem,
since the table name will then be given an alias. If a table appears
multiple times in an SQL statement, the second and subsequent occurrences
must use aliases so the database can tell them apart. If you're
referring to the extra table you added in the extra ``where`` parameter
this is going to cause errors.
Normally you'll only be adding extra tables that don't already appear in
the query. However, if the case outlined above does occur, there are a few
solutions. First, see if you can get by without including the extra table
and use the one already in the query. If that isn't possible, put your
``extra()`` call at the front of the queryset construction so that your
table is the first use of that table. Finally, if all else fails, look at
the query produced and rewrite your ``where`` addition to use the alias
given to your extra table. The alias will be the same each time you
construct the queryset in the same way, so you can rely upon the alias
name to not change.
``order_by``
If you need to order the resulting queryset using some of the new fields
or tables you have included via ``extra()`` use the ``order_by`` parameter
to ``extra()`` and pass in a sequence of strings. These strings should be
either model fields (as in the normal ``order_by()`` method on querysets),
references of the form ``table_name.column_name``, or an alias for a column
that you specified in the ``select`` parameter to ``extra()``.
For example::
q = Entry.objects.extra(select={'is_recent': "pub_date > '2006-01-01'"})
q = q.extra(order_by = ['-is_recent'])
This would sort all the items for which ``is_recent`` is true to the front
of the result set (``True`` sorts before ``False`` in a descending
ordering).
This shows, by the way, that you can make multiple calls to
``extra()`` and it will behave as you expect (adding new constraints each
time).
``params``
The ``where`` parameter described above may use standard Python database
string placeholders -- ``'%s'`` to indicate parameters the database engine
should automatically quote. The ``params`` argument is a list of any extra
parameters to be substituted.
Example::
Entry.objects.extra(where=['headline=%s'], params=['Lennon'])
Always use ``params`` instead of embedding values directly into ``where``
because ``params`` will ensure values are quoted correctly according to
your particular backend. (For example, quotes will be escaped correctly.)
Bad::
@ -802,8 +1019,9 @@ of the arguments is required, but you should use at least one of them.
Entry.objects.extra(where=['headline=%s'], params=['Lennon'])
The combined number of placeholders in the list of strings for ``select``
or ``where`` should equal the number of values in the ``params`` list.
**New in Django development version** The ``select_params`` argument to
``extra()`` is new. Previously, you could attempt to pass parameters for
``select`` in the ``params`` argument, but it worked very unreliably.
QuerySet methods that do not return QuerySets
---------------------------------------------
@ -1031,7 +1249,12 @@ Examples::
SQL equivalents::
SELECT ... WHERE id = 14;
SELECT ... WHERE id IS NULL;
**New in Django development version:** The semantics of ``id__exact=None`` have
changed in the development version. Previously, it was (intentionally)
converted to ``WHERE id = NULL`` at the SQL level, which would never match
anything. It has now been changed to behave the same as ``id__isnull=True``.
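In other words (a sketch, where ``f`` is assumed to be a nullable field on a
hypothetical ``SomeModel``), these two filters now produce the same SQL::

    SomeModel.objects.filter(f__exact=None)
    SomeModel.objects.filter(f__isnull=True)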
iexact
~~~~~~
@ -1261,14 +1484,6 @@ SQL equivalent::
SELECT ... WHERE pub_date IS NULL;
search
~~~~~~
@ -1368,6 +1583,11 @@ equivalent::
Entry.objects.filter(blog__id=3) # __exact is implied
Entry.objects.filter(blog__pk=3) # __pk implies __id__exact
.. note::
Because of this shortcut, you cannot have a field called ``pk`` that is not
the primary key of the model. It will always be replaced by the name of the
model's primary key in queries.
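The ``pk`` shortcut isn't limited to ``__exact`` queries; it can be combined
with other lookup types as well. For example::

    Entry.objects.filter(pk__in=[1, 2, 3])
    Entry.objects.filter(pk__gt=14)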
Lookups that span relationships
-------------------------------
@ -1392,6 +1612,60 @@ whose ``headline`` contains ``'Lennon'``::
Blog.objects.filter(entry__headline__contains='Lennon')
Spanning multi-valued relationships
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
**New in Django development version**
When you are filtering an object based on a ``ManyToManyField`` or a reverse
``ForeignKey``, there are two different sorts of filter you may be
interested in. Consider the ``Blog``/``Entry`` relationship (``Blog`` to
``Entry`` is a one-to-many relation). We might be interested in finding blogs
that have an entry which has both *"Lennon"* in the headline and was published
today. Or we might want to find blogs that have an entry with *"Lennon"* in
the headline as well as an entry that was published today. Since there are
multiple entries associated with a single ``Blog``, both of these queries are
possible and make sense in some situations.
The same type of situation arises with a ``ManyToManyField``. For example, if
an ``Entry`` has a ``ManyToManyField`` called ``tags``, we might want to find
entries linked to tags called *"music"* and *"bands"* or we might want an
entry that contains a tag with a name of *"music"* and a status of *"public"*.
To handle both of these situations, Django has a consistent way of processing
``filter()`` and ``exclude()`` calls. Everything inside a single ``filter()``
call is applied simultaneously to filter out items matching all those
requirements. Successive ``filter()`` calls further restrict the set of
objects, but for multi-valued relations, they apply to any object linked to
the primary model, not necessarily those objects that were selected by an
earlier ``filter()`` call.
That may sound a bit confusing, so hopefully an example will clarify. To
select all blogs that contain entries with *"Lennon"* in the headline and
were published today, we would write::
Blog.objects.filter(entry__headline__contains='Lennon',
entry__pub_date=datetime.date.today())
To select all blogs that contain an entry with *"Lennon"* in the headline
**as well as** an entry that was published today, we would write::
Blog.objects.filter(entry__headline__contains='Lennon').filter(
entry__pub_date=datetime.date.today())
In this second example, the first filter restricted the queryset to all those
blogs linked to that particular type of entry. The second filter restricted
the set of blogs *further* to those that are also linked to the second type of
entry. The entries selected by the second filter may or may not be the same as
the entries in the first filter. We are filtering the ``Blog`` items with each
filter statement, not the ``Entry`` items.
All of this behaviour also applies to ``exclude()``: all the conditions in a
single ``exclude()`` statement apply to a single instance (if those conditions
are talking about the same multi-valued relation). Conditions in subsequent
``filter()`` or ``exclude()`` calls that refer to the same relation may end up
filtering on different linked objects.
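For instance, following the description above, a sketch of the ``exclude()``
counterpart of the first example: this keeps only the blogs that do *not* have
a single entry matching both conditions::

    Blog.objects.exclude(
        entry__headline__contains='Lennon',
        entry__pub_date=datetime.date.today())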
Escaping percent signs and underscores in LIKE statements
---------------------------------------------------------
@ -1496,6 +1770,12 @@ This is equivalent to the following SQL ``WHERE`` clause::
You can compose statements of arbitrary complexity by combining ``Q`` objects
with the ``&`` and ``|`` operators. You can also use parenthetical grouping.
**New in Django development version:** ``Q`` objects can also be negated using
the ``~`` operator, allowing for lookups that combine a normal query with a
negated (``NOT``) query::
Q(question__startswith='Who') | ~Q(pub_date__year=2005)
Each lookup function that takes keyword-arguments (e.g. ``filter()``,
``exclude()``, ``get()``) can also be passed one or more ``Q`` objects as
positional (not-named) arguments. If you provide multiple ``Q`` object
@ -1815,6 +2095,34 @@ complete query set::
Entry.objects.all().delete()
Updating multiple objects at once
=================================
**New in Django development version**
Sometimes you want to set a field to a particular value for all the objects in
a queryset. You can do this with the ``update()`` method. For example::
# Update all the headlines to the same value.
Entry.objects.all().update(headline='Everything is the same')
You can only set non-relation fields and ``ForeignKey`` fields using this
method and the value you set the field to must be a normal Python value (you
can't set a field to be equal to some other field at the moment).
To update ``ForeignKey`` fields, set the new value to be the new model
instance you want to point to. Example::
b = Blog.objects.get(pk=1)
# Make all entries belong to this blog.
Entry.objects.all().update(blog=b)
The ``update()`` method is applied instantly and doesn't return anything
(similar to ``delete()``). The only restriction on the queryset that is
updated is that it can only access one database table, the model's main
table. So don't try to filter based on related fields or anything like that;
it won't work.
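As a usage sketch (again with the ``Entry`` model), the filter may use any of
the model's own fields before the ``update()``::

    Entry.objects.filter(pub_date__year=2007).update(headline='Archived')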
Extra instance methods
======================

View File

@ -886,6 +886,10 @@ relationship should work. All are optional:
`related objects documentation`_ for a full
explanation and example.
If using this in an `abstract base class`_, be
sure to read the `extra notes`_ in that section
about ``related_name``.
``to_field`` The field on the related object that the relation
is to. By default, Django uses the primary key of
the related object.
@ -893,6 +897,8 @@ relationship should work. All are optional:
.. _`Database API reference`: ../db-api/
.. _related objects documentation: ../db-api/#related-objects
.. _abstract base class: `Abstract base classes`_
.. _extra notes: `Be careful with related_name`_
Many-to-many relationships
~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -988,9 +994,6 @@ the relationship should work. All are optional:
One-to-one relationships
~~~~~~~~~~~~~~~~~~~~~~~~
To define a one-to-one relationship, use ``OneToOneField``. You use it just
like any other ``Field`` type: by including it as a class attribute of your
model.
@ -1012,9 +1015,26 @@ As with ``ForeignKey``, a relationship to self can be defined by using the
string ``"self"`` instead of the model name; references to as-yet undefined
models can be made by using a string containing the model name.
Finally, ``OneToOneField`` takes the following extra option:
======================= ============================================================
Argument Description
======================= ============================================================
``parent_link`` When ``True`` and used in a model inherited from
another model, indicates that this field should
be used as the link from the child back to the
parent. See `Model inheritance`_ for more
details.
**New in Django development version**
======================= ============================================================
**New in Django development version:** ``OneToOneField`` classes used to
automatically become the primary key on a model. This is no longer true,
although you can manually pass in the ``primary_key`` attribute if you like.
Thus, it's now possible to have multiple fields of type ``OneToOneField`` on a
single model.
See the `One-to-one relationship model example`_ for a full example.
@ -1048,6 +1068,14 @@ Model metadata is "anything that's not a field", such as ordering options, etc.
Here's a list of all possible ``Meta`` options. No options are required. Adding
``class Meta`` to a model is completely optional.
``abstract``
------------
**New in Django development version**
When set to ``True``, denotes this model as an abstract base class. See
`Abstract base classes`_ for more details. Defaults to ``False``.
``db_table``
------------
@ -1155,6 +1183,10 @@ together. It's used in the Django admin and is enforced at the database
level (i.e., the appropriate ``UNIQUE`` statements are included in the
``CREATE TABLE`` statement).
All the fields specified in ``unique_together`` must be part of the current
model. If you are using `model inheritance`_, you cannot refer to fields from
any parent classes in ``unique_together``.
**New in Django development version**
For convenience, unique_together can be a single list when dealing
@ -2041,6 +2073,238 @@ You can also prevent saving::
.. _database API docs: ../db-api/
Model inheritance
=================
**New in Django development version**
Model inheritance in Django works almost identically to the way normal class
inheritance works in Python. The only decision you have to make is whether you
want the parent models to be models in their own right (with their own
database tables), or if the parents are just holders of common information
that will only be visible through the child models.
Often, you will just want to use the parent class to hold information that you
don't want to have to type out for each child model. This class isn't going to
ever be used in isolation, so `abstract base classes`_ are what you're after.
However, if you're subclassing an existing model (perhaps something from
another application entirely), or want each model to have its own database
table, `multi-table inheritance`_ is the way to go.
Abstract base classes
---------------------
Abstract base classes are useful when you want to put some common information
into a number of other models. You write your base class and put
``abstract=True`` in the ``Meta`` class. This model will then not be used to
create any database table. Instead, when it is used as a base class for other
models, its fields will be added to those of the child class. It is an error
to have fields in the abstract base class with the same name as those in the
child (and Django will raise an exception).
An example::
class CommonInfo(models.Model):
name = models.CharField(max_length=100)
age = models.PositiveIntegerField()
class Meta:
abstract = True
class Student(CommonInfo):
home_group = models.CharField(max_length=5)
The ``Student`` model will have three fields: ``name``, ``age`` and
``home_group``. The ``CommonInfo`` model cannot be used as a normal Django
model, since it is an abstract base class. It does not generate a database
table or have a manager or anything like that.
For many uses, this type of model inheritance will be exactly what you want.
It provides a way to factor out common information at the Python level, whilst
still only creating one database table per child model at the database level.
``Meta`` inheritance
~~~~~~~~~~~~~~~~~~~~
When an abstract base class is created, Django makes any ``Meta`` inner class
you declared on the base class available as an attribute. If a child class
does not declare its own ``Meta`` class, it will inherit the parent's
``Meta``. If the child wants to extend the parent's ``Meta`` class, it can
subclass it. For example::
class CommonInfo(models.Model):
...
class Meta:
abstract = True
ordering = ['name']
class Student(CommonInfo):
...
class Meta(CommonInfo.Meta):
db_table = 'student_info'
Django does make one adjustment to the ``Meta`` class of an abstract base
class: before installing the ``Meta`` attribute, it sets ``abstract=False``.
This means that children of abstract base classes don't automatically become
abstract classes themselves. Of course, you can make an abstract base class
that inherits from another abstract base class. You just need to remember to
explicitly set ``abstract=True`` each time.
Some attributes won't make sense to include in the ``Meta`` class of an
abstract base class. For example, including ``db_table`` would mean that all
the child classes (the ones that don't specify their own ``Meta``) would use
the same database table, which is almost certainly not what you want.
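To illustrate the last two points, here is a small sketch (model names
invented) of one abstract base class inheriting from another; note that
``abstract = True`` has to be restated::

    class Base(models.Model):
        created = models.DateTimeField()

        class Meta:
            abstract = True

    class StillAbstract(Base):
        notes = models.TextField()

        class Meta(Base.Meta):
            # Not inherited automatically: without this line, StillAbstract
            # would become a concrete model with its own table.
            abstract = True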
Be careful with ``related_name``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you are using the ``related_name`` attribute on a ``ForeignKey`` or
``ManyToManyField``, you must always specify a *unique* reverse name for the
field. This would normally cause a problem in abstract base classes, since the
fields on this class are included into each of the child classes, with exactly
the same values for the attributes (including ``related_name``) each time.
To work around this problem, when you are using ``related_name`` in an
abstract base class (only), part of the name should be the string
``'%(class)s'``. This is replaced by the lower-cased name of the child class
that the field is used in. Since each class has a different name, each related
name will end up being different. For example::
class Base(models.Model):
m2m = models.ManyToManyField(OtherModel, related_name="%(class)s_related")
class Meta:
abstract = True
class ChildA(Base):
pass
class ChildB(Base):
pass
The reverse name of the ``ChildA.m2m`` field will be ``childa_related``,
whilst the reverse name of the ``ChildB.m2m`` field will be
``childb_related``. It is up to you how you use the ``'%(class)s'`` portion to
construct your related name, but if you forget to use it, Django will raise
errors when you validate your models (or run ``syncdb``).
If you don't specify a ``related_name`` attribute for a field in an abstract
base class, the default reverse name will be the name of the child class
followed by ``'_set'``, just as it normally would be if you'd declared the
field directly on the child class. For example, in the above code, if the
``related_name`` attribute was omitted, the reverse name for the ``m2m`` field
would be ``childa_set`` in the ``ChildA`` case and ``childb_set`` for the
``ChildB`` field.
Multi-table inheritance
-----------------------
The second type of model inheritance supported by Django is when each model in
the hierarchy is a model all by itself. Each model corresponds to its own
database table and can be queried and created individually. The inheritance
relationship introduces links between the child model and each of its parents
(via an automatically created ``OneToOneField``). For example::
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
class Restaurant(Place):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
All of the fields of ``Place`` will also be available in ``Restaurant``,
although the data will reside in a different database table. So these are both
possible::
>>> Place.objects.filter(name="Bob's Cafe")
>>> Restaurant.objects.filter(name="Bob's Cafe")
If you have a ``Place`` that is also a ``Restaurant``, you can get from the
``Place`` object to the ``Restaurant`` object by using the lower-case version
of the model name::
>>> p = Place.objects.filter(name="Bob's Cafe")
# If Bob's Cafe is a Restaurant object, this will give the child class:
>>> p.restaurant
<Restaurant: ...>
However, if ``p`` in the above example was *not* a ``Restaurant`` (it had been
created directly as a ``Place`` object or was the parent of some other class),
referring to ``p.restaurant`` would give an error.
``Meta`` and multi-table inheritance
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
In the multi-table inheritance situation, it doesn't make sense for a child
class to inherit from its parent's ``Meta`` class. All the ``Meta`` options
have already been applied to the parent class and applying them again would
normally only lead to contradictory behaviour (this is in contrast with the
abstract base class case, where the base class doesn't exist in its own
right).
So a child model does not have access to its parent's ``Meta`` class. However,
there are a few limited cases where the child inherits behaviour from the
parent: if the child does not specify an ``ordering`` attribute or a
``get_latest_by`` attribute, it will inherit these from its parent.
If the parent has an ordering and you don't want the child to have any natural
ordering, you can explicitly set it to be empty::
class ChildModel(ParentModel):
...
class Meta:
# Remove parent's ordering effect
ordering = []
Inheritance and reverse relations
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Because multi-table inheritance uses an implicit ``OneToOneField`` to link the
child and the parent, it's possible to move from the parent down to the child,
as in the above example. However, this uses up the name that is the default
``related_name`` value for ``ForeignKey`` and ``ManyToManyField`` relations.
If you are putting those types of relations on a subclass of another model,
you **must** specify the ``related_name`` attribute on each such field. If you
forget, Django will raise an error when you run ``manage.py validate`` or try
to run ``syncdb``.
For example, using the above ``Place`` class again, let's create another
subclass with a ``ManyToManyField``::
class Supplier(Place):
# Must specify related_name on all relations.
customers = models.ManyToManyField(Restaurant,
related_name='provider')
For more information about reverse relations, refer to the `Database API
reference`_ . For now, just remember to run ``manage.py validate`` when
you're writing your models and pay attention to the error messages.
Specifying the parent link field
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
As mentioned, Django will automatically create a ``OneToOneField`` linking
your child class back to any non-abstract parent models. If you want to control
the name of the attribute linking back to the parent, you can create your own
link field and pass it ``parent_link=True``. For example, to explicitly
specify the field that will link ``Supplier`` to ``Place`` in the above
example, you could write::
class Supplier(Place):
parent = models.OneToOneField(Place, parent_link=True)
...
Multiple inheritance
--------------------
Just as with Python's subclassing, it's possible for a Django model to inherit
from multiple parent models. Keep in mind that normal Python name resolution
rules apply. The first base class that a particular name appears in (e.g.
``Meta``) will be the one that is used; searching stops at the first occurrence
of the name. This means that if multiple parents contain a ``Meta`` class, only
the first one is going to be used. All others will be ignored.
Generally, you won't need to inherit from multiple parents. The main use-case
where this is useful is for *mix-in* classes: adding a particular extra
field or method to every class that inherits the mix-in. Try to keep your
inheritance hierarchies as simple and straightforward as possible so that you
won't have to struggle to work out where a particular piece of information is
coming from.
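A short sketch of the mix-in use-case (all names invented for illustration),
combining two abstract parents::

    class TitleMixin(models.Model):
        title = models.CharField(max_length=100)

        class Meta:
            abstract = True

    class DateMixin(models.Model):
        pub_date = models.DateField()

        class Meta:
            abstract = True

    class Article(TitleMixin, DateMixin):
        body = models.TextField()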
Models across files
===================

View File

@ -292,11 +292,9 @@ datetime.datetime(2005, 7, 28, 0, 0)
>>> Article.objects.all()[2:][2:3]
[<Article: Default headline>]
# Using an offset without a limit is also possible.
>>> Article.objects.all()[5:]
[<Article: Fourth article>, <Article: Article 7>, <Article: Updated article 8>]
# Also, once you have sliced you can't filter, re-order or combine
>>> Article.objects.all()[0:5].filter(id=1)

View File

@ -55,8 +55,8 @@ __test__ = {'API_TESTS':"""
>>> art.save()
>>> art.authors = [a, a2]
# Although the table and column names on Author have been set to custom values,
# nothing about using the Author model has changed...
# Query the available authors
>>> Author.objects.all()
@ -71,7 +71,7 @@ __test__ = {'API_TESTS':"""
>>> Author.objects.filter(firstname__exact='John')
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'firstname' into field. Choices are: article, first_name, id, last_name
>>> a = Author.objects.get(last_name__exact='Smith')
>>> a.first_name

View File

@ -5,6 +5,7 @@ Tests for field subclassing.
from django.db import models
from django.utils.encoding import force_unicode
from django.core import serializers
from django.core.exceptions import FieldError
class Small(object):
"""
@ -50,7 +51,7 @@ class SmallField(models.Field):
return [force_unicode(v) for v in value]
if lookup_type == 'isnull':
return []
raise FieldError('Invalid lookup type: %r' % lookup_type)
def flatten_data(self, follow, obj=None):
return {self.attname: force_unicode(self._get_val_from_obj(obj))}
@ -94,7 +95,7 @@ True
>>> MyModel.objects.filter(data__lt=s)
Traceback (most recent call last):
...
FieldError: Invalid lookup type: 'lt'
# Serialization works, too.
>>> stream = serializers.serialize("json", MyModel.objects.all())

View File

@ -162,12 +162,36 @@ True
>>> Article.objects.extra(select={'id_plus_one': 'id + 1'}).values('id', 'id_plus_two')
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'id_plus_two' into field. Choices are: headline, id, id_plus_one, pub_date
# If you don't specify field names to values(), all are returned.
>>> list(Article.objects.filter(id=5).values()) == [{'id': 5, 'headline': 'Article 5', 'pub_date': datetime(2005, 8, 1, 9, 0)}]
True
# values_list() is similar to values(), except that the results are returned as
# a list of tuples, rather than a list of dictionaries. Within each tuple, the
# order of the elements is the same as the order of fields in the values_list()
# call.
>>> Article.objects.values_list('headline')
[(u'Article 5',), (u'Article 6',), (u'Article 4',), (u'Article 2',), (u'Article 3',), (u'Article 7',), (u'Article 1',)]
>>> Article.objects.values_list('id').order_by('id')
[(1,), (2,), (3,), (4,), (5,), (6,), (7,)]
>>> Article.objects.values_list('id', flat=True).order_by('id')
[1, 2, 3, 4, 5, 6, 7]
>>> Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id')
[(1,), (2,), (3,), (4,), (5,), (6,), (7,)]
>>> Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id_plus_one', 'id')
[(2, 1), (3, 2), (4, 3), (5, 4), (6, 5), (7, 6), (8, 7)]
>>> Article.objects.extra(select={'id_plus_one': 'id+1'}).order_by('id').values_list('id', 'id_plus_one')
[(1, 2), (2, 3), (3, 4), (4, 5), (5, 6), (6, 7), (7, 8)]
>>> Article.objects.values_list('id', 'headline', flat=True)
Traceback (most recent call last):
...
TypeError: 'flat' is not valid when values_list is called with more than one field.
# Every DateField and DateTimeField creates get_next_by_FOO() and
# get_previous_by_FOO() methods.
# In the case of identical date values, these methods will use the ID as a
@ -240,6 +264,8 @@ DoesNotExist: Article matching query does not exist.
[]
>>> Article.objects.none().filter(headline__startswith='Article')
[]
>>> Article.objects.filter(headline__startswith='Article').none()
[]
>>> Article.objects.none().count()
0
>>> [article for article in Article.objects.none().iterator()]
@ -256,12 +282,12 @@ DoesNotExist: Article matching query does not exist.
>>> Article.objects.filter(pub_date_year='2005').count()
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'pub_date_year' into field. Choices are: headline, id, pub_date
>>> Article.objects.filter(headline__starts='Article')
Traceback (most recent call last):
...
FieldError: Join on field 'headline' not permitted.
# Create some articles with a bit more interesting headlines for testing field lookups:
>>> now = datetime.now()

View File

@ -126,6 +126,11 @@ __test__ = {'API_TESTS':"""
>>> Publication.objects.filter(article__in=[a1,a2]).distinct()
[<Publication: Highlights for Children>, <Publication: Science News>, <Publication: Science Weekly>, <Publication: The Python Journal>]
# Excluding a related item works as you would expect, too (although the SQL
# involved is a little complex).
>>> Article.objects.exclude(publications=p2)
[<Article: Django lets you build Web apps easily>]
# If we delete a Publication, its Articles won't be able to access it.
>>> p1.delete()
>>> Publication.objects.all()

View File

@ -145,18 +145,18 @@ False
[<Article: John's second story>, <Article: This is a test>]
# The underlying query only makes one join when a related table is referenced twice.
>>> queryset = Article.objects.filter(reporter__first_name__exact='John', reporter__last_name__exact='Smith')
>>> sql = queryset.query.as_sql()[0]
>>> sql.count('INNER JOIN')
1
# The automatically joined table has a predictable name.
>>> Article.objects.filter(reporter__first_name__exact='John').extra(where=["many_to_one_article__reporter.last_name='Smith'"])
>>> Article.objects.filter(reporter__first_name__exact='John').extra(where=["many_to_one_reporter.last_name='Smith'"])
[<Article: John's second story>, <Article: This is a test>]
# And should work fine with the unicode that comes out of
# newforms.Form.cleaned_data
>>> Article.objects.filter(reporter__first_name__exact='John').extra(where=["many_to_one_article__reporter.last_name='%s'" % u'Smith'])
>>> Article.objects.filter(reporter__first_name__exact='John').extra(where=["many_to_one_reporter.last_name='%s'" % u'Smith'])
[<Article: John's second story>, <Article: This is a test>]
# Find all Articles for the Reporter whose ID is 1.
@ -179,13 +179,13 @@ False
>>> Article.objects.filter(reporter_id__exact=1)
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'reporter_id' into field. Choices are: headline, id, pub_date, reporter
# You need to specify a comparison clause
>>> Article.objects.filter(reporter_id=1)
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'reporter_id' into field. Choices are: headline, id, pub_date, reporter
# You can also instantiate an Article by passing
# the Reporter's ID instead of a Reporter object.
@ -250,6 +250,11 @@ TypeError: Cannot resolve keyword 'reporter_id' into field. Choices are: id, hea
>>> Reporter.objects.filter(article__reporter=r).distinct()
[<Reporter: John Smith>]
# It's possible to use values() calls across many-to-one relations. (Note, too,
# that we clear the ordering here so as not to drag the 'headline' field into
# the columns being used to determine uniqueness.)
>>> d = {'reporter__first_name': u'John', 'reporter__last_name': u'Smith'}
>>> list(Article.objects.filter(reporter=r).distinct().order_by().values('reporter__first_name', 'reporter__last_name')) == [d]
True
# If you delete a reporter, his articles will be deleted.
>>> Article.objects.all()
[<Article: John's second story>, <Article: Paul's story>, <Article: This is a test>, <Article: This is a test>, <Article: This is a test>]

View File

@ -80,6 +80,11 @@ None
>>> Article.objects.filter(reporter__isnull=True)
[<Article: Third>]
# We can achieve the same thing by filtering for the case where the reporter is
# None.
>>> Article.objects.filter(reporter=None)
[<Article: Third>]
# Set the reporter for the Third article
>>> r.article_set.add(a3)
>>> r.article_set.all()

View File

@ -1,11 +1,53 @@
"""
XX. Model inheritance
Model inheritance exists in two varieties:
- abstract base classes which are a way of specifying common
information inherited by the subclasses. They don't exist as a separate
model.
- non-abstract base classes (the default), which are models in their own
right with their own database tables and everything. Their subclasses
have references back to them, created automatically.
Both styles are demonstrated here.
"""
from django.db import models
#
# Abstract base classes
#
class CommonInfo(models.Model):
name = models.CharField(max_length=50)
age = models.PositiveIntegerField()
class Meta:
abstract = True
ordering = ['name']
def __unicode__(self):
return u'%s %s' % (self.__class__.__name__, self.name)
class Worker(CommonInfo):
job = models.CharField(max_length=50)
class Student(CommonInfo):
school_class = models.CharField(max_length=10)
class Meta:
pass
#
# Multi-table inheritance
#
class Chef(models.Model):
name = models.CharField(max_length=50)
def __unicode__(self):
return u"%s the chef" % self.name
class Place(models.Model):
name = models.CharField(max_length=50)
address = models.CharField(max_length=80)
@ -13,9 +55,20 @@ class Place(models.Model):
def __unicode__(self):
return u"%s the place" % self.name
class Rating(models.Model):
rating = models.IntegerField(null=True, blank=True)
class Meta:
abstract = True
ordering = ['-rating']
class Restaurant(Place, Rating):
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
chef = models.ForeignKey(Chef, null=True, blank=True)
class Meta(Rating.Meta):
db_table = 'my_restaurant'
def __unicode__(self):
return u"%s the restaurant" % self.name
@ -26,14 +79,58 @@ class ItalianRestaurant(Restaurant):
def __unicode__(self):
return u"%s the italian restaurant" % self.name
__test__ = {'API_TESTS':"""
# Make sure Restaurant has the right fields in the right order.
>>> [f.name for f in Restaurant._meta.fields]
['id', 'name', 'address', 'serves_hot_dogs', 'serves_pizza']
class Supplier(Place):
customers = models.ManyToManyField(Restaurant, related_name='provider')
def __unicode__(self):
return u"%s the supplier" % self.name
class ParkingLot(Place):
# An explicit link to the parent (we can control the attribute name).
parent = models.OneToOneField(Place, primary_key=True, parent_link=True)
main_site = models.ForeignKey(Place, related_name='lot')
def __unicode__(self):
return u"%s the parking lot" % self.name
__test__ = {'API_TESTS':"""
# The Student and Worker models both have 'name' and 'age' fields on them and
# inherit the __unicode__() method, just as with normal Python subclassing.
# This is useful if you want to factor out common information for programming
# purposes, but still completely independent separate models at the database
# level.
>>> w = Worker(name='Fred', age=35, job='Quarry worker')
>>> w.save()
>>> w2 = Worker(name='Barney', age=34, job='Quarry worker')
>>> w2.save()
>>> s = Student(name='Pebbles', age=5, school_class='1B')
>>> s.save()
>>> unicode(w)
u'Worker Fred'
>>> unicode(s)
u'Student Pebbles'
# The children inherit the Meta class of their parents (if they don't specify
# their own).
>>> Worker.objects.values('name')
[{'name': u'Barney'}, {'name': u'Fred'}]
# Since Student does not subclass CommonInfo's Meta, it has the effect of
# completely overriding it. So ordering by name doesn't take place for Students.
>>> Student._meta.ordering
[]
# However, the CommonInfo class cannot be used as a normal model (it doesn't
# exist as a model).
>>> CommonInfo.objects.all()
Traceback (most recent call last):
...
AttributeError: type object 'CommonInfo' has no attribute 'objects'
# The Place/Restaurant/ItalianRestaurant models, on the other hand, all exist
# as independent models. However, the subclasses also have transparent access
# to the fields of their ancestors.
# Create a couple of Places.
>>> p1 = Place(name='Master Shakes', address='666 W. Jersey')
@ -41,13 +138,131 @@ __test__ = {'API_TESTS':"""
>>> p2 = Place(name='Ace Hardware', address='1013 N. Ashland')
>>> p2.save()
# Test the constructor for Restaurant.
>>> r = Restaurant(name='Demon Dogs', address='944 W. Fullerton', serves_hot_dogs=True, serves_pizza=False, rating=2)
>>> r.save()
# Test the constructor for ItalianRestaurant.
>>> c = Chef(name="Albert")
>>> c.save()
>>> ir = ItalianRestaurant(name='Ristorante Miron', address='1234 W. Ash', serves_hot_dogs=False, serves_pizza=False, serves_gnocchi=True, rating=4, chef=c)
>>> ir.save()
>>> ir.address = '1234 W. Elm'
>>> ir.save()
# Make sure Restaurant and ItalianRestaurant have the right fields in the right
# order.
>>> [f.name for f in Restaurant._meta.fields]
['id', 'name', 'address', 'place_ptr', 'rating', 'serves_hot_dogs', 'serves_pizza', 'chef']
>>> [f.name for f in ItalianRestaurant._meta.fields]
['id', 'name', 'address', 'place_ptr', 'rating', 'serves_hot_dogs', 'serves_pizza', 'chef', 'restaurant_ptr', 'serves_gnocchi']
>>> Restaurant._meta.ordering
['-rating']
# Even though p.supplier exists for a Place 'p' that is the parent of a
# Supplier, a Restaurant object cannot access that reverse relation, since it's
# not part of the Place-Supplier hierarchy.
>>> Place.objects.filter(supplier__name='foo')
[]
>>> Restaurant.objects.filter(supplier__name='foo')
Traceback (most recent call last):
...
FieldError: Cannot resolve keyword 'supplier' into field. Choices are: address, chef, id, italianrestaurant, lot, name, place_ptr, provider, rating, serves_hot_dogs, serves_pizza
# Parent fields can be used directly in filters on the child model.
>>> Restaurant.objects.filter(name='Demon Dogs')
[<Restaurant: Demon Dogs the restaurant>]
>>> ItalianRestaurant.objects.filter(address='1234 W. Elm')
[<ItalianRestaurant: Ristorante Miron the italian restaurant>]
# Filters against the parent model return objects of the parent's type.
>>> Place.objects.filter(name='Demon Dogs')
[<Place: Demon Dogs the place>]
# Since the parent and child are linked by an automatically created
# OneToOneField, you can get from the parent to the child by using the child's
# name.
>>> place = Place.objects.get(name='Demon Dogs')
>>> place.restaurant
<Restaurant: Demon Dogs the restaurant>
>>> Place.objects.get(name='Ristorante Miron').restaurant.italianrestaurant
<ItalianRestaurant: Ristorante Miron the italian restaurant>
>>> Restaurant.objects.get(name='Ristorante Miron').italianrestaurant
<ItalianRestaurant: Ristorante Miron the italian restaurant>
# This won't work because the Demon Dogs restaurant is not an Italian
# restaurant.
>>> place.restaurant.italianrestaurant
Traceback (most recent call last):
...
DoesNotExist: ItalianRestaurant matching query does not exist.
# Related objects work just as they normally do.
>>> s1 = Supplier(name="Joe's Chickens", address='123 Sesame St')
>>> s1.save()
>>> s1.customers = [r, ir]
>>> s2 = Supplier(name="Luigi's Pasta", address='456 Sesame St')
>>> s2.save()
>>> s2.customers = [ir]
# This won't work because the Place we select is not a Restaurant (it's a
# Supplier).
>>> p = Place.objects.get(name="Joe's Chickens")
>>> p.restaurant
Traceback (most recent call last):
...
DoesNotExist: Restaurant matching query does not exist.
# But we can descend from p to the Supplier child, as expected.
>>> p.supplier
<Supplier: Joe's Chickens the supplier>
>>> ir.provider.order_by('-name')
[<Supplier: Luigi's Pasta the supplier>, <Supplier: Joe's Chickens the supplier>]
>>> Restaurant.objects.filter(provider__name__contains="Chickens")
[<Restaurant: Ristorante Miron the restaurant>, <Restaurant: Demon Dogs the restaurant>]
>>> ItalianRestaurant.objects.filter(provider__name__contains="Chickens")
[<ItalianRestaurant: Ristorante Miron the italian restaurant>]
>>> park1 = ParkingLot(name='Main St', address='111 Main St', main_site=s1)
>>> park1.save()
>>> park2 = ParkingLot(name='Well Lit', address='124 Sesame St', main_site=ir)
>>> park2.save()
>>> Restaurant.objects.get(lot__name='Well Lit')
<Restaurant: Ristorante Miron the restaurant>
# The update() command can update fields in parent and child classes at once
# (although it executes multiple SQL queries to do so).
>>> Restaurant.objects.filter(serves_hot_dogs=True, name__contains='D').update(name='Demon Puppies', serves_hot_dogs=False)
>>> r1 = Restaurant.objects.get(pk=r.pk)
>>> r1.serves_hot_dogs == False
True
>>> r1.name
u'Demon Puppies'
# The values() command also works on fields from parent models.
>>> d = {'rating': 4, 'name': u'Ristorante Miron'}
>>> list(ItalianRestaurant.objects.values('name', 'rating')) == [d]
True
# select_related works with fields from the parent object as if they were a
# normal part of the model.
>>> from django import db
>>> from django.conf import settings
>>> settings.DEBUG = True
>>> db.reset_queries()
>>> ItalianRestaurant.objects.all()[0].chef
<Chef: Albert the chef>
>>> len(db.connection.queries)
2
>>> ItalianRestaurant.objects.select_related('chef')[0].chef
<Chef: Albert the chef>
>>> len(db.connection.queries)
3
>>> settings.DEBUG = False
"""}

View File

@ -6,7 +6,7 @@ To define a one-to-one relationship, use ``OneToOneField()``.
In this example, a ``Place`` optionally can be a ``Restaurant``.
"""
from django.db import models
from django.db import models, connection
class Place(models.Model):
name = models.CharField(max_length=50)
@ -16,7 +16,7 @@ class Place(models.Model):
return u"%s the place" % self.name
class Restaurant(models.Model):
place = models.OneToOneField(Place, primary_key=True)
serves_hot_dogs = models.BooleanField()
serves_pizza = models.BooleanField()
@ -38,6 +38,14 @@ class RelatedModel(models.Model):
link = models.OneToOneField(ManualPrimaryKey)
name = models.CharField(max_length = 50)
class MultiModel(models.Model):
link1 = models.OneToOneField(Place)
link2 = models.OneToOneField(ManualPrimaryKey)
name = models.CharField(max_length=50)
def __unicode__(self):
return u"Multimodel %s" % self.name
__test__ = {'API_TESTS':"""
# Create a couple of Places.
>>> p1 = Place(name='Demon Dogs', address='944 W. Fullerton')
@ -63,8 +71,8 @@ Traceback (most recent call last):
...
DoesNotExist: Restaurant matching query does not exist.
# Set the place using assignment notation. Because place is the primary key on
# Restaurant, the save will create a new restaurant
>>> r.place = p2
>>> r.save()
>>> p2.restaurant
@ -72,9 +80,9 @@ DoesNotExist: Restaurant matching query does not exist.
>>> r.place
<Place: Ace Hardware the place>
# Set the place back again, using assignment in the reverse direction
# Need to reget restaurant object first, because the reverse set
# can't update the existing restaurant instance
# Set the place back again, using assignment in the reverse direction. Need to
# reload restaurant object first, because the reverse set can't update the
# existing restaurant instance
>>> p1.restaurant = r
>>> r.save()
>>> p1.restaurant
@ -86,8 +94,7 @@ DoesNotExist: Restaurant matching query does not exist.
# Restaurant.objects.all() just returns the Restaurants, not the Places.
# Note that there are two restaurants - Ace Hardware the Restaurant was created
# in the call to r.place = p2. This means there are multiple restaurants referencing
# a single place...
# in the call to r.place = p2.
>>> Restaurant.objects.all()
[<Restaurant: Demon Dogs the restaurant>, <Restaurant: Ace Hardware the restaurant>]
@ -165,4 +172,22 @@ DoesNotExist: Restaurant matching query does not exist.
>>> o1.save()
>>> o2 = RelatedModel(link=o1, name="secondary")
>>> o2.save()
# You can have multiple one-to-one fields on a model, too.
>>> x1 = MultiModel(link1=p1, link2=o1, name="x1")
>>> x1.save()
>>> o1.multimodel
<MultiModel: Multimodel x1>
# This will fail because each one-to-one field must be unique (and link2=o1 was
# used for x1, above).
>>> MultiModel(link1=p2, link2=o1, name="x1").save()
Traceback (most recent call last):
...
IntegrityError: ...
# Because the unittests all use a single connection, we need to force a
# reconnect here to ensure the connection is clean (after the previous
# IntegrityError).
>>> connection.close()
"""}

View File

@ -4,11 +4,9 @@
To perform an OR lookup, or a lookup that combines ANDs and ORs,
combine QuerySet objects using & and | operators.
Alternatively, use positional arguments, and pass one or more expressions
of clauses using the variable ``django.db.models.Q`` (or any object with
a get_sql method).
Alternatively, use positional arguments, and pass one or more expressions of
clauses using the variable ``django.db.models.Q`` (or any object with an
add_to_query method).
"""
from django.db import models
@ -72,6 +70,8 @@ __test__ = {'API_TESTS':"""
# You could also use "in" to accomplish the same as above.
>>> Article.objects.filter(pk__in=[1,2,3])
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
>>> Article.objects.filter(pk__in=(1,2,3))
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
>>> Article.objects.filter(pk__in=[1,2,3,4])
[<Article: Hello>, <Article: Goodbye>, <Article: Hello and goodbye>]
@ -92,6 +92,17 @@ __test__ = {'API_TESTS':"""
>>> Article.objects.filter(Q(headline__contains='bye'), headline__startswith='Hello')
[<Article: Hello and goodbye>]
# Q objects can be negated
>>> Article.objects.filter(Q(pk=1) | ~Q(pk=2))
[<Article: Hello>, <Article: Hello and goodbye>]
>>> Article.objects.filter(~Q(pk=1) & ~Q(pk=2))
[<Article: Hello and goodbye>]
# This allows for more complex queries than filter() and exclude() alone would
# allow
>>> Article.objects.filter(Q(pk=1) & (~Q(pk=2) | Q(pk=3)))
[<Article: Hello>]
# Try some arg queries with operations other than filter.
>>> Article.objects.get(Q(headline__startswith='Hello'), Q(headline__contains='bye'))
<Article: Hello and goodbye>
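Since Q objects support ``&``, ``|`` and ``~``, they can also be combined
programmatically. A small sketch, assuming only the operators exercised above
(the variable names are illustrative):

# Build an OR of several headline conditions dynamically. 'reduce' is the
# Python 2 builtin; Q and Article are the objects used in this test module.
import operator

from django.db.models import Q

terms = ['Hello', 'bye']
combined = reduce(operator.or_, [Q(headline__contains=term) for term in terms])
Article.objects.filter(combined)                          # headline contains any term
Article.objects.filter(~Q(headline__startswith='Hello'))  # negated lookup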

View File

@ -0,0 +1,78 @@
"""
Tests for the order_with_respect_to Meta attribute.
"""
from django.db import models
class Question(models.Model):
text = models.CharField(max_length=200)
class Answer(models.Model):
text = models.CharField(max_length=200)
question = models.ForeignKey(Question)
class Meta:
order_with_respect_to = 'question'
def __unicode__(self):
return unicode(self.text)
__test__ = {'API_TESTS': """
>>> q1 = Question(text="Which Beatle starts with the letter 'R'?")
>>> q1.save()
>>> q2 = Question(text="What is your name?")
>>> q2.save()
>>> Answer(text="John", question=q1).save()
>>> Answer(text="Jonno",question=q2).save()
>>> Answer(text="Paul", question=q1).save()
>>> Answer(text="Paulo", question=q2).save()
>>> Answer(text="George", question=q1).save()
>>> Answer(text="Ringo", question=q1).save()
The answers will always be ordered in the order they were inserted.
>>> q1.answer_set.all()
[<Answer: John>, <Answer: Paul>, <Answer: George>, <Answer: Ringo>]
We can retrieve the answers related to a particular object, in the order
they were created, once we have a particular object.
>>> a1 = Answer.objects.filter(question=q1)[0]
>>> a1
<Answer: John>
>>> a2 = a1.get_next_in_order()
>>> a2
<Answer: Paul>
>>> a4 = list(Answer.objects.filter(question=q1))[-1]
>>> a4
<Answer: Ringo>
>>> a4.get_previous_in_order()
<Answer: George>
Determining (and setting) the ordering for a particular item is also possible.
>>> id_list = [o.pk for o in q1.answer_set.all()]
>>> a2.question.get_answer_order() == id_list
True
>>> a5 = Answer(text="Number five", question=q1)
>>> a5.save()
It doesn't matter which answer we use to check the order; it will always be the same.
>>> a2.question.get_answer_order() == a5.question.get_answer_order()
True
The ordering can be altered:
>>> id_list = [o.pk for o in q1.answer_set.all()]
>>> x = id_list.pop()
>>> id_list.insert(-1, x)
>>> a5.question.get_answer_order == id_list
False
>>> a5.question.set_answer_order(id_list)
>>> q1.answer_set.all()
[<Answer: John>, <Answer: Paul>, <Answer: George>, <Answer: Number five>, <Answer: Ringo>]
"""
}
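In other words, ``order_with_respect_to`` gives the related model (``Question``
here) ``get_answer_order()`` and ``set_answer_order()`` methods, and gives
``Answer`` itself ``get_next_in_order()`` and ``get_previous_in_order()``. A
hedged sketch of a full reordering, using only the methods shown above:

# Illustrative: reverse the ordering of q1's answers.
order = list(q1.get_answer_order())   # Answer pks in their current order
order.reverse()
q1.set_answer_order(order)            # q1.answer_set.all() now comes back reversed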

View File

@ -48,6 +48,13 @@ __test__ = {'API_TESTS':"""
>>> Article.objects.order_by('pub_date', '-headline')
[<Article: Article 1>, <Article: Article 3>, <Article: Article 2>, <Article: Article 4>]
# Only the last order_by has any effect (since they each override any previous
# ordering).
>>> Article.objects.order_by('id')
[<Article: Article 1>, <Article: Article 2>, <Article: Article 3>, <Article: Article 4>]
>>> Article.objects.order_by('id').order_by('-headline')
[<Article: Article 4>, <Article: Article 3>, <Article: Article 2>, <Article: Article 1>]
# Use the 'stop' part of slicing notation to limit the results.
>>> Article.objects.order_by('headline')[:2]
[<Article: Article 1>, <Article: Article 2>]
@ -64,4 +71,10 @@ __test__ = {'API_TESTS':"""
# don't know what order the output will be in.
>>> Article.objects.order_by('?')
[...]
# Ordering can be reversed using the reverse() method on a queryset. This
# allows you to extract things like "the last two items" (reverse and then
# take the first two).
>>> Article.objects.all().reverse()[:2]
[<Article: Article 1>, <Article: Article 3>]
"""}

View File

@ -45,8 +45,6 @@ h
b
>>> print v.where
2005-01-01
>>> Thing.objects.order_by('select.when')
[<Thing: a>, <Thing: h>]
>>> Thing.objects.dates('where', 'year')
[datetime.datetime(2005, 1, 1, 0, 0), datetime.datetime(2006, 1, 1, 0, 0)]

View File

@ -55,5 +55,5 @@ __test__ = {'API_TESTS':"""
>>> Poll.objects.get(choice__name__exact="This is the answer")
Traceback (most recent call last):
...
TypeError: Cannot resolve keyword 'choice' into field. Choices are: poll_choice, related_choice, id, question, creator
FieldError: Cannot resolve keyword 'choice' into field. Choices are: creator, id, poll_choice, question, related_choice
"""}

View File

@ -27,13 +27,13 @@ class Phylum(models.Model):
kingdom = models.ForeignKey(Kingdom)
def __unicode__(self):
return self.name
class Klass(models.Model):
name = models.CharField(max_length=50)
phylum = models.ForeignKey(Phylum)
def __unicode__(self):
return self.name
class Order(models.Model):
name = models.CharField(max_length=50)
klass = models.ForeignKey(Klass)
@ -63,7 +63,7 @@ def create_tree(stringtree):
names = stringtree.split()
models = [Domain, Kingdom, Phylum, Klass, Order, Family, Genus, Species]
assert len(names) == len(models), (names, models)
parent = None
for name, model in zip(names, models):
try:
@ -100,7 +100,7 @@ __test__ = {'API_TESTS':"""
# However, a select_related() call will fill in those related objects without any extra queries:
>>> db.reset_queries()
>>> person = Species.objects.select_related().get(name="sapiens")
>>> person = Species.objects.select_related(depth=10).get(name="sapiens")
>>> person.genus.family.order.klass.phylum.kingdom.domain
<Domain: Eukaryota>
>>> len(db.connection.queries)
@ -129,7 +129,7 @@ __test__ = {'API_TESTS':"""
>>> pea.genus.family.order.klass.phylum.kingdom.domain
<Domain: Eukaryota>
# Notice: one few query than above because of depth=1
# Notice: one fewer query than above because of depth=1
>>> len(db.connection.queries)
7
@ -147,6 +147,43 @@ __test__ = {'API_TESTS':"""
>>> len(db.connection.queries)
5
>>> s = Species.objects.all().select_related(depth=1).extra(select={'a': 'select_related_species.id + 10'})[0]
>>> s.id + 10 == s.a
True
# The optional fields passed to select_related() control which related models
# we pull in. This allows for smaller queries and can act as an alternative
# to (or be used in addition to) the depth parameter.
# In the next two cases, we explicitly say to select the 'genus' and
# 'genus.family' models, leading to the same number of queries as before.
>>> db.reset_queries()
>>> world = Species.objects.select_related('genus__family')
>>> [o.genus.family for o in world]
[<Family: Drosophilidae>, <Family: Hominidae>, <Family: Fabaceae>, <Family: Amanitacae>]
>>> len(db.connection.queries)
1
>>> db.reset_queries()
>>> world = Species.objects.filter(genus__name='Amanita').select_related('genus__family')
>>> [o.genus.family.order for o in world]
[<Order: Agaricales>]
>>> len(db.connection.queries)
2
>>> db.reset_queries()
>>> Species.objects.all().select_related('genus__family__order').order_by('id')[0:1].get().genus.family.order.name
u'Diptera'
>>> len(db.connection.queries)
1
# Specifying both "depth" and fields is an error.
>>> Species.objects.select_related('genus__family__order', depth=4)
Traceback (most recent call last):
...
TypeError: Cannot pass both "depth" and fields to select_related()
# Reset DEBUG to where we found it.
>>> settings.DEBUG = False
"""}

View File

@ -22,7 +22,7 @@ class Author(models.Model):
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
@ -39,21 +39,21 @@ class Article(models.Model):
return self.headline
class AuthorProfile(models.Model):
author = models.OneToOneField(Author)
author = models.OneToOneField(Author, primary_key=True)
date_of_birth = models.DateField()
def __unicode__(self):
return u"Profile of %s" % self.author
class Actor(models.Model):
name = models.CharField(max_length=20, primary_key=True)
class Meta:
ordering = ('name',)
def __unicode__(self):
return self.name
class Movie(models.Model):
actor = models.ForeignKey(Actor)
title = models.CharField(max_length=50)
@ -63,7 +63,7 @@ class Movie(models.Model):
def __unicode__(self):
return self.title
class Score(models.Model):
score = models.FloatField()
@ -100,7 +100,7 @@ __test__ = {'API_TESTS':"""
>>> dom = minidom.parseString(xml)
# Deserializing has a similar interface, except that special DeserializedObject
# instances are returned. This is because data might have changed in the
# instances are returned. This is because data might have changed in the
# database since the data was serialized (we'll simulate that below).
>>> for obj in serializers.deserialize("xml", xml):
... print obj
@ -148,7 +148,7 @@ __test__ = {'API_TESTS':"""
>>> Article.objects.all()
[<Article: Just kidding; I love TV poker>, <Article: Time to reform copyright>]
# If you use your own primary key field (such as a OneToOneField),
# If you use your own primary key field (such as a OneToOneField),
# it doesn't appear in the serialized field list - it replaces the
# pk identifier.
>>> profile = AuthorProfile(author=joe, date_of_birth=datetime(1970,1,1))
@ -186,7 +186,7 @@ __test__ = {'API_TESTS':"""
>>> print serializers.serialize("json", Article.objects.all(), fields=('headline','pub_date'))
[{"pk": 1, "model": "serializers.article", "fields": {"headline": "Just kidding; I love TV poker", "pub_date": "2006-06-16 11:00:00"}}, {"pk": 2, "model": "serializers.article", "fields": {"headline": "Time to reform copyright", "pub_date": "2006-06-16 13:00:11"}}, {"pk": 3, "model": "serializers.article", "fields": {"headline": "Forward references pose no problem", "pub_date": "2006-06-16 15:00:00"}}]
# Every string is serialized as a unicode object, also primary key
# Every string is serialized as a unicode object, including the primary key
# when it is a 'varchar' column.
>>> ac = Actor(name="Zażółć")
>>> mv = Movie(title="Gęślą jaźń", actor=ac)
@ -247,12 +247,13 @@ try:
pk: 2
<BLANKLINE>
>>> obs = list(serializers.deserialize("yaml", serialized))
>>> for i in obs:
>>> obs = list(serializers.deserialize("yaml", serialized))
>>> for i in obs:
... print i
<DeserializedObject: Just kidding; I love TV poker>
<DeserializedObject: Time to reform copyright>
"""
except ImportError: pass
except ImportError:
pass
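The serializer round trip exercised throughout this file follows one shape:
serialize a queryset to a string, then iterate over ``deserialize()`` and save
each wrapped object. A condensed sketch (the format and queryset here are
chosen for illustration):

# Sketch of the serialize/deserialize round trip; DeserializedObject.save()
# writes the wrapped instance back to the database.
from django.core import serializers

data = serializers.serialize("json", Article.objects.all())
for wrapped in serializers.deserialize("json", data):
    wrapped.save()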

View File

@ -66,7 +66,8 @@ post_save_nokwargs signal
post_save signal, Tom Smith
Is updated
>>> p1.save(raw=True)
# Calling an internal method purely so that we can trigger a "raw" save.
>>> p1.save_base(raw=True)
pre_save_nokwargs signal
pre_save signal, Tom Smith
Is raw

View File

@ -25,7 +25,7 @@ from django.conf import settings
building_docs = getattr(settings, 'BUILDING_DOCS', False)
if building_docs or settings.DATABASE_ENGINE != 'mysql':
if building_docs or settings.DATABASE_ENGINE not in ('mysql', 'mysql_old'):
__test__['API_TESTS'] += """
# the default behavior is to autocommit after each save() action
>>> def create_a_reporter_then_fail(first, last):

View File

@ -0,0 +1,67 @@
"""
Tests for the update() queryset method that allows in-place, multi-object
updates.
"""
from django.db import models
class DataPoint(models.Model):
name = models.CharField(max_length=20)
value = models.CharField(max_length=20)
another_value = models.CharField(max_length=20, blank=True)
def __unicode__(self):
return unicode(self.name)
class RelatedPoint(models.Model):
name = models.CharField(max_length=20)
data = models.ForeignKey(DataPoint)
def __unicode__(self):
return unicode(self.name)
__test__ = {'API_TESTS': """
>>> DataPoint(name="d0", value="apple").save()
>>> DataPoint(name="d2", value="banana").save()
>>> d3 = DataPoint(name="d3", value="banana")
>>> d3.save()
>>> RelatedPoint(name="r1", data=d3).save()
Objects are updated by first filtering the candidates into a queryset and then
calling the update() method. It executes immediately and returns nothing.
>>> DataPoint.objects.filter(value="apple").update(name="d1")
>>> DataPoint.objects.filter(value="apple")
[<DataPoint: d1>]
We can update multiple objects at once.
>>> DataPoint.objects.filter(value="banana").update(value="pineapple")
>>> DataPoint.objects.get(name="d2").value
u'pineapple'
Foreign key fields can also be updated, although you can only update the object
referred to, not anything inside the related object.
>>> d = DataPoint.objects.get(name="d1")
>>> RelatedPoint.objects.filter(name="r1").update(data=d)
>>> RelatedPoint.objects.filter(data__name="d1")
[<RelatedPoint: r1>]
Multiple fields can be updated at once.
>>> DataPoint.objects.filter(value="pineapple").update(value="fruit", another_value="peaches")
>>> d = DataPoint.objects.get(name="d2")
>>> d.value, d.another_value
(u'fruit', u'peaches')
In the rare case you want to update every instance of a model, update() is also
a manager method.
>>> DataPoint.objects.update(value='thing')
>>> DataPoint.objects.values('value').distinct()
[{'value': u'thing'}]
"""
}
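One consequence of update() running directly at the SQL level is visible in the
tests above: instances already loaded in memory are not refreshed, which is why
the tests re-fetch objects after each update() call. A minimal illustration of
that pattern:

# Illustrative: update() changes rows in the database, not existing instances.
d = DataPoint.objects.get(name='d1')
DataPoint.objects.filter(pk=d.pk).update(value='changed')
d.value                          # still the stale, in-memory value
d = DataPoint.objects.get(pk=d.pk)
d.value                          # u'changed' after re-fetching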

View File

@ -14,7 +14,7 @@ class Choice(models.Model):
return u"Choice: %s in poll %s" % (self.choice, self.poll)
__test__ = {'API_TESTS':"""
# Regression test for the use of None as a query value. None is interpreted as
# Regression test for the use of None as a query value. None is interpreted as
# an SQL NULL, but only in __exact queries.
# Set up some initial polls and choices
>>> p1 = Poll(question='Why?')
@ -24,15 +24,20 @@ __test__ = {'API_TESTS':"""
>>> c2 = Choice(poll=p1, choice='Why Not?')
>>> c2.save()
# Exact query with value None returns nothing (=NULL in sql)
>>> Choice.objects.filter(id__exact=None)
# Exact query with value None returns nothing ("is NULL" in sql, but every 'id'
# field has a value).
>>> Choice.objects.filter(choice__exact=None)
[]
Excluding the previous result returns everything.
>>> Choice.objects.exclude(choice=None).order_by('id')
[<Choice: Choice: Because. in poll Q: Why? >, <Choice: Choice: Why Not? in poll Q: Why? >]
# Valid query, but fails because foo isn't a keyword
>>> Choice.objects.filter(foo__exact=None)
>>> Choice.objects.filter(foo__exact=None)
Traceback (most recent call last):
...
TypeError: Cannot resolve keyword 'foo' into field. Choices are: id, poll, choice
FieldError: Cannot resolve keyword 'foo' into field. Choices are: choice, id, poll
# Can't use None on anything other than __exact
>>> Choice.objects.filter(id__gt=None)

View File

@ -0,0 +1,658 @@
"""
Various complex queries that have been problematic in the past.
"""
import datetime
from django.db import models
from django.db.models.query import Q
class Tag(models.Model):
name = models.CharField(max_length=10)
parent = models.ForeignKey('self', blank=True, null=True)
def __unicode__(self):
return self.name
class Note(models.Model):
note = models.CharField(max_length=100)
misc = models.CharField(max_length=10)
class Meta:
ordering = ['note']
def __unicode__(self):
return self.note
class ExtraInfo(models.Model):
info = models.CharField(max_length=100)
note = models.ForeignKey(Note)
class Meta:
ordering = ['info']
def __unicode__(self):
return self.info
class Author(models.Model):
name = models.CharField(max_length=10)
num = models.IntegerField(unique=True)
extra = models.ForeignKey(ExtraInfo)
def __unicode__(self):
return self.name
class Item(models.Model):
name = models.CharField(max_length=10)
created = models.DateTimeField()
tags = models.ManyToManyField(Tag, blank=True, null=True)
creator = models.ForeignKey(Author)
note = models.ForeignKey(Note)
class Meta:
ordering = ['-note', 'name']
def __unicode__(self):
return self.name
class Report(models.Model):
name = models.CharField(max_length=10)
creator = models.ForeignKey(Author, to_field='num')
def __unicode__(self):
return self.name
class Ranking(models.Model):
rank = models.IntegerField()
author = models.ForeignKey(Author)
class Meta:
# A complex ordering specification. Should stress the system a bit.
ordering = ('author__extra__note', 'author__name', 'rank')
def __unicode__(self):
return '%d: %s' % (self.rank, self.author.name)
class Cover(models.Model):
title = models.CharField(max_length=50)
item = models.ForeignKey(Item)
class Meta:
ordering = ['item']
def __unicode__(self):
return self.title
class Number(models.Model):
num = models.IntegerField()
def __unicode__(self):
return unicode(self.num)
# Some funky cross-linked models for testing a couple of infinite recursion
# cases.
class X(models.Model):
y = models.ForeignKey('Y')
class Y(models.Model):
x1 = models.ForeignKey(X, related_name='y1')
# Some models with a cycle in the default ordering. This would be bad if we
# didn't catch the infinite loop.
class LoopX(models.Model):
y = models.ForeignKey('LoopY')
class Meta:
ordering = ['y']
class LoopY(models.Model):
x = models.ForeignKey(LoopX)
class Meta:
ordering = ['x']
class LoopZ(models.Model):
z = models.ForeignKey('self')
class Meta:
ordering = ['z']
__test__ = {'API_TESTS':"""
>>> t1 = Tag(name='t1')
>>> t1.save()
>>> t2 = Tag(name='t2', parent=t1)
>>> t2.save()
>>> t3 = Tag(name='t3', parent=t1)
>>> t3.save()
>>> t4 = Tag(name='t4', parent=t3)
>>> t4.save()
>>> t5 = Tag(name='t5', parent=t3)
>>> t5.save()
>>> n1 = Note(note='n1', misc='foo')
>>> n1.save()
>>> n2 = Note(note='n2', misc='bar')
>>> n2.save()
>>> n3 = Note(note='n3', misc='foo')
>>> n3.save()
Create these out of order so that sorting by 'id' will be different to sorting
by 'info'. Helps detect some problems later.
>>> e2 = ExtraInfo(info='e2', note=n2)
>>> e2.save()
>>> e1 = ExtraInfo(info='e1', note=n1)
>>> e1.save()
>>> a1 = Author(name='a1', num=1001, extra=e1)
>>> a1.save()
>>> a2 = Author(name='a2', num=2002, extra=e1)
>>> a2.save()
>>> a3 = Author(name='a3', num=3003, extra=e2)
>>> a3.save()
>>> a4 = Author(name='a4', num=4004, extra=e2)
>>> a4.save()
>>> time1 = datetime.datetime(2007, 12, 19, 22, 25, 0)
>>> time2 = datetime.datetime(2007, 12, 19, 21, 0, 0)
>>> time3 = datetime.datetime(2007, 12, 20, 22, 25, 0)
>>> time4 = datetime.datetime(2007, 12, 20, 21, 0, 0)
>>> i1 = Item(name='one', created=time1, creator=a1, note=n3)
>>> i1.save()
>>> i1.tags = [t1, t2]
>>> i2 = Item(name='two', created=time2, creator=a2, note=n2)
>>> i2.save()
>>> i2.tags = [t1, t3]
>>> i3 = Item(name='three', created=time3, creator=a2, note=n3)
>>> i3.save()
>>> i4 = Item(name='four', created=time4, creator=a4, note=n3)
>>> i4.save()
>>> i4.tags = [t4]
>>> r1 = Report(name='r1', creator=a1)
>>> r1.save()
>>> r2 = Report(name='r2', creator=a3)
>>> r2.save()
Ordering by 'rank' gives us rank2, rank1, rank3. Ordering by the Meta.ordering
will be rank3, rank2, rank1.
>>> rank1 = Ranking(rank=2, author=a2)
>>> rank1.save()
>>> rank2 = Ranking(rank=1, author=a3)
>>> rank2.save()
>>> rank3 = Ranking(rank=3, author=a1)
>>> rank3.save()
>>> c1 = Cover(title="first", item=i4)
>>> c1.save()
>>> c2 = Cover(title="second", item=i2)
>>> c2.save()
>>> n1 = Number(num=4)
>>> n1.save()
>>> n2 = Number(num=8)
>>> n2.save()
>>> n3 = Number(num=12)
>>> n3.save()
Bug #1050
>>> Item.objects.filter(tags__isnull=True)
[<Item: three>]
>>> Item.objects.filter(tags__id__isnull=True)
[<Item: three>]
Bug #1801
>>> Author.objects.filter(item=i2)
[<Author: a2>]
>>> Author.objects.filter(item=i3)
[<Author: a2>]
>>> Author.objects.filter(item=i2) & Author.objects.filter(item=i3)
[<Author: a2>]
Bug #2306
Checking that no join types are "left outer" joins.
>>> query = Item.objects.filter(tags=t2).query
>>> query.LOUTER not in [x[2] for x in query.alias_map.values()]
True
>>> Item.objects.filter(Q(tags=t1)).order_by('name')
[<Item: one>, <Item: two>]
>>> Item.objects.filter(Q(tags=t1)).filter(Q(tags=t2))
[<Item: one>]
>>> Item.objects.filter(Q(tags=t1)).filter(Q(creator__name='fred')|Q(tags=t2))
[<Item: one>]
Each filter call is processed "at once" against a single table, so this is
different from the previous example as it tries to find tags that are two
things at once (rather than two tags).
>>> Item.objects.filter(Q(tags=t1) & Q(tags=t2))
[]
>>> Item.objects.filter(Q(tags=t1), Q(creator__name='fred')|Q(tags=t2))
[]
>>> qs = Author.objects.filter(ranking__rank=2, ranking__id=rank1.id)
>>> list(qs)
[<Author: a2>]
>>> qs.query.count_active_tables()
2
>>> qs = Author.objects.filter(ranking__rank=2).filter(ranking__id=rank1.id)
>>> qs.query.count_active_tables()
3
Bug #4464
>>> Item.objects.filter(tags=t1).filter(tags=t2)
[<Item: one>]
>>> Item.objects.filter(tags__in=[t1, t2]).distinct().order_by('name')
[<Item: one>, <Item: two>]
>>> Item.objects.filter(tags__in=[t1, t2]).filter(tags=t3)
[<Item: two>]
Bug #2080, #3592
>>> Author.objects.filter(item__name='one') | Author.objects.filter(name='a3')
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(item__name='one') | Q(name='a3'))
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(name='a3') | Q(item__name='one'))
[<Author: a1>, <Author: a3>]
>>> Author.objects.filter(Q(item__name='three') | Q(report__name='r3'))
[<Author: a2>]
Bug #4289
A slight variation on the above theme: restricting the choices by the lookup
constraints.
>>> Number.objects.filter(num__lt=4)
[]
>>> Number.objects.filter(num__gt=8, num__lt=12)
[]
>>> Number.objects.filter(num__gt=8, num__lt=13)
[<Number: 12>]
>>> Number.objects.filter(Q(num__lt=4) | Q(num__gt=8, num__lt=12))
[]
>>> Number.objects.filter(Q(num__gt=8, num__lt=12) | Q(num__lt=4))
[]
>>> Number.objects.filter(Q(num__gt=8) & Q(num__lt=12) | Q(num__lt=4))
[]
>>> Number.objects.filter(Q(num__gt=7) & Q(num__lt=12) | Q(num__lt=4))
[<Number: 8>]
Bug #6074
Merging two empty result sets shouldn't leave a queryset with no constraints
(which would match everything).
>>> Author.objects.filter(Q(id__in=[]))
[]
>>> Author.objects.filter(Q(id__in=[])|Q(id__in=[]))
[]
Bug #1878, #2939
>>> Item.objects.values('creator').distinct().count()
3
# Create something with a duplicate 'name' so that we can test multi-column
# cases (which require some tricky SQL transformations under the covers).
>>> xx = Item(name='four', created=time1, creator=a2, note=n1)
>>> xx.save()
>>> Item.objects.exclude(name='two').values('creator', 'name').distinct().count()
4
>>> Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name', 'foo').distinct().count()
4
>>> Item.objects.exclude(name='two').extra(select={'foo': '%s'}, select_params=(1,)).values('creator', 'name').distinct().count()
4
>>> xx.delete()
Bug #2253
>>> q1 = Item.objects.order_by('name')
>>> q2 = Item.objects.filter(id=i1.id)
>>> q1
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> q2
[<Item: one>]
>>> (q1 | q2).order_by('name')
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> (q1 & q2).order_by('name')
[<Item: one>]
# FIXME: This is difficult to fix and very much an edge case, so punt for now.
# # This is related to the order_by() tests, below, but the old bug exhibited
# # itself here (q2 was pulling too many tables into the combined query with the
# # new ordering, but only because we have evaluated q2 already).
# >>> len((q1 & q2).order_by('name').query.tables)
# 1
>>> q1 = Item.objects.filter(tags=t1)
>>> q2 = Item.objects.filter(note=n3, tags=t2)
>>> q3 = Item.objects.filter(creator=a4)
>>> ((q1 & q2) | q3).order_by('name')
[<Item: four>, <Item: one>]
Bugs #4088, #4306
>>> Report.objects.filter(creator=1001)
[<Report: r1>]
>>> Report.objects.filter(creator__num=1001)
[<Report: r1>]
>>> Report.objects.filter(creator__id=1001)
[]
>>> Report.objects.filter(creator__id=a1.id)
[<Report: r1>]
>>> Report.objects.filter(creator__name='a1')
[<Report: r1>]
Bug #4510
>>> Author.objects.filter(report__name='r1')
[<Author: a1>]
Bug #5324, #6704
>>> Item.objects.filter(tags__name='t4')
[<Item: four>]
>>> Item.objects.exclude(tags__name='t4').order_by('name').distinct()
[<Item: one>, <Item: three>, <Item: two>]
>>> Item.objects.exclude(tags__name='t4').order_by('name').distinct().reverse()
[<Item: two>, <Item: three>, <Item: one>]
>>> Author.objects.exclude(item__name='one').distinct().order_by('name')
[<Author: a2>, <Author: a3>, <Author: a4>]
# Excluding across a m2m relation when there is more than one related object
# associated was problematic.
>>> Item.objects.exclude(tags__name='t1').order_by('name')
[<Item: four>, <Item: three>]
>>> Item.objects.exclude(tags__name='t1').exclude(tags__name='t4')
[<Item: three>]
# Excluding from a relation that cannot be NULL should not use outer joins.
>>> query = Item.objects.exclude(creator__in=[a1, a2]).query
>>> query.LOUTER not in [x[2] for x in query.alias_map.values()]
True
Similarly, when one of the joins cannot possibly, ever, involve NULL values (the
Author -> ExtraInfo join, in the following), it should never be promoted to a
left outer join. So the following query should only involve one "left outer"
join (Author -> Item is 0-to-many).
>>> qs = Author.objects.filter(id=a1.id).filter(Q(extra__note=n1)|Q(item__note=n3))
>>> len([x[2] for x in qs.query.alias_map.values() if x[2] == query.LOUTER])
1
The previous changes shouldn't affect nullable foreign key joins.
>>> Tag.objects.filter(parent__isnull=True).order_by('name')
[<Tag: t1>]
>>> Tag.objects.exclude(parent__isnull=True).order_by('name')
[<Tag: t2>, <Tag: t3>, <Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__name='t1') | Q(parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__isnull=True) | Q(parent__name='t1')).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.exclude(Q(parent__parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
>>> Tag.objects.filter(~Q(parent__parent__isnull=True)).order_by('name')
[<Tag: t4>, <Tag: t5>]
Bug #2091
>>> t = Tag.objects.get(name='t4')
>>> Item.objects.filter(tags__in=[t])
[<Item: four>]
Combining querysets built on different models should behave in a well-defined
fashion. We raise an error.
>>> Author.objects.all() & Tag.objects.all()
Traceback (most recent call last):
...
AssertionError: Cannot combine queries on two different base models.
>>> Author.objects.all() | Tag.objects.all()
Traceback (most recent call last):
...
AssertionError: Cannot combine queries on two different base models.
Bug #3141
>>> Author.objects.extra(select={'foo': '1'}).count()
4
>>> Author.objects.extra(select={'foo': '%s'}, select_params=(1,)).count()
4
Bug #2400
>>> Author.objects.filter(item__isnull=True)
[<Author: a3>]
>>> Tag.objects.filter(item__isnull=True)
[<Tag: t5>]
Bug #2496
>>> Item.objects.extra(tables=['queries_author']).select_related().order_by('name')[:1]
[<Item: four>]
Bug #2076
# Ordering on related tables should be possible, even if the table is not
# otherwise involved.
>>> Item.objects.order_by('note__note', 'name')
[<Item: two>, <Item: four>, <Item: one>, <Item: three>]
# Ordering on a related field should use the remote model's default ordering as
# a final step.
>>> Author.objects.order_by('extra', '-name')
[<Author: a2>, <Author: a1>, <Author: a4>, <Author: a3>]
# Using remote model default ordering can span multiple models (in this case,
# Cover is ordered by Item's default, which uses Note's default).
>>> Cover.objects.all()
[<Cover: first>, <Cover: second>]
# If you're not careful, it's possible to introduce infinite loops via default
# ordering on foreign keys in a cycle. We detect that.
>>> LoopX.objects.all()
Traceback (most recent call last):
...
FieldError: Infinite loop caused by ordering.
>>> LoopZ.objects.all()
Traceback (most recent call last):
...
FieldError: Infinite loop caused by ordering.
# ... but you can still order in a non-recursive fashion amongst linked fields
# (the previous test failed because the default ordering was recursive).
>>> LoopX.objects.all().order_by('y__x__y__x__id')
[]
# If the remote model does not have a default ordering, we order by its 'id'
# field.
>>> Item.objects.order_by('creator', 'name')
[<Item: one>, <Item: three>, <Item: two>, <Item: four>]
# Cross model ordering is possible in Meta, too.
>>> Ranking.objects.all()
[<Ranking: 3: a1>, <Ranking: 2: a2>, <Ranking: 1: a3>]
>>> Ranking.objects.all().order_by('rank')
[<Ranking: 1: a3>, <Ranking: 2: a2>, <Ranking: 3: a1>]
# Ordering by a many-valued attribute (e.g. a many-to-many or reverse
# ForeignKey) is legal, but the results might not make sense. That isn't
# Django's problem. Garbage in, garbage out.
>>> Item.objects.all().order_by('tags', 'id')
[<Item: one>, <Item: two>, <Item: one>, <Item: two>, <Item: four>]
# If we replace the default ordering, Django adjusts the required tables
# automatically. Item normally requires a join with Note to do the default
# ordering, but that isn't needed here.
>>> qs = Item.objects.order_by('name')
>>> qs
[<Item: four>, <Item: one>, <Item: three>, <Item: two>]
>>> len(qs.query.tables)
1
# Ordering of extra() pieces is possible, too, and you can mix extra fields and
# model fields in the ordering.
>>> Ranking.objects.extra(tables=['django_site'], order_by=['-django_site.id', 'rank'])
[<Ranking: 1: a3>, <Ranking: 2: a2>, <Ranking: 3: a1>]
>>> qs = Ranking.objects.extra(select={'good': 'case when rank > 2 then 1 else 0 end'})
>>> [o.good for o in qs.extra(order_by=('-good',))] == [True, False, False]
True
>>> qs.extra(order_by=('-good', 'id'))
[<Ranking: 3: a1>, <Ranking: 2: a2>, <Ranking: 1: a3>]
# Despite having some extra aliases in the query, we can still omit them in a
# values() query.
>>> qs.values('id', 'rank').order_by('id')
[{'id': 1, 'rank': 2}, {'id': 2, 'rank': 1}, {'id': 3, 'rank': 3}]
Bugs #2874, #3002
>>> qs = Item.objects.select_related().order_by('note__note', 'name')
>>> list(qs)
[<Item: two>, <Item: four>, <Item: one>, <Item: three>]
# This is also a good select_related() test because there are multiple Note
# entries in the SQL. The two Note items should be different.
>>> qs[0].note, qs[0].creator.extra.note
(<Note: n2>, <Note: n1>)
Bug #3037
>>> Item.objects.filter(Q(creator__name='a3', name='two')|Q(creator__name='a4', name='four'))
[<Item: four>]
Bug #5321, #7070
Ordering columns must be included in the output columns. Note that this means
results that might otherwise be distinct are not (if there are multiple values
in the ordering cols), as in this example. This isn't a bug; it's a warning to
be careful with the selection of ordering columns.
>>> Note.objects.values('misc').distinct().order_by('note', '-misc')
[{'misc': u'foo'}, {'misc': u'bar'}, {'misc': u'foo'}]
Bug #4358
If you don't pass any fields to values(), relation fields are returned as
"foo_id" keys, not "foo". For consistency, you should be able to pass "foo_id"
in the fields list and have it work, too. We actually allow both "foo" and
"foo_id".
# The *_id version is returned by default.
>>> 'note_id' in ExtraInfo.objects.values()[0]
True
# You can also pass it in explicitly.
>>> ExtraInfo.objects.values('note_id')
[{'note_id': 1}, {'note_id': 2}]
# ...or use the field name.
>>> ExtraInfo.objects.values('note')
[{'note': 1}, {'note': 2}]
Bug #5261
>>> Note.objects.exclude(Q())
[<Note: n1>, <Note: n2>, <Note: n3>]
Bug #3045, #3288
Once upon a time, select_related() with circular relations would loop
infinitely if you forgot to specify "depth". Now we set an arbitrary default
upper bound.
>>> X.objects.all()
[]
>>> X.objects.select_related()
[]
Bug #3739
The all() method on querysets returns a copy of the queryset.
>>> q1 = Item.objects.order_by('name')
>>> id(q1) == id(q1.all())
False
Bug #2902
Parameters can be given to extra_select, *if* you use a SortedDict.
(First we need to know which order the keys fall in "naturally" on your system,
so that we can deliberately put them in the opposite order; a plain dict would
then fail.)
>>> from django.utils.datastructures import SortedDict
>>> s = [('a', '%s'), ('b', '%s')]
>>> params = ['one', 'two']
>>> if {'a': 1, 'b': 2}.keys() == ['a', 'b']:
... s.reverse()
... params.reverse()
# This slightly odd comparison works around the fact that PostgreSQL will
# return 'one' and 'two' as strings, not Unicode objects. It's a side-effect of
# using constants here and not a real concern.
>>> d = Item.objects.extra(select=SortedDict(s), select_params=params).values('a', 'b')[0]
>>> d == {'a': u'one', 'b': u'two'}
True
# Order by the number of tags attached to an item.
>>> l = Item.objects.extra(select={'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'}).order_by('-count')
>>> [o.count for o in l]
[2, 2, 1, 0]
Bug #6154
Multiple filter statements are joined using "AND" all the time.
>>> Author.objects.filter(id=a1.id).filter(Q(extra__note=n1)|Q(item__note=n3))
[<Author: a1>]
>>> Author.objects.filter(Q(extra__note=n1)|Q(item__note=n3)).filter(id=a1.id)
[<Author: a1>]
Bug #6981
>>> Tag.objects.select_related('parent').order_by('name')
[<Tag: t1>, <Tag: t2>, <Tag: t3>, <Tag: t4>, <Tag: t5>]
Bug #6180, #6203 -- dates with limits and/or counts
>>> Item.objects.count()
4
>>> Item.objects.dates('created', 'month').count()
1
>>> Item.objects.dates('created', 'day').count()
2
>>> len(Item.objects.dates('created', 'day'))
2
>>> Item.objects.dates('created', 'day')[0]
datetime.datetime(2007, 12, 19, 0, 0)
Bug #7087 -- dates with extra select columns
>>> Item.objects.dates('created', 'day').extra(select={'a': 1})
[datetime.datetime(2007, 12, 19, 0, 0), datetime.datetime(2007, 12, 20, 0, 0)]
Test that parallel iterators work.
>>> qs = Tag.objects.all()
>>> i1, i2 = iter(qs), iter(qs)
>>> i1.next(), i1.next()
(<Tag: t1>, <Tag: t2>)
>>> i2.next(), i2.next(), i2.next()
(<Tag: t1>, <Tag: t2>, <Tag: t3>)
>>> i1.next()
<Tag: t3>
>>> qs = X.objects.all()
>>> bool(qs)
False
>>> bool(qs)
False
We can do slicing beyond what is currently in the result cache, too.
## FIXME!! This next test causes really weird PostgreSQL behaviour, but it's
## only apparent much later when the full test suite runs. I don't understand
## what's going on here yet.
##
## # We need to mess with the implementation internals a bit here to decrease the
## # cache fill size so that we don't read all the results at once.
## >>> from django.db.models import query
## >>> query.ITER_CHUNK_SIZE = 2
## >>> qs = Tag.objects.all()
##
## # Fill the cache with the first chunk.
## >>> bool(qs)
## True
## >>> len(qs._result_cache)
## 2
##
## # Query beyond the end of the cache and check that it is filled out as required.
## >>> qs[4]
## <Tag: t5>
## >>> len(qs._result_cache)
## 5
##
## # But querying beyond the end of the result set will fail.
## >>> qs[100]
## Traceback (most recent call last):
## ...
## IndexError: ...
Bug #7045 -- extra tables used to crash SQL construction on the second use.
>>> qs = Ranking.objects.extra(tables=['django_site'])
>>> s = qs.query.as_sql()
>>> s = qs.query.as_sql() # test passes if this doesn't raise an exception.
"""}

View File

@ -77,7 +77,7 @@ class USStateData(models.Model):
class XMLData(models.Model):
data = models.XMLField(null=True)
class Tag(models.Model):
"""A tag on an item."""
data = models.SlugField()
@ -93,40 +93,39 @@ class GenericData(models.Model):
data = models.CharField(max_length=30)
tags = generic.GenericRelation(Tag)
# The following test classes are all for validation
# of related objects; in particular, forward, backward,
# and self references.
class Anchor(models.Model):
"""This is a model that can be used as
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(max_length=30)
class UniqueAnchor(models.Model):
"""This is a model that can be used as
"""This is a model that can be used as
something for other models to point at"""
data = models.CharField(unique=True, max_length=30)
class FKData(models.Model):
data = models.ForeignKey(Anchor, null=True)
class M2MData(models.Model):
data = models.ManyToManyField(Anchor, null=True)
class O2OData(models.Model):
# One to one field can't be null, since it is a PK.
data = models.OneToOneField(Anchor)
# One to one field can't be null here, since it is a PK.
data = models.OneToOneField(Anchor, primary_key=True)
class FKSelfData(models.Model):
data = models.ForeignKey('self', null=True)
class M2MSelfData(models.Model):
data = models.ManyToManyField('self', null=True, symmetrical=False)
class FKDataToField(models.Model):
data = models.ForeignKey(UniqueAnchor, null=True, to_field='data')
@ -142,7 +141,7 @@ class FKDataToO2O(models.Model):
class BooleanPKData(models.Model):
data = models.BooleanField(primary_key=True)
class CharPKData(models.Model):
data = models.CharField(max_length=30, primary_key=True)

View File

@ -31,13 +31,13 @@ except ImportError:
def data_create(pk, klass, data):
instance = klass(id=pk)
instance.data = data
models.Model.save(instance, raw=True)
models.Model.save_base(instance, raw=True)
return instance
def generic_create(pk, klass, data):
instance = klass(id=pk)
instance.data = data[0]
models.Model.save(instance, raw=True)
models.Model.save_base(instance, raw=True)
for tag in data[1:]:
instance.tags.create(data=tag)
return instance
@ -45,25 +45,25 @@ def generic_create(pk, klass, data):
def fk_create(pk, klass, data):
instance = klass(id=pk)
setattr(instance, 'data_id', data)
models.Model.save(instance, raw=True)
models.Model.save_base(instance, raw=True)
return instance
def m2m_create(pk, klass, data):
instance = klass(id=pk)
models.Model.save(instance, raw=True)
models.Model.save_base(instance, raw=True)
instance.data = data
return instance
def o2o_create(pk, klass, data):
instance = klass()
instance.data_id = data
models.Model.save(instance, raw=True)
models.Model.save_base(instance, raw=True)
return instance
def pk_create(pk, klass, data):
instance = klass()
instance.data = data
models.Model.save(instance, raw=True)
models.Model.save_base(instance, raw=True)
return instance
# A set of functions that can be used to compare
@ -309,7 +309,7 @@ def fieldsTest(format, self):
management.call_command('flush', verbosity=0, interactive=False)
obj = ComplexModel(field1='first',field2='second',field3='third')
obj.save(raw=True)
obj.save_base(raw=True)
# Serialize then deserialize the test database
serialized_data = serializers.serialize(format, [obj], indent=2, fields=('field1','field3'))
@ -325,7 +325,7 @@ def streamTest(format, self):
management.call_command('flush', verbosity=0, interactive=False)
obj = ComplexModel(field1='first',field2='second',field3='third')
obj.save(raw=True)
obj.save_base(raw=True)
# Serialize the test database to a stream
stream = StringIO()