django1/django/db/models/sql/compiler.py

import collections
import re
from functools import partial
from itertools import chain
from django.core.exceptions import EmptyResultSet, FieldError
from django.db import DatabaseError, NotSupportedError
from django.db.models.constants import LOOKUP_SEP
from django.db.models.expressions import F, OrderBy, Random, RawSQL, Ref, Value
from django.db.models.functions import Cast
from django.db.models.query_utils import Q, select_related_descend
from django.db.models.sql.constants import (
CURSOR, GET_ITERATOR_CHUNK_SIZE, MULTI, NO_RESULTS, ORDER_DIR, SINGLE,
)
from django.db.models.sql.query import Query, get_order_dir
from django.db.transaction import TransactionManagementError
from django.utils.functional import cached_property
from django.utils.hashable import make_hashable
from django.utils.regex_helper import _lazy_re_compile
class SQLCompiler:
# Multiline ordering SQL clause may appear from RawSQL.
ordering_parts = _lazy_re_compile(
r'^(.*)\s(?:ASC|DESC).*',
re.MULTILINE | re.DOTALL,
)
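    # For example (illustrative), given the compiled ordering term
    # '"name" DESC NULLS LAST', ordering_parts.search(sql)[1] yields
    # '"name"', i.e. the expression with its direction stripped; this is
    # what get_order_by() and get_extra_select() use to de-duplicate terms.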
def __init__(self, query, connection, using):
self.query = query
self.connection = connection
self.using = using
self.quote_cache = {'*': '*'}
        # The select, klass_info, and annotations are needed by
        # QuerySet.iterator(); these are set as a side-effect of executing
        # the query. Note that we calculate separately a list of extra select
        # columns needed for grammatical correctness of the query, but these
        # columns are not included in self.select.
self.select = None
self.annotation_col_map = None
self.klass_info = None
self._meta_ordering = None
def setup_query(self):
if all(self.query.alias_refcount[a] == 0 for a in self.query.alias_map):
self.query.get_initial_alias()
self.select, self.klass_info, self.annotation_col_map = self.get_select()
self.col_count = len(self.select)
def pre_sql_setup(self):
"""
Do any necessary class setup immediately prior to producing SQL. This
is for things that can't necessarily be done in __init__ because we
might not have all the pieces in place at that time.
"""
self.setup_query()
order_by = self.get_order_by()
self.where, self.having = self.query.where.split_having()
extra_select = self.get_extra_select(order_by, self.select)
self.has_extra_select = bool(extra_select)
group_by = self.get_group_by(self.select + extra_select, order_by)
return extra_select, order_by, group_by
def get_group_by(self, select, order_by):
"""
Return a list of 2-tuples of form (sql, params).
The logic of what exactly the GROUP BY clause contains is hard
to describe in other words than "if it passes the test suite,
then it is correct".
"""
# Some examples:
# SomeModel.objects.annotate(Count('somecol'))
# GROUP BY: all fields of the model
#
# SomeModel.objects.values('name').annotate(Count('somecol'))
# GROUP BY: name
#
# SomeModel.objects.annotate(Count('somecol')).values('name')
# GROUP BY: all cols of the model
#
# SomeModel.objects.values('name', 'pk').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# SomeModel.objects.values('name').annotate(Count('somecol')).values('pk')
# GROUP BY: name, pk
#
# In fact, the self.query.group_by is the minimal set to GROUP BY. It
        # can never be restricted to a smaller set, but additional columns in
# HAVING, ORDER BY, and SELECT clauses are added to it. Unfortunately
# the end result is that it is impossible to force the query to have
# a chosen GROUP BY clause - you can almost do this by using the form:
# .values(*wanted_cols).annotate(AnAggregate())
# but any later annotations, extra selects, values calls that
        # refer to some column outside of the wanted_cols, order_by, or even
# filter calls can alter the GROUP BY clause.
# The query.group_by is either None (no GROUP BY at all), True
# (group by select fields), or a list of expressions to be added
# to the group by.
if self.query.group_by is None:
return []
expressions = []
if self.query.group_by is not True:
# If the group by is set to a list (by .values() call most likely),
# then we need to add everything in it to the GROUP BY clause.
# Backwards compatibility hack for setting query.group_by. Remove
# when we have public API way of forcing the GROUP BY clause.
# Converts string references to expressions.
for expr in self.query.group_by:
if not hasattr(expr, 'as_sql'):
expressions.append(self.query.resolve_ref(expr))
else:
expressions.append(expr)
# Note that even if the group_by is set, it is only the minimal
        # set to group by. So, we need to add cols in select, order_by, and
        # having into the GROUP BY clause in any case.
ref_sources = {
expr.source for expr in expressions if isinstance(expr, Ref)
}
for expr, _, _ in select:
# Skip members of the select clause that are already included
# by reference.
if expr in ref_sources:
continue
cols = expr.get_group_by_cols()
for col in cols:
expressions.append(col)
for expr, (sql, params, is_ref) in order_by:
            # Skip references to the select clause, as all expressions in
            # the select clause are already part of the group by.
if not is_ref:
expressions.extend(expr.get_group_by_cols())
having_group_by = self.having.get_group_by_cols() if self.having else ()
for expr in having_group_by:
expressions.append(expr)
result = []
seen = set()
expressions = self.collapse_group_by(expressions, having_group_by)
for expr in expressions:
sql, params = self.compile(expr)
sql, params = expr.select_format(self, sql, params)
params_hash = make_hashable(params)
if (sql, params_hash) not in seen:
result.append((sql, params))
seen.add((sql, params_hash))
return result
def collapse_group_by(self, expressions, having):
# If the DB can group by primary key, then group by the primary key of
# query's main model. Note that for PostgreSQL the GROUP BY clause must
# include the primary key of every table, but for MySQL it is enough to
# have the main table's primary key.
if self.connection.features.allows_group_by_pk:
# Determine if the main model's primary key is in the query.
pk = None
for expr in expressions:
                # Is this a reference to query's base table primary key? If
                # the expression isn't Col-like, skip it.
if (getattr(expr, 'target', None) == self.query.model._meta.pk and
getattr(expr, 'alias', None) == self.query.base_table):
pk = expr
break
# If the main model's primary key is in the query, group by that
# field, HAVING expressions, and expressions associated with tables
# that don't have a primary key included in the grouped columns.
if pk:
pk_aliases = {
expr.alias for expr in expressions
if hasattr(expr, 'target') and expr.target.primary_key
}
expressions = [pk] + [
expr for expr in expressions
if expr in having or (
getattr(expr, 'alias', None) is not None and expr.alias not in pk_aliases
)
]
elif self.connection.features.allows_group_by_selected_pks:
# Filter out all expressions associated with a table's primary key
# present in the grouped columns. This is done by identifying all
# tables that have their primary key included in the grouped
# columns and removing non-primary key columns referring to them.
# Unmanaged models are excluded because they could be representing
# database views on which the optimization might not be allowed.
pks = {
expr for expr in expressions
if (
hasattr(expr, 'target') and
expr.target.primary_key and
self.connection.features.allows_group_by_selected_pks_on_model(expr.target.model)
)
}
aliases = {expr.alias for expr in pks}
expressions = [
expr for expr in expressions if expr in pks or getattr(expr, 'alias', None) not in aliases
]
return expressions
def get_select(self):
"""
Return three values:
- a list of 3-tuples of (expression, (sql, params), alias)
- a klass_info structure,
- a dictionary of annotations
The (sql, params) is what the expression will produce, and alias is the
"AS alias" for the column (possibly None).
The klass_info structure contains the following information:
- The base model of the query.
- Which columns for that model are present in the query (by
position of the select clause).
          - related_klass_infos: [f, klass_info] to descend into
The annotations is a dictionary of {'attname': column position} values.
"""
select = []
klass_info = None
annotations = {}
select_idx = 0
for alias, (sql, params) in self.query.extra_select.items():
annotations[alias] = select_idx
select.append((RawSQL(sql, params), alias))
select_idx += 1
assert not (self.query.select and self.query.default_cols)
if self.query.default_cols:
cols = self.get_default_columns()
else:
# self.query.select is a special case. These columns never go to
# any model.
cols = self.query.select
if cols:
select_list = []
for col in cols:
select_list.append(select_idx)
select.append((col, None))
select_idx += 1
klass_info = {
'model': self.query.model,
'select_fields': select_list,
}
for alias, annotation in self.query.annotation_select.items():
annotations[alias] = select_idx
select.append((annotation, alias))
select_idx += 1
if self.query.select_related:
related_klass_infos = self.get_related_selections(select)
klass_info['related_klass_infos'] = related_klass_infos
def get_select_from_parent(klass_info):
for ki in klass_info['related_klass_infos']:
if ki['from_parent']:
ki['select_fields'] = (klass_info['select_fields'] +
ki['select_fields'])
get_select_from_parent(ki)
get_select_from_parent(klass_info)
ret = []
for col, alias in select:
try:
sql, params = self.compile(col)
except EmptyResultSet:
# Select a predicate that's always False.
sql, params = '0', ()
else:
sql, params = col.select_format(self, sql, params)
ret.append((col, (sql, params), alias))
return ret, klass_info, annotations
def get_order_by(self):
"""
Return a list of 2-tuples of form (expr, (sql, params, is_ref)) for the
ORDER BY clause.
The order_by clause can alter the select clause (for example it
can add aliases to clauses that do not yet have one, or it can
add totally new select clauses).
"""
if self.query.extra_order_by:
ordering = self.query.extra_order_by
elif not self.query.default_ordering:
ordering = self.query.order_by
elif self.query.order_by:
ordering = self.query.order_by
elif self.query.get_meta().ordering:
ordering = self.query.get_meta().ordering
self._meta_ordering = ordering
else:
ordering = []
if self.query.standard_ordering:
asc, desc = ORDER_DIR['ASC']
else:
asc, desc = ORDER_DIR['DESC']
order_by = []
for field in ordering:
if hasattr(field, 'resolve_expression'):
if isinstance(field, Value):
# output_field must be resolved for constants.
field = Cast(field, field.output_field)
if not isinstance(field, OrderBy):
field = field.asc()
if not self.query.standard_ordering:
field = field.copy()
field.reverse_ordering()
order_by.append((field, False))
continue
if field == '?': # random
order_by.append((OrderBy(Random()), False))
continue
col, order = get_order_dir(field, asc)
descending = order == 'DESC'
if col in self.query.annotation_select:
# Reference to expression in SELECT clause
order_by.append((
OrderBy(Ref(col, self.query.annotation_select[col]), descending=descending),
True))
continue
if col in self.query.annotations:
# References to an expression which is masked out of the SELECT
# clause.
expr = self.query.annotations[col]
if isinstance(expr, Value):
# output_field must be resolved for constants.
expr = Cast(expr, expr.output_field)
order_by.append((OrderBy(expr, descending=descending), False))
continue
if '.' in field:
# This came in through an extra(order_by=...) addition. Pass it
# on verbatim.
table, col = col.split('.', 1)
order_by.append((
OrderBy(
RawSQL('%s.%s' % (self.quote_name_unless_alias(table), col), []),
descending=descending
), False))
continue
if not self.query.extra or col not in self.query.extra:
# 'col' is of the form 'field' or 'field1__field2' or
# '-field1__field2__field', etc.
order_by.extend(self.find_ordering_name(
field, self.query.get_meta(), default_order=asc))
else:
if col not in self.query.extra_select:
order_by.append((
OrderBy(RawSQL(*self.query.extra[col]), descending=descending),
False))
else:
order_by.append((
OrderBy(Ref(col, RawSQL(*self.query.extra[col])), descending=descending),
True))
result = []
seen = set()
for expr, is_ref in order_by:
resolved = expr.resolve_expression(self.query, allow_joins=True, reuse=None)
if self.query.combinator:
src = resolved.get_source_expressions()[0]
expr_src = expr.get_source_expressions()[0]
# Relabel order by columns to raw numbers if this is a combined
# query; necessary since the columns can't be referenced by the
# fully qualified name and the simple column names may collide.
for idx, (sel_expr, _, col_alias) in enumerate(self.select):
if is_ref and col_alias == src.refs:
src = src.source
elif col_alias and not (
isinstance(expr_src, F) and col_alias == expr_src.name
):
continue
if src == sel_expr:
resolved.set_source_expressions([RawSQL('%d' % (idx + 1), ())])
break
else:
if col_alias:
raise DatabaseError('ORDER BY term does not match any column in the result set.')
# Add column used in ORDER BY clause without an alias to
# the selected columns.
self.query.add_select_col(src)
resolved.set_source_expressions([RawSQL('%d' % len(self.query.select), ())])
sql, params = self.compile(resolved)
# Don't add the same column twice, but the order direction is
# not taken into account so we strip it. When this entire method
# is refactored into expressions, then we can check each part as we
# generate it.
without_ordering = self.ordering_parts.search(sql)[1]
params_hash = make_hashable(params)
if (without_ordering, params_hash) in seen:
continue
seen.add((without_ordering, params_hash))
result.append((resolved, (sql, params, is_ref)))
return result
def get_extra_select(self, order_by, select):
extra_select = []
if self.query.distinct and not self.query.distinct_fields:
select_sql = [t[1] for t in select]
for expr, (sql, params, is_ref) in order_by:
without_ordering = self.ordering_parts.search(sql)[1]
if not is_ref and (without_ordering, params) not in select_sql:
extra_select.append((expr, (without_ordering, params), None))
return extra_select
def quote_name_unless_alias(self, name):
"""
A wrapper around connection.ops.quote_name that doesn't quote aliases
for table names. This avoids problems with some SQL dialects that treat
quoted strings specially (e.g. PostgreSQL).
"""
if name in self.quote_cache:
return self.quote_cache[name]
if ((name in self.query.alias_map and name not in self.query.table_map) or
name in self.query.extra_select or (
self.query.external_aliases.get(name) and name not in self.query.table_map)):
self.quote_cache[name] = name
return name
r = self.connection.ops.quote_name(name)
self.quote_cache[name] = r
return r
def compile(self, node):
vendor_impl = getattr(node, 'as_' + self.connection.vendor, None)
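        # Prefer a backend-specific 'as_<vendor>()' method (e.g.
        # as_postgresql(), as_mysql(), as_oracle()) when the node defines
        # one; otherwise fall back to the generic as_sql().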
if vendor_impl:
sql, params = vendor_impl(self, self.connection)
else:
sql, params = node.as_sql(self, self.connection)
return sql, params
def get_combinator_sql(self, combinator, all):
features = self.connection.features
compilers = [
query.get_compiler(self.using, self.connection)
for query in self.query.combined_queries if not query.is_empty()
]
if not features.supports_slicing_ordering_in_compound:
for query, compiler in zip(self.query.combined_queries, compilers):
if query.low_mark or query.high_mark:
raise DatabaseError('LIMIT/OFFSET not allowed in subqueries of compound statements.')
if compiler.get_order_by():
raise DatabaseError('ORDER BY not allowed in subqueries of compound statements.')
parts = ()
for compiler in compilers:
try:
# If the columns list is limited, then all combined queries
# must have the same columns list. Set the selects defined on
# the query on all combined queries, if not already set.
if not compiler.query.values_select and self.query.values_select:
compiler.query = compiler.query.clone()
compiler.query.set_values((
*self.query.extra_select,
*self.query.values_select,
*self.query.annotation_select,
))
part_sql, part_args = compiler.as_sql()
if compiler.query.combinator:
# Wrap in a subquery if wrapping in parentheses isn't
# supported.
if not features.supports_parentheses_in_compound:
part_sql = 'SELECT * FROM ({})'.format(part_sql)
# Add parentheses when combining with compound query if not
# already added for all compound queries.
elif not features.supports_slicing_ordering_in_compound:
part_sql = '({})'.format(part_sql)
parts += ((part_sql, part_args),)
except EmptyResultSet:
# Omit the empty queryset with UNION and with DIFFERENCE if the
# first queryset is nonempty.
if combinator == 'union' or (combinator == 'difference' and parts):
continue
raise
if not parts:
raise EmptyResultSet
combinator_sql = self.connection.ops.set_operators[combinator]
if all and combinator == 'union':
combinator_sql += ' ALL'
braces = '({})' if features.supports_slicing_ordering_in_compound else '{}'
sql_parts, args_parts = zip(*((braces.format(sql), args) for sql, args in parts))
result = [' {} '.format(combinator_sql).join(sql_parts)]
params = []
for part in args_parts:
params.extend(part)
return result, params
def as_sql(self, with_limits=True, with_col_aliases=False):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
If 'with_limits' is False, any limit/offset information is not included
in the query.
"""
refcounts_before = self.query.alias_refcount.copy()
try:
extra_select, order_by, group_by = self.pre_sql_setup()
for_update_part = None
# Is a LIMIT/OFFSET clause needed?
with_limit_offset = with_limits and (self.query.high_mark is not None or self.query.low_mark)
combinator = self.query.combinator
features = self.connection.features
if combinator:
if not getattr(features, 'supports_select_{}'.format(combinator)):
raise NotSupportedError('{} is not supported on this database backend.'.format(combinator))
result, params = self.get_combinator_sql(combinator, self.query.combinator_all)
else:
distinct_fields, distinct_params = self.get_distinct()
# This must come after 'select', 'ordering', and 'distinct'
# (see docstring of get_from_clause() for details).
from_, f_params = self.get_from_clause()
where, w_params = self.compile(self.where) if self.where is not None else ("", [])
having, h_params = self.compile(self.having) if self.having is not None else ("", [])
result = ['SELECT']
params = []
if self.query.distinct:
distinct_result, distinct_params = self.connection.ops.distinct_sql(
distinct_fields,
distinct_params,
)
result += distinct_result
params += distinct_params
out_cols = []
col_idx = 1
for _, (s_sql, s_params), alias in self.select + extra_select:
if alias:
s_sql = '%s AS %s' % (s_sql, self.connection.ops.quote_name(alias))
elif with_col_aliases:
s_sql = '%s AS %s' % (s_sql, 'Col%d' % col_idx)
col_idx += 1
params.extend(s_params)
out_cols.append(s_sql)
result += [', '.join(out_cols), 'FROM', *from_]
params.extend(f_params)
if self.query.select_for_update and self.connection.features.has_select_for_update:
if self.connection.get_autocommit():
raise TransactionManagementError('select_for_update cannot be used outside of a transaction.')
if with_limit_offset and not self.connection.features.supports_select_for_update_with_limit:
raise NotSupportedError(
'LIMIT/OFFSET is not supported with '
'select_for_update on this database backend.'
)
nowait = self.query.select_for_update_nowait
skip_locked = self.query.select_for_update_skip_locked
of = self.query.select_for_update_of
# If it's a NOWAIT/SKIP LOCKED/OF query but the backend
# doesn't support it, raise NotSupportedError to prevent a
# possible deadlock.
if nowait and not self.connection.features.has_select_for_update_nowait:
raise NotSupportedError('NOWAIT is not supported on this database backend.')
elif skip_locked and not self.connection.features.has_select_for_update_skip_locked:
raise NotSupportedError('SKIP LOCKED is not supported on this database backend.')
elif of and not self.connection.features.has_select_for_update_of:
raise NotSupportedError('FOR UPDATE OF is not supported on this database backend.')
for_update_part = self.connection.ops.for_update_sql(
nowait=nowait,
skip_locked=skip_locked,
of=self.get_select_for_update_of_arguments(),
)
if for_update_part and self.connection.features.for_update_after_from:
result.append(for_update_part)
if where:
result.append('WHERE %s' % where)
params.extend(w_params)
grouping = []
for g_sql, g_params in group_by:
grouping.append(g_sql)
params.extend(g_params)
if grouping:
if distinct_fields:
raise NotImplementedError('annotate() + distinct(fields) is not implemented.')
order_by = order_by or self.connection.ops.force_no_ordering()
result.append('GROUP BY %s' % ', '.join(grouping))
if self._meta_ordering:
order_by = None
if having:
result.append('HAVING %s' % having)
params.extend(h_params)
if self.query.explain_query:
result.insert(0, self.connection.ops.explain_query_prefix(
self.query.explain_format,
**self.query.explain_options
))
if order_by:
ordering = []
for _, (o_sql, o_params, _) in order_by:
ordering.append(o_sql)
params.extend(o_params)
result.append('ORDER BY %s' % ', '.join(ordering))
if with_limit_offset:
result.append(self.connection.ops.limit_offset_sql(self.query.low_mark, self.query.high_mark))
if for_update_part and not self.connection.features.for_update_after_from:
result.append(for_update_part)
if self.query.subquery and extra_select:
# If the query is used as a subquery, the extra selects would
# result in more columns than the left-hand side expression is
# expecting. This can happen when a subquery uses a combination
# of order_by() and distinct(), forcing the ordering expressions
# to be selected as well. Wrap the query in another subquery
# to exclude extraneous selects.
sub_selects = []
sub_params = []
for index, (select, _, alias) in enumerate(self.select, start=1):
if not alias and with_col_aliases:
alias = 'col%d' % index
if alias:
sub_selects.append("%s.%s" % (
self.connection.ops.quote_name('subquery'),
self.connection.ops.quote_name(alias),
))
else:
select_clone = select.relabeled_clone({select.alias: 'subquery'})
subselect, subparams = select_clone.as_sql(self, self.connection)
sub_selects.append(subselect)
sub_params.extend(subparams)
return 'SELECT %s FROM (%s) subquery' % (
', '.join(sub_selects),
' '.join(result),
), tuple(sub_params + params)
return ' '.join(result), tuple(params)
finally:
# Finally do cleanup - get rid of the joins we created above.
self.query.reset_refcounts(refcounts_before)
def get_default_columns(self, start_alias=None, opts=None, from_parent=None):
"""
Compute the default columns for selecting every field in the base
model. Will sometimes be called to pull in related models (e.g. via
select_related), in which case "opts" and "start_alias" will be given
to provide a starting point for the traversal.
        Return a list of column expressions (Col instances) suitable for use
        in the SELECT clause; they are quoted appropriately when compiled to
        SQL.
"""
result = []
if opts is None:
opts = self.query.get_meta()
only_load = self.deferred_to_columns()
start_alias = start_alias or self.query.get_initial_alias()
# The 'seen_models' is used to optimize checking the needed parent
# alias for a given field. This also includes None -> start_alias to
# be used by local fields.
seen_models = {None: start_alias}
for field in opts.concrete_fields:
model = field.model._meta.concrete_model
# A proxy model will have a different model and concrete_model. We
# will assign None if the field belongs to this model.
if model == opts.model:
model = None
if from_parent and model is not None and issubclass(
from_parent._meta.concrete_model, model._meta.concrete_model):
# Avoid loading data for already loaded parents.
# We end up here in the case select_related() resolution
# proceeds from parent model to child model. In that case the
# parent model data is already present in the SELECT clause,
# and we want to avoid reloading the same data again.
continue
if field.model in only_load and field.attname not in only_load[field.model]:
continue
alias = self.query.join_parent_model(opts, model, start_alias,
seen_models)
column = field.get_col(alias)
result.append(column)
return result
def get_distinct(self):
"""
Return a quoted list of fields to use in DISTINCT ON part of the query.
This method can alter the tables in the query, and thus it must be
called before get_from_clause().
"""
result = []
params = []
opts = self.query.get_meta()
for name in self.query.distinct_fields:
parts = name.split(LOOKUP_SEP)
_, targets, alias, joins, path, _, transform_function = self._setup_joins(parts, opts, None)
targets, alias, _ = self.query.trim_joins(targets, joins, path)
for target in targets:
if name in self.query.annotation_select:
result.append(name)
else:
r, p = self.compile(transform_function(target, alias))
result.append(r)
params.append(p)
return result, params
def find_ordering_name(self, name, opts, alias=None, default_order='ASC',
already_seen=None):
"""
Return the table alias (the name might be ambiguous, the alias will
not be) and column name for ordering by the given 'name' parameter.
The 'name' is of the form 'field1__field2__...__fieldN'.
"""
name, order = get_order_dir(name, default_order)
descending = order == 'DESC'
pieces = name.split(LOOKUP_SEP)
field, targets, alias, joins, path, opts, transform_function = self._setup_joins(pieces, opts, alias)
# If we get to this point and the field is a relation to another model,
# append the default ordering for that model unless it is the pk
# shortcut or the attribute name of the field that is specified.
if (
field.is_relation and
opts.ordering and
getattr(field, 'attname', None) != pieces[-1] and
name != 'pk'
):
# Firstly, avoid infinite loops.
already_seen = already_seen or set()
join_tuple = tuple(getattr(self.query.alias_map[j], 'join_cols', None) for j in joins)
if join_tuple in already_seen:
raise FieldError('Infinite loop caused by ordering.')
already_seen.add(join_tuple)
results = []
for item in opts.ordering:
if hasattr(item, 'resolve_expression') and not isinstance(item, OrderBy):
item = item.desc() if descending else item.asc()
if isinstance(item, OrderBy):
results.append((item, False))
continue
results.extend(self.find_ordering_name(item, opts, alias,
order, already_seen))
return results
targets, alias, _ = self.query.trim_joins(targets, joins, path)
return [(OrderBy(transform_function(t, alias), descending=descending), False) for t in targets]
def _setup_joins(self, pieces, opts, alias):
"""
Helper method for get_order_by() and get_distinct().
        get_order_by() and get_distinct() must produce the same target
        columns for the same input, as the join prefixes they generate must
        match. Executing SQL where this is not true is an error.
"""
alias = alias or self.query.get_initial_alias()
field, targets, opts, joins, path, transform_function = self.query.setup_joins(pieces, opts, alias)
alias = joins[-1]
return field, targets, alias, joins, path, opts, transform_function
def get_from_clause(self):
"""
        Return a list of strings that are joined together to go after the
        "FROM" part of the query, as well as a list of any extra parameters
        that need to be included. Subclasses can override this to create a
        from-clause via a "select".
This should only be called after any SQL construction methods that
might change the tables that are needed. This means the select columns,
ordering, and distinct must be done first.
"""
result = []
params = []
for alias in tuple(self.query.alias_map):
if not self.query.alias_refcount[alias]:
continue
try:
from_clause = self.query.alias_map[alias]
except KeyError:
# Extra tables can end up in self.tables, but not in the
# alias_map if they aren't in a join. That's OK. We skip them.
continue
clause_sql, clause_params = self.compile(from_clause)
result.append(clause_sql)
params.extend(clause_params)
for t in self.query.extra_tables:
alias, _ = self.query.table_alias(t)
# Only add the alias if it's not already present (the table_alias()
# call increments the refcount, so an alias refcount of one means
# this is the only reference).
if alias not in self.query.alias_map or self.query.alias_refcount[alias] == 1:
result.append(', %s' % self.quote_name_unless_alias(alias))
return result, params
def get_related_selections(self, select, opts=None, root_alias=None, cur_depth=1,
requested=None, restricted=None):
"""
Fill in the information needed for a select_related query. The current
depth is measured as the number of connections away from the root model
(for example, cur_depth=1 means we are looking at models with direct
connections to the root model).
"""
def _get_field_choices():
direct_choices = (f.name for f in opts.fields if f.is_relation)
reverse_choices = (
f.field.related_query_name()
for f in opts.related_objects if f.field.unique
)
return chain(direct_choices, reverse_choices, self.query._filtered_relations)
related_klass_infos = []
if not restricted and cur_depth > self.query.max_depth:
# We've recursed far enough; bail out.
return related_klass_infos
if not opts:
opts = self.query.get_meta()
root_alias = self.query.get_initial_alias()
only_load = self.query.get_loaded_field_names()
# Setup for the case when only particular related fields should be
# included in the related selection.
fields_found = set()
if requested is None:
restricted = isinstance(self.query.select_related, dict)
if restricted:
requested = self.query.select_related
def get_related_klass_infos(klass_info, related_klass_infos):
klass_info['related_klass_infos'] = related_klass_infos
for f in opts.fields:
field_model = f.model._meta.concrete_model
fields_found.add(f.name)
if restricted:
next = requested.get(f.name, {})
if not f.is_relation:
# If a non-related field is used like a relation,
# or if a single non-relational field is given.
if next or f.name in requested:
raise FieldError(
"Non-relational field given in select_related: '%s'. "
"Choices are: %s" % (
f.name,
", ".join(_get_field_choices()) or '(none)',
)
)
else:
next = False
if not select_related_descend(f, restricted, requested,
only_load.get(field_model)):
continue
klass_info = {
'model': f.remote_field.model,
'field': f,
'reverse': False,
'local_setter': f.set_cached_value,
'remote_setter': f.remote_field.set_cached_value if f.unique else lambda x, y: None,
'from_parent': False,
}
related_klass_infos.append(klass_info)
select_fields = []
_, _, _, joins, _, _ = self.query.setup_joins(
[f.name], opts, root_alias)
alias = joins[-1]
columns = self.get_default_columns(start_alias=alias, opts=f.remote_field.model._meta)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_klass_infos = self.get_related_selections(
select, f.remote_field.model._meta, alias, cur_depth + 1, next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
if restricted:
related_fields = [
(o.field, o.related_model)
for o in opts.related_objects
if o.field.unique and not o.many_to_many
]
for f, model in related_fields:
if not select_related_descend(f, restricted, requested,
only_load.get(model), reverse=True):
continue
related_field_name = f.related_query_name()
fields_found.add(related_field_name)
join_info = self.query.setup_joins([related_field_name], opts, root_alias)
alias = join_info.joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': f.remote_field.set_cached_value,
'remote_setter': f.set_cached_value,
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta, from_parent=opts.model)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next = requested.get(f.related_query_name(), {})
next_klass_infos = self.get_related_selections(
select, model._meta, alias, cur_depth + 1,
next, restricted)
get_related_klass_infos(klass_info, next_klass_infos)
def local_setter(obj, from_obj):
# Set a reverse fk object when relation is non-empty.
if from_obj:
f.remote_field.set_cached_value(from_obj, obj)
def remote_setter(name, obj, from_obj):
setattr(from_obj, name, obj)
for name in list(requested):
# Filtered relations work only on the topmost level.
if cur_depth > 1:
break
if name in self.query._filtered_relations:
fields_found.add(name)
f, _, join_opts, joins, _, _ = self.query.setup_joins([name], opts, root_alias)
model = join_opts.model
alias = joins[-1]
from_parent = issubclass(model, opts.model) and model is not opts.model
klass_info = {
'model': model,
'field': f,
'reverse': True,
'local_setter': local_setter,
'remote_setter': partial(remote_setter, name),
'from_parent': from_parent,
}
related_klass_infos.append(klass_info)
select_fields = []
columns = self.get_default_columns(
start_alias=alias, opts=model._meta,
from_parent=opts.model,
)
for col in columns:
select_fields.append(len(select))
select.append((col, None))
klass_info['select_fields'] = select_fields
next_requested = requested.get(name, {})
next_klass_infos = self.get_related_selections(
select, opts=model._meta, root_alias=alias,
cur_depth=cur_depth + 1, requested=next_requested,
restricted=restricted,
)
get_related_klass_infos(klass_info, next_klass_infos)
fields_not_found = set(requested).difference(fields_found)
if fields_not_found:
invalid_fields = ("'%s'" % s for s in fields_not_found)
raise FieldError(
'Invalid field name(s) given in select_related: %s. '
'Choices are: %s' % (
', '.join(invalid_fields),
', '.join(_get_field_choices()) or '(none)',
)
)
return related_klass_infos
def get_select_for_update_of_arguments(self):
"""
Return a quoted list of arguments for the SELECT FOR UPDATE OF part of
the query.
"""
def _get_parent_klass_info(klass_info):
concrete_model = klass_info['model']._meta.concrete_model
for parent_model, parent_link in concrete_model._meta.parents.items():
parent_list = parent_model._meta.get_parent_list()
yield {
'model': parent_model,
'field': parent_link,
'reverse': False,
'select_fields': [
select_index
for select_index in klass_info['select_fields']
# Selected columns from a model or its parents.
if (
self.select[select_index][0].target.model == parent_model or
self.select[select_index][0].target.model in parent_list
)
],
}
def _get_first_selected_col_from_model(klass_info):
"""
Find the first selected column from a model. If it doesn't exist,
don't lock a model.
select_fields is filled recursively, so it also contains fields
from the parent models.
"""
concrete_model = klass_info['model']._meta.concrete_model
for select_index in klass_info['select_fields']:
if self.select[select_index][0].target.model == concrete_model:
return self.select[select_index][0]
def _get_field_choices():
"""Yield all allowed field paths in breadth-first search order."""
queue = collections.deque([(None, self.klass_info)])
while queue:
parent_path, klass_info = queue.popleft()
if parent_path is None:
path = []
yield 'self'
else:
field = klass_info['field']
if klass_info['reverse']:
field = field.remote_field
path = parent_path + [field.name]
yield LOOKUP_SEP.join(path)
queue.extend(
(path, klass_info)
for klass_info in _get_parent_klass_info(klass_info)
)
queue.extend(
(path, klass_info)
for klass_info in klass_info.get('related_klass_infos', [])
)
result = []
invalid_names = []
for name in self.query.select_for_update_of:
klass_info = self.klass_info
if name == 'self':
col = _get_first_selected_col_from_model(klass_info)
else:
for part in name.split(LOOKUP_SEP):
klass_infos = (
*klass_info.get('related_klass_infos', []),
*_get_parent_klass_info(klass_info),
)
for related_klass_info in klass_infos:
field = related_klass_info['field']
if related_klass_info['reverse']:
field = field.remote_field
if field.name == part:
klass_info = related_klass_info
break
else:
klass_info = None
break
if klass_info is None:
invalid_names.append(name)
continue
col = _get_first_selected_col_from_model(klass_info)
if col is not None:
if self.connection.features.select_for_update_of_column:
result.append(self.compile(col)[0])
else:
result.append(self.quote_name_unless_alias(col.alias))
if invalid_names:
raise FieldError(
'Invalid field name(s) given in select_for_update(of=(...)): %s. '
'Only relational fields followed in the query are allowed. '
'Choices are: %s.' % (
', '.join(invalid_names),
', '.join(_get_field_choices()),
)
)
return result
def deferred_to_columns(self):
"""
        Convert the self.deferred_loading data structure to a mapping of table
names to sets of column names which are to be loaded. Return the
dictionary.
"""
columns = {}
self.query.deferred_to_data(columns, self.query.get_loaded_field_names_cb)
return columns
def get_converters(self, expressions):
converters = {}
for i, expression in enumerate(expressions):
if expression:
backend_converters = self.connection.ops.get_db_converters(expression)
field_converters = expression.get_db_converters(self.connection)
if backend_converters or field_converters:
converters[i] = (backend_converters + field_converters, expression)
return converters
def apply_converters(self, rows, converters):
connection = self.connection
converters = list(converters.items())
for row in map(list, rows):
for pos, (convs, expression) in converters:
value = row[pos]
for converter in convs:
value = converter(value, expression, connection)
row[pos] = value
yield row
def results_iter(self, results=None, tuple_expected=False, chunked_fetch=False,
chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""Return an iterator over the results from executing this query."""
if results is None:
results = self.execute_sql(MULTI, chunked_fetch=chunked_fetch, chunk_size=chunk_size)
fields = [s[0] for s in self.select[0:self.col_count]]
converters = self.get_converters(fields)
rows = chain.from_iterable(results)
if converters:
rows = self.apply_converters(rows, converters)
if tuple_expected:
rows = map(tuple, rows)
return rows
def has_results(self):
"""
Backends (e.g. NoSQL) can override this in order to use optimized
versions of "query has any results."
"""
# This is always executed on a query clone, so we can modify self.query
self.query.add_extra({'a': 1}, None, None, None, None, None)
self.query.set_extra_mask(['a'])
return bool(self.execute_sql(SINGLE))
def execute_sql(self, result_type=MULTI, chunked_fetch=False, chunk_size=GET_ITERATOR_CHUNK_SIZE):
"""
Run the query against the database and return the result(s). The
return value is a single data item if result_type is SINGLE, or an
iterator over the results if the result_type is MULTI.
result_type is either MULTI (use fetchmany() to retrieve all rows),
SINGLE (only retrieve a single row), or None. In this last case, the
cursor is returned if any query is executed, since it's used by
        subclasses such as InsertQuery. It's possible, however, that no query
is needed, as the filters describe an empty set. In that case, None is
returned, to avoid any unnecessary database interaction.
"""
result_type = result_type or NO_RESULTS
try:
sql, params = self.as_sql()
if not sql:
raise EmptyResultSet
except EmptyResultSet:
if result_type == MULTI:
return iter([])
else:
return
if chunked_fetch:
cursor = self.connection.chunked_cursor()
else:
cursor = self.connection.cursor()
try:
cursor.execute(sql, params)
except Exception:
# Might fail for server-side cursors (e.g. connection closed)
cursor.close()
raise
if result_type == CURSOR:
# Give the caller the cursor to process and close.
return cursor
if result_type == SINGLE:
try:
val = cursor.fetchone()
if val:
return val[0:self.col_count]
return val
finally:
# done with the cursor
cursor.close()
if result_type == NO_RESULTS:
cursor.close()
return
result = cursor_iter(
cursor, self.connection.features.empty_fetchmany_value,
self.col_count if self.has_extra_select else None,
chunk_size,
)
if not chunked_fetch or not self.connection.features.can_use_chunked_reads:
try:
# If we are using non-chunked reads, we return the same data
# structure as normally, but ensure it is all read into memory
# before going any further. Use chunked_fetch if requested,
# unless the database doesn't support it.
return list(result)
finally:
# done with the cursor
cursor.close()
return result
def as_subquery_condition(self, alias, columns, compiler):
qn = compiler.quote_name_unless_alias
qn2 = self.connection.ops.quote_name
for index, select_col in enumerate(self.query.select):
lhs_sql, lhs_params = self.compile(select_col)
rhs = '%s.%s' % (qn(alias), qn2(columns[index]))
self.query.where.add(
RawSQL('%s = %s' % (lhs_sql, rhs), lhs_params), 'AND')
sql, params = self.as_sql()
return 'EXISTS (%s)' % sql, params
def explain_query(self):
result = list(self.execute_sql())
# Some backends return 1 item tuples with strings, and others return
# tuples with integers and strings. Flatten them out into strings.
for row in result[0]:
if not isinstance(row, str):
yield ' '.join(str(c) for c in row)
else:
yield row


class SQLInsertCompiler(SQLCompiler):
returning_fields = None
returning_params = tuple()
def field_as_sql(self, field, val):
"""
Take a field and a value intended to be saved on that field, and
return placeholder SQL and accompanying params. Check for raw values,
expressions, and fields with get_placeholder() defined in that order.
When field is None, consider the value raw and use it as the
placeholder, with no corresponding parameters returned.
"""
if field is None:
# A field value of None means the value is raw.
sql, params = val, []
elif hasattr(val, 'as_sql'):
# This is an expression, let's compile it.
sql, params = self.compile(val)
elif hasattr(field, 'get_placeholder'):
# Some fields (e.g. geo fields) need special munging before
# they can be inserted.
sql, params = field.get_placeholder(val, self, self.connection), [val]
else:
# Return the common case for the placeholder
sql, params = '%s', [val]
# The following hook is only used by Oracle Spatial, which sometimes
# needs to yield 'NULL' and [] as its placeholder and params instead
# of '%s' and [None]. The 'NULL' placeholder is produced earlier by
# OracleOperations.get_geom_placeholder(). The following line removes
# the corresponding None parameter. See ticket #10888.
params = self.connection.ops.modify_insert_params(sql, params)
return sql, params
def prepare_value(self, field, value):
"""
Prepare a value to be used in a query by resolving it if it is an
expression and otherwise calling the field's get_db_prep_save().
"""
if hasattr(value, 'resolve_expression'):
value = value.resolve_expression(self.query, allow_joins=False, for_save=True)
# Don't allow values containing Col expressions. They refer to
# existing columns on a row, but in the case of insert the row
# doesn't exist yet.
if value.contains_column_references:
raise ValueError(
'Failed to insert expression "%s" on %s. F() expressions '
'can only be used to update, not to insert.' % (value, field)
)
if value.contains_aggregate:
raise FieldError(
'Aggregate functions are not allowed in this query '
'(%s=%r).' % (field.name, value)
)
if value.contains_over_clause:
raise FieldError(
'Window expressions are not allowed in this query (%s=%r).'
% (field.name, value)
)
else:
value = field.get_db_prep_save(value, connection=self.connection)
return value
def pre_save_val(self, field, obj):
"""
Get the given field's value off the given obj. pre_save() is used for
things like auto_now on DateTimeField. Skip it if this is a raw query.
"""
if self.query.raw:
return getattr(obj, field.attname)
return field.pre_save(obj, add=True)
def assemble_as_sql(self, fields, value_rows):
"""
Take a sequence of N fields and a sequence of M rows of values, and
generate placeholder SQL and parameters for each field and value.
Return a pair containing:
* a sequence of M rows of N SQL placeholder strings, and
* a sequence of M rows of corresponding parameter values.
Each placeholder string may contain any number of '%s' interpolation
strings, and each parameter row will contain exactly as many params
as the total number of '%s's in the corresponding placeholder row.
"""
if not value_rows:
return [], []
# list of (sql, [params]) tuples for each object to be saved
# Shape: [n_objs][n_fields][2]
rows_of_fields_as_sql = (
(self.field_as_sql(field, v) for field, v in zip(fields, row))
for row in value_rows
)
# tuple like ([sqls], [[params]s]) for each object to be saved
# Shape: [n_objs][2][n_fields]
sql_and_param_pair_rows = (zip(*row) for row in rows_of_fields_as_sql)
# Extract separate lists for placeholders and params.
# Each of these has shape [n_objs][n_fields]
placeholder_rows, param_rows = zip(*sql_and_param_pair_rows)
# Params for each field are still lists, and need to be flattened.
param_rows = [[p for ps in row for p in ps] for row in param_rows]
return placeholder_rows, param_rows
def as_sql(self):
# We don't need quote_name_unless_alias() here, since these are all
# going to be column names (so we can avoid the extra overhead).
qn = self.connection.ops.quote_name
opts = self.query.get_meta()
insert_statement = self.connection.ops.insert_statement(ignore_conflicts=self.query.ignore_conflicts)
result = ['%s %s' % (insert_statement, qn(opts.db_table))]
fields = self.query.fields or [opts.pk]
result.append('(%s)' % ', '.join(qn(f.column) for f in fields))
if self.query.fields:
value_rows = [
[self.prepare_value(field, self.pre_save_val(field, obj)) for field in fields]
for obj in self.query.objs
]
else:
# An empty object.
value_rows = [[self.connection.ops.pk_default_value()] for _ in self.query.objs]
fields = [None]
# Currently the backends just accept values when generating bulk
# queries and generate their own placeholders. Doing that isn't
# necessary and it should be possible to use placeholders and
# expressions in bulk inserts too.
can_bulk = (not self.returning_fields and self.connection.features.has_bulk_insert)
placeholder_rows, param_rows = self.assemble_as_sql(fields, value_rows)
ignore_conflicts_suffix_sql = self.connection.ops.ignore_conflicts_suffix_sql(
ignore_conflicts=self.query.ignore_conflicts
)
if self.returning_fields and self.connection.features.can_return_columns_from_insert:
if self.connection.features.can_return_rows_from_bulk_insert:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
params = param_rows
else:
result.append("VALUES (%s)" % ", ".join(placeholder_rows[0]))
params = [param_rows[0]]
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
# Skip empty r_sql to allow subclasses to customize behavior for
# 3rd party backends. Refs #19096.
r_sql, self.returning_params = self.connection.ops.return_insert_columns(self.returning_fields)
if r_sql:
result.append(r_sql)
params += [self.returning_params]
return [(" ".join(result), tuple(chain.from_iterable(params)))]
if can_bulk:
result.append(self.connection.ops.bulk_insert_sql(fields, placeholder_rows))
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [(" ".join(result), tuple(p for ps in param_rows for p in ps))]
else:
if ignore_conflicts_suffix_sql:
result.append(ignore_conflicts_suffix_sql)
return [
(" ".join(result + ["VALUES (%s)" % ", ".join(p)]), vals)
for p, vals in zip(placeholder_rows, param_rows)
]
def execute_sql(self, returning_fields=None):
assert not (
returning_fields and len(self.query.objs) != 1 and
not self.connection.features.can_return_rows_from_bulk_insert
)
self.returning_fields = returning_fields
with self.connection.cursor() as cursor:
for sql, params in self.as_sql():
cursor.execute(sql, params)
if not self.returning_fields:
return []
if self.connection.features.can_return_rows_from_bulk_insert and len(self.query.objs) > 1:
return self.connection.ops.fetch_returned_insert_rows(cursor)
if self.connection.features.can_return_columns_from_insert:
assert len(self.query.objs) == 1
return [self.connection.ops.fetch_returned_insert_columns(cursor, self.returning_params)]
return [(self.connection.ops.last_insert_id(
cursor, self.query.get_meta().db_table, self.query.get_meta().pk.column
),)]


class SQLDeleteCompiler(SQLCompiler):
@cached_property
def single_alias(self):
# Ensure base table is in aliases.
self.query.get_initial_alias()
return sum(self.query.alias_refcount[t] > 0 for t in self.query.alias_map) == 1
def _as_sql(self, query):
result = [
'DELETE FROM %s' % self.quote_name_unless_alias(query.base_table)
]
where, params = self.compile(query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(params)
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
if self.single_alias:
return self._as_sql(self.query)
innerq = self.query.clone()
innerq.__class__ = Query
innerq.clear_select_clause()
pk = self.query.model._meta.pk
innerq.select = [
pk.get_col(self.query.get_initial_alias())
]
outerq = Query(self.query.model)
outerq.where = self.query.where_class()
if not self.connection.features.update_can_self_select:
# Force the materialization of the inner query to allow reference
# to the target table on MySQL.
sql, params = innerq.get_compiler(connection=self.connection).as_sql()
innerq = RawSQL('SELECT * FROM (%s) subquery' % sql, params)
outerq.add_q(Q(pk__in=innerq))
return self._as_sql(outerq)


class SQLUpdateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
self.pre_sql_setup()
if not self.query.values:
return '', ()
qn = self.quote_name_unless_alias
values, update_params = [], []
for field, model, val in self.query.values:
if hasattr(val, 'resolve_expression'):
val = val.resolve_expression(self.query, allow_joins=False, for_save=True)
if val.contains_aggregate:
raise FieldError(
'Aggregate functions are not allowed in this query '
'(%s=%r).' % (field.name, val)
)
if val.contains_over_clause:
raise FieldError(
'Window expressions are not allowed in this query '
'(%s=%r).' % (field.name, val)
)
elif hasattr(val, 'prepare_database_save'):
if field.remote_field:
val = field.get_db_prep_save(
val.prepare_database_save(field),
connection=self.connection,
)
else:
raise TypeError(
"Tried to update field %s with a model instance, %r. "
"Use a value compatible with %s."
% (field, val, field.__class__.__name__)
)
else:
val = field.get_db_prep_save(val, connection=self.connection)
# Getting the placeholder for the field.
if hasattr(field, 'get_placeholder'):
placeholder = field.get_placeholder(val, self, self.connection)
else:
placeholder = '%s'
name = field.column
if hasattr(val, 'as_sql'):
sql, params = self.compile(val)
values.append('%s = %s' % (qn(name), placeholder % sql))
update_params.extend(params)
elif val is not None:
values.append('%s = %s' % (qn(name), placeholder))
update_params.append(val)
else:
values.append('%s = NULL' % qn(name))
table = self.query.base_table
result = [
'UPDATE %s SET' % qn(table),
', '.join(values),
]
where, params = self.compile(self.query.where)
if where:
result.append('WHERE %s' % where)
return ' '.join(result), tuple(update_params + params)
def execute_sql(self, result_type):
"""
Execute the specified update. Return the number of rows affected by
the primary update query. The "primary update query" is the first
non-empty query that is executed. Row counts for any subsequent,
related queries are not available.
"""
cursor = super().execute_sql(result_type)
try:
rows = cursor.rowcount if cursor else 0
is_empty = cursor is None
finally:
if cursor:
cursor.close()
for query in self.query.get_related_updates():
aux_rows = query.get_compiler(self.using).execute_sql(result_type)
if is_empty and aux_rows:
rows = aux_rows
is_empty = False
return rows
def pre_sql_setup(self):
"""
If the update depends on results from other tables, munge the "where"
conditions to match the format required for (portable) SQL updates.
If multiple updates are required, pull out the id values to update at
this point so that they don't change as a result of the progressive
updates.
"""
refcounts_before = self.query.alias_refcount.copy()
# Ensure base table is in the query
self.query.get_initial_alias()
count = self.query.count_active_tables()
if not self.query.related_updates and count == 1:
return
query = self.query.chain(klass=Query)
query.select_related = False
query.clear_ordering(True)
query.extra = {}
query.select = []
query.add_fields([query.get_meta().pk.name])
super().pre_sql_setup()
must_pre_select = count > 1 and not self.connection.features.update_can_self_select
# Now we adjust the current query: reset the where clause and get rid
# of all the tables we don't need (since they're in the sub-select).
self.query.where = self.query.where_class()
if self.query.related_updates or must_pre_select:
# Either we're using the idents in multiple update queries (so
# don't want them to change), or the db backend doesn't support
# selecting from the updating table (e.g. MySQL).
idents = []
for rows in query.get_compiler(self.using).execute_sql(MULTI):
idents.extend(r[0] for r in rows)
self.query.add_filter(('pk__in', idents))
self.query.related_ids = idents
else:
# The fast path. Filters and updates in one query.
self.query.add_filter(('pk__in', query))
self.query.reset_refcounts(refcounts_before)


class SQLAggregateCompiler(SQLCompiler):
def as_sql(self):
"""
Create the SQL for this query. Return the SQL string and list of
parameters.
"""
sql, params = [], []
for annotation in self.query.annotation_select.values():
ann_sql, ann_params = self.compile(annotation)
ann_sql, ann_params = annotation.select_format(self, ann_sql, ann_params)
sql.append(ann_sql)
params.extend(ann_params)
self.col_count = len(self.query.annotation_select)
sql = ', '.join(sql)
params = tuple(params)
sql = 'SELECT %s FROM (%s) subquery' % (sql, self.query.subquery)
params = params + self.query.sub_params
return sql, params


def cursor_iter(cursor, sentinel, col_count, itersize):
"""
Yield blocks of rows from a cursor and ensure the cursor is closed when
done.
"""
try:
for rows in iter((lambda: cursor.fetchmany(itersize)), sentinel):
yield rows if col_count is None else [r[:col_count] for r in rows]
finally:
cursor.close()