Fixed #24211 -- Removed ValuesQuerySet() and ValuesListQuerySet().
Thanks Anssi Kääriäinen, Marc Tamlyn, and Tim Graham for the reviews.
commit 4c3bfe9053 (parent dbabf43920)
@@ -192,7 +192,6 @@ class RelatedGeoModelTest(TestCase):
     def test07_values(self):
         "Testing values() and values_list() and GeoQuerySets."
-        # GeoQuerySet and GeoValuesQuerySet, and GeoValuesListQuerySet respectively.
         gqs = Location.objects.all()
         gvqs = Location.objects.values()
         gvlqs = Location.objects.values_list()
@@ -264,7 +263,7 @@ class RelatedGeoModelTest(TestCase):
         "Testing `Count` aggregate use with the `GeoManager` on non geo-fields. See #11087."
         # Should only be one author (Trevor Paglen) returned by this query, and
         # the annotation should have 3 for the number of books, see #11087.
-        # Also testing with a `GeoValuesQuerySet`, see #11489.
+        # Also testing with a values(), see #11489.
         qs = Author.objects.annotate(num_books=Count('books')).filter(num_books__gt=1)
         vqs = Author.objects.values('name').annotate(num_books=Count('books')).filter(num_books__gt=1)
         self.assertEqual(1, len(qs))

@@ -12,7 +12,7 @@ from django.core import exceptions
 from django.db import (connections, router, transaction, IntegrityError,
     DJANGO_VERSION_PICKLE_KEY)
 from django.db.models.constants import LOOKUP_SEP
-from django.db.models.fields import AutoField, Empty
+from django.db.models.fields import AutoField
 from django.db.models.query_utils import Q, deferred_class_factory, InvalidQuery
 from django.db.models.deletion import Collector
 from django.db.models.sql.constants import CURSOR

@@ -34,16 +34,130 @@ REPR_OUTPUT_SIZE = 20
 EmptyResultSet = sql.EmptyResultSet
 
 
-def _pickle_queryset(class_bases, class_dict):
-    """
-    Used by `__reduce__` to create the initial version of the `QuerySet` class
-    onto which the output of `__getstate__` will be applied.
-
-    See `__reduce__` for more details.
-    """
-    new = Empty()
-    new.__class__ = type(class_bases[0].__name__, class_bases, class_dict)
-    return new
+class BaseIterator(object):
+    def __init__(self, queryset):
+        self.queryset = queryset
+
+
+class ModelIterator(BaseIterator):
+    """
+    Iterator that yields a model instance for each row.
+    """
+
+    def __iter__(self):
+        queryset = self.queryset
+        db = queryset.db
+        compiler = queryset.query.get_compiler(using=db)
+        # Execute the query. This will also fill compiler.select, klass_info,
+        # and annotations.
+        results = compiler.execute_sql()
+        select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
+                                                  compiler.annotation_col_map)
+        if klass_info is None:
+            return
+        model_cls = klass_info['model']
+        select_fields = klass_info['select_fields']
+        model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
+        init_list = [f[0].output_field.attname
+                     for f in select[model_fields_start:model_fields_end]]
+        if len(init_list) != len(model_cls._meta.concrete_fields):
+            init_set = set(init_list)
+            skip = [f.attname for f in model_cls._meta.concrete_fields
+                    if f.attname not in init_set]
+            model_cls = deferred_class_factory(model_cls, skip)
+        related_populators = get_related_populators(klass_info, select, db)
+        for row in compiler.results_iter(results):
+            obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
+            if related_populators:
+                for rel_populator in related_populators:
+                    rel_populator.populate(row, obj)
+            if annotation_col_map:
+                for attr_name, col_pos in annotation_col_map.items():
+                    setattr(obj, attr_name, row[col_pos])
+
+            # Add the known related objects to the model, if there are any
+            if queryset._known_related_objects:
+                for field, rel_objs in queryset._known_related_objects.items():
+                    # Avoid overwriting objects loaded e.g. by select_related
+                    if hasattr(obj, field.get_cache_name()):
+                        continue
+                    pk = getattr(obj, field.get_attname())
+                    try:
+                        rel_obj = rel_objs[pk]
+                    except KeyError:
+                        pass  # may happen in qs1 | qs2 scenarios
+                    else:
+                        setattr(obj, field.name, rel_obj)
+
+            yield obj
+
+
+class ValuesIterator(BaseIterator):
+    """
+    Iterator returned by QuerySet.values() that yields a dict
+    for each row.
+    """
+
+    def __iter__(self):
+        queryset = self.queryset
+        query = queryset.query
+        compiler = query.get_compiler(queryset.db)
+
+        field_names = list(query.values_select)
+        extra_names = list(query.extra_select)
+        annotation_names = list(query.annotation_select)
+
+        # extra(select=...) cols are always at the start of the row.
+        names = extra_names + field_names + annotation_names
+
+        for row in compiler.results_iter():
+            yield dict(zip(names, row))
+
+
+class ValuesListIterator(BaseIterator):
+    """
+    Iterator returned by QuerySet.values_lists(flat=False)
+    that yields a tuple for each row.
+    """
+
+    def __iter__(self):
+        queryset = self.queryset
+        query = queryset.query
+        compiler = query.get_compiler(queryset.db)
+
+        if not query.extra_select and not query.annotation_select:
+            for row in compiler.results_iter():
+                yield tuple(row)
+        else:
+            field_names = list(query.values_select)
+            extra_names = list(query.extra_select)
+            annotation_names = list(query.annotation_select)
+
+            # extra(select=...) cols are always at the start of the row.
+            names = extra_names + field_names + annotation_names
+
+            if queryset._fields:
+                # Reorder according to fields.
+                fields = list(queryset._fields) + [f for f in annotation_names if f not in queryset._fields]
+            else:
+                fields = names
+
+            for row in compiler.results_iter():
+                data = dict(zip(names, row))
+                yield tuple(data[f] for f in fields)
+
+
+class FlatValuesListIterator(BaseIterator):
+    """
+    Iterator returned by QuerySet.values_lists(flat=True) that
+    yields single values.
+    """
+
+    def __iter__(self):
+        queryset = self.queryset
+        compiler = queryset.query.get_compiler(queryset.db)
+        for row in compiler.results_iter():
+            yield row[0]
 
 
 class QuerySet(object):

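For illustration only (not part of the commit): the dict-building in ``ValuesIterator`` is just a ``zip`` of column names against each raw row, with ``extra(select=...)`` columns first and annotations last. A minimal standalone sketch with made-up, hypothetical data::

    # Illustrative stand-ins for what the compiler reports for a values() query.
    extra_names = ['is_recent']            # extra(select=...) columns come first
    field_names = ['id', 'headline']       # fields named in values()
    annotation_names = ['n_comments']      # annotations come last

    names = extra_names + field_names + annotation_names
    rows = [
        (1, 10, 'First post', 3),
        (0, 11, 'Second post', 0),
    ]

    # One dict per row, keyed by column name, mirroring what ValuesIterator yields.
    for row in rows:
        print(dict(zip(names, row)))
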
@@ -62,6 +176,8 @@ class QuerySet(object):
         self._prefetch_related_lookups = []
         self._prefetch_done = False
         self._known_related_objects = {}  # {rel_field, {pk: rel_obj}}
+        self._iterator_class = ModelIterator
+        self._fields = None
 
     def as_manager(cls):
         # Address the circular dependency between `Queryset` and `Manager`.

@@ -115,26 +231,6 @@ class QuerySet(object):
         self.__dict__.update(state)
 
-    def __reduce__(self):
-        """
-        Used by pickle to deal with the types that we create dynamically when
-        specialized queryset such as `ValuesQuerySet` are used in conjunction
-        with querysets that are *subclasses* of `QuerySet`.
-
-        See `_clone` implementation for more details.
-        """
-        if hasattr(self, '_specialized_queryset_class'):
-            class_bases = (
-                self._specialized_queryset_class,
-                self._base_queryset_class,
-            )
-            class_dict = {
-                '_specialized_queryset_class': self._specialized_queryset_class,
-                '_base_queryset_class': self._base_queryset_class,
-            }
-            return _pickle_queryset, (class_bases, class_dict), self.__getstate__()
-        return super(QuerySet, self).__reduce__()
-
     def __repr__(self):
         data = list(self[:REPR_OUTPUT_SIZE + 1])
         if len(data) > REPR_OUTPUT_SIZE:

@@ -232,50 +328,7 @@ class QuerySet(object):
         An iterator over the results from applying this QuerySet to the
         database.
         """
-        db = self.db
-        compiler = self.query.get_compiler(using=db)
-        # Execute the query. This will also fill compiler.select, klass_info,
-        # and annotations.
-        results = compiler.execute_sql()
-        select, klass_info, annotation_col_map = (compiler.select, compiler.klass_info,
-                                                  compiler.annotation_col_map)
-        if klass_info is None:
-            return
-        model_cls = klass_info['model']
-        select_fields = klass_info['select_fields']
-        model_fields_start, model_fields_end = select_fields[0], select_fields[-1] + 1
-        init_list = [f[0].output_field.attname
-                     for f in select[model_fields_start:model_fields_end]]
-        if len(init_list) != len(model_cls._meta.concrete_fields):
-            init_set = set(init_list)
-            skip = [f.attname for f in model_cls._meta.concrete_fields
-                    if f.attname not in init_set]
-            model_cls = deferred_class_factory(model_cls, skip)
-        related_populators = get_related_populators(klass_info, select, db)
-        for row in compiler.results_iter(results):
-            obj = model_cls.from_db(db, init_list, row[model_fields_start:model_fields_end])
-            if related_populators:
-                for rel_populator in related_populators:
-                    rel_populator.populate(row, obj)
-            if annotation_col_map:
-                for attr_name, col_pos in annotation_col_map.items():
-                    setattr(obj, attr_name, row[col_pos])
-
-            # Add the known related objects to the model, if there are any
-            if self._known_related_objects:
-                for field, rel_objs in self._known_related_objects.items():
-                    # Avoid overwriting objects loaded e.g. by select_related
-                    if hasattr(obj, field.get_cache_name()):
-                        continue
-                    pk = getattr(obj, field.get_attname())
-                    try:
-                        rel_obj = rel_objs[pk]
-                    except KeyError:
-                        pass  # may happen in qs1 | qs2 scenarios
-                    else:
-                        setattr(obj, field.name, rel_obj)
-
-            yield obj
+        return self._iterator_class(self)
 
     def aggregate(self, *args, **kwargs):
         """

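For illustration only (not part of the commit): ``iterator()`` now just instantiates whatever class ``self._iterator_class`` holds. A toy, Django-free sketch of that dispatch pattern::

    class BaseIterator(object):
        def __init__(self, queryset):
            self.queryset = queryset


    class DictIterator(BaseIterator):
        # Stand-in for ValuesIterator: turn each raw row into a dict.
        def __iter__(self):
            for row in self.queryset._rows:
                yield dict(zip(self.queryset._names, row))


    class ToyQuerySet(object):
        # Stand-in for QuerySet: holds raw rows plus the iterator class to use.
        def __init__(self, names, rows, iterator_class):
            self._names = names
            self._rows = rows
            self._iterator_class = iterator_class

        def iterator(self):
            # Same shape as the new QuerySet.iterator(): delegate to the class.
            return self._iterator_class(self)


    qs = ToyQuerySet(['id', 'name'], [(1, 'a'), (2, 'b')], DictIterator)
    print(list(qs.iterator()))  # [{'id': 1, 'name': 'a'}, {'id': 2, 'name': 'b'}]
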
@@ -518,6 +571,9 @@ class QuerySet(object):
         assert self.query.can_filter(), \
             "Cannot use 'limit' or 'offset' with delete."
 
+        if self._fields is not None:
+            raise TypeError("Cannot call delete() after .values() or .values_list()")
+
         del_query = self._clone()
 
         # The delete is actually 2 queries - one to find related objects,

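For illustration only: the guard that used to live on ``ValuesQuerySet.delete()`` now sits on ``QuerySet.delete()`` itself, so user-visible behaviour is unchanged. Assuming the ``Entry`` model from the queryset docs::

    Entry.objects.filter(pub_date__year=2005).delete()    # still fine
    Entry.objects.values('id').delete()                   # still raises TypeError
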
@@ -600,18 +656,64 @@ class QuerySet(object):
                            params=params, translations=translations,
                            using=using)
 
+    def _values(self, *fields):
+        clone = self._clone()
+        clone._fields = fields
+
+        query = clone.query
+        query.select_related = False
+        query.clear_deferred_loading()
+        query.clear_select_fields()
+
+        if query.group_by is True:
+            query.add_fields((f.attname for f in self.model._meta.concrete_fields), False)
+            query.set_group_by()
+            query.clear_select_fields()
+
+        if fields:
+            field_names = []
+            extra_names = []
+            annotation_names = []
+            if not query._extra and not query._annotations:
+                # Shortcut - if there are no extra or annotations, then
+                # the values() clause must be just field names.
+                field_names = list(fields)
+            else:
+                query.default_cols = False
+                for f in fields:
+                    if f in query.extra_select:
+                        extra_names.append(f)
+                    elif f in query.annotation_select:
+                        annotation_names.append(f)
+                    else:
+                        field_names.append(f)
+                query.set_extra_mask(extra_names)
+                query.set_annotation_mask(annotation_names)
+        else:
+            field_names = [f.attname for f in self.model._meta.concrete_fields]
+
+        query.values_select = field_names
+        query.add_fields(field_names, True)
+
+        return clone
+
     def values(self, *fields):
-        return self._clone(klass=ValuesQuerySet, setup=True, _fields=fields)
+        clone = self._values(*fields)
+        clone._iterator_class = ValuesIterator
+        return clone
 
     def values_list(self, *fields, **kwargs):
         flat = kwargs.pop('flat', False)
         if kwargs:
             raise TypeError('Unexpected keyword arguments to values_list: %s'
                             % (list(kwargs),))
 
         if flat and len(fields) > 1:
             raise TypeError("'flat' is not valid when values_list is called with more than one field.")
-        return self._clone(klass=ValuesListQuerySet, setup=True, flat=flat,
-                           _fields=fields)
+
+        clone = self._values(*fields)
+        clone._iterator_class = FlatValuesListIterator if flat else ValuesListIterator
+        return clone
 
     def dates(self, field_name, kind, order='ASC'):
         """

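For illustration only (not part of the diff): what the three code paths above produce, in the shell-transcript style of the queryset docs and assuming their ``Blog`` model; the dicts, tuples, and flat values come from ``ValuesIterator``, ``ValuesListIterator``, and ``FlatValuesListIterator`` respectively::

    >>> Blog.objects.values('id', 'name')
    [{'id': 1, 'name': 'Beatles Blog'}]
    >>> Blog.objects.values_list('id', 'name')
    [(1, 'Beatles Blog')]
    >>> Blog.objects.values_list('name', flat=True)
    ['Beatles Blog']
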
@@ -779,26 +881,26 @@ class QuerySet(object):
             annotations[arg.default_alias] = arg
         annotations.update(kwargs)
 
-        obj = self._clone()
-        names = getattr(self, '_fields', None)
+        clone = self._clone()
+        names = self._fields
         if names is None:
             names = {f.name for f in self.model._meta.get_fields()}
 
-        # Add the annotations to the query
         for alias, annotation in annotations.items():
             if alias in names:
                 raise ValueError("The annotation '%s' conflicts with a field on "
                                  "the model." % alias)
-            obj.query.add_annotation(annotation, alias, is_summary=False)
-        # expressions need to be added to the query before we know if they contain aggregates
-        added_aggregates = []
-        for alias, annotation in obj.query.annotations.items():
-            if alias in annotations and annotation.contains_aggregate:
-                added_aggregates.append(alias)
-        if added_aggregates:
-            obj._setup_aggregate_query(list(added_aggregates))
+            clone.query.add_annotation(annotation, alias, is_summary=False)
+        for alias, annotation in clone.query.annotations.items():
+            if alias in annotations and annotation.contains_aggregate:
+                if clone._fields is None:
+                    clone.query.group_by = True
+                else:
+                    clone.query.set_group_by()
+                break
 
-        return obj
+        return clone
 
     def order_by(self, *field_names):
         """

@@ -848,6 +950,8 @@ class QuerySet(object):
         parameter, in which case all deferrals are removed (None acts as a
         reset option).
         """
+        if self._fields is not None:
+            raise TypeError("Cannot call defer() after .values() or .values_list()")
         clone = self._clone()
         if fields == (None,):
             clone.query.clear_deferred_loading()

@@ -861,6 +965,8 @@ class QuerySet(object):
         method and that are not already specified as deferred are loaded
         immediately when the queryset is evaluated.
         """
+        if self._fields is not None:
+            raise TypeError("Cannot call only() after .values() or .values_list()")
         if fields == (None,):
             # Can only pass None to defer(), not only(), as the rest option.
             # That won't stop people trying to do this, so let's be explicit.

@@ -934,29 +1040,19 @@ class QuerySet(object):
             self.model._base_manager._insert(batch, fields=fields,
                                              using=self.db)
 
-    def _clone(self, klass=None, setup=False, **kwargs):
-        if klass is None:
-            klass = self.__class__
-        elif not issubclass(self.__class__, klass):
-            base_queryset_class = getattr(self, '_base_queryset_class', self.__class__)
-            class_bases = (klass, base_queryset_class)
-            class_dict = {
-                '_base_queryset_class': base_queryset_class,
-                '_specialized_queryset_class': klass,
-            }
-            klass = type(klass.__name__, class_bases, class_dict)
-
+    def _clone(self, **kwargs):
         query = self.query.clone()
         if self._sticky_filter:
             query.filter_is_sticky = True
-        c = klass(model=self.model, query=query, using=self._db, hints=self._hints)
-        c._for_write = self._for_write
-        c._prefetch_related_lookups = self._prefetch_related_lookups[:]
-        c._known_related_objects = self._known_related_objects
-        c.__dict__.update(kwargs)
-        if setup and hasattr(c, '_setup_query'):
-            c._setup_query()
-        return c
+        clone = self.__class__(model=self.model, query=query, using=self._db, hints=self._hints)
+        clone._for_write = self._for_write
+        clone._prefetch_related_lookups = self._prefetch_related_lookups[:]
+        clone._known_related_objects = self._known_related_objects
+        clone._iterator_class = self._iterator_class
+        clone._fields = self._fields
+
+        clone.__dict__.update(kwargs)
+        return clone
 
     def _fetch_all(self):
         if self._result_cache is None:

@@ -980,11 +1076,14 @@ class QuerySet(object):
 
     def _merge_sanity_check(self, other):
         """
-        Checks that we are merging two comparable QuerySet classes. By default
-        this does nothing, but see the ValuesQuerySet for an example of where
-        it's useful.
+        Checks that we are merging two comparable QuerySet classes.
         """
-        pass
+        if self._fields is not None and (
+                set(self.query.values_select) != set(other.query.values_select) or
+                set(self.query.extra_select) != set(other.query.extra_select) or
+                set(self.query.annotation_select) != set(other.query.annotation_select)):
+            raise TypeError("Merging '%s' classes must involve the same values in each case."
+                            % self.__class__.__name__)
 
     def _merge_known_related_objects(self, other):
         """

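For illustration only (not part of the commit): the merge check above boils down to set comparisons over the selected column names, which plain Python can show with hypothetical data::

    # Two values() querysets may be combined (e.g. qs1 | qs2) only when they
    # select the same columns; order does not matter, membership does.
    lhs = {'values': {'id', 'name'}, 'extra': set(), 'annotations': set()}
    rhs = {'values': {'name', 'id'}, 'extra': set(), 'annotations': set()}

    compatible = all(lhs[key] == rhs[key] for key in lhs)
    print(compatible)  # True, so the merge would be allowed
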
@@ -993,23 +1092,29 @@ class QuerySet(object):
         for field, objects in other._known_related_objects.items():
             self._known_related_objects.setdefault(field, {}).update(objects)
 
-    def _setup_aggregate_query(self, aggregates):
-        """
-        Prepare the query for computing a result that contains aggregate annotations.
-        """
-        if self.query.group_by is None:
-            self.query.group_by = True
-
     def _prepare(self):
+        if self._fields is not None:
+            # values() queryset can only be used as nested queries
+            # if they are set up to select only a single field.
+            if len(self._fields or self.model._meta.concrete_fields) > 1:
+                raise TypeError('Cannot use multi-field values as a filter value.')
         return self
 
     def _as_sql(self, connection):
         """
         Returns the internal query's SQL and parameters (as a tuple).
         """
-        obj = self.values("pk")
-        if obj._db is None or connection == connections[obj._db]:
-            return obj.query.get_compiler(connection=connection).as_nested_sql()
+        if self._fields is not None:
+            # values() queryset can only be used as nested queries
+            # if they are set up to select only a single field.
+            if len(self._fields or self.model._meta.concrete_fields) > 1:
+                raise TypeError('Cannot use multi-field values as a filter value.')
+            clone = self._clone()
+        else:
+            clone = self.values('pk')
+
+        if clone._db is None or connection == connections[clone._db]:
+            return clone.query.get_compiler(connection=connection).as_nested_sql()
         raise ValueError("Can't do subqueries with queries on different DBs.")
 
     # When used as part of a nested query, a queryset will never be an "always

@@ -1035,6 +1140,9 @@ class QuerySet(object):
     def is_compatible_query_object_type(self, opts):
         model = self.model
         return (
+            # We trust that users of values() know what they are doing.
+            self._fields is not None or
+            # Otherwise check that models are compatible.
             model == opts.concrete_model or
             opts.concrete_model in model._meta.get_parent_list() or
             model in opts.get_parent_list()

@@ -1057,195 +1165,6 @@ class EmptyQuerySet(six.with_metaclass(InstanceCheckMeta)):
         raise TypeError("EmptyQuerySet can't be instantiated")
 
 
-class ValuesQuerySet(QuerySet):
-    def __init__(self, *args, **kwargs):
-        super(ValuesQuerySet, self).__init__(*args, **kwargs)
-        # select_related isn't supported in values(). (FIXME -#3358)
-        self.query.select_related = False
-
-        # QuerySet.clone() will also set up the _fields attribute with the
-        # names of the model fields to select.
-
-    def only(self, *fields):
-        raise NotImplementedError("ValuesQuerySet does not implement only()")
-
-    def defer(self, *fields):
-        raise NotImplementedError("ValuesQuerySet does not implement defer()")
-
-    def iterator(self):
-        # Purge any extra columns that haven't been explicitly asked for
-        extra_names = list(self.query.extra_select)
-        field_names = self.field_names
-        annotation_names = list(self.query.annotation_select)
-
-        names = extra_names + field_names + annotation_names
-
-        for row in self.query.get_compiler(self.db).results_iter():
-            yield dict(zip(names, row))
-
-    def delete(self):
-        # values().delete() doesn't work currently - make sure it raises an
-        # user friendly error.
-        raise TypeError("Queries with .values() or .values_list() applied "
-                        "can't be deleted")
-
-    def _setup_query(self):
-        """
-        Constructs the field_names list that the values query will be
-        retrieving.
-
-        Called by the _clone() method after initializing the rest of the
-        instance.
-        """
-        if self.query.group_by is True:
-            self.query.add_fields([f.attname for f in self.model._meta.concrete_fields], False)
-            self.query.set_group_by()
-        self.query.clear_deferred_loading()
-        self.query.clear_select_fields()
-        if self._fields:
-            self.extra_names = []
-            self.annotation_names = []
-            if not self.query._extra and not self.query._annotations:
-                # Short cut - if there are no extra or annotations, then
-                # the values() clause must be just field names.
-                self.field_names = list(self._fields)
-            else:
-                self.query.default_cols = False
-                self.field_names = []
-                for f in self._fields:
-                    # we inspect the full extra_select list since we might
-                    # be adding back an extra select item that we hadn't
-                    # had selected previously.
-                    if self.query._extra and f in self.query._extra:
-                        self.extra_names.append(f)
-                    elif f in self.query.annotation_select:
-                        self.annotation_names.append(f)
-                    else:
-                        self.field_names.append(f)
-        else:
-            # Default to all fields.
-            self.extra_names = None
-            self.field_names = [f.attname for f in self.model._meta.concrete_fields]
-            self.annotation_names = None
-
-        self.query.select = []
-        if self.extra_names is not None:
-            self.query.set_extra_mask(self.extra_names)
-        self.query.add_fields(self.field_names, True)
-        if self.annotation_names is not None:
-            self.query.set_annotation_mask(self.annotation_names)
-
-    def _clone(self, klass=None, setup=False, **kwargs):
-        """
-        Cloning a ValuesQuerySet preserves the current fields.
-        """
-        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
-        if not hasattr(c, '_fields'):
-            # Only clone self._fields if _fields wasn't passed into the cloning
-            # call directly.
-            c._fields = self._fields[:]
-        c.field_names = self.field_names
-        c.extra_names = self.extra_names
-        c.annotation_names = self.annotation_names
-        if setup and hasattr(c, '_setup_query'):
-            c._setup_query()
-        return c
-
-    def _merge_sanity_check(self, other):
-        super(ValuesQuerySet, self)._merge_sanity_check(other)
-        if (set(self.extra_names) != set(other.extra_names) or
-                set(self.field_names) != set(other.field_names) or
-                self.annotation_names != other.annotation_names):
-            raise TypeError("Merging '%s' classes must involve the same values in each case."
-                            % self.__class__.__name__)
-
-    def _setup_aggregate_query(self, aggregates):
-        """
-        Prepare the query for computing a result that contains aggregate annotations.
-        """
-        self.query.set_group_by()
-
-        if self.annotation_names is not None:
-            self.annotation_names.extend(aggregates)
-            self.query.set_annotation_mask(self.annotation_names)
-
-        super(ValuesQuerySet, self)._setup_aggregate_query(aggregates)
-
-    def _as_sql(self, connection):
-        """
-        For ValuesQuerySet (and subclasses like ValuesListQuerySet), they can
-        only be used as nested queries if they're already set up to select only
-        a single field (in which case, that is the field column that is
-        returned). This differs from QuerySet.as_sql(), where the column to
-        select is set up by Django.
-        """
-        if ((self._fields and len(self._fields) > 1) or
-                (not self._fields and len(self.model._meta.fields) > 1)):
-            raise TypeError('Cannot use a multi-field %s as a filter value.'
-                            % self.__class__.__name__)
-
-        obj = self._clone()
-        if obj._db is None or connection == connections[obj._db]:
-            return obj.query.get_compiler(connection=connection).as_nested_sql()
-        raise ValueError("Can't do subqueries with queries on different DBs.")
-
-    def _prepare(self):
-        """
-        Validates that we aren't trying to do a query like
-        value__in=qs.values('value1', 'value2'), which isn't valid.
-        """
-        if ((self._fields and len(self._fields) > 1) or
-                (not self._fields and len(self.model._meta.fields) > 1)):
-            raise TypeError('Cannot use a multi-field %s as a filter value.'
-                            % self.__class__.__name__)
-        return self
-
-    def is_compatible_query_object_type(self, opts):
-        """
-        ValueQuerySets do not need to be checked for compatibility.
-        We trust that users of ValueQuerySets know what they are doing.
-        """
-        return True
-
-
-class ValuesListQuerySet(ValuesQuerySet):
-    def iterator(self):
-        compiler = self.query.get_compiler(self.db)
-        if self.flat and len(self._fields) == 1:
-            for row in compiler.results_iter():
-                yield row[0]
-        elif not self.query.extra_select and not self.query.annotation_select:
-            for row in compiler.results_iter():
-                yield tuple(row)
-        else:
-            # When extra(select=...) or an annotation is involved, the extra
-            # cols are always at the start of the row, and we need to reorder
-            # the fields to match the order in self._fields.
-            extra_names = list(self.query.extra_select)
-            field_names = self.field_names
-            annotation_names = list(self.query.annotation_select)
-
-            names = extra_names + field_names + annotation_names
-
-            # If a field list has been specified, use it. Otherwise, use the
-            # full list of fields, including extras and annotations.
-            if self._fields:
-                fields = list(self._fields) + [f for f in annotation_names if f not in self._fields]
-            else:
-                fields = names
-
-            for row in compiler.results_iter():
-                data = dict(zip(names, row))
-                yield tuple(data[f] for f in fields)
-
-    def _clone(self, *args, **kwargs):
-        clone = super(ValuesListQuerySet, self)._clone(*args, **kwargs)
-        if not hasattr(clone, "flat"):
-            # Only assign flat if the clone didn't already get it from kwargs
-            clone.flat = self.flat
-        return clone
-
-
 class RawQuerySet(object):
     """
     Provides an iterator which converts the results of raw SQL queries into

@@ -148,7 +148,14 @@ class Query(object):
         self.distinct_fields = []
         self.select_for_update = False
         self.select_for_update_nowait = False
 
         self.select_related = False
+        # Arbitrary limit for select_related to prevents infinite recursion.
+        self.max_depth = 5
+
+        # Holds the selects defined by a call to values() or values_list()
+        # excluding annotation_select and extra_select.
+        self.values_select = []
 
         # SQL annotation-related attributes
         # The _annotations will be an OrderedDict when used. Due to the cost

@@ -158,10 +165,6 @@ class Query(object):
         self.annotation_select_mask = None
         self._annotation_select_cache = None
 
-        # Arbitrary maximum limit for select_related. Prevents infinite
-        # recursion. Can be changed by the depth parameter to select_related().
-        self.max_depth = 5
-
         # These are for extensions. The contents are more or less appended
         # verbatim to the appropriate clause.
         # The _extra attribute is an OrderedDict, lazily created similarly to

@@ -273,6 +276,7 @@ class Query(object):
         obj.select_for_update = self.select_for_update
         obj.select_for_update_nowait = self.select_for_update_nowait
         obj.select_related = self.select_related
+        obj.values_select = self.values_select[:]
         obj._annotations = self._annotations.copy() if self._annotations is not None else None
         if self.annotation_select_mask is None:
             obj.annotation_select_mask = None

@@ -1616,6 +1620,7 @@ class Query(object):
         columns.
         """
         self.select = []
+        self.values_select = []
 
     def add_select(self, col):
         self.default_cols = False

@@ -204,7 +204,7 @@ class SubqueryConstraint(object):
         if query._db and connection.alias != query._db:
             raise ValueError("Can't do subqueries with queries on different DBs.")
         # Do not override already existing values.
-        if not hasattr(query, 'field_names'):
+        if query._fields is None:
             query = query.values(*self.targets)
         else:
             query = query._clone()

@@ -514,8 +514,8 @@ values
 
 .. method:: values(*fields)
 
-Returns a ``ValuesQuerySet`` — a ``QuerySet`` subclass that returns
-dictionaries when used as an iterable, rather than model-instance objects.
+Returns a ``QuerySet`` that returns dictionaries, rather than model instances,
+when used as an iterable.
 
 Each of those dictionaries represents an object, with the keys corresponding to
 the attribute names of model objects.

@@ -585,14 +585,12 @@ A few subtleties that are worth mentioning:
   :meth:`defer()` after ``values()`` was allowed, but it either crashed or
   returned incorrect results.
 
-A ``ValuesQuerySet`` is useful when you know you're only going to need values
-from a small number of the available fields and you won't need the
-functionality of a model instance object. It's more efficient to select only
-the fields you need to use.
+It is useful when you know you're only going to need values from a small number
+of the available fields and you won't need the functionality of a model
+instance object. It's more efficient to select only the fields you need to use.
 
-Finally, note that a ``ValuesQuerySet`` is a subclass of ``QuerySet`` and it
-implements most of the same methods. You can call ``filter()`` on it,
-``order_by()``, etc. That means that these two calls are identical::
+Finally, note that you can call ``filter()``, ``order_by()``, etc. after the
+``values()`` call, that means that these two calls are identical::
 
     Blog.objects.values().order_by('id')
     Blog.objects.order_by('id').values()

@@ -645,11 +643,6 @@ It is an error to pass in ``flat`` when there is more than one field.
 If you don't pass any values to ``values_list()``, it will return all the
 fields in the model, in the order they were declared.
 
-Note that this method returns a ``ValuesListQuerySet``. This class behaves
-like a list. Most of the time this is enough, but if you require an actual
-Python list object, you can simply call ``list()`` on it, which will evaluate
-the queryset.
-
 dates
 ~~~~~
 

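For illustration only: the advice in the removed paragraph still applies to the plain ``QuerySet`` now returned; call ``list()`` when an actual list is needed. Assuming the ``Entry`` model from the docs::

    >>> list(Entry.objects.values_list('headline', flat=True))
    ['First entry', 'Second entry']
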
@@ -2280,10 +2273,10 @@ This queryset will be evaluated as subselect statement::
 
     SELECT ... WHERE blog.id IN (SELECT id FROM ... WHERE NAME LIKE '%Cheddar%')
 
-If you pass in a ``ValuesQuerySet`` or ``ValuesListQuerySet`` (the result of
-calling ``values()`` or ``values_list()`` on a queryset) as the value to an
-``__in`` lookup, you need to ensure you are only extracting one field in the
-result. For example, this will work (filtering on the blog names)::
+If you pass in a ``QuerySet`` resulting from ``values()`` or ``values_list()``
+as the value to an ``__in`` lookup, you need to ensure you are only extracting
+one field in the result. For example, this will work (filtering on the blog
+names)::
 
     inner_qs = Blog.objects.filter(name__contains='Ch').values('name')
     entries = Entry.objects.filter(blog__name__in=inner_qs)

@@ -178,6 +178,8 @@ Miscellaneous
 
 .. _`httplib.responses`: https://docs.python.org/2/library/httplib.html#httplib.responses
 
+* ``ValuesQuerySet`` and ``ValuesListQuerySet`` have been removed.
+
 .. _deprecated-features-1.9:
 
 Features deprecated in 1.9

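For illustration only: after this change both methods return ordinary ``QuerySet`` instances, so code that imported the removed classes for ``isinstance`` checks can test against ``QuerySet`` instead. Assuming the ``Blog`` model from the docs::

    from django.db.models.query import QuerySet

    qs = Blog.objects.values('name')
    isinstance(qs, QuerySet)   # True; ValuesQuerySet no longer exists
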
@@ -511,7 +511,7 @@ class AggregationTests(TestCase):
         # Regression for #14707 -- If you're using a values query set, some potential conflicts are avoided.
 
         # age is a field on Author, so it shouldn't be allowed as an aggregate.
-        # But age isn't included in the ValuesQuerySet, so it is.
+        # But age isn't included in values(), so it is.
         results = Author.objects.values('name').annotate(age=Count('book_contact_set')).order_by('name')
         self.assertEqual(len(results), 9)
         self.assertEqual(results[0]['name'], 'Adrian Holovaty')

@@ -10,7 +10,7 @@ from django.db import DatabaseError
 from django.db.models.fields import Field
 from django.db.models.fields.related import ForeignObjectRel
 from django.db.models.manager import BaseManager
-from django.db.models.query import QuerySet, EmptyQuerySet, ValuesListQuerySet, MAX_GET_RESULTS
+from django.db.models.query import QuerySet, EmptyQuerySet, MAX_GET_RESULTS
 from django.test import TestCase, TransactionTestCase, skipIfDBFeature, skipUnlessDBFeature
 from django.utils import six
 from django.utils.translation import ugettext_lazy

@@ -383,7 +383,6 @@ class ModelTest(TestCase):
         with self.assertNumQueries(0):
             qs = Article.objects.none().values_list('pk')
             self.assertIsInstance(qs, EmptyQuerySet)
-            self.assertIsInstance(qs, ValuesListQuerySet)
             self.assertEqual(len(qs), 0)
 
     def test_emptyqs_customqs(self):

@@ -753,12 +753,12 @@ class Queries1Tests(BaseQuerysetTest):
         # Multi-valued values() and values_list() querysets should raise errors.
         self.assertRaisesMessage(
             TypeError,
-            'Cannot use a multi-field ValuesQuerySet as a filter value.',
+            'Cannot use multi-field values as a filter value.',
             lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values('name', 'id'))
         )
         self.assertRaisesMessage(
             TypeError,
-            'Cannot use a multi-field ValuesListQuerySet as a filter value.',
+            'Cannot use multi-field values as a filter value.',
             lambda: Tag.objects.filter(name__in=Tag.objects.filter(parent=self.t1).values_list('name', 'id'))
         )
 

@@ -1288,13 +1288,12 @@ class Queries3Tests(BaseQuerysetTest):
         )
 
     def test_ticket22023(self):
-        # only() and defer() are not applicable for ValuesQuerySet
-        with self.assertRaisesMessage(NotImplementedError,
-                "ValuesQuerySet does not implement only()"):
+        with self.assertRaisesMessage(TypeError,
+                "Cannot call only() after .values() or .values_list()"):
             Valid.objects.values().only()
 
-        with self.assertRaisesMessage(NotImplementedError,
-                "ValuesQuerySet does not implement defer()"):
+        with self.assertRaisesMessage(TypeError,
+                "Cannot call defer() after .values() or .values_list()"):
             Valid.objects.values().defer()
 

@@ -99,8 +99,7 @@ class PickleabilityTestCase(TestCase):
     def test_specialized_queryset(self):
         self.assert_pickles(Happening.objects.values('name'))
         self.assert_pickles(Happening.objects.values('name').dates('when', 'year'))
-        # ValuesQuerySet with related field (#14515)
+        # With related field (#14515)
         self.assert_pickles(
             Event.objects.select_related('group').order_by('title').values_list('title', 'group__name')
         )