from django.db import backend, connection, transaction
from django.db.models.fields import DateField, FieldDoesNotExist
from django.db.models.fields.generic import GenericRelation
from django.db.models import signals
from django.dispatch import dispatcher
from django.utils.datastructures import SortedDict
import operator
import re

# For Python 2.3
if not hasattr(__builtins__, 'set'):
    from sets import Set as set

# The string constant used to separate query parts
LOOKUP_SEPARATOR = '__'

# The list of valid query types
QUERY_TERMS = (
    'exact', 'iexact', 'contains', 'icontains',
    'gt', 'gte', 'lt', 'lte', 'in',
    'startswith', 'istartswith', 'endswith', 'iendswith',
    'range', 'year', 'month', 'day', 'isnull', 'search',
)
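
# For illustration only: these terms form the suffix of keyword lookups.
# Assuming a hypothetical Article model, typical usage looks like:
#     Article.objects.filter(headline__startswith='Hello')
#     Article.objects.filter(pub_date__year=2006, id__in=[1, 2, 3])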

# Size of each "chunk" for get_iterator calls.
# Larger values are slightly faster at the expense of more storage space.
GET_ITERATOR_CHUNK_SIZE = 100

class EmptyResultSet(Exception):
    pass

####################
# HELPER FUNCTIONS #
####################

# Django currently supports two forms of ordering.
# Form 1 (deprecated) example:
#     order_by=(('pub_date', 'DESC'), ('headline', 'ASC'), (None, 'RANDOM'))
# Form 2 (new-style) example:
#     order_by=('-pub_date', 'headline', '?')
# Form 1 is deprecated and will no longer be supported for Django's first
# official release. The following code converts from Form 1 to Form 2.

LEGACY_ORDERING_MAPPING = {'ASC': '_', 'DESC': '-_', 'RANDOM': '?'}

def handle_legacy_orderlist(order_list):
    if not order_list or isinstance(order_list[0], basestring):
        return order_list
    else:
        import warnings
        new_order_list = [LEGACY_ORDERING_MAPPING[j.upper()].replace('_', str(i)) for i, j in order_list]
        warnings.warn("%r ordering syntax is deprecated. Use %r instead." % (order_list, new_order_list), DeprecationWarning)
        return new_order_list
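
# For example (derived directly from the mapping above), the legacy form
#     (('pub_date', 'DESC'), (None, 'RANDOM'))
# is converted to ['-pub_date', '?'], and a DeprecationWarning is raised.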

def orderfield2column(f, opts):
    try:
        return opts.get_field(f, False).column
    except FieldDoesNotExist:
        return f

def orderlist2sql(order_list, opts, prefix=''):
    if prefix.endswith('.'):
        prefix = backend.quote_name(prefix[:-1]) + '.'
    output = []
    for f in handle_legacy_orderlist(order_list):
        if f.startswith('-'):
            output.append('%s%s DESC' % (prefix, backend.quote_name(orderfield2column(f[1:], opts))))
        elif f == '?':
            output.append(backend.get_random_function_sql())
        else:
            output.append('%s%s ASC' % (prefix, backend.quote_name(orderfield2column(f, opts))))
    return ', '.join(output)

def quote_only_if_word(word):
    if re.search('\W', word): # Don't quote if there are spaces or non-word chars.
        return word
    else:
        return backend.quote_name(word)

class QuerySet(object):
    "Represents a lazy database lookup for a set of objects"

    def __init__(self, model=None):
        self.model = model
        self._filters = Q()
        self._order_by = None         # Ordering, e.g. ('date', '-name'). If None, use model's ordering.
        self._select_related = False  # Whether to fill cache for related objects.
        self._max_related_depth = 0   # Maximum "depth" for select_related
        self._distinct = False        # Whether the query should use SELECT DISTINCT.
        self._select = {}             # Dictionary of attname -> SQL.
        self._where = []              # List of extra WHERE clauses to use.
        self._params = []             # List of params to use for extra WHERE clauses.
        self._tables = []             # List of extra tables to use.
        self._offset = None           # OFFSET clause.
        self._limit = None            # LIMIT clause.
        self._result_cache = None

    ########################
    # PYTHON MAGIC METHODS #
    ########################

    def __repr__(self):
        return repr(self._get_data())

    def __len__(self):
        return len(self._get_data())

    def __iter__(self):
        return iter(self._get_data())

    def __getitem__(self, k):
        "Retrieve an item or slice from the set of results."
        if not isinstance(k, (slice, int)):
            raise TypeError
        assert (not isinstance(k, slice) and (k >= 0)) \
            or (isinstance(k, slice) and (k.start is None or k.start >= 0) and (k.stop is None or k.stop >= 0)), \
            "Negative indexing is not supported."
        if self._result_cache is None:
            if isinstance(k, slice):
                # Offset:
                if self._offset is None:
                    offset = k.start
                elif k.start is None:
                    offset = self._offset
                else:
                    offset = self._offset + k.start

                # Now adjust offset to the bounds of any existing limit:
                if self._limit is not None and k.start is not None:
                    limit = self._limit - k.start
                else:
                    limit = self._limit

                # Limit:
                if k.stop is not None and k.start is not None:
                    if limit is None:
                        limit = k.stop - k.start
                    else:
                        limit = min((k.stop - k.start), limit)
                else:
                    if limit is None:
                        limit = k.stop
                    else:
                        if k.stop is not None:
                            limit = min(k.stop, limit)

                if k.step is None:
                    return self._clone(_offset=offset, _limit=limit)
                else:
                    return list(self._clone(_offset=offset, _limit=limit))[::k.step]
            else:
                try:
                    return list(self._clone(_offset=k, _limit=1))[0]
                except self.model.DoesNotExist, e:
                    raise IndexError, e.args
        else:
            return self._result_cache[k]
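
    # A sketch of how the offset/limit arithmetic above composes (the values
    # follow directly from the code; the QuerySet instances are hypothetical):
    #     qs[10:30]      -> clone with _offset=10, _limit=20
    #     qs[10:30][5:]  -> clone with _offset=15, _limit=15
    #     qs[3]          -> list(qs._clone(_offset=3, _limit=1))[0]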

    def __and__(self, other):
        combined = self._combine(other)
        combined._filters = self._filters & other._filters
        return combined

    def __or__(self, other):
        combined = self._combine(other)
        combined._filters = self._filters | other._filters
        return combined

    ####################################
    # METHODS THAT DO DATABASE QUERIES #
    ####################################

    def iterator(self):
        "Performs the SELECT database lookup of this QuerySet."
        try:
            select, sql, params = self._get_sql_clause()
        except EmptyResultSet:
            raise StopIteration

        # self._select is a dictionary, and dictionaries' key order is
        # undefined, so we convert it to a list of tuples.
        extra_select = self._select.items()

        cursor = connection.cursor()
        cursor.execute("SELECT " + (self._distinct and "DISTINCT " or "") + ",".join(select) + sql, params)
        fill_cache = self._select_related
        index_end = len(self.model._meta.fields)
        while 1:
            rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
            if not rows:
                raise StopIteration
            for row in rows:
                if fill_cache:
                    obj, index_end = get_cached_row(klass=self.model, row=row,
                                                    index_start=0, max_depth=self._max_related_depth)
                else:
                    obj = self.model(*row[:index_end])
                for i, k in enumerate(extra_select):
                    setattr(obj, k[0], row[index_end+i])
                yield obj

    def count(self):
        """
        Performs a SELECT COUNT() and returns the number of records as an
        integer.

        If the queryset is already cached (i.e. self._result_cache is set),
        this simply returns the length of the cached result set, to avoid
        multiple SELECT COUNT(*) calls.
        """
        if self._result_cache is not None:
            return len(self._result_cache)

        counter = self._clone()
        counter._order_by = ()
        counter._select_related = False

        offset = counter._offset
        limit = counter._limit
        counter._offset = None
        counter._limit = None

        try:
            select, sql, params = counter._get_sql_clause()
        except EmptyResultSet:
            return 0

        cursor = connection.cursor()
        if self._distinct:
            id_col = "%s.%s" % (backend.quote_name(self.model._meta.db_table),
                                backend.quote_name(self.model._meta.pk.column))
            cursor.execute("SELECT COUNT(DISTINCT(%s))" % id_col + sql, params)
        else:
            cursor.execute("SELECT COUNT(*)" + sql, params)
        count = cursor.fetchone()[0]

        # Apply any offset and limit constraints manually, since using LIMIT or
        # OFFSET in SQL doesn't change the output of COUNT.
        if offset:
            count = max(0, count - offset)
        if limit:
            count = min(limit, count)

        return count
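
    # For example, if the underlying query matches 12 rows, qs[4:10].count()
    # runs a plain SELECT COUNT(*) (returning 12) and then applies the slice
    # by hand: max(0, 12 - 4) == 8, then min(6, 8) == 6.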

    def get(self, *args, **kwargs):
        "Performs the SELECT and returns a single object matching the given keyword arguments."
        clone = self.filter(*args, **kwargs)
        # Clean up SQL by removing unneeded ORDER BY.
        if not clone._order_by:
            clone._order_by = ()
        obj_list = list(clone)
        if len(obj_list) < 1:
            raise self.model.DoesNotExist, "%s matching query does not exist." % self.model._meta.object_name
        assert len(obj_list) == 1, "get() returned more than one %s -- it returned %s! Lookup parameters were %s" % (self.model._meta.object_name, len(obj_list), kwargs)
        return obj_list[0]

    def create(self, **kwargs):
        """
        Creates a new object with the given kwargs, saving it to the database
        and returning the created object.
        """
        obj = self.model(**kwargs)
        obj.save()
        return obj

    def get_or_create(self, **kwargs):
        """
        Looks up an object with the given kwargs, creating one if necessary.
        Returns a tuple of (object, created), where created is a boolean
        specifying whether an object was created.
        """
        assert len(kwargs), 'get_or_create() must be passed at least one keyword argument'
        defaults = kwargs.pop('defaults', {})
        try:
            return self.get(**kwargs), False
        except self.model.DoesNotExist:
            params = dict([(k, v) for k, v in kwargs.items() if '__' not in k])
            params.update(defaults)
            obj = self.model(**params)
            obj.save()
            return obj, True
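
    # Usage sketch (Person is a hypothetical model; 'defaults' supplies values
    # used only when a new row has to be created):
    #     obj, created = Person.objects.get_or_create(
    #         name='Terry', defaults={'age': 42})
    # Lookup-only kwargs containing '__' are excluded from the creation
    # params by the filtering above.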

    def latest(self, field_name=None):
        """
        Returns the latest object, according to the model's 'get_latest_by'
        option or the optional given field_name.
        """
        latest_by = field_name or self.model._meta.get_latest_by
        assert bool(latest_by), "latest() requires either a field_name parameter or 'get_latest_by' in the model"
        assert self._limit is None and self._offset is None, \
            "Cannot change a query once a slice has been taken."
        return self._clone(_limit=1, _order_by=('-'+latest_by,)).get()

    def in_bulk(self, id_list):
        """
        Returns a dictionary mapping each of the given IDs to the object with
        that ID.
        """
        assert self._limit is None and self._offset is None, \
            "Cannot use 'limit' or 'offset' with in_bulk"
        assert isinstance(id_list, (tuple, list)), "in_bulk() must be provided with a list of IDs."
        id_list = list(id_list)
        if id_list == []:
            return {}
        qs = self._clone()
        qs._where.append("%s.%s IN (%s)" % (backend.quote_name(self.model._meta.db_table), backend.quote_name(self.model._meta.pk.column), ",".join(['%s'] * len(id_list))))
        qs._params.extend(id_list)
        return dict([(obj._get_pk_val(), obj) for obj in qs.iterator()])
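
    # Usage sketch (Article is a hypothetical model):
    #     Article.objects.in_bulk([1, 2])
    #     -> {1: <Article: ...>, 2: <Article: ...>}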

    def delete(self):
        """
        Deletes the records in the current QuerySet.
        """
        assert self._limit is None and self._offset is None, \
            "Cannot use 'limit' or 'offset' with delete."

        del_query = self._clone()

        # Disable non-supported fields.
        del_query._select_related = False
        del_query._order_by = []

        # Delete objects in chunks to prevent the list of related objects
        # from becoming too long.
        more_objects = True
        while more_objects:
            # Collect all the objects to be deleted in this chunk, and all the
            # objects that are related to the objects that are to be deleted.
            seen_objs = SortedDict()
            more_objects = False
            for object in del_query[0:GET_ITERATOR_CHUNK_SIZE]:
                more_objects = True
                object._collect_sub_objects(seen_objs)

            # If one or more objects were found, delete them.
            # Otherwise, stop looping.
            if more_objects:
                delete_objects(seen_objs)

        # Clear the result cache, in case this QuerySet gets reused.
        self._result_cache = None
    delete.alters_data = True

    ##################################################
    # PUBLIC METHODS THAT RETURN A QUERYSET SUBCLASS #
    ##################################################

    def values(self, *fields):
        return self._clone(klass=ValuesQuerySet, _fields=fields)

    def dates(self, field_name, kind, order='ASC'):
        """
        Returns a list of datetime objects representing all available dates
        for the given field_name, scoped to 'kind'.
        """
        assert kind in ("month", "year", "day"), "'kind' must be one of 'year', 'month' or 'day'."
        assert order in ('ASC', 'DESC'), "'order' must be either 'ASC' or 'DESC'."
        # Let the FieldDoesNotExist exception propagate.
        field = self.model._meta.get_field(field_name, many_to_many=False)
        assert isinstance(field, DateField), "%r isn't a DateField." % field_name
        return self._clone(klass=DateQuerySet, _field=field, _kind=kind, _order=order)
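
    # Usage sketch (Article is a hypothetical model with a 'pub_date' field):
    #     Article.objects.dates('pub_date', 'year', order='DESC')
    # returns a DateQuerySet yielding one datetime per distinct year.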

    ##################################################################
    # PUBLIC METHODS THAT ALTER ATTRIBUTES AND RETURN A NEW QUERYSET #
    ##################################################################

    def filter(self, *args, **kwargs):
        "Returns a new QuerySet instance with the args ANDed to the existing set."
        return self._filter_or_exclude(None, *args, **kwargs)

    def exclude(self, *args, **kwargs):
        "Returns a new QuerySet instance with NOT (args) ANDed to the existing set."
        return self._filter_or_exclude(QNot, *args, **kwargs)

    def _filter_or_exclude(self, mapper, *args, **kwargs):
        # mapper is a callable used to transform Q objects,
        # or None for identity transform.
        if mapper is None:
            mapper = lambda x: x
        if len(args) > 0 or len(kwargs) > 0:
            assert self._limit is None and self._offset is None, \
                "Cannot filter a query once a slice has been taken."

        clone = self._clone()
        if len(kwargs) > 0:
            clone._filters = clone._filters & mapper(Q(**kwargs))
        if len(args) > 0:
            clone._filters = clone._filters & reduce(operator.and_, map(mapper, args))
        return clone

    def complex_filter(self, filter_obj):
        """Returns a new QuerySet instance with filter_obj added to the filters.
        filter_obj can be a Q object (has a 'get_sql' method) or a dictionary
        of keyword lookup arguments."""
        # This exists to support framework features such as 'limit_choices_to',
        # and usually it will be more natural to use other methods.
        if hasattr(filter_obj, 'get_sql'):
            return self._filter_or_exclude(None, filter_obj)
        else:
            return self._filter_or_exclude(None, **filter_obj)

    def select_related(self, true_or_false=True, depth=0):
        "Returns a new QuerySet instance with '_select_related' modified."
        return self._clone(_select_related=true_or_false, _max_related_depth=depth)

    def order_by(self, *field_names):
        "Returns a new QuerySet instance with the ordering changed."
        assert self._limit is None and self._offset is None, \
            "Cannot reorder a query once a slice has been taken."
        return self._clone(_order_by=field_names)

    def distinct(self, true_or_false=True):
        "Returns a new QuerySet instance with '_distinct' modified."
        return self._clone(_distinct=true_or_false)

    def extra(self, select=None, where=None, params=None, tables=None):
        assert self._limit is None and self._offset is None, \
            "Cannot change a query once a slice has been taken"
        clone = self._clone()
        if select: clone._select.update(select)
        if where: clone._where.extend(where)
        if params: clone._params.extend(params)
        if tables: clone._tables.extend(tables)
        return clone
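
    # Usage sketch (table and column names are hypothetical; 'select' entries
    # become extra attributes on each returned object):
    #     qs.extra(select={'is_recent': "pub_date > '2006-01-01'"},
    #              where=['id IN (3, 4, 5)'])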

    ###################
    # PRIVATE METHODS #
    ###################

    def _clone(self, klass=None, **kwargs):
        if klass is None:
            klass = self.__class__
        c = klass()
        c.model = self.model
        c._filters = self._filters
        c._order_by = self._order_by
        c._select_related = self._select_related
        c._max_related_depth = self._max_related_depth
        c._distinct = self._distinct
        c._select = self._select.copy()
        c._where = self._where[:]
        c._params = self._params[:]
        c._tables = self._tables[:]
        c._offset = self._offset
        c._limit = self._limit
        c.__dict__.update(kwargs)
        return c

    def _combine(self, other):
        assert self._limit is None and self._offset is None \
            and other._limit is None and other._offset is None, \
            "Cannot combine queries once a slice has been taken."
        assert self._distinct == other._distinct, \
            "Cannot combine a unique query with a non-unique query"
        # Use 'other's order-by
        # (so that A.filter(args1) & A.filter(args2) does the same as
        # A.filter(args1).filter(args2)).
        combined = other._clone()
        if self._select: combined._select.update(self._select)
        if self._where: combined._where.extend(self._where)
        if self._params: combined._params.extend(self._params)
        if self._tables: combined._tables.extend(self._tables)
        # If 'self' is ordered and 'other' isn't, propagate 'self's ordering.
        if (self._order_by is not None and len(self._order_by) > 0) and \
           (combined._order_by is None or len(combined._order_by) == 0):
            combined._order_by = self._order_by
        return combined

    def _get_data(self):
        if self._result_cache is None:
            self._result_cache = list(self.iterator())
        return self._result_cache

    def _get_sql_clause(self):
        opts = self.model._meta

        # Construct the fundamental parts of the query: SELECT X FROM Y WHERE Z.
        select = ["%s.%s" % (backend.quote_name(opts.db_table), backend.quote_name(f.column)) for f in opts.fields]
        tables = [quote_only_if_word(t) for t in self._tables]
        joins = SortedDict()
        where = self._where[:]
        params = self._params[:]

        # Convert self._filters into SQL.
        joins2, where2, params2 = self._filters.get_sql(opts)
        joins.update(joins2)
        where.extend(where2)
        params.extend(params2)

        # Add additional tables and WHERE clauses based on select_related.
        if self._select_related:
            fill_table_cache(opts, select, tables, where,
                             old_prefix=opts.db_table,
                             cache_tables_seen=[opts.db_table],
                             max_depth=self._max_related_depth)

        # Add any additional SELECTs.
        if self._select:
            select.extend(['(%s) AS %s' % (quote_only_if_word(s[1]), backend.quote_name(s[0])) for s in self._select.items()])

        # Start composing the body of the SQL statement.
        sql = [" FROM", backend.quote_name(opts.db_table)]

        # Compose the join dictionary into SQL describing the joins.
        if joins:
            sql.append(" ".join(["%s %s AS %s ON %s" % (join_type, table, alias, condition)
                                 for (alias, (table, join_type, condition)) in joins.items()]))

        # Compose the tables clause into SQL.
        if tables:
            sql.append(", " + ", ".join(tables))

        # Compose the where clause into SQL.
        if where:
            sql.append(where and "WHERE " + " AND ".join(where))

        # ORDER BY clause
        order_by = []
        if self._order_by is not None:
            ordering_to_use = self._order_by
        else:
            ordering_to_use = opts.ordering
        for f in handle_legacy_orderlist(ordering_to_use):
            if f == '?': # Special case.
                order_by.append(backend.get_random_function_sql())
            else:
                if f.startswith('-'):
                    col_name = f[1:]
                    order = "DESC"
                else:
                    col_name = f
                    order = "ASC"
                if "." in col_name:
                    table_prefix, col_name = col_name.split('.', 1)
                    table_prefix = backend.quote_name(table_prefix) + '.'
                else:
                    # Use the database table as a column prefix if it wasn't given,
                    # and if the requested column isn't a custom SELECT.
                    if "." not in col_name and col_name not in (self._select or ()):
                        table_prefix = backend.quote_name(opts.db_table) + '.'
                    else:
                        table_prefix = ''
                order_by.append('%s%s %s' % (table_prefix, backend.quote_name(orderfield2column(col_name, opts)), order))
        if order_by:
            sql.append("ORDER BY " + ", ".join(order_by))

        # LIMIT and OFFSET clauses
        if self._limit is not None:
            sql.append("%s " % backend.get_limit_offset_sql(self._limit, self._offset))
        else:
            assert self._offset is None, "'offset' is not allowed without 'limit'"

        return select, " ".join(sql), params

class ValuesQuerySet(QuerySet):
    def __init__(self, *args, **kwargs):
        super(ValuesQuerySet, self).__init__(*args, **kwargs)
        # select_related and select aren't supported in values().
        self._select_related = False
        self._select = {}

    def iterator(self):
        try:
            select, sql, params = self._get_sql_clause()
        except EmptyResultSet:
            raise StopIteration

        # self._fields is a list of field names to fetch.
        if self._fields:
            columns = [self.model._meta.get_field(f, many_to_many=False).column for f in self._fields]
            field_names = self._fields
        else: # Default to all fields.
            columns = [f.column for f in self.model._meta.fields]
            field_names = [f.attname for f in self.model._meta.fields]

        select = ['%s.%s' % (backend.quote_name(self.model._meta.db_table), backend.quote_name(c)) for c in columns]
        cursor = connection.cursor()
        cursor.execute("SELECT " + (self._distinct and "DISTINCT " or "") + ",".join(select) + sql, params)
        while 1:
            rows = cursor.fetchmany(GET_ITERATOR_CHUNK_SIZE)
            if not rows:
                raise StopIteration
            for row in rows:
                yield dict(zip(field_names, row))

    def _clone(self, klass=None, **kwargs):
        c = super(ValuesQuerySet, self)._clone(klass, **kwargs)
        c._fields = self._fields[:]
        return c

class DateQuerySet(QuerySet):
    def iterator(self):
        from django.db.backends.util import typecast_timestamp
        self._order_by = () # Clear this because it'll mess things up otherwise.
        if self._field.null:
            self._where.append('%s.%s IS NOT NULL' % \
                (backend.quote_name(self.model._meta.db_table), backend.quote_name(self._field.column)))

        try:
            select, sql, params = self._get_sql_clause()
        except EmptyResultSet:
            raise StopIteration

        sql = 'SELECT %s %s GROUP BY 1 ORDER BY 1 %s' % \
            (backend.get_date_trunc_sql(self._kind, '%s.%s' % (backend.quote_name(self.model._meta.db_table),
            backend.quote_name(self._field.column))), sql, self._order)
        cursor = connection.cursor()
        cursor.execute(sql, params)
        # We have to manually run typecast_timestamp(str()) on the results, because
        # MySQL doesn't automatically cast the result of date functions as datetime
        # objects -- MySQL returns the values as strings, instead.
        return [typecast_timestamp(str(row[0])) for row in cursor.fetchall()]

    def _clone(self, klass=None, **kwargs):
        c = super(DateQuerySet, self)._clone(klass, **kwargs)
        c._field = self._field
        c._kind = self._kind
        c._order = self._order
        return c

class EmptyQuerySet(QuerySet):
    def __init__(self, model=None):
        super(EmptyQuerySet, self).__init__(model)
        self._result_cache = []

    def count(self):
        return 0

    def delete(self):
        pass

    def _clone(self, klass=None, **kwargs):
        c = super(EmptyQuerySet, self)._clone(klass, **kwargs)
        c._result_cache = []
        return c

    def _get_sql_clause(self):
        raise EmptyResultSet

class QOperator(object):
    "Base class for QAnd and QOr"
    def __init__(self, *args):
        self.args = args

    def get_sql(self, opts):
        joins, where, params = SortedDict(), [], []
        for val in self.args:
            try:
                joins2, where2, params2 = val.get_sql(opts)
                joins.update(joins2)
                where.extend(where2)
                params.extend(params2)
            except EmptyResultSet:
                if not isinstance(self, QOr):
                    raise EmptyResultSet
        if where:
            return joins, ['(%s)' % self.operator.join(where)], params
        return joins, [], params

class QAnd(QOperator):
    "Encapsulates a combined query that uses 'AND'."
    operator = ' AND '
    def __or__(self, other):
        return QOr(self, other)

    def __and__(self, other):
        if isinstance(other, QAnd):
            return QAnd(*(self.args + other.args))
        elif isinstance(other, (Q, QOr)):
            return QAnd(*(self.args + (other,)))
        else:
            raise TypeError, other

class QOr(QOperator):
    "Encapsulates a combined query that uses 'OR'."
    operator = ' OR '
    def __and__(self, other):
        return QAnd(self, other)

    def __or__(self, other):
        if isinstance(other, QOr):
            return QOr(*(self.args + other.args))
        elif isinstance(other, (Q, QAnd)):
            return QOr(*(self.args + (other,)))
        else:
            raise TypeError, other

class Q(object):
    "Encapsulates queries as objects that can be combined logically."
    def __init__(self, **kwargs):
        self.kwargs = kwargs

    def __and__(self, other):
        return QAnd(self, other)

    def __or__(self, other):
        return QOr(self, other)

    def get_sql(self, opts):
        return parse_lookup(self.kwargs.items(), opts)
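
# For example (field names are hypothetical), Q objects combine through the
# operators above:
#     Q(headline__startswith='Who') | Q(headline__startswith='What')
# yields a QOr instance, and ANDing the result with another Q yields a QAnd;
# get_sql() then merges the joins/where/params of each piece.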

class QNot(Q):
    "Encapsulates NOT (...) queries as objects"
    def __init__(self, q):
        "Creates a negation of the q object passed in."
        self.q = q

    def get_sql(self, opts):
        try:
            joins, where, params = self.q.get_sql(opts)
            where2 = ['(NOT (%s))' % " AND ".join(where)]
        except EmptyResultSet:
            return SortedDict(), [], []
        return joins, where2, params

def get_where_clause(lookup_type, table_prefix, field_name, value):
    if table_prefix.endswith('.'):
        table_prefix = backend.quote_name(table_prefix[:-1]) + '.'
    field_name = backend.quote_name(field_name)
    try:
        return '%s%s %s' % (table_prefix, field_name, (backend.OPERATOR_MAPPING[lookup_type] % '%s'))
    except KeyError:
        pass
    if lookup_type == 'in':
        in_string = ','.join(['%s' for id in value])
        if in_string:
            return '%s%s IN (%s)' % (table_prefix, field_name, in_string)
        else:
            raise EmptyResultSet
    elif lookup_type in ('range', 'year'):
        return '%s%s BETWEEN %%s AND %%s' % (table_prefix, field_name)
    elif lookup_type in ('month', 'day'):
        return "%s = %%s" % backend.get_date_extract_sql(lookup_type, table_prefix + field_name)
    elif lookup_type == 'isnull':
        return "%s%s IS %sNULL" % (table_prefix, field_name, (not value and 'NOT ' or ''))
    elif lookup_type == 'search':
        return backend.get_fulltext_search_sql(table_prefix + field_name)
    raise TypeError, "Got invalid lookup_type: %s" % repr(lookup_type)
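
# For example (quoting is backend-specific; names are hypothetical):
#     get_where_clause('year', 'myapp_article.', 'pub_date', 2006)
# returns '"myapp_article"."pub_date" BETWEEN %s AND %s'; the two bound
# parameters are supplied separately via the field's get_db_prep_lookup().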

def get_cached_row(klass, row, index_start, max_depth=0, cur_depth=0):
    """Helper function that recursively returns an object with cache filled"""

    # If we've got a max_depth set and we've exceeded that depth, bail now.
    if max_depth and cur_depth > max_depth:
        return None

    index_end = index_start + len(klass._meta.fields)
    obj = klass(*row[index_start:index_end])
    for f in klass._meta.fields:
        if f.rel and not f.null:
            cached_row = get_cached_row(f.rel.to, row, index_end, max_depth, cur_depth+1)
            if cached_row:
                rel_obj, index_end = cached_row
                setattr(obj, f.get_cache_name(), rel_obj)
    return obj, index_end

def fill_table_cache(opts, select, tables, where, old_prefix, cache_tables_seen, max_depth=0, cur_depth=0):
    """
    Helper function that recursively populates the select, tables and where (in
    place) for select_related queries.
    """

    # If we've got a max_depth set and we've exceeded that depth, bail now.
    if max_depth and cur_depth > max_depth:
        return None

    qn = backend.quote_name
    for f in opts.fields:
        if f.rel and not f.null:
            db_table = f.rel.to._meta.db_table
            if db_table not in cache_tables_seen:
                tables.append(qn(db_table))
            else: # The table was already seen, so give it a table alias.
                new_prefix = '%s%s' % (db_table, len(cache_tables_seen))
                tables.append('%s %s' % (qn(db_table), qn(new_prefix)))
                db_table = new_prefix
            cache_tables_seen.append(db_table)
            where.append('%s.%s = %s.%s' % \
                (qn(old_prefix), qn(f.column), qn(db_table), qn(f.rel.get_related_field().column)))
            select.extend(['%s.%s' % (qn(db_table), qn(f2.column)) for f2 in f.rel.to._meta.fields])
            fill_table_cache(f.rel.to._meta, select, tables, where, db_table, cache_tables_seen, max_depth, cur_depth+1)

def parse_lookup(kwarg_items, opts):
    # Helper function that handles converting API kwargs
    # (e.g. "name__exact": "tom") to SQL.
    # Returns a tuple of (joins, where, params).

    # 'joins' is a sorted dictionary describing the tables that must be joined
    # to complete the query. The dictionary is sorted because creation order
    # is significant; it is a dictionary to ensure uniqueness of alias names.
    #
    # Each key-value pair follows the form
    #     alias: (table, join_type, condition)
    # where
    #     alias is the AS alias for the joined table
    #     table is the actual table name to be joined
    #     join_type is the type of join (INNER JOIN, LEFT OUTER JOIN, etc)
    #     condition is the where-like statement that narrows the join
    #     alias will be derived from the lookup list name
    #
    # At present, this method only ever returns INNER JOINs; the option is
    # there for others to implement custom Q()s, etc, that return other join
    # types.
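    #
    # For example, a hypothetical lookup such as choice__name='x' against a
    # Poll model could add an entry like (PostgreSQL-style quoting shown):
    #     '"polls_poll__choice"': ('"polls_choice"', 'INNER JOIN',
    #         '"polls_poll"."id" = "polls_poll__choice"."poll_id"')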

    joins, where, params = SortedDict(), [], []

    for kwarg, value in kwarg_items:
        path = kwarg.split(LOOKUP_SEPARATOR)

        # Extract the last elements of the kwarg.
        # The very-last is the lookup_type (equals, like, etc).
        # The second-last is the table column on which the lookup_type is
        # to be performed. If this name is 'pk', it will be substituted with
        # the name of the primary key.
        # If there is only one part, or the last part is not a query
        # term, assume that the query is an __exact.
        lookup_type = path.pop()
        if lookup_type == 'pk':
            lookup_type = 'exact'
            path.append(None)
        elif len(path) == 0 or lookup_type not in QUERY_TERMS:
            path.append(lookup_type)
            lookup_type = 'exact'

        if len(path) < 1:
            raise TypeError, "Cannot parse keyword query %r" % kwarg

        if value is None:
            # Interpret '__exact=None' as the sql '= NULL'; otherwise, reject
            # all uses of None as a query value.
            if lookup_type != 'exact':
                raise ValueError, "Cannot use None as a query value"
        elif callable(value):
            value = value()

        joins2, where2, params2 = lookup_inner(path, lookup_type, value, opts, opts.db_table, None)
        joins.update(joins2)
        where.extend(where2)
        params.extend(params2)
    return joins, where, params

class FieldFound(Exception):
    "Exception used to short circuit field-finding operations."
    pass

def find_field(name, field_list, related_query):
    """
    Finds a field with a specific name in a list of field instances.
    Returns None if there are no matches, or several matches.
    """
    if related_query:
        matches = [f for f in field_list if f.field.related_query_name() == name]
    else:
        matches = [f for f in field_list if f.name == name]
    if len(matches) != 1:
        return None
    return matches[0]

def field_choices(field_list, related_query):
    if related_query:
        choices = [f.field.related_query_name() for f in field_list]
    else:
        choices = [f.name for f in field_list]
    return choices

def lookup_inner(path, lookup_type, value, opts, table, column):
    qn = backend.quote_name
    joins, where, params = SortedDict(), [], []
    current_opts = opts
    current_table = table
    current_column = column
    intermediate_table = None
    join_required = False

    name = path.pop(0)
    # Has the primary key been requested? If so, expand it out
    # to be the name of the current class' primary key.
    if name is None or name == 'pk':
        name = current_opts.pk.name

    # Try to find the name in the fields associated with the current class.
    try:
        # Does the name belong to a defined many-to-many field?
        field = find_field(name, current_opts.many_to_many, False)
        if field:
            new_table = current_table + '__' + name
            new_opts = field.rel.to._meta
            new_column = new_opts.pk.column

            # Need to create an intermediate table join over the m2m table.
            # This process hijacks current_table/column to point to the
            # intermediate table.
            current_table = "m2m_" + new_table
            intermediate_table = field.m2m_db_table()
            join_column = field.m2m_reverse_name()
            intermediate_column = field.m2m_column_name()

            raise FieldFound

        # Does the name belong to a reverse defined many-to-many field?
        field = find_field(name, current_opts.get_all_related_many_to_many_objects(), True)
        if field:
            new_table = current_table + '__' + name
            new_opts = field.opts
            new_column = new_opts.pk.column

            # Need to create an intermediate table join over the m2m table.
            # This process hijacks current_table/column to point to the
            # intermediate table.
            current_table = "m2m_" + new_table
            intermediate_table = field.field.m2m_db_table()
            join_column = field.field.m2m_column_name()
            intermediate_column = field.field.m2m_reverse_name()

            raise FieldFound

        # Does the name belong to a one-to-many field?
        field = find_field(name, current_opts.get_all_related_objects(), True)
        if field:
            new_table = table + '__' + name
            new_opts = field.opts
            new_column = field.field.column
            join_column = opts.pk.column

            # 1-N fields MUST be joined, regardless of any other conditions.
            join_required = True

            raise FieldFound

        # Does the name belong to a one-to-one, many-to-one, or regular field?
        field = find_field(name, current_opts.fields, False)
        if field:
            if field.rel: # One-to-One/Many-to-one field
                new_table = current_table + '__' + name
                new_opts = field.rel.to._meta
                new_column = new_opts.pk.column
                join_column = field.column
                raise FieldFound
            elif path:
                # For regular fields, if there are still items on the path,
                # an error has been made. We munge "name" so that the error
                # properly identifies the cause of the problem.
                name += LOOKUP_SEPARATOR + path[0]
            else:
                raise FieldFound

    except FieldFound: # Match found, loop has been shortcut.
        pass
    else: # No match found.
        choices = field_choices(current_opts.many_to_many, False) + \
            field_choices(current_opts.get_all_related_many_to_many_objects(), True) + \
            field_choices(current_opts.get_all_related_objects(), True) + \
            field_choices(current_opts.fields, False)
        raise TypeError, "Cannot resolve keyword '%s' into field, choices are: %s" % (name, ", ".join(choices))

    # Check whether an intermediate join is required between current_table
    # and new_table.
    if intermediate_table:
        joins[qn(current_table)] = (
            qn(intermediate_table), "LEFT OUTER JOIN",
            "%s.%s = %s.%s" % (qn(table), qn(current_opts.pk.column), qn(current_table), qn(intermediate_column))
        )

    if path:
        # There are elements left in the path. More joins are required.
        if len(path) == 1 and path[0] in (new_opts.pk.name, None) \
            and lookup_type in ('exact', 'isnull') and not join_required:
            # If the next and final name query is for a primary key,
            # and the search is for isnull/exact, then the current
            # (for N-1) or intermediate (for N-N) table can be used
            # for the search. No need to join an extra table just
            # to check the primary key.
            new_table = current_table
        else:
            # There are 1 or more name queries pending, and we have ruled out
            # any shortcuts; therefore, a join is required.
            joins[qn(new_table)] = (
                qn(new_opts.db_table), "INNER JOIN",
                "%s.%s = %s.%s" % (qn(current_table), qn(join_column), qn(new_table), qn(new_column))
            )

            # If we have made the join, we don't need to tell subsequent
            # recursive calls about the column name we joined on.
            join_column = None

        # There are name queries remaining. Recurse deeper.
        joins2, where2, params2 = lookup_inner(path, lookup_type, value, new_opts, new_table, join_column)

        joins.update(joins2)
        where.extend(where2)
        params.extend(params2)
    else:
        # No elements left in path. Current element is the element on which
        # the search is being performed.
        if join_required:
            # Last query term is a RelatedObject.
            if field.field.rel.multiple:
                # RelatedObject is from a 1-N relation.
                # Join is required; query operates on joined table.
                column = new_opts.pk.name
                joins[qn(new_table)] = (
                    qn(new_opts.db_table), "INNER JOIN",
                    "%s.%s = %s.%s" % (qn(current_table), qn(join_column), qn(new_table), qn(new_column))
                )
                current_table = new_table
            else:
                # RelatedObject is from a 1-1 relation;
                # no need to join. Get the pk value from the related object,
                # and compare using that.
                column = current_opts.pk.name
        elif intermediate_table:
            # Last query term is a related object from an N-N relation.
            # Join from intermediate table is sufficient.
            column = join_column
        elif name == current_opts.pk.name and lookup_type in ('exact', 'isnull') and current_column:
            # Last query term is for a primary key. If previous iterations
            # introduced a current/intermediate table that can be used to
            # optimize the query, then use that table and column name.
            column = current_column
        else:
            # Last query term was a normal field.
            column = field.column

        where.append(get_where_clause(lookup_type, current_table + '.', column, value))
        params.extend(field.get_db_prep_lookup(lookup_type, value))

    return joins, where, params

def delete_objects(seen_objs):
    "Iterate through a list of seen classes, and remove any instances that are referred to"
    qn = backend.quote_name
    ordered_classes = seen_objs.keys()
    ordered_classes.reverse()

    cursor = connection.cursor()

    for cls in ordered_classes:
        seen_objs[cls] = seen_objs[cls].items()
        seen_objs[cls].sort()

        # Pre-notify all instances to be deleted.
        for pk_val, instance in seen_objs[cls]:
            dispatcher.send(signal=signals.pre_delete, sender=cls, instance=instance)

        pk_list = [pk for pk, instance in seen_objs[cls]]
        for related in cls._meta.get_all_related_many_to_many_objects():
            if not isinstance(related.field, GenericRelation):
                for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                    cursor.execute("DELETE FROM %s WHERE %s IN (%s)" % \
                        (qn(related.field.m2m_db_table()),
                            qn(related.field.m2m_reverse_name()),
                            ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                        pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])
        for f in cls._meta.many_to_many:
            if isinstance(f, GenericRelation):
                from django.contrib.contenttypes.models import ContentType
                query_extra = ' AND %s = %%s' % f.rel.to._meta.get_field(f.content_type_field_name).column
                args_extra = [ContentType.objects.get_for_model(cls).id]
            else:
                query_extra = ''
                args_extra = []
            for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                cursor.execute(("DELETE FROM %s WHERE %s IN (%s)" % \
                    (qn(f.m2m_db_table()), qn(f.m2m_column_name()),
                    ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]]))) + query_extra,
                    pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE] + args_extra)
        for field in cls._meta.fields:
            if field.rel and field.null and field.rel.to in seen_objs:
                for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
                    cursor.execute("UPDATE %s SET %s=NULL WHERE %s IN (%s)" % \
                        (qn(cls._meta.db_table), qn(field.column), qn(cls._meta.pk.column),
                        ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                        pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])

    # Now delete the actual data.
    for cls in ordered_classes:
        seen_objs[cls].reverse()
        pk_list = [pk for pk, instance in seen_objs[cls]]
        for offset in range(0, len(pk_list), GET_ITERATOR_CHUNK_SIZE):
            cursor.execute("DELETE FROM %s WHERE %s IN (%s)" % \
                (qn(cls._meta.db_table), qn(cls._meta.pk.column),
                ','.join(['%s' for pk in pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE]])),
                pk_list[offset:offset+GET_ITERATOR_CHUNK_SIZE])

        # Last cleanup; set NULLs where there once was a reference to the object,
        # NULL the primary key of the found objects, and perform post-notification.
        for pk_val, instance in seen_objs[cls]:
            for field in cls._meta.fields:
                if field.rel and field.null and field.rel.to in seen_objs:
                    setattr(instance, field.attname, None)

            setattr(instance, cls._meta.pk.attname, None)
            dispatcher.send(signal=signals.post_delete, sender=cls, instance=instance)

    transaction.commit_unless_managed()