from __future__ import unicode_literals

from django.conf import settings
from django.db.backends import BaseDatabaseOperations


class DatabaseOperations(BaseDatabaseOperations):
    def __init__(self, connection):
        super(DatabaseOperations, self).__init__(connection)

    def date_extract_sql(self, lookup_type, field_name):
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            return "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            return "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)

    def date_interval_sql(self, sql, connector, timedelta):
        """
        Implements the interval functionality for expressions.
        The format for PostgreSQL is:
        (datefield + interval '3 days 200 seconds 5 microseconds')
        """
        modifiers = []
        if timedelta.days:
            modifiers.append('%s days' % timedelta.days)
        if timedelta.seconds:
            modifiers.append('%s seconds' % timedelta.seconds)
        if timedelta.microseconds:
            modifiers.append('%s microseconds' % timedelta.microseconds)
        mods = ' '.join(modifiers)
        conn = ' %s ' % connector
        return '(%s)' % conn.join([sql, 'interval \'%s\'' % mods])
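
    # Illustrative sketch, not part of the original module: with a hypothetical
    # column "due_date" and datetime.timedelta(days=3, seconds=200), this would
    # produce roughly
    #   ("due_date" + interval '3 days 200 seconds')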

    def date_trunc_sql(self, lookup_type, field_name):
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        return "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)

    def datetime_extract_sql(self, lookup_type, field_name, tzname):
        if settings.USE_TZ:
            field_name = "%s AT TIME ZONE %%s" % field_name
            params = [tzname]
        else:
            params = []
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-EXTRACT
        if lookup_type == 'week_day':
            # For consistency across backends, we return Sunday=1, Saturday=7.
            sql = "EXTRACT('dow' FROM %s) + 1" % field_name
        else:
            sql = "EXTRACT('%s' FROM %s)" % (lookup_type, field_name)
        return sql, params
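
    # Illustrative sketch, not part of the original module: with USE_TZ = True,
    # a hypothetical column "created" and tzname 'Europe/Paris', an 'hour'
    # lookup returns roughly
    #   sql    = EXTRACT('hour' FROM "created" AT TIME ZONE %s)
    #   params = ['Europe/Paris']
    # i.e. the timezone name is passed back as a query parameter, not inlined.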

    def datetime_trunc_sql(self, lookup_type, field_name, tzname):
        if settings.USE_TZ:
            field_name = "%s AT TIME ZONE %%s" % field_name
            params = [tzname]
        else:
            params = []
        # http://www.postgresql.org/docs/current/static/functions-datetime.html#FUNCTIONS-DATETIME-TRUNC
        sql = "DATE_TRUNC('%s', %s)" % (lookup_type, field_name)
        return sql, params

    def deferrable_sql(self):
        return " DEFERRABLE INITIALLY DEFERRED"

    def lookup_cast(self, lookup_type):
        lookup = '%s'

        # Cast text lookups to text to allow things like filter(x__contains=4)
        if lookup_type in ('iexact', 'contains', 'icontains', 'startswith',
                           'istartswith', 'endswith', 'iendswith', 'regex', 'iregex'):
            lookup = "%s::text"

        # Use UPPER(x) for case-insensitive lookups; it's faster.
        if lookup_type in ('iexact', 'icontains', 'istartswith', 'iendswith'):
            lookup = 'UPPER(%s)' % lookup

        return lookup
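
    # Illustrative sketch, not part of the original module: for an 'icontains'
    # lookup the returned template is 'UPPER(%s::text)'; the caller interpolates
    # the column name into it, typically yielding SQL along the lines of
    #   UPPER("name"::text) LIKE UPPER(%s)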

    def field_cast_sql(self, db_type, internal_type):
        if internal_type == "GenericIPAddressField" or internal_type == "IPAddressField":
            return 'HOST(%s)'
        return '%s'

    def last_insert_id(self, cursor, table_name, pk_name):
        # Use pg_get_serial_sequence to get the underlying sequence name
        # from the table name and column name (available since PostgreSQL 8)
        cursor.execute("SELECT CURRVAL(pg_get_serial_sequence('%s','%s'))" % (
            self.quote_name(table_name), pk_name))
        return cursor.fetchone()[0]
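
    # Illustrative sketch, not part of the original module: for a table
    # "myapp_entry" with primary key column "id", this executes roughly
    #   SELECT CURRVAL(pg_get_serial_sequence('"myapp_entry"','id'))
    # and returns the first column of the fetched row.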

    def no_limit_value(self):
        return None

    def prepare_sql_script(self, sql, _allow_fallback=False):
        return [sql]

    def quote_name(self, name):
        if name.startswith('"') and name.endswith('"'):
            return name  # Quoting once is enough.
        return '"%s"' % name

    def set_time_zone_sql(self):
        return "SET TIME ZONE %s"

    def sql_flush(self, style, tables, sequences, allow_cascade=False):
        if tables:
            # Perform a single SQL 'TRUNCATE x, y, z...;' statement. It allows
            # us to truncate tables referenced by a foreign key in any other
            # table.
            tables_sql = ', '.join(
                style.SQL_FIELD(self.quote_name(table)) for table in tables)
            if allow_cascade:
                sql = ['%s %s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                    style.SQL_KEYWORD('CASCADE'),
                )]
            else:
                sql = ['%s %s;' % (
                    style.SQL_KEYWORD('TRUNCATE'),
                    tables_sql,
                )]
            sql.extend(self.sequence_reset_by_name_sql(style, sequences))
            return sql
        else:
            return []
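
    # Illustrative sketch, not part of the original module: for tables
    # ['myapp_a', 'myapp_b'] with no sequences and allow_cascade=False, the
    # returned list is roughly ['TRUNCATE "myapp_a", "myapp_b";'] (ignoring any
    # terminal colouring applied by the style object).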

    def sequence_reset_by_name_sql(self, style, sequences):
        # Generate 'SELECT setval(...)' style SQL statements that reset each
        # sequence so that its next value is 1.
        sql = []
        for sequence_info in sequences:
            table_name = sequence_info['table']
            column_name = sequence_info['column']
            if not (column_name and len(column_name) > 0):
                # This will be the case if it's an m2m using an autogenerated
                # intermediate table (see BaseDatabaseIntrospection.sequence_list)
                column_name = 'id'
            sql.append("%s setval(pg_get_serial_sequence('%s','%s'), 1, false);" %
                (style.SQL_KEYWORD('SELECT'),
                 style.SQL_TABLE(self.quote_name(table_name)),
                 style.SQL_FIELD(column_name))
            )
        return sql
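
    # Illustrative sketch, not part of the original module: a sequence entry of
    # {'table': 'myapp_entry', 'column': 'id'} yields roughly
    #   SELECT setval(pg_get_serial_sequence('"myapp_entry"','id'), 1, false);
    # i.e. the sequence restarts so that the next value handed out is 1.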

    def tablespace_sql(self, tablespace, inline=False):
        if inline:
            return "USING INDEX TABLESPACE %s" % self.quote_name(tablespace)
        else:
            return "TABLESPACE %s" % self.quote_name(tablespace)

    def sequence_reset_sql(self, style, model_list):
        from django.db import models
        output = []
        qn = self.quote_name
        for model in model_list:
            # Use `coalesce` to set the sequence for each model to the max pk value if there are records,
            # or 1 if there are none. Set the `is_called` property (the third argument to `setval`) to true
            # if there are records (as the max pk value is already in use), otherwise set it to false.
            # Use pg_get_serial_sequence to get the underlying sequence name from the table name
            # and column name (available since PostgreSQL 8)

            for f in model._meta.local_fields:
                if isinstance(f, models.AutoField):
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" %
                        (style.SQL_KEYWORD('SELECT'),
                         style.SQL_TABLE(qn(model._meta.db_table)),
                         style.SQL_FIELD(f.column),
                         style.SQL_FIELD(qn(f.column)),
                         style.SQL_FIELD(qn(f.column)),
                         style.SQL_KEYWORD('IS NOT'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_TABLE(qn(model._meta.db_table))))
                    break  # Only one AutoField is allowed per model, so don't bother continuing.
            for f in model._meta.many_to_many:
                if not f.rel.through:
                    output.append("%s setval(pg_get_serial_sequence('%s','%s'), coalesce(max(%s), 1), max(%s) %s null) %s %s;" %
                        (style.SQL_KEYWORD('SELECT'),
                         style.SQL_TABLE(qn(f.m2m_db_table())),
                         style.SQL_FIELD('id'),
                         style.SQL_FIELD(qn('id')),
                         style.SQL_FIELD(qn('id')),
                         style.SQL_KEYWORD('IS NOT'),
                         style.SQL_KEYWORD('FROM'),
                         style.SQL_TABLE(qn(f.m2m_db_table()))))
        return output
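
    # Illustrative sketch, not part of the original module: for a model whose
    # AutoField column is "id" on table "myapp_entry", each generated statement
    # looks roughly like
    #   SELECT setval(pg_get_serial_sequence('"myapp_entry"','id'),
    #                 coalesce(max("id"), 1), max("id") IS NOT null) FROM "myapp_entry";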

    def prep_for_iexact_query(self, x):
        return x

    def max_name_length(self):
        """
        Returns the maximum length of an identifier.

        Note that the maximum length of an identifier is 63 by default, but can
        be changed by recompiling PostgreSQL after editing the NAMEDATALEN
        macro in src/include/pg_config_manual.h .

        This implementation simply returns 63, but can easily be overridden by a
        custom database backend that inherits most of its behavior from this one.
        """
        return 63

    def distinct_sql(self, fields):
        if fields:
            return 'DISTINCT ON (%s)' % ', '.join(fields)
        else:
            return 'DISTINCT'
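
    # Illustrative sketch, not part of the original module:
    #   distinct_sql(['"author_id"', '"pub_date"']) -> DISTINCT ON ("author_id", "pub_date")
    #   distinct_sql([])                            -> DISTINCT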

    def last_executed_query(self, cursor, sql, params):
        # http://initd.org/psycopg/docs/cursor.html#cursor.query
        # The query attribute is a Psycopg extension to the DB API 2.0.
        if cursor.query is not None:
            return cursor.query.decode('utf-8')
        return None

    def return_insert_id(self):
        return "RETURNING %s", ()

    def bulk_insert_sql(self, fields, num_values):
        items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
        return "VALUES " + ", ".join([items_sql] * num_values)
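
    # Illustrative sketch, not part of the original module: with three fields
    # and num_values=2 this returns roughly
    #   VALUES (%s, %s, %s), (%s, %s, %s)
    # which the caller appends to an INSERT statement along with the flattened
    # parameter list.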