Fixed #3460 -- Added an ability to enable true autocommit for psycopg2 backend.
Be sure to read the documentation before blindly enabling this: it requires some code audits first, but might well be worth it for busy sites. Thanks to nicferrier, iamseb and Richard Davies for help with this patch.

git-svn-id: http://code.djangoproject.com/svn/django/trunk@10029 bcc190cf-cafb-0310-a4f2-bffc1f526a37
parent 0543f33bbc
commit 5fb6667036
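
For context: the new behavior is opt-in and is switched on through the
``autocommit`` key in ``DATABASE_OPTIONS`` (documented in the database-notes
changes further down in this patch). A minimal settings sketch, assuming the
``postgresql_psycopg2`` backend and a hypothetical database name:

    # settings.py (sketch -- only the relevant settings are shown)
    DATABASE_ENGINE = 'postgresql_psycopg2'
    DATABASE_NAME = 'mydb'               # hypothetical
    DATABASE_OPTIONS = {
        'autocommit': True,              # database-level autocommit (new in this patch)
    }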
@@ -41,6 +41,21 @@ class BaseDatabaseWrapper(local):
        if self.connection is not None:
            return self.connection.rollback()

    def _enter_transaction_management(self, managed):
        """
        A hook for backend-specific changes required when entering manual
        transaction handling.
        """
        pass

    def _leave_transaction_management(self, managed):
        """
        A hook for backend-specific changes required when leaving manual
        transaction handling. Will usually be implemented only when
        _enter_transaction_management() is also required.
        """
        pass

    def _savepoint(self, sid):
        if not self.features.uses_savepoints:
            return
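
The two hooks above are deliberate no-ops in the base class; a backend that has
to reconfigure its connection around manual transaction handling overrides
them. A rough sketch of the contract, loosely modeled on the psycopg2
implementation later in this patch (the class name and the driver call are
illustrative, not part of the patch):

    class ExampleDatabaseWrapper(BaseDatabaseWrapper):
        def _enter_transaction_management(self, managed):
            # Called via transaction.enter_transaction_management(); put the
            # connection into a mode where statements share one transaction.
            if managed and self.connection is not None:
                self.connection.set_isolation_level(1)   # driver-specific call

        def _leave_transaction_management(self, managed):
            # Called via transaction.leave_transaction_management(); restore
            # the connection's normal (e.g. autocommit) mode.
            if not managed and self.connection is not None:
                self.connection.set_isolation_level(0)   # driver-specific call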
@@ -81,6 +96,8 @@ class BaseDatabaseFeatures(object):
    update_can_self_select = True
    interprets_empty_strings_as_nulls = False
    can_use_chunked_reads = True
    can_return_id_from_insert = False
    uses_autocommit = False
    uses_savepoints = False
    # If True, don't use integer foreign keys referring to, e.g., positive
    # integer primary keys.
@@ -230,6 +247,15 @@ class BaseDatabaseOperations(object):
        """
        return 'DEFAULT'

    def return_insert_id(self):
        """
        For backends that support returning the last insert ID as part of an
        insert query, this method returns the SQL to append to the INSERT
        query. The returned fragment should contain a format string to hold
        the appropriate column.
        """
        pass

    def query_class(self, DefaultQueryClass):
        """
        Given the default Query class, returns a custom Query class
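
A backend opts into the new hook by setting ``can_return_id_from_insert = True``
on its features class and returning a fragment with a single ``%s`` placeholder
from ``return_insert_id()``; the psycopg2 backend below returns ``"RETURNING %s"``.
Roughly, the SQL compiler (see the InsertQuery change later in this patch)
combines the two like this (a sketch; ``qn`` stands for the backend's
``quote_name`` and the table/column names are illustrative):

    if connection.features.can_return_id_from_insert:
        col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
        result.append(connection.ops.return_insert_id() % col)
    # For PostgreSQL the statement then ends with something like:
    #   RETURNING "app_entry"."id"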
@@ -4,6 +4,7 @@ PostgreSQL database backend for Django.
Requires psycopg 2: http://initd.org/projects/psycopg2
"""

from django.conf import settings
from django.db.backends import *
from django.db.backends.postgresql.operations import DatabaseOperations as PostgresqlDatabaseOperations
from django.db.backends.postgresql.client import DatabaseClient
@@ -28,7 +29,7 @@ psycopg2.extensions.register_adapter(SafeUnicode, psycopg2.extensions.QuotedString)

class DatabaseFeatures(BaseDatabaseFeatures):
    needs_datetime_string_cast = False
-    uses_savepoints = True
+    can_return_id_from_insert = True

class DatabaseOperations(PostgresqlDatabaseOperations):
    def last_executed_query(self, cursor, sql, params):
@@ -37,6 +38,9 @@ class DatabaseOperations(PostgresqlDatabaseOperations):
        # http://www.initd.org/tracker/psycopg/wiki/psycopg2_documentation#postgresql-status-message-and-executed-query
        return cursor.query

    def return_insert_id(self):
        return "RETURNING %s"

class DatabaseWrapper(BaseDatabaseWrapper):
    operators = {
        'exact': '= %s',
@@ -57,8 +61,14 @@ class DatabaseWrapper(BaseDatabaseWrapper):

    def __init__(self, *args, **kwargs):
        super(DatabaseWrapper, self).__init__(*args, **kwargs)

        self.features = DatabaseFeatures()
        if settings.DATABASE_OPTIONS.get('autocommit', False):
            self.features.uses_autocommit = True
            self._iso_level_0()
        else:
            self.features.uses_autocommit = False
            self._iso_level_1()
        self.ops = DatabaseOperations()
        self.client = DatabaseClient(self)
        self.creation = DatabaseCreation(self)
@@ -77,6 +87,8 @@ class DatabaseWrapper(BaseDatabaseWrapper):
                'database': settings_dict['DATABASE_NAME'],
            }
            conn_params.update(settings_dict['DATABASE_OPTIONS'])
            if 'autocommit' in conn_params:
                del conn_params['autocommit']
            if settings_dict['DATABASE_USER']:
                conn_params['user'] = settings_dict['DATABASE_USER']
            if settings_dict['DATABASE_PASSWORD']:
@@ -86,7 +98,6 @@ class DatabaseWrapper(BaseDatabaseWrapper):
            if settings_dict['DATABASE_PORT']:
                conn_params['port'] = settings_dict['DATABASE_PORT']
            self.connection = Database.connect(**conn_params)
-            self.connection.set_isolation_level(1) # make transactions transparent to all cursors
            self.connection.set_client_encoding('UTF8')
        cursor = self.connection.cursor()
        cursor.tzinfo_factory = None
@@ -98,3 +109,44 @@ class DatabaseWrapper(BaseDatabaseWrapper):
            # No savepoint support for earlier version of PostgreSQL.
            self.features.uses_savepoints = False
        return cursor

    def _enter_transaction_management(self, managed):
        """
        Switch the isolation level when needing transaction support, so that
        the same transaction is visible across all the queries.
        """
        if self.features.uses_autocommit and managed and not self.isolation_level:
            self._iso_level_1()

    def _leave_transaction_management(self, managed):
        """
        If the normal operating mode is "autocommit", switch back to that when
        leaving transaction management.
        """
        if self.features.uses_autocommit and not managed and self.isolation_level:
            self._iso_level_0()

    def _iso_level_0(self):
        """
        Do all the related feature configurations for isolation level 0. This
        doesn't touch the uses_autocommit feature, since that controls the
        movement *between* isolation levels.
        """
        try:
            if self.connection is not None:
                self.connection.set_isolation_level(0)
        finally:
            self.isolation_level = 0
            self.features.uses_savepoints = False

    def _iso_level_1(self):
        """
        The "isolation level 1" version of _iso_level_0().
        """
        try:
            if self.connection is not None:
                self.connection.set_isolation_level(1)
        finally:
            self.isolation_level = 1
            self.features.uses_savepoints = True
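
The integer levels used by ``_iso_level_0()`` and ``_iso_level_1()`` are
psycopg2's isolation-level constants: 0 is ``ISOLATION_LEVEL_AUTOCOMMIT``
(every statement is committed immediately) and 1 is
``ISOLATION_LEVEL_READ_COMMITTED`` (statements share a transaction until
commit or rollback). A standalone sketch of what the two helpers do at the
driver level, assuming a reachable PostgreSQL database and a hypothetical DSN:

    import psycopg2
    from psycopg2 import extensions

    conn = psycopg2.connect("dbname=mydb")   # hypothetical connection string
    conn.set_isolation_level(extensions.ISOLATION_LEVEL_AUTOCOMMIT)      # level 0
    # ...each execute() now commits on its own...
    conn.set_isolation_level(extensions.ISOLATION_LEVEL_READ_COMMITTED)  # level 1
    # ...statements now run in a transaction until commit()/rollback()...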
@@ -447,8 +447,20 @@ class QuerySet(object):
                "Cannot update a query once a slice has been taken."
        query = self.query.clone(sql.UpdateQuery)
        query.add_update_values(kwargs)
-        rows = query.execute_sql(None)
-        transaction.commit_unless_managed()
+        if not transaction.is_managed():
+            transaction.enter_transaction_management()
+            forced_managed = True
+        else:
+            forced_managed = False
+        try:
+            rows = query.execute_sql(None)
+            if forced_managed:
+                transaction.commit()
+            else:
+                transaction.commit_unless_managed()
+        finally:
+            if forced_managed:
+                transaction.leave_transaction_management()
        self._result_cache = None
        return rows
    update.alters_data = True
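
In practice, a bulk update such as the one below still changes either all of
the matching rows or none of them, even when the connection itself is running
in autocommit mode (``Entry`` and its fields are the illustrative model used
throughout Django's queryset documentation):

    # Executes as a single UPDATE statement inside one transaction,
    # regardless of the connection-level autocommit option.
    Entry.objects.filter(pub_date__year=2007).update(comments_on=False)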
@@ -962,6 +974,11 @@ def delete_objects(seen_objs):
    Iterate through a list of seen classes, and remove any instances that are
    referred to.
    """
+    if not transaction.is_managed():
+        transaction.enter_transaction_management()
+        forced_managed = True
+    else:
+        forced_managed = False
    try:
        ordered_classes = seen_objs.keys()
    except CyclicDependency:
@@ -972,51 +989,58 @@ def delete_objects(seen_objs):
    ordered_classes = seen_objs.unordered_keys()

    obj_pairs = {}
-    for cls in ordered_classes:
-        items = seen_objs[cls].items()
-        items.sort()
-        obj_pairs[cls] = items
+    try:
+        for cls in ordered_classes:
+            items = seen_objs[cls].items()
+            items.sort()
+            obj_pairs[cls] = items

-        # Pre-notify all instances to be deleted.
-        for pk_val, instance in items:
-            signals.pre_delete.send(sender=cls, instance=instance)
+            # Pre-notify all instances to be deleted.
+            for pk_val, instance in items:
+                signals.pre_delete.send(sender=cls, instance=instance)

-        pk_list = [pk for pk,instance in items]
-        del_query = sql.DeleteQuery(cls, connection)
-        del_query.delete_batch_related(pk_list)
+            pk_list = [pk for pk,instance in items]
+            del_query = sql.DeleteQuery(cls, connection)
+            del_query.delete_batch_related(pk_list)

-        update_query = sql.UpdateQuery(cls, connection)
-        for field, model in cls._meta.get_fields_with_model():
-            if (field.rel and field.null and field.rel.to in seen_objs and
-                    filter(lambda f: f.column == field.column,
-                    field.rel.to._meta.fields)):
-                if model:
-                    sql.UpdateQuery(model, connection).clear_related(field,
-                            pk_list)
-                else:
-                    update_query.clear_related(field, pk_list)
+            update_query = sql.UpdateQuery(cls, connection)
+            for field, model in cls._meta.get_fields_with_model():
+                if (field.rel and field.null and field.rel.to in seen_objs and
+                        filter(lambda f: f.column == field.column,
+                        field.rel.to._meta.fields)):
+                    if model:
+                        sql.UpdateQuery(model, connection).clear_related(field,
+                                pk_list)
+                    else:
+                        update_query.clear_related(field, pk_list)

-    # Now delete the actual data.
-    for cls in ordered_classes:
-        items = obj_pairs[cls]
-        items.reverse()
+        # Now delete the actual data.
+        for cls in ordered_classes:
+            items = obj_pairs[cls]
+            items.reverse()

-        pk_list = [pk for pk,instance in items]
-        del_query = sql.DeleteQuery(cls, connection)
-        del_query.delete_batch(pk_list)
+            pk_list = [pk for pk,instance in items]
+            del_query = sql.DeleteQuery(cls, connection)
+            del_query.delete_batch(pk_list)

-        # Last cleanup; set NULLs where there once was a reference to the
-        # object, NULL the primary key of the found objects, and perform
-        # post-notification.
-        for pk_val, instance in items:
-            for field in cls._meta.fields:
-                if field.rel and field.null and field.rel.to in seen_objs:
-                    setattr(instance, field.attname, None)
+            # Last cleanup; set NULLs where there once was a reference to the
+            # object, NULL the primary key of the found objects, and perform
+            # post-notification.
+            for pk_val, instance in items:
+                for field in cls._meta.fields:
+                    if field.rel and field.null and field.rel.to in seen_objs:
+                        setattr(instance, field.attname, None)

-            signals.post_delete.send(sender=cls, instance=instance)
-            setattr(instance, cls._meta.pk.attname, None)
+                signals.post_delete.send(sender=cls, instance=instance)
+                setattr(instance, cls._meta.pk.attname, None)

-    transaction.commit_unless_managed()
+        if forced_managed:
+            transaction.commit()
+        else:
+            transaction.commit_unless_managed()
+    finally:
+        if forced_managed:
+            transaction.leave_transaction_management()


def insert_query(model, values, return_id=False, raw_values=False):
@@ -302,9 +302,13 @@ class InsertQuery(Query):
        # We don't need quote_name_unless_alias() here, since these are all
        # going to be column names (so we can avoid the extra overhead).
        qn = self.connection.ops.quote_name
-        result = ['INSERT INTO %s' % qn(self.model._meta.db_table)]
+        opts = self.model._meta
+        result = ['INSERT INTO %s' % qn(opts.db_table)]
        result.append('(%s)' % ', '.join([qn(c) for c in self.columns]))
        result.append('VALUES (%s)' % ', '.join(self.values))
+        if self.connection.features.can_return_id_from_insert:
+            col = "%s.%s" % (qn(opts.db_table), qn(opts.pk.column))
+            result.append(self.connection.ops.return_insert_id() % col)
        return ' '.join(result), self.params

    def execute_sql(self, return_id=False):
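
With the extra clause, the statement built by ``as_sql()`` on PostgreSQL lets
the new id be read back from the same cursor rather than through a separate
``SELECT CURRVAL(...)`` query. A sketch of the result for a hypothetical
``app_entry`` table:

    # Roughly what the compiler now produces and how the id comes back:
    sql = ('INSERT INTO "app_entry" ("headline") VALUES (%s) '
           'RETURNING "app_entry"."id"')
    cursor.execute(sql, ['Hello world'])
    new_id = cursor.fetchone()[0]   # primary key generated by the insert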
@@ -40,7 +40,7 @@ savepoint_state = {}
# database commit.
dirty = {}

-def enter_transaction_management():
+def enter_transaction_management(managed=True):
    """
    Enters transaction management for a running thread. It must be balanced with
    the appropriate leave_transaction_management call, since the actual state is
@@ -58,6 +58,7 @@ def enter_transaction_management():
    state[thread_ident].append(settings.TRANSACTIONS_MANAGED)
    if thread_ident not in dirty:
        dirty[thread_ident] = False
+    connection._enter_transaction_management(managed)

def leave_transaction_management():
    """
@@ -65,6 +66,7 @@ def leave_transaction_management():
    over to the surrounding block, as a commit will commit all changes, even
    those from outside. (Commits are on connection level.)
    """
+    connection._leave_transaction_management(is_managed())
    thread_ident = thread.get_ident()
    if thread_ident in state and state[thread_ident]:
        del state[thread_ident][-1]
@@ -216,7 +218,7 @@ def autocommit(func):
    """
    def _autocommit(*args, **kw):
        try:
-            enter_transaction_management()
+            enter_transaction_management(managed=False)
            managed(False)
            return func(*args, **kw)
        finally:
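
Note that the Django-level ``autocommit`` decorator shown above now enters
transaction management with ``managed=False``, so a backend that is already in
database-level autocommit mode simply stays there. Typical usage is unchanged
(the view name is illustrative):

    from django.db import transaction

    @transaction.autocommit
    def my_view(request):
        # Each ORM write in here is committed as soon as it happens.
        ...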
@@ -13,6 +13,8 @@ This file describes some of the features that might be relevant to Django
usage. Of course, it is not intended as a replacement for server-specific
documentation or reference manuals.

.. _postgresql-notes:

PostgreSQL notes
================
@@ -29,6 +31,56 @@ aggregate with a database backend falls within the affected release range.
.. _known to be faulty: http://archives.postgresql.org/pgsql-bugs/2007-07/msg00046.php
.. _Release 8.2.5: http://developer.postgresql.org/pgdocs/postgres/release-8-2-5.html

Transaction handling
--------------------

:ref:`By default <topics-db-transactions>`, Django starts a transaction when a
database connection is first used and commits the result at the end of the
request/response handling. The PostgreSQL backends normally operate the same
as any other Django backend in this respect.

Autocommit mode
~~~~~~~~~~~~~~~

.. versionadded:: 1.1

If your application is particularly read-heavy and doesn't make many database
writes, the overhead of a constantly open transaction can sometimes be
noticeable. For those situations, if you're using the ``postgresql_psycopg2``
backend, you can configure Django to use *"autocommit"* behavior for the
connection, meaning that each database operation will normally be in its own
transaction, rather than having the transaction extend over multiple
operations. In this case, you can still manually start a transaction if you're
doing something that requires consistency across multiple database operations.
The autocommit behavior is enabled by setting the ``autocommit`` key in the
:setting:`DATABASE_OPTIONS` setting::

    DATABASE_OPTIONS = {
        "autocommit": True,
    }

In this configuration, Django still ensures that :ref:`delete()
<topics-db-queries-delete>` and :ref:`update() <topics-db-queries-update>`
queries run inside a single transaction, so that either all the affected
objects are changed or none of them are.

.. admonition:: This is database-level autocommit

    This functionality is not the same as the
    :ref:`topics-db-transactions-autocommit` decorator. That decorator is a
    Django-level implementation that commits automatically after data-changing
    operations. The feature enabled using the :setting:`DATABASE_OPTIONS`
    setting provides autocommit behavior at the database adapter level. It
    commits after *every* operation.

If you are using this feature and performing an operation akin to delete or
update that requires multiple statements, you are strongly recommended to
wrap your operations in manual transaction handling to ensure data
consistency. You should also audit your existing code for any such
multi-statement operations before enabling this feature. It's faster, but it
provides less automatic protection for multi-call operations.
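
One straightforward way to follow this advice is Django's existing
``commit_on_success`` decorator, which (with this patch) temporarily switches
the connection back to transactional mode for the duration of the function.
A sketch, with an illustrative model and function::

    from django.db import transaction

    @transaction.commit_on_success
    def move_funds(amount, source_id, target_id):
        # Both saves commit together, or neither does, even with
        # database-level autocommit enabled in DATABASE_OPTIONS.
        source = Account.objects.get(pk=source_id)
        source.balance -= amount
        source.save()
        target = Account.objects.get(pk=target_id)
        target.balance += amount
        target.save()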

.. _mysql-notes:

MySQL notes
@@ -199,7 +251,7 @@ Here's a sample configuration which uses a MySQL option file::
    DATABASE_ENGINE = "mysql"
    DATABASE_OPTIONS = {
        'read_default_file': '/path/to/my.cnf',
-        }
+    }

    # my.cnf
    [client]
@@ -237,9 +289,7 @@ storage engine, you have a couple of options.
  creating your tables::

    DATABASE_OPTIONS = {
-       # ...
       "init_command": "SET storage_engine=INNODB",
-       # ...
    }

This sets the default storage engine upon connecting to the database.
@@ -714,6 +714,8 @@ primary key field is called ``name``, these two statements are equivalent::
    >>> some_obj == other_obj
    >>> some_obj.name == other_obj.name

.. _topics-db-queries-delete:

Deleting objects
================

@@ -756,6 +758,8 @@ complete query set::

    Entry.objects.all().delete()

.. _topics-db-queries-update:

Updating multiple objects at once
=================================
@@ -63,6 +63,8 @@ particular view function.
Although the examples below use view functions as examples, these
decorators can be applied to non-view functions as well.

.. _topics-db-transactions-autocommit:

``django.db.transaction.autocommit``
------------------------------------