Merged branch 'database-level-autocommit'.

Fixed #2227: `atomic` supports nesting.
Fixed #6623: `commit_manually` is deprecated and `atomic` doesn't suffer from this defect.
Fixed #8320: the problem wasn't identified, but the legacy transaction management is deprecated.
Fixed #10744: the problem wasn't identified, but the legacy transaction management is deprecated.
Fixed #10813: since autocommit is enabled, it isn't necessary to roll back after errors any more.
Fixed #13742: savepoints are now implemented for SQLite.
Fixed #13870: transaction management in long-running processes isn't a problem any more, and it's documented.
Fixed #14970: while it digresses on transaction management, this ticket essentially asks for autocommit on PostgreSQL.
Fixed #15694: `atomic` supports nesting.
Fixed #17887: autocommit makes it impossible for a connection to stay "idle in transaction".
Aymeric Augustin 2013-03-11 15:11:34 +01:00
commit 14cddf51c5
52 changed files with 1749 additions and 938 deletions
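To make the ticket notes above concrete: with this branch, `atomic` blocks nest, and inner blocks are backed by savepoints, so a failure inside an inner block no longer dooms the outer transaction. A minimal sketch (not part of the diff below), assuming a hypothetical `Reporter` model in an installed app:

```python
from django.db import transaction

def create_reporters():
    with transaction.atomic():              # outer block: opens a real transaction
        Reporter.objects.create(full_name="Alice")
        try:
            with transaction.atomic():      # nested block: implemented with a savepoint
                Reporter.objects.create(full_name="Bob")
                raise ValueError("abort only the inner block")
        except ValueError:
            pass
        # "Alice" is committed when the outer block exits; "Bob" was rolled
        # back to the savepoint created by the inner block.
```

Outside any `atomic` block the connection stays in autocommit mode, which is what the tickets about "idle in transaction" connections and rollback-after-error rely on.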

View File

@ -434,6 +434,7 @@ answer newbie questions, and generally made Django that much better:
 Andreas Pelme <andreas@pelme.se>
 permonik@mesias.brnonet.cz
 peter@mymart.com
+Christophe Pettus <xof@thebuild.com>
 pgross@thoughtworks.com
 phaedo <http://phaedo.cx/>
 phil@produxion.net

View File

@ -555,10 +555,6 @@ class LayerMapping(object):
except SystemExit: except SystemExit:
raise raise
except Exception as msg: except Exception as msg:
if self.transaction_mode == 'autocommit':
# Rolling back the transaction so that other model saves
# will work.
transaction.rollback_unless_managed()
if strict: if strict:
# Bailing out if the `strict` keyword is set. # Bailing out if the `strict` keyword is set.
if not silent: if not silent:

View File

@ -74,7 +74,6 @@ class SessionStore(SessionBase):
@classmethod @classmethod
def clear_expired(cls): def clear_expired(cls):
Session.objects.filter(expire_date__lt=timezone.now()).delete() Session.objects.filter(expire_date__lt=timezone.now()).delete()
transaction.commit_unless_managed()
# At bottom to avoid circular import # At bottom to avoid circular import

View File

@ -10,7 +10,7 @@ except ImportError:
 from django.conf import settings
 from django.core.cache.backends.base import BaseCache
-from django.db import connections, router, transaction, DatabaseError
+from django.db import connections, router, DatabaseError
 from django.utils import timezone, six
 from django.utils.encoding import force_bytes
@ -70,7 +70,6 @@ class DatabaseCache(BaseDatabaseCache):
cursor = connections[db].cursor() cursor = connections[db].cursor()
cursor.execute("DELETE FROM %s " cursor.execute("DELETE FROM %s "
"WHERE cache_key = %%s" % table, [key]) "WHERE cache_key = %%s" % table, [key])
transaction.commit_unless_managed(using=db)
return default return default
value = connections[db].ops.process_clob(row[1]) value = connections[db].ops.process_clob(row[1])
return pickle.loads(base64.b64decode(force_bytes(value))) return pickle.loads(base64.b64decode(force_bytes(value)))
@ -124,10 +123,8 @@ class DatabaseCache(BaseDatabaseCache):
[key, b64encoded, connections[db].ops.value_to_db_datetime(exp)]) [key, b64encoded, connections[db].ops.value_to_db_datetime(exp)])
except DatabaseError: except DatabaseError:
# To be threadsafe, updates/inserts are allowed to fail silently # To be threadsafe, updates/inserts are allowed to fail silently
transaction.rollback_unless_managed(using=db)
return False return False
else: else:
transaction.commit_unless_managed(using=db)
return True return True
def delete(self, key, version=None): def delete(self, key, version=None):
@ -139,7 +136,6 @@ class DatabaseCache(BaseDatabaseCache):
cursor = connections[db].cursor() cursor = connections[db].cursor()
cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key]) cursor.execute("DELETE FROM %s WHERE cache_key = %%s" % table, [key])
transaction.commit_unless_managed(using=db)
def has_key(self, key, version=None): def has_key(self, key, version=None):
key = self.make_key(key, version=version) key = self.make_key(key, version=version)
@ -184,7 +180,6 @@ class DatabaseCache(BaseDatabaseCache):
table = connections[db].ops.quote_name(self._table) table = connections[db].ops.quote_name(self._table)
cursor = connections[db].cursor() cursor = connections[db].cursor()
cursor.execute('DELETE FROM %s' % table) cursor.execute('DELETE FROM %s' % table)
transaction.commit_unless_managed(using=db)
# For backwards compatibility # For backwards compatibility
class CacheClass(DatabaseCache): class CacheClass(DatabaseCache):

View File

@ -6,10 +6,10 @@ import types
 from django import http
 from django.conf import settings
-from django.core import exceptions
 from django.core import urlresolvers
 from django.core import signals
 from django.core.exceptions import MiddlewareNotUsed, PermissionDenied
+from django.db import connections, transaction
 from django.utils.encoding import force_text
 from django.utils.module_loading import import_by_path
 from django.utils import six
@ -65,6 +65,13 @@ class BaseHandler(object):
# as a flag for initialization being complete. # as a flag for initialization being complete.
self._request_middleware = request_middleware self._request_middleware = request_middleware
def make_view_atomic(self, view):
if getattr(view, 'transactions_per_request', True):
for db in connections.all():
if db.settings_dict['ATOMIC_REQUESTS']:
view = transaction.atomic(using=db.alias)(view)
return view
def get_response(self, request): def get_response(self, request):
"Returns an HttpResponse object for the given HttpRequest" "Returns an HttpResponse object for the given HttpRequest"
try: try:
@ -101,8 +108,9 @@ class BaseHandler(object):
             break

     if response is None:
+        wrapped_callback = self.make_view_atomic(callback)
         try:
-            response = callback(request, *callback_args, **callback_kwargs)
+            response = wrapped_callback(request, *callback_args, **callback_kwargs)
         except Exception as e:
             # If the view raised an exception, run it through exception
             # middleware, and if the exception middleware returns a
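The `make_view_atomic()` hook added above is what turns the per-database `ATOMIC_REQUESTS` setting into a `transaction.atomic` wrapper around each view, unless the view opts out via the `transactions_per_request` attribute checked in the hunk (that is the attribute name used at this point in the branch). A hedged sketch of how a project might use it; the database values and view names are placeholders, not taken from this diff:

```python
# settings.py -- placeholder configuration
DATABASES = {
    'default': {
        'ENGINE': 'django.db.backends.postgresql_psycopg2',
        'NAME': 'mydb',
        'ATOMIC_REQUESTS': True,    # wrap every view on this database in transaction.atomic()
    }
}

# views.py
from django.http import HttpResponse

def report(request):
    # Runs inside a transaction opened by make_view_atomic().
    return HttpResponse("wrapped in a transaction")

def streaming_export(request):
    # Runs in autocommit mode because of the opt-out attribute below.
    return HttpResponse("not wrapped")
streaming_export.transactions_per_request = False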

View File

@ -53,14 +53,13 @@ class Command(LabelCommand):
 for i, line in enumerate(table_output):
     full_statement.append(' %s%s' % (line, i < len(table_output)-1 and ',' or ''))
 full_statement.append(');')
+with transaction.commit_on_success_unless_managed():
     curs = connection.cursor()
     try:
         curs.execute("\n".join(full_statement))
     except DatabaseError as e:
-        transaction.rollback_unless_managed(using=db)
         raise CommandError(
             "Cache table '%s' could not be created.\nThe error was: %s." %
             (tablename, force_text(e)))
     for statement in index_output:
         curs.execute(statement)
-transaction.commit_unless_managed(using=db)
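The same substitution recurs throughout this commit: explicit `commit_unless_managed()` / `rollback_unless_managed()` calls give way to the `commit_on_success_unless_managed()` context manager, which commits when the block exits cleanly and rolls back if it raises. A schematic sketch of the pattern, with `db_alias` and `statements` as placeholder names:

```python
from django.db import connections, transaction

def run_statements(db_alias, statements):
    # Commits on a clean exit, rolls back on an exception; under legacy
    # managed transactions it leaves the commit/rollback to the caller.
    with transaction.commit_on_success_unless_managed(using=db_alias):
        cursor = connections[db_alias].cursor()
        for statement in statements:
            cursor.execute(statement)
```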

View File

@ -57,18 +57,17 @@ Are you sure you want to do this?
 if confirm == 'yes':
     try:
+        with transaction.commit_on_success_unless_managed():
             cursor = connection.cursor()
             for sql in sql_list:
                 cursor.execute(sql)
     except Exception as e:
-        transaction.rollback_unless_managed(using=db)
         raise CommandError("""Database %s couldn't be flushed. Possible reasons:
   * The database isn't running or isn't configured correctly.
   * At least one of the expected database tables doesn't exist.
   * The SQL was invalid.
 Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
 The full error: %s""" % (connection.settings_dict['NAME'], e))
-    transaction.commit_unless_managed(using=db)

     # Emit the post sync signal. This allows individual
     # applications to respond as if the database had been

View File

@ -41,8 +41,6 @@ class Command(BaseCommand):
self.ignore = options.get('ignore') self.ignore = options.get('ignore')
self.using = options.get('database') self.using = options.get('database')
connection = connections[self.using]
if not len(fixture_labels): if not len(fixture_labels):
raise CommandError( raise CommandError(
"No database fixture specified. Please provide the path of at " "No database fixture specified. Please provide the path of at "
@ -51,13 +49,18 @@ class Command(BaseCommand):
 self.verbosity = int(options.get('verbosity'))

-# commit is a stealth option - it isn't really useful as
-# a command line option, but it can be useful when invoking
-# loaddata from within another script.
-# If commit=True, loaddata will use its own transaction;
-# if commit=False, the data load SQL will become part of
-# the transaction in place when loaddata was invoked.
-commit = options.get('commit', True)
+with transaction.commit_on_success_unless_managed(using=self.using):
+    self.loaddata(fixture_labels)
+
+# Close the DB connection -- unless we're still in a transaction. This
+# is required as a workaround for an edge case in MySQL: if the same
+# connection is used to create tables, load data, and query, the query
+# can return incorrect results. See Django #7572, MySQL #37735.
+if transaction.get_autocommit(self.using):
+    connections[self.using].close()
+
+def loaddata(self, fixture_labels):
+    connection = connections[self.using]

 # Keep a count of the installed objects and fixtures
 self.fixture_count = 0
@ -65,18 +68,6 @@ class Command(BaseCommand):
self.fixture_object_count = 0 self.fixture_object_count = 0
self.models = set() self.models = set()
# Get a cursor (even though we don't need one yet). This has
# the side effect of initializing the test database (if
# it isn't already initialized).
cursor = connection.cursor()
# Start transaction management. All fixtures are installed in a
# single transaction to ensure that all references are resolved.
if commit:
transaction.commit_unless_managed(using=self.using)
transaction.enter_transaction_management(using=self.using)
transaction.managed(True, using=self.using)
class SingleZipReader(zipfile.ZipFile): class SingleZipReader(zipfile.ZipFile):
def __init__(self, *args, **kwargs): def __init__(self, *args, **kwargs):
zipfile.ZipFile.__init__(self, *args, **kwargs) zipfile.ZipFile.__init__(self, *args, **kwargs)
@ -105,26 +96,17 @@ class Command(BaseCommand):
 app_fixtures = [os.path.join(os.path.dirname(path), 'fixtures') for path in app_module_paths]

-try:
     with connection.constraint_checks_disabled():
         for fixture_label in fixture_labels:
             self.load_label(fixture_label, app_fixtures)

     # Since we disabled constraint checks, we must manually check for
     # any invalid keys that might have been added
     table_names = [model._meta.db_table for model in self.models]
     try:
         connection.check_constraints(table_names=table_names)
     except Exception as e:
         e.args = ("Problem installing fixtures: %s" % e,)
         raise
-except (SystemExit, KeyboardInterrupt):
-    raise
-except Exception as e:
-    if commit:
-        transaction.rollback(using=self.using)
-        transaction.leave_transaction_management(using=self.using)
-    raise

 # If we found even one object in a fixture, we need to reset the
@ -137,10 +119,6 @@ class Command(BaseCommand):
for line in sequence_sql: for line in sequence_sql:
cursor.execute(line) cursor.execute(line)
if commit:
transaction.commit(using=self.using)
transaction.leave_transaction_management(using=self.using)
if self.verbosity >= 1: if self.verbosity >= 1:
if self.fixture_object_count == self.loaded_object_count: if self.fixture_object_count == self.loaded_object_count:
self.stdout.write("Installed %d object(s) from %d fixture(s)" % ( self.stdout.write("Installed %d object(s) from %d fixture(s)" % (
@ -149,13 +127,6 @@ class Command(BaseCommand):
self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" % ( self.stdout.write("Installed %d object(s) (of %d) from %d fixture(s)" % (
self.loaded_object_count, self.fixture_object_count, self.fixture_count)) self.loaded_object_count, self.fixture_object_count, self.fixture_count))
# Close the DB connection. This is required as a workaround for an
# edge case in MySQL: if the same connection is used to
# create tables, load data, and query, the query can return
# incorrect results. See Django #7572, MySQL #37735.
if commit:
connection.close()
def load_label(self, fixture_label, app_fixtures): def load_label(self, fixture_label, app_fixtures):
parts = fixture_label.split('.') parts = fixture_label.split('.')

View File

@ -83,26 +83,25 @@ class Command(NoArgsCommand):
 # Create the tables for each model
 if verbosity >= 1:
     self.stdout.write("Creating tables ...\n")
+with transaction.commit_on_success_unless_managed(using=db):
     for app_name, model_list in manifest.items():
         for model in model_list:
             # Create the model's database table, if it doesn't already exist.
             if verbosity >= 3:
                 self.stdout.write("Processing %s.%s model\n" % (app_name, model._meta.object_name))
             sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
             seen_models.add(model)
             created_models.add(model)
             for refto, refs in references.items():
                 pending_references.setdefault(refto, []).extend(refs)
                 if refto in seen_models:
                     sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
             sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
             if verbosity >= 1 and sql:
                 self.stdout.write("Creating table %s\n" % model._meta.db_table)
             for statement in sql:
                 cursor.execute(statement)
             tables.append(connection.introspection.table_name_converter(model._meta.db_table))
-transaction.commit_unless_managed(using=db)

 # Send the post_syncdb signal, so individual apps can do whatever they need
 # to do at this point.
@ -122,17 +121,16 @@ class Command(NoArgsCommand):
 if custom_sql:
     if verbosity >= 2:
         self.stdout.write("Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
+    with transaction.commit_on_success_unless_managed(using=db):
         try:
             for sql in custom_sql:
                 cursor.execute(sql)
         except Exception as e:
             self.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
                     (app_name, model._meta.object_name, e))
             if show_traceback:
                 traceback.print_exc()
-            transaction.rollback_unless_managed(using=db)
-        else:
-            transaction.commit_unless_managed(using=db)
+            raise
 else:
     if verbosity >= 3:
         self.stdout.write("No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
@ -147,15 +145,14 @@ class Command(NoArgsCommand):
 if index_sql:
     if verbosity >= 2:
         self.stdout.write("Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
+    with transaction.commit_on_success_unless_managed(using=db):
         try:
             for sql in index_sql:
                 cursor.execute(sql)
         except Exception as e:
             self.stderr.write("Failed to install index for %s.%s model: %s\n" % \
                     (app_name, model._meta.object_name, e))
-            transaction.rollback_unless_managed(using=db)
-        else:
-            transaction.commit_unless_managed(using=db)
+            raise

 # Load initial_data fixtures (unless that has been disabled)
 if load_initial_data:

View File

@ -70,6 +70,7 @@ signals.request_started.connect(reset_queries)
# their lifetime. NB: abort() doesn't do anything outside of a transaction. # their lifetime. NB: abort() doesn't do anything outside of a transaction.
def close_old_connections(**kwargs): def close_old_connections(**kwargs):
for conn in connections.all(): for conn in connections.all():
# Remove this when the legacy transaction management goes away.
try: try:
conn.abort() conn.abort()
except DatabaseError: except DatabaseError:
@ -77,14 +78,3 @@ def close_old_connections(**kwargs):
conn.close_if_unusable_or_obsolete() conn.close_if_unusable_or_obsolete()
signals.request_started.connect(close_old_connections) signals.request_started.connect(close_old_connections)
signals.request_finished.connect(close_old_connections) signals.request_finished.connect(close_old_connections)
# Register an event that rolls back the connections
# when a Django request has an exception.
def _rollback_on_exception(**kwargs):
from django.db import transaction
for conn in connections:
try:
transaction.rollback_unless_managed(using=conn)
except DatabaseError:
pass
signals.got_request_exception.connect(_rollback_on_exception)

View File

@ -44,11 +44,21 @@ class BaseDatabaseWrapper(object):
self.savepoint_state = 0 self.savepoint_state = 0
# Transaction management related attributes # Transaction management related attributes
self.autocommit = False
self.transaction_state = [] self.transaction_state = []
# Tracks if the connection is believed to be in transaction. This is # Tracks if the connection is believed to be in transaction. This is
# set somewhat aggressively, as the DBAPI doesn't make it easy to # set somewhat aggressively, as the DBAPI doesn't make it easy to
# deduce if the connection is in transaction or not. # deduce if the connection is in transaction or not.
self._dirty = False self._dirty = False
# Tracks if the connection is in a transaction managed by 'atomic'
self.in_atomic_block = False
# Tracks if the transaction should be rolled back to the next
# available savepoint because of an exception in an inner block.
self.needs_rollback = False
# List of savepoints created by 'atomic'
self.savepoint_ids = []
# Hack to provide compatibility with legacy transaction management
self._atomic_forced_unmanaged = False
# Connection termination related attributes # Connection termination related attributes
self.close_at = None self.close_at = None
@ -85,20 +95,35 @@ class BaseDatabaseWrapper(object):
"""Creates a cursor. Assumes that a connection is established.""" """Creates a cursor. Assumes that a connection is established."""
raise NotImplementedError raise NotImplementedError
##### Backend-specific methods for creating connections #####
def connect(self):
"""Connects to the database. Assumes that the connection is closed."""
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.init_connection_state()
if self.settings_dict['AUTOCOMMIT']:
self.set_autocommit(True)
connection_created.send(sender=self.__class__, connection=self)
def ensure_connection(self):
"""
Guarantees that a connection to the database is established.
"""
if self.connection is None:
with self.wrap_database_errors():
self.connect()
##### Backend-specific wrappers for PEP-249 connection methods ##### ##### Backend-specific wrappers for PEP-249 connection methods #####
def _cursor(self): def _cursor(self):
self.ensure_connection()
with self.wrap_database_errors(): with self.wrap_database_errors():
if self.connection is None:
# Reset parameters defining when to close the connection
max_age = self.settings_dict['CONN_MAX_AGE']
self.close_at = None if max_age is None else time.time() + max_age
self.errors_occurred = False
# Establish the connection
conn_params = self.get_connection_params()
self.connection = self.get_new_connection(conn_params)
self.init_connection_state()
connection_created.send(sender=self.__class__, connection=self)
return self.create_cursor() return self.create_cursor()
def _commit(self): def _commit(self):
@ -132,17 +157,19 @@ class BaseDatabaseWrapper(object):
 def commit(self):
     """
-    Does the commit itself and resets the dirty flag.
+    Commits a transaction and resets the dirty flag.
     """
     self.validate_thread_sharing()
+    self.validate_no_atomic_block()
     self._commit()
     self.set_clean()

 def rollback(self):
     """
-    Does the rollback itself and resets the dirty flag.
+    Rolls back a transaction and resets the dirty flag.
     """
     self.validate_thread_sharing()
+    self.validate_no_atomic_block()
     self._rollback()
     self.set_clean()
@ -160,54 +187,59 @@ class BaseDatabaseWrapper(object):
 ##### Backend-specific savepoint management methods #####

 def _savepoint(self, sid):
-    if not self.features.uses_savepoints:
-        return
     self.cursor().execute(self.ops.savepoint_create_sql(sid))

 def _savepoint_rollback(self, sid):
-    if not self.features.uses_savepoints:
-        return
     self.cursor().execute(self.ops.savepoint_rollback_sql(sid))

 def _savepoint_commit(self, sid):
-    if not self.features.uses_savepoints:
-        return
     self.cursor().execute(self.ops.savepoint_commit_sql(sid))

+def _savepoint_allowed(self):
+    # Savepoints cannot be created outside a transaction
+    return self.features.uses_savepoints and not self.autocommit
+
 ##### Generic savepoint management methods #####

 def savepoint(self):
     """
-    Creates a savepoint (if supported and required by the backend) inside the
-    current transaction. Returns an identifier for the savepoint that will be
-    used for the subsequent rollback or commit.
+    Creates a savepoint inside the current transaction. Returns an
+    identifier for the savepoint that will be used for the subsequent
+    rollback or commit. Does nothing if savepoints are not supported.
     """
+    if not self._savepoint_allowed():
+        return
     thread_ident = thread.get_ident()
+    tid = str(thread_ident).replace('-', '')
     self.savepoint_state += 1
-    tid = str(thread_ident).replace('-', '')
     sid = "s%s_x%d" % (tid, self.savepoint_state)
+    self.validate_thread_sharing()
     self._savepoint(sid)
     return sid

 def savepoint_rollback(self, sid):
     """
-    Rolls back the most recent savepoint (if one exists). Does nothing if
-    savepoints are not supported.
+    Rolls back to a savepoint. Does nothing if savepoints are not supported.
     """
+    if not self._savepoint_allowed():
+        return
     self.validate_thread_sharing()
-    if self.savepoint_state:
-        self._savepoint_rollback(sid)
+    self._savepoint_rollback(sid)

 def savepoint_commit(self, sid):
     """
-    Commits the most recent savepoint (if one exists). Does nothing if
-    savepoints are not supported.
+    Releases a savepoint. Does nothing if savepoints are not supported.
     """
+    if not self._savepoint_allowed():
+        return
     self.validate_thread_sharing()
-    if self.savepoint_state:
-        self._savepoint_commit(sid)
+    self._savepoint_commit(sid)

 def clean_savepoints(self):
     """
@ -217,24 +249,15 @@ class BaseDatabaseWrapper(object):
 ##### Backend-specific transaction management methods #####

-def _enter_transaction_management(self, managed):
+def _set_autocommit(self, autocommit):
     """
-    A hook for backend-specific changes required when entering manual
-    transaction handling.
+    Backend-specific implementation to enable or disable autocommit.
     """
-    pass
-
-def _leave_transaction_management(self, managed):
-    """
-    A hook for backend-specific changes required when leaving manual
-    transaction handling. Will usually be implemented only when
-    _enter_transaction_management() is also required.
-    """
-    pass
+    raise NotImplementedError

 ##### Generic transaction management methods #####

-def enter_transaction_management(self, managed=True):
+def enter_transaction_management(self, managed=True, forced=False):
     """
     Enters transaction management for a running thread. It must be balanced with
     the appropriate leave_transaction_management call, since the actual state is
@ -243,12 +266,22 @@ class BaseDatabaseWrapper(object):
 The state and dirty flag are carried over from the surrounding block or
 from the settings, if there is no surrounding block (dirty is always false
 when no current block is running).
+
+If you switch off transaction management and there is a pending
+commit/rollback, the data will be commited, unless "forced" is True.
 """
-if self.transaction_state:
-    self.transaction_state.append(self.transaction_state[-1])
-else:
-    self.transaction_state.append(settings.TRANSACTIONS_MANAGED)
-self._enter_transaction_management(managed)
+self.validate_no_atomic_block()
+self.ensure_connection()
+
+self.transaction_state.append(managed)
+
+if not managed and self.is_dirty() and not forced:
+    self.commit()
+    self.set_clean()
+
+if managed == self.autocommit:
+    self.set_autocommit(not managed)

 def leave_transaction_management(self):
     """
@ -256,22 +289,48 @@ class BaseDatabaseWrapper(object):
 over to the surrounding block, as a commit will commit all changes, even
 those from outside. (Commits are on connection level.)
 """
+self.validate_no_atomic_block()
+self.ensure_connection()
+
 if self.transaction_state:
     del self.transaction_state[-1]
 else:
     raise TransactionManagementError(
         "This code isn't under transaction management")
-# The _leave_transaction_management hook can change the dirty flag,
-# so memoize it.
-dirty = self._dirty
-# We will pass the next status (after leaving the previous state
-# behind) to subclass hook.
-self._leave_transaction_management(self.is_managed())
-if dirty:
+
+if self.transaction_state:
+    managed = self.transaction_state[-1]
+else:
+    managed = not self.settings_dict['AUTOCOMMIT']
+
+if self._dirty:
     self.rollback()
+    if managed == self.autocommit:
+        self.set_autocommit(not managed)
     raise TransactionManagementError(
         "Transaction managed block ended with pending COMMIT/ROLLBACK")
+
+if managed == self.autocommit:
+    self.set_autocommit(not managed)
def set_autocommit(self, autocommit):
"""
Enable or disable autocommit.
"""
self.validate_no_atomic_block()
self.ensure_connection()
self._set_autocommit(autocommit)
self.autocommit = autocommit
def validate_no_atomic_block(self):
"""
Raise an error if an atomic block is active.
"""
if self.in_atomic_block:
raise TransactionManagementError(
"This is forbidden when an 'atomic' block is active.")
def abort(self): def abort(self):
""" """
Roll back any ongoing transaction and clean the transaction state Roll back any ongoing transaction and clean the transaction state
@ -295,7 +354,8 @@ class BaseDatabaseWrapper(object):
 to decide in a managed block of code to decide whether there are open
 changes waiting for commit.
 """
-self._dirty = True
+if not self.autocommit:
+    self._dirty = True

 def set_clean(self):
     """
@ -306,51 +366,6 @@ class BaseDatabaseWrapper(object):
self._dirty = False self._dirty = False
self.clean_savepoints() self.clean_savepoints()
def is_managed(self):
"""
Checks whether the transaction manager is in manual or in auto state.
"""
if self.transaction_state:
return self.transaction_state[-1]
return settings.TRANSACTIONS_MANAGED
def managed(self, flag=True):
"""
Puts the transaction manager into a manual state: managed transactions have
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
commited.
"""
top = self.transaction_state
if top:
top[-1] = flag
if not flag and self.is_dirty():
self.commit()
else:
raise TransactionManagementError("This code isn't under transaction "
"management")
def commit_unless_managed(self):
"""
Commits changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self.commit()
self.clean_savepoints()
else:
self.set_dirty()
def rollback_unless_managed(self):
"""
Rolls back changes if the system is not in managed transaction mode.
"""
self.validate_thread_sharing()
if not self.is_managed():
self.rollback()
else:
self.set_dirty()
##### Foreign key constraints checks handling ##### ##### Foreign key constraints checks handling #####
@contextmanager @contextmanager
@ -402,12 +417,19 @@ class BaseDatabaseWrapper(object):
or if it outlived its maximum age. or if it outlived its maximum age.
""" """
if self.connection is not None: if self.connection is not None:
# If the application didn't restore the original autocommit setting,
# don't take chances, drop the connection.
if self.autocommit != self.settings_dict['AUTOCOMMIT']:
self.close()
return
if self.errors_occurred: if self.errors_occurred:
if self.is_usable(): if self.is_usable():
self.errors_occurred = False self.errors_occurred = False
else: else:
self.close() self.close()
return return
if self.close_at is not None and time.time() >= self.close_at: if self.close_at is not None and time.time() >= self.close_at:
self.close() self.close()
return return
@ -460,6 +482,12 @@ class BaseDatabaseWrapper(object):
if must_close: if must_close:
self.close() self.close()
def _start_transaction_under_autocommit(self):
"""
Only required when autocommits_when_autocommit_is_off = True.
"""
raise NotImplementedError
class BaseDatabaseFeatures(object): class BaseDatabaseFeatures(object):
allows_group_by_pk = False allows_group_by_pk = False
@ -479,7 +507,6 @@ class BaseDatabaseFeatures(object):
can_use_chunked_reads = True can_use_chunked_reads = True
can_return_id_from_insert = False can_return_id_from_insert = False
has_bulk_insert = False has_bulk_insert = False
uses_autocommit = False
uses_savepoints = False uses_savepoints = False
can_combine_inserts_with_and_without_auto_increment_pk = False can_combine_inserts_with_and_without_auto_increment_pk = False
@ -563,6 +590,10 @@ class BaseDatabaseFeatures(object):
# Support for the DISTINCT ON clause # Support for the DISTINCT ON clause
can_distinct_on_fields = False can_distinct_on_fields = False
# Does the backend decide to commit before SAVEPOINT statements
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
def __init__(self, connection): def __init__(self, connection):
self.connection = connection self.connection = connection
@ -574,7 +605,6 @@ class BaseDatabaseFeatures(object):
# otherwise autocommit will cause the confimation to # otherwise autocommit will cause the confimation to
# fail. # fail.
self.connection.enter_transaction_management() self.connection.enter_transaction_management()
self.connection.managed(True)
cursor = self.connection.cursor() cursor = self.connection.cursor()
cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)') cursor.execute('CREATE TABLE ROLLBACK_TEST (X INT)')
self.connection.commit() self.connection.commit()
@ -883,19 +913,19 @@ class BaseDatabaseOperations(object):
"uses_savepoints" feature is True. The "sid" parameter is a string "uses_savepoints" feature is True. The "sid" parameter is a string
for the savepoint id. for the savepoint id.
""" """
raise NotImplementedError return "SAVEPOINT %s" % self.quote_name(sid)
def savepoint_commit_sql(self, sid): def savepoint_commit_sql(self, sid):
""" """
Returns the SQL for committing the given savepoint. Returns the SQL for committing the given savepoint.
""" """
raise NotImplementedError return "RELEASE SAVEPOINT %s" % self.quote_name(sid)
def savepoint_rollback_sql(self, sid): def savepoint_rollback_sql(self, sid):
""" """
Returns the SQL for rolling back the given savepoint. Returns the SQL for rolling back the given savepoint.
""" """
raise NotImplementedError return "ROLLBACK TO SAVEPOINT %s" % self.quote_name(sid)
def set_time_zone_sql(self): def set_time_zone_sql(self):
""" """
@ -946,6 +976,9 @@ class BaseDatabaseOperations(object):
return "BEGIN;" return "BEGIN;"
def end_transaction_sql(self, success=True): def end_transaction_sql(self, success=True):
"""
Returns the SQL statement required to end a transaction.
"""
if not success: if not success:
return "ROLLBACK;" return "ROLLBACK;"
return "COMMIT;" return "COMMIT;"

View File

@ -1,6 +1,7 @@
import hashlib import hashlib
import sys import sys
import time import time
import warnings
from django.conf import settings from django.conf import settings
from django.db.utils import load_backend from django.db.utils import load_backend
@ -382,10 +383,7 @@ class BaseDatabaseCreation(object):
 qn = self.connection.ops.quote_name

-# Create the test database and connect to it. We need to autocommit
-# if the database supports it because PostgreSQL doesn't allow
-# CREATE/DROP DATABASE statements within transactions.
-self._prepare_for_test_db_ddl()
+# Create the test database and connect to it.
 cursor = self.connection.cursor()
 try:
     cursor.execute(
@ -453,7 +451,6 @@ class BaseDatabaseCreation(object):
# to do so, because it's not allowed to delete a database while being # to do so, because it's not allowed to delete a database while being
# connected to it. # connected to it.
cursor = self.connection.cursor() cursor = self.connection.cursor()
self._prepare_for_test_db_ddl()
# Wait to avoid "database is being accessed by other users" errors. # Wait to avoid "database is being accessed by other users" errors.
time.sleep(1) time.sleep(1)
cursor.execute("DROP DATABASE %s" cursor.execute("DROP DATABASE %s"
@ -466,16 +463,10 @@ class BaseDatabaseCreation(object):
 anymore by Django code. Kept for compatibility with user code that
 might use it.
 """
-pass
-
-def _prepare_for_test_db_ddl(self):
-    """
-    Internal implementation - Hook for tasks that should be performed
-    before the ``CREATE DATABASE``/``DROP DATABASE`` clauses used by
-    testing code to create/ destroy test databases. Needed e.g. in
-    PostgreSQL to rollback and close any active transaction.
-    """
-    pass
+warnings.warn(
+    "set_autocommit was moved from BaseDatabaseCreation to "
+    "BaseDatabaseWrapper.", PendingDeprecationWarning, stacklevel=2)
+return self.connection.set_autocommit(True)

 def sql_table_creation_suffix(self):
     """

View File

@ -55,12 +55,9 @@ class DatabaseWrapper(BaseDatabaseWrapper):
 _savepoint = ignore
 _savepoint_commit = complain
 _savepoint_rollback = ignore
-_enter_transaction_management = complain
-_leave_transaction_management = ignore
+_set_autocommit = complain
 set_dirty = complain
 set_clean = complain
-commit_unless_managed = complain
-rollback_unless_managed = ignore

 def __init__(self, *args, **kwargs):
     super(DatabaseWrapper, self).__init__(*args, **kwargs)

View File

@ -355,15 +355,6 @@ class DatabaseOperations(BaseDatabaseOperations):
items_sql = "(%s)" % ", ".join(["%s"] * len(fields)) items_sql = "(%s)" % ", ".join(["%s"] * len(fields))
return "VALUES " + ", ".join([items_sql] * num_values) return "VALUES " + ", ".join([items_sql] * num_values)
def savepoint_create_sql(self, sid):
return "SAVEPOINT %s" % sid
def savepoint_commit_sql(self, sid):
return "RELEASE SAVEPOINT %s" % sid
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT %s" % sid
class DatabaseWrapper(BaseDatabaseWrapper): class DatabaseWrapper(BaseDatabaseWrapper):
vendor = 'mysql' vendor = 'mysql'
operators = { operators = {
@ -445,6 +436,9 @@ class DatabaseWrapper(BaseDatabaseWrapper):
except Database.NotSupportedError: except Database.NotSupportedError:
pass pass
def _set_autocommit(self, autocommit):
self.connection.autocommit(autocommit)
def disable_constraint_checking(self): def disable_constraint_checking(self):
""" """
Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True, Disables foreign key checks, primarily for use in adding rows with forward references. Always returns True,

View File

@ -612,6 +612,9 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def _savepoint_commit(self, sid): def _savepoint_commit(self, sid):
pass pass
def _set_autocommit(self, autocommit):
self.connection.autocommit = autocommit
def check_constraints(self, table_names=None): def check_constraints(self, table_names=None):
""" """
To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they To check constraints, we set constraints to immediate. Then, when, we're done we must ensure they

View File

@ -273,6 +273,3 @@ class DatabaseCreation(BaseDatabaseCreation):
settings_dict['NAME'], settings_dict['NAME'],
self._test_database_user(), self._test_database_user(),
) )
def set_autocommit(self):
self.connection.connection.autocommit = True

View File

@ -49,6 +49,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
has_select_for_update = True has_select_for_update = True
has_select_for_update_nowait = True has_select_for_update_nowait = True
has_bulk_insert = True has_bulk_insert = True
uses_savepoints = True
supports_tablespaces = True supports_tablespaces = True
supports_transactions = True supports_transactions = True
can_distinct_on_fields = True can_distinct_on_fields = True
@ -77,15 +78,11 @@ class DatabaseWrapper(BaseDatabaseWrapper):
 def __init__(self, *args, **kwargs):
     super(DatabaseWrapper, self).__init__(*args, **kwargs)

+    opts = self.settings_dict["OPTIONS"]
+    RC = psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED
+    self.isolation_level = opts.get('isolation_level', RC)
+
     self.features = DatabaseFeatures(self)
-    autocommit = self.settings_dict["OPTIONS"].get('autocommit', False)
-    self.features.uses_autocommit = autocommit
-    if autocommit:
-        level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
-    else:
-        level = self.settings_dict["OPTIONS"].get('isolation_level',
-            psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
-    self._set_isolation_level(level)
     self.ops = DatabaseOperations(self)
     self.client = DatabaseClient(self)
     self.creation = DatabaseCreation(self)
@ -135,8 +132,7 @@ class DatabaseWrapper(BaseDatabaseWrapper):
 if conn_tz != tz:
     # Set the time zone in autocommit mode (see #17062)
-    self.connection.set_isolation_level(
-            psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
+    self.set_autocommit(True)
     self.connection.cursor().execute(
             self.ops.set_time_zone_sql(), [tz])
     self.connection.set_isolation_level(self.isolation_level)
@ -167,44 +163,22 @@ class DatabaseWrapper(BaseDatabaseWrapper):
     finally:
         self.set_clean()

-    def _enter_transaction_management(self, managed):
-        """
-        Switch the isolation level when needing transaction support, so that
-        the same transaction is visible across all the queries.
-        """
-        if self.features.uses_autocommit and managed and not self.isolation_level:
-            level = self.settings_dict["OPTIONS"].get('isolation_level',
-                psycopg2.extensions.ISOLATION_LEVEL_READ_COMMITTED)
-            self._set_isolation_level(level)
-
-    def _leave_transaction_management(self, managed):
-        """
-        If the normal operating mode is "autocommit", switch back to that when
-        leaving transaction management.
-        """
-        if self.features.uses_autocommit and not managed and self.isolation_level:
-            self._set_isolation_level(psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)
-
-    def _set_isolation_level(self, level):
-        """
-        Do all the related feature configurations for changing isolation
-        levels. This doesn't touch the uses_autocommit feature, since that
-        controls the movement *between* isolation levels.
-        """
-        assert level in range(5)
-        try:
-            if self.connection is not None:
-                self.connection.set_isolation_level(level)
-                if level == psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT:
-                    self.set_clean()
-        finally:
-            self.isolation_level = level
-            self.features.uses_savepoints = bool(level)
-
-    def set_dirty(self):
-        if ((self.transaction_state and self.transaction_state[-1]) or
-                not self.features.uses_autocommit):
-            super(DatabaseWrapper, self).set_dirty()
+    def _set_isolation_level(self, isolation_level):
+        assert isolation_level in range(1, 5)  # Use set_autocommit for level = 0
+        if self.psycopg2_version >= (2, 4, 2):
+            self.connection.set_session(isolation_level=isolation_level)
+        else:
+            self.connection.set_isolation_level(isolation_level)
+
+    def _set_autocommit(self, autocommit):
+        if self.psycopg2_version >= (2, 4, 2):
+            self.connection.autocommit = autocommit
+        else:
+            if autocommit:
+                level = psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT
+            else:
+                level = self.isolation_level
+            self.connection.set_isolation_level(level)

     def check_constraints(self, table_names=None):
         """
@ -223,6 +197,11 @@ class DatabaseWrapper(BaseDatabaseWrapper):
else: else:
return True return True
@cached_property
def psycopg2_version(self):
version = psycopg2.__version__.split(' ', 1)[0]
return tuple(int(v) for v in version.split('.'))
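The new `psycopg2_version` property above drives the choice between `connection.autocommit` / `connection.set_session()` (psycopg2 2.4.2 and later) and the older `set_isolation_level()` calls. A standalone sketch of the same parsing logic outside the DatabaseWrapper, assuming psycopg2 is importable:

```python
import psycopg2

def psycopg2_version():
    # psycopg2.__version__ looks like "2.4.5 (dt dec pq3 ext)"; keep only
    # the numeric part and turn it into a comparable tuple of ints.
    version = psycopg2.__version__.split(' ', 1)[0]
    return tuple(int(v) for v in version.split('.'))

if psycopg2_version() >= (2, 4, 2):
    pass  # use connection.autocommit / connection.set_session(...)
else:
    pass  # fall back to connection.set_isolation_level(...)
```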
@cached_property @cached_property
def pg_version(self): def pg_version(self):
with self.temporary_connection(): with self.temporary_connection():

View File

@ -77,14 +77,3 @@ class DatabaseCreation(BaseDatabaseCreation):
output.append(get_index_sql('%s_%s_like' % (db_table, f.column), output.append(get_index_sql('%s_%s_like' % (db_table, f.column),
' text_pattern_ops')) ' text_pattern_ops'))
return output return output
def set_autocommit(self):
self._prepare_for_test_db_ddl()
def _prepare_for_test_db_ddl(self):
"""Rollback and close the active transaction."""
# Make sure there is an open connection.
self.connection.cursor()
self.connection.connection.rollback()
self.connection.connection.set_isolation_level(
psycopg2.extensions.ISOLATION_LEVEL_AUTOCOMMIT)

View File

@ -175,15 +175,6 @@ class DatabaseOperations(BaseDatabaseOperations):
style.SQL_TABLE(qn(f.m2m_db_table())))) style.SQL_TABLE(qn(f.m2m_db_table()))))
return output return output
def savepoint_create_sql(self, sid):
return "SAVEPOINT %s" % sid
def savepoint_commit_sql(self, sid):
return "RELEASE SAVEPOINT %s" % sid
def savepoint_rollback_sql(self, sid):
return "ROLLBACK TO SAVEPOINT %s" % sid
def prep_for_iexact_query(self, x): def prep_for_iexact_query(self, x):
return x return x

View File

@ -99,6 +99,11 @@ class DatabaseFeatures(BaseDatabaseFeatures):
supports_mixed_date_datetime_comparisons = False supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False can_combine_inserts_with_and_without_auto_increment_pk = False
autocommits_when_autocommit_is_off = True
@cached_property
def uses_savepoints(self):
return Database.sqlite_version_info >= (3, 6, 8)
@cached_property @cached_property
def supports_stddev(self): def supports_stddev(self):
@ -355,6 +360,25 @@ class DatabaseWrapper(BaseDatabaseWrapper):
if self.settings_dict['NAME'] != ":memory:": if self.settings_dict['NAME'] != ":memory:":
BaseDatabaseWrapper.close(self) BaseDatabaseWrapper.close(self)
def _savepoint_allowed(self):
# When 'isolation_level' is not None, sqlite3 commits before each
# savepoint; it's a bug. When it is None, savepoints don't make sense
# because autocommit is enabled. The only exception is inside atomic
# blocks. To work around that bug, on SQLite, atomic starts a
# transaction explicitly rather than simply disable autocommit.
return self.in_atomic_block
def _set_autocommit(self, autocommit):
if autocommit:
level = None
else:
# sqlite3's internal default is ''. It's different from None.
# See Modules/_sqlite/connection.c.
level = ''
# 'isolation_level' is a misleading API.
# SQLite always runs at the SERIALIZABLE isolation level.
self.connection.isolation_level = level
def check_constraints(self, table_names=None): def check_constraints(self, table_names=None):
""" """
Checks each table name in `table_names` for rows with invalid foreign key references. This method is Checks each table name in `table_names` for rows with invalid foreign key references. This method is
@ -392,6 +416,14 @@ class DatabaseWrapper(BaseDatabaseWrapper):
def is_usable(self): def is_usable(self):
return True return True
def _start_transaction_under_autocommit(self):
"""
Start a transaction explicitly in autocommit mode.
Staying in autocommit mode works around a bug of sqlite3 that breaks
savepoints when autocommit is disabled.
"""
self.cursor().execute("BEGIN")
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s') FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')

View File

@ -72,9 +72,6 @@ class DatabaseCreation(BaseDatabaseCreation):
# Remove the SQLite database file # Remove the SQLite database file
os.remove(test_database_name) os.remove(test_database_name)
def set_autocommit(self):
self.connection.connection.isolation_level = None
def test_db_signature(self): def test_db_signature(self):
""" """
Returns a tuple that uniquely identifies a test database. Returns a tuple that uniquely identifies a test database.

View File

@ -609,48 +609,48 @@ class Model(six.with_metaclass(ModelBase)):
 if update_fields:
     non_pks = [f for f in non_pks if f.name in update_fields or f.attname in update_fields]

+with transaction.commit_on_success_unless_managed(using=using):
     # First, try an UPDATE. If that doesn't update anything, do an INSERT.
     pk_val = self._get_pk_val(meta)
     pk_set = pk_val is not None
     record_exists = True
     manager = cls._base_manager
     if pk_set:
         # Determine if we should do an update (pk already exists, forced update,
         # no force_insert)
         if ((force_update or update_fields) or (not force_insert and
                 manager.using(using).filter(pk=pk_val).exists())):
             if force_update or non_pks:
                 values = [(f, None, (raw and getattr(self, f.attname) or f.pre_save(self, False))) for f in non_pks]
                 if values:
                     rows = manager.using(using).filter(pk=pk_val)._update(values)
                     if force_update and not rows:
                         raise DatabaseError("Forced update did not affect any rows.")
                     if update_fields and not rows:
                         raise DatabaseError("Save with update_fields did not affect any rows.")
         else:
             record_exists = False
     if not pk_set or not record_exists:
         if meta.order_with_respect_to:
             # If this is a model with an order_with_respect_to
             # autopopulate the _order field
             field = meta.order_with_respect_to
             order_value = manager.using(using).filter(**{field.name: getattr(self, field.attname)}).count()
             self._order = order_value

         fields = meta.local_fields
         if not pk_set:
             if force_update or update_fields:
                 raise ValueError("Cannot force an update in save() with no primary key.")
             fields = [f for f in fields if not isinstance(f, AutoField)]

         record_exists = False

         update_pk = bool(meta.has_auto_field and not pk_set)
         result = manager._insert([self], fields=fields, return_id=update_pk, using=using, raw=raw)

         if update_pk:
             setattr(self, meta.pk.attname, result)
-transaction.commit_unless_managed(using=using)

 # Store the database on which the object was saved
 self._state.db = using
@ -963,9 +963,9 @@ def method_set_order(ordered_obj, self, id_list, using=None):
 order_name = ordered_obj._meta.order_with_respect_to.name
 # FIXME: It would be nice if there was an "update many" version of update
 # for situations like this.
-for i, j in enumerate(id_list):
-    ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)
-transaction.commit_unless_managed(using=using)
+with transaction.commit_on_success_unless_managed(using=using):
+    for i, j in enumerate(id_list):
+        ordered_obj.objects.filter(**{'pk': j, order_name: rel_val}).update(_order=i)

 def method_get_order(ordered_obj, self):

View File

@ -50,26 +50,6 @@ def DO_NOTHING(collector, field, sub_objs, using):
pass pass
def force_managed(func):
@wraps(func)
def decorated(self, *args, **kwargs):
if not transaction.is_managed(using=self.using):
transaction.enter_transaction_management(using=self.using)
forced_managed = True
else:
forced_managed = False
try:
func(self, *args, **kwargs)
if forced_managed:
transaction.commit(using=self.using)
else:
transaction.commit_unless_managed(using=self.using)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.using)
return decorated
class Collector(object): class Collector(object):
def __init__(self, using): def __init__(self, using):
self.using = using self.using = using
@ -262,7 +242,6 @@ class Collector(object):
 self.data = SortedDict([(model, self.data[model])
                         for model in sorted_models])

-@force_managed
 def delete(self):
     # sort instance collections
     for model, instances in self.data.items():
@ -273,40 +252,41 @@ class Collector(object):
 # end of a transaction.
 self.sort()

+with transaction.commit_on_success_unless_managed(using=self.using):
     # send pre_delete signals
     for model, obj in self.instances_with_model():
         if not model._meta.auto_created:
             signals.pre_delete.send(
                 sender=model, instance=obj, using=self.using
             )

     # fast deletes
     for qs in self.fast_deletes:
         qs._raw_delete(using=self.using)

     # update fields
     for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
         query = sql.UpdateQuery(model)
         for (field, value), instances in six.iteritems(instances_for_fieldvalues):
             query.update_batch([obj.pk for obj in instances],
                                {field.name: value}, self.using)

     # reverse instance collections
     for instances in six.itervalues(self.data):
         instances.reverse()

     # delete instances
     for model, instances in six.iteritems(self.data):
         query = sql.DeleteQuery(model)
         pk_list = [obj.pk for obj in instances]
         query.delete_batch(pk_list, self.using)

         if not model._meta.auto_created:
             for obj in instances:
                 signals.post_delete.send(
                     sender=model, instance=obj, using=self.using
                 )

 # update collected instances
 for model, instances_for_fieldvalues in six.iteritems(self.field_updates):
     for (field, value), instances in six.iteritems(instances_for_fieldvalues):

View File

@ -442,12 +442,7 @@ class QuerySet(object):
self._for_write = True self._for_write = True
connection = connections[self.db] connection = connections[self.db]
fields = self.model._meta.local_fields fields = self.model._meta.local_fields
if not transaction.is_managed(using=self.db): with transaction.commit_on_success_unless_managed(using=self.db):
transaction.enter_transaction_management(using=self.db)
forced_managed = True
else:
forced_managed = False
try:
if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk if (connection.features.can_combine_inserts_with_and_without_auto_increment_pk
and self.model._meta.has_auto_field): and self.model._meta.has_auto_field):
self._batched_insert(objs, fields, batch_size) self._batched_insert(objs, fields, batch_size)
@ -458,13 +453,6 @@ class QuerySet(object):
if objs_without_pk: if objs_without_pk:
fields= [f for f in fields if not isinstance(f, AutoField)] fields= [f for f in fields if not isinstance(f, AutoField)]
self._batched_insert(objs_without_pk, fields, batch_size) self._batched_insert(objs_without_pk, fields, batch_size)
if forced_managed:
transaction.commit(using=self.db)
else:
transaction.commit_unless_managed(using=self.db)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.db)
return objs return objs
@ -581,20 +569,8 @@ class QuerySet(object):
self._for_write = True self._for_write = True
query = self.query.clone(sql.UpdateQuery) query = self.query.clone(sql.UpdateQuery)
query.add_update_values(kwargs) query.add_update_values(kwargs)
if not transaction.is_managed(using=self.db): with transaction.commit_on_success_unless_managed(using=self.db):
transaction.enter_transaction_management(using=self.db)
forced_managed = True
else:
forced_managed = False
try:
rows = query.get_compiler(self.db).execute_sql(None) rows = query.get_compiler(self.db).execute_sql(None)
if forced_managed:
transaction.commit(using=self.db)
else:
transaction.commit_unless_managed(using=self.db)
finally:
if forced_managed:
transaction.leave_transaction_management(using=self.db)
self._result_cache = None self._result_cache = None
return rows return rows
update.alters_data = True update.alters_data = True

View File

@ -12,9 +12,11 @@ Managed transactions don't do those commits, but will need some kind of manual
or implicit commits or rollbacks. or implicit commits or rollbacks.
""" """
import warnings
from functools import wraps from functools import wraps
from django.db import connections, DEFAULT_DB_ALIAS from django.db import connections, DatabaseError, DEFAULT_DB_ALIAS
class TransactionManagementError(Exception): class TransactionManagementError(Exception):
@ -37,6 +39,10 @@ def get_connection(using=None):
using = DEFAULT_DB_ALIAS using = DEFAULT_DB_ALIAS
return connections[using] return connections[using]
###########################
# Deprecated private APIs #
###########################
def abort(using=None): def abort(using=None):
""" """
Roll back any ongoing transactions and clean the transaction management Roll back any ongoing transactions and clean the transaction management
@ -49,7 +55,7 @@ def abort(using=None):
""" """
get_connection(using).abort() get_connection(using).abort()
def enter_transaction_management(managed=True, using=None): def enter_transaction_management(managed=True, using=None, forced=False):
""" """
Enters transaction management for a running thread. It must be balanced with Enters transaction management for a running thread. It must be balanced with
the appropriate leave_transaction_management call, since the actual state is the appropriate leave_transaction_management call, since the actual state is
@ -59,7 +65,7 @@ def enter_transaction_management(managed=True, using=None):
from the settings, if there is no surrounding block (dirty is always false from the settings, if there is no surrounding block (dirty is always false
when no current block is running). when no current block is running).
""" """
get_connection(using).enter_transaction_management(managed) get_connection(using).enter_transaction_management(managed, forced)
def leave_transaction_management(using=None): def leave_transaction_management(using=None):
""" """
@ -92,52 +98,47 @@ def set_clean(using=None):
""" """
get_connection(using).set_clean() get_connection(using).set_clean()
def clean_savepoints(using=None):
"""
Resets the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
def is_managed(using=None): def is_managed(using=None):
""" warnings.warn("'is_managed' is deprecated.",
Checks whether the transaction manager is in manual or in auto state. PendingDeprecationWarning, stacklevel=2)
"""
return get_connection(using).is_managed()
def managed(flag=True, using=None): def managed(flag=True, using=None):
""" warnings.warn("'managed' no longer serves a purpose.",
Puts the transaction manager into a manual state: managed transactions have PendingDeprecationWarning, stacklevel=2)
to be committed explicitly by the user. If you switch off transaction
management and there is a pending commit/rollback, the data will be
commited.
"""
get_connection(using).managed(flag)
def commit_unless_managed(using=None): def commit_unless_managed(using=None):
""" warnings.warn("'commit_unless_managed' is now a no-op.",
Commits changes if the system is not in managed transaction mode. PendingDeprecationWarning, stacklevel=2)
"""
get_connection(using).commit_unless_managed()
def rollback_unless_managed(using=None): def rollback_unless_managed(using=None):
""" warnings.warn("'rollback_unless_managed' is now a no-op.",
Rolls back changes if the system is not in managed transaction mode. PendingDeprecationWarning, stacklevel=2)
"""
get_connection(using).rollback_unless_managed()
############### ###############
# Public APIs # # Public APIs #
############### ###############
def get_autocommit(using=None):
"""
Get the autocommit status of the connection.
"""
return get_connection(using).autocommit
def set_autocommit(autocommit, using=None):
"""
Set the autocommit status of the connection.
"""
return get_connection(using).set_autocommit(autocommit)
def commit(using=None): def commit(using=None):
""" """
Does the commit itself and resets the dirty flag. Commits a transaction and resets the dirty flag.
""" """
get_connection(using).commit() get_connection(using).commit()
def rollback(using=None): def rollback(using=None):
""" """
This function does the rollback itself and resets the dirty flag. Rolls back a transaction and resets the dirty flag.
""" """
get_connection(using).rollback() get_connection(using).rollback()
@ -163,9 +164,193 @@ def savepoint_commit(sid, using=None):
""" """
get_connection(using).savepoint_commit(sid) get_connection(using).savepoint_commit(sid)
############## def clean_savepoints(using=None):
# DECORATORS # """
############## Resets the counter used to generate unique savepoint ids in this thread.
"""
get_connection(using).clean_savepoints()
#################################
# Decorators / context managers #
#################################
class Atomic(object):
"""
This class guarantees the atomic execution of a given block.
An instance can be used either as a decorator or as a context manager.
When it's used as a decorator, __call__ wraps the execution of the
decorated function in the instance itself, used as a context manager.
When it's used as a context manager, __enter__ creates a transaction or a
savepoint, depending on whether a transaction is already in progress, and
__exit__ commits the transaction or releases the savepoint on normal exit,
and rolls back the transaction or to the savepoint on exceptions.
It's possible to disable the creation of savepoints if the goal is to
ensure that some code runs within a transaction without creating overhead.
A stack of savepoint identifiers is maintained as an attribute of the
connection. None denotes the absence of a savepoint.
This allows reentrancy even if the same Atomic instance is reused. For
example, it's possible to define `oa = atomic('other')` and use `@oa` or
`with oa:` multiple times.
Since database connections are thread-local, this is thread-safe.
"""
def __init__(self, using, savepoint):
self.using = using
self.savepoint = savepoint
def _legacy_enter_transaction_management(self, connection):
if not connection.in_atomic_block:
if connection.transaction_state and connection.transaction_state[-1]:
connection._atomic_forced_unmanaged = True
connection.enter_transaction_management(managed=False)
else:
connection._atomic_forced_unmanaged = False
def _legacy_leave_transaction_management(self, connection):
if not connection.in_atomic_block and connection._atomic_forced_unmanaged:
connection.leave_transaction_management()
def __enter__(self):
connection = get_connection(self.using)
# Ensure we have a connection to the database before testing
# autocommit status.
connection.ensure_connection()
# Remove this when the legacy transaction management goes away.
self._legacy_enter_transaction_management(connection)
if not connection.in_atomic_block and not connection.autocommit:
raise TransactionManagementError(
"'atomic' cannot be used when autocommit is disabled.")
if connection.in_atomic_block:
# We're already in a transaction; create a savepoint, unless we
# were told not to or we're already waiting for a rollback. The
# second condition avoids creating useless savepoints and prevents
# overwriting needs_rollback until the rollback is performed.
if self.savepoint and not connection.needs_rollback:
sid = connection.savepoint()
connection.savepoint_ids.append(sid)
else:
connection.savepoint_ids.append(None)
else:
# We aren't in a transaction yet; create one.
# The usual way to start a transaction is to turn autocommit off.
# However, some database adapters (namely sqlite3) don't handle
# transactions and savepoints properly when autocommit is off.
# In such cases, start an explicit transaction instead, which has
# the side-effect of disabling autocommit.
if connection.features.autocommits_when_autocommit_is_off:
connection._start_transaction_under_autocommit()
connection.autocommit = False
else:
connection.set_autocommit(False)
connection.in_atomic_block = True
connection.needs_rollback = False
def __exit__(self, exc_type, exc_value, traceback):
connection = get_connection(self.using)
if exc_value is None and not connection.needs_rollback:
if connection.savepoint_ids:
# Release savepoint if there is one
sid = connection.savepoint_ids.pop()
if sid is not None:
try:
connection.savepoint_commit(sid)
except DatabaseError:
connection.savepoint_rollback(sid)
# Remove this when the legacy transaction management goes away.
self._legacy_leave_transaction_management(connection)
raise
else:
# Commit transaction
connection.in_atomic_block = False
try:
connection.commit()
except DatabaseError:
connection.rollback()
# Remove this when the legacy transaction management goes away.
self._legacy_leave_transaction_management(connection)
raise
finally:
if connection.features.autocommits_when_autocommit_is_off:
connection.autocommit = True
else:
connection.set_autocommit(True)
else:
# This flag will be set to True again if there isn't a savepoint
# that allows performing the rollback at this level.
connection.needs_rollback = False
if connection.savepoint_ids:
# Roll back to savepoint if there is one, mark for rollback
# otherwise.
sid = connection.savepoint_ids.pop()
if sid is None:
connection.needs_rollback = True
else:
connection.savepoint_rollback(sid)
else:
# Roll back transaction
connection.in_atomic_block = False
try:
connection.rollback()
finally:
if connection.features.autocommits_when_autocommit_is_off:
connection.autocommit = True
else:
connection.set_autocommit(True)
# Remove this when the legacy transaction management goes away.
self._legacy_leave_transaction_management(connection)
def __call__(self, func):
@wraps(func)
def inner(*args, **kwargs):
with self:
return func(*args, **kwargs)
return inner
def atomic(using=None, savepoint=True):
# Bare decorator: @atomic -- although the first argument is called
# `using`, it's actually the function being decorated.
if callable(using):
return Atomic(DEFAULT_DB_ALIAS, savepoint)(using)
# Decorator: @atomic(...) or context manager: with atomic(...): ...
else:
return Atomic(using, savepoint)
def atomic_if_autocommit(using=None, savepoint=True):
# This variant only exists to support the ability to disable transaction
# management entirely in the DATABASES setting. It doesn't care about the
# autocommit state at run time.
db = DEFAULT_DB_ALIAS if callable(using) else using
autocommit = get_connection(db).settings_dict['AUTOCOMMIT']
if autocommit:
return atomic(using, savepoint)
else:
# Bare decorator: @atomic_if_autocommit
if callable(using):
return using
# Decorator: @atomic_if_autocommit(...)
else:
return lambda func: func
############################################
# Deprecated decorators / context managers #
############################################
class Transaction(object): class Transaction(object):
""" """
@ -222,9 +407,11 @@ def autocommit(using=None):
this decorator is useful if you globally activated transaction management in this decorator is useful if you globally activated transaction management in
your settings file and want the default behavior in some view functions. your settings file and want the default behavior in some view functions.
""" """
warnings.warn("autocommit is deprecated in favor of set_autocommit.",
PendingDeprecationWarning, stacklevel=2)
def entering(using): def entering(using):
enter_transaction_management(managed=False, using=using) enter_transaction_management(managed=False, using=using)
managed(False, using=using)
def exiting(exc_value, using): def exiting(exc_value, using):
leave_transaction_management(using=using) leave_transaction_management(using=using)
@ -238,9 +425,11 @@ def commit_on_success(using=None):
a rollback is made. This is one of the most common ways to do transaction a rollback is made. This is one of the most common ways to do transaction
control in Web apps. control in Web apps.
""" """
warnings.warn("commit_on_success is deprecated in favor of atomic.",
PendingDeprecationWarning, stacklevel=2)
def entering(using): def entering(using):
enter_transaction_management(using=using) enter_transaction_management(using=using)
managed(True, using=using)
def exiting(exc_value, using): def exiting(exc_value, using):
try: try:
@ -266,11 +455,37 @@ def commit_manually(using=None):
own -- it's up to the user to call the commit and rollback functions own -- it's up to the user to call the commit and rollback functions
themselves. themselves.
""" """
warnings.warn("commit_manually is deprecated in favor of set_autocommit.",
PendingDeprecationWarning, stacklevel=2)
def entering(using): def entering(using):
enter_transaction_management(using=using) enter_transaction_management(using=using)
managed(True, using=using)
def exiting(exc_value, using): def exiting(exc_value, using):
leave_transaction_management(using=using) leave_transaction_management(using=using)
return _transaction_func(entering, exiting, using) return _transaction_func(entering, exiting, using)
def commit_on_success_unless_managed(using=None, savepoint=False):
"""
Transitory API to preserve backwards-compatibility while refactoring.
Once the legacy transaction management is fully deprecated, this should
simply be replaced by atomic_if_autocommit. Until then, it's necessary to
avoid making a commit where Django didn't use to, since entering atomic in
managed mode triggers a commit.
Unlike atomic, savepoint defaults to False because that's closer to the
legacy behavior.
"""
connection = get_connection(using)
if connection.autocommit or connection.in_atomic_block:
return atomic_if_autocommit(using, savepoint)
else:
def entering(using):
pass
def exiting(exc_value, using):
set_dirty(using=using)
return _transaction_func(entering, exiting, using)
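A minimal sketch (not part of the patch) of manual transaction control using the new public functions defined above, as a replacement for the deprecated ``commit_manually``; ``save_row`` is a hypothetical helper::

    from django.db import transaction

    def import_rows(rows):
        transaction.set_autocommit(False)     # take over transaction control
        try:
            for row in rows:
                save_row(row)                 # hypothetical helper doing ORM writes
            transaction.commit()
        except Exception:
            transaction.rollback()
            raise
        finally:
            transaction.set_autocommit(True)  # restore Django's default behavior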

View File

@ -2,6 +2,7 @@ from functools import wraps
import os import os
import pkgutil import pkgutil
from threading import local from threading import local
import warnings
from django.conf import settings from django.conf import settings
from django.core.exceptions import ImproperlyConfigured from django.core.exceptions import ImproperlyConfigured
@ -158,6 +159,13 @@ class ConnectionHandler(object):
except KeyError: except KeyError:
raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias) raise ConnectionDoesNotExist("The connection %s doesn't exist" % alias)
conn.setdefault('ATOMIC_REQUESTS', False)
if settings.TRANSACTIONS_MANAGED:
warnings.warn(
"TRANSACTIONS_MANAGED is deprecated. Use AUTOCOMMIT instead.",
PendingDeprecationWarning, stacklevel=2)
conn.setdefault('AUTOCOMMIT', False)
conn.setdefault('AUTOCOMMIT', True)
conn.setdefault('ENGINE', 'django.db.backends.dummy') conn.setdefault('ENGINE', 'django.db.backends.dummy')
if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']: if conn['ENGINE'] == 'django.db.backends.' or not conn['ENGINE']:
conn['ENGINE'] = 'django.db.backends.dummy' conn['ENGINE'] = 'django.db.backends.dummy'

View File

@ -1,4 +1,7 @@
from django.db import transaction import warnings
from django.core.exceptions import MiddlewareNotUsed
from django.db import connection, transaction
class TransactionMiddleware(object): class TransactionMiddleware(object):
""" """
@ -7,10 +10,17 @@ class TransactionMiddleware(object):
commit, the commit is done when a successful response is created. If an commit, the commit is done when a successful response is created. If an
exception happens, the database is rolled back. exception happens, the database is rolled back.
""" """
def __init__(self):
warnings.warn(
"TransactionMiddleware is deprecated in favor of ATOMIC_REQUESTS.",
PendingDeprecationWarning, stacklevel=2)
if connection.settings_dict['ATOMIC_REQUESTS']:
raise MiddlewareNotUsed
def process_request(self, request): def process_request(self, request):
"""Enters transaction management""" """Enters transaction management"""
transaction.enter_transaction_management() transaction.enter_transaction_management()
transaction.managed(True)
def process_exception(self, request, exception): def process_exception(self, request, exception):
"""Rolls back the database and leaves transaction management""" """Rolls back the database and leaves transaction management"""
@ -24,7 +34,7 @@ class TransactionMiddleware(object):
def process_response(self, request, response): def process_response(self, request, response):
"""Commits and leaves transaction management.""" """Commits and leaves transaction management."""
if transaction.is_managed(): if not transaction.get_autocommit():
if transaction.is_dirty(): if transaction.is_dirty():
# Note: it is possible that the commit fails. If the reason is # Note: it is possible that the commit fails. If the reason is
# closed connection or some similar reason, then there is # closed connection or some similar reason, then there is

View File

@ -67,7 +67,6 @@ real_commit = transaction.commit
real_rollback = transaction.rollback real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management real_leave_transaction_management = transaction.leave_transaction_management
real_managed = transaction.managed
real_abort = transaction.abort real_abort = transaction.abort
def nop(*args, **kwargs): def nop(*args, **kwargs):
@ -78,7 +77,6 @@ def disable_transaction_methods():
transaction.rollback = nop transaction.rollback = nop
transaction.enter_transaction_management = nop transaction.enter_transaction_management = nop
transaction.leave_transaction_management = nop transaction.leave_transaction_management = nop
transaction.managed = nop
transaction.abort = nop transaction.abort = nop
def restore_transaction_methods(): def restore_transaction_methods():
@ -86,7 +84,6 @@ def restore_transaction_methods():
transaction.rollback = real_rollback transaction.rollback = real_rollback
transaction.enter_transaction_management = real_enter_transaction_management transaction.enter_transaction_management = real_enter_transaction_management
transaction.leave_transaction_management = real_leave_transaction_management transaction.leave_transaction_management = real_leave_transaction_management
transaction.managed = real_managed
transaction.abort = real_abort transaction.abort = real_abort
@ -157,14 +154,6 @@ class DocTestRunner(doctest.DocTestRunner):
doctest.DocTestRunner.__init__(self, *args, **kwargs) doctest.DocTestRunner.__init__(self, *args, **kwargs)
self.optionflags = doctest.ELLIPSIS self.optionflags = doctest.ELLIPSIS
def report_unexpected_exception(self, out, test, example, exc_info):
doctest.DocTestRunner.report_unexpected_exception(self, out, test,
example, exc_info)
# Rollback, in case of database errors. Otherwise they'd have
# side effects on other tests.
for conn in connections:
transaction.rollback_unless_managed(using=conn)
class _AssertNumQueriesContext(CaptureQueriesContext): class _AssertNumQueriesContext(CaptureQueriesContext):
def __init__(self, test_case, num, connection): def __init__(self, test_case, num, connection):
@ -490,14 +479,10 @@ class TransactionTestCase(SimpleTestCase):
conn.ops.sequence_reset_by_name_sql(no_style(), conn.ops.sequence_reset_by_name_sql(no_style(),
conn.introspection.sequence_list()) conn.introspection.sequence_list())
if sql_list: if sql_list:
try: with transaction.commit_on_success_unless_managed(using=db_name):
cursor = conn.cursor() cursor = conn.cursor()
for sql in sql_list: for sql in sql_list:
cursor.execute(sql) cursor.execute(sql)
except Exception:
transaction.rollback_unless_managed(using=db_name)
raise
transaction.commit_unless_managed(using=db_name)
def _fixture_setup(self): def _fixture_setup(self):
for db_name in self._databases_names(include_mirrors=False): for db_name in self._databases_names(include_mirrors=False):
@ -537,11 +522,6 @@ class TransactionTestCase(SimpleTestCase):
conn.close() conn.close()
def _fixture_teardown(self): def _fixture_teardown(self):
# Roll back any pending transactions in order to avoid a deadlock
# during flush when TEST_MIRROR is used (#18984).
for conn in connections.all():
conn.rollback_unless_managed()
for db in self._databases_names(include_mirrors=False): for db in self._databases_names(include_mirrors=False):
call_command('flush', verbosity=0, interactive=False, database=db, call_command('flush', verbosity=0, interactive=False, database=db,
skip_validation=True, reset_sequences=False) skip_validation=True, reset_sequences=False)
@ -831,9 +811,11 @@ class TestCase(TransactionTestCase):
assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances' assert not self.reset_sequences, 'reset_sequences cannot be used on TestCase instances'
self.atomics = {}
for db_name in self._databases_names(): for db_name in self._databases_names():
transaction.enter_transaction_management(using=db_name) self.atomics[db_name] = transaction.atomic(using=db_name)
transaction.managed(True, using=db_name) self.atomics[db_name].__enter__()
# Remove this when the legacy transaction management goes away.
disable_transaction_methods() disable_transaction_methods()
from django.contrib.sites.models import Site from django.contrib.sites.models import Site
@ -853,10 +835,12 @@ class TestCase(TransactionTestCase):
if not connections_support_transactions(): if not connections_support_transactions():
return super(TestCase, self)._fixture_teardown() return super(TestCase, self)._fixture_teardown()
# Remove this when the legacy transaction management goes away.
restore_transaction_methods() restore_transaction_methods()
for db in self._databases_names(): for db_name in reversed(self._databases_names()):
transaction.rollback(using=db) # Hack to force a rollback
transaction.leave_transaction_management(using=db) connections[db_name].needs_rollback = True
self.atomics[db_name].__exit__(None, None, None)
def _deferredSkip(condition, reason): def _deferredSkip(condition, reason):

View File

@ -329,6 +329,15 @@ these changes.
1.8 1.8
--- ---
* The following transaction management APIs will be removed:
- ``TransactionMiddleware``,
- the decorators and context managers ``autocommit``, ``commit_on_success``,
and ``commit_manually``,
- the ``TRANSACTIONS_MANAGED`` setting.
Upgrade paths are described in :ref:`transactions-upgrading-from-1.5`.
* The :ttag:`cycle` and :ttag:`firstof` template tags will auto-escape their * The :ttag:`cycle` and :ttag:`firstof` template tags will auto-escape their
arguments. In 1.6 and 1.7, this behavior is provided by the version of these arguments. In 1.6 and 1.7, this behavior is provided by the version of these
tags in the ``future`` template tag library. tags in the ``future`` template tag library.
@ -339,8 +348,6 @@ these changes.
* ``Model._meta.module_name`` was renamed to ``model_name``. * ``Model._meta.module_name`` was renamed to ``model_name``.
* The private API ``django.db.close_connection`` will be removed.
* Remove the backward compatible shims introduced to rename ``get_query_set`` * Remove the backward compatible shims introduced to rename ``get_query_set``
and similar queryset methods. This affects the following classes: and similar queryset methods. This affects the following classes:
``BaseModelAdmin``, ``ChangeList``, ``BaseCommentNode``, ``BaseModelAdmin``, ``ChangeList``, ``BaseCommentNode``,
@ -350,6 +357,14 @@ these changes.
* Remove the backward compatible shims introduced to rename the attributes * Remove the backward compatible shims introduced to rename the attributes
``ChangeList.root_query_set`` and ``ChangeList.query_set``. ``ChangeList.root_query_set`` and ``ChangeList.query_set``.
* The following private APIs will be removed:
- ``django.db.close_connection()``
- ``django.db.backends.creation.BaseDatabaseCreation.set_autocommit()``
- ``django.db.transaction.is_managed()``
- ``django.db.transaction.managed()``
- ``django.db.transaction.commit_unless_managed()``
- ``django.db.transaction.rollback_unless_managed()``
2.0 2.0
--- ---

View File

@ -69,7 +69,6 @@ even ``0``, because it doesn't make sense to maintain a connection that's
unlikely to be reused. This will help keep the number of simultaneous unlikely to be reused. This will help keep the number of simultaneous
connections to this database small. connections to this database small.
The development server creates a new thread for each request it handles, The development server creates a new thread for each request it handles,
negating the effect of persistent connections. negating the effect of persistent connections.
@ -104,7 +103,8 @@ Optimizing PostgreSQL's configuration
Django needs the following parameters for its database connections: Django needs the following parameters for its database connections:
- ``client_encoding``: ``'UTF8'``, - ``client_encoding``: ``'UTF8'``,
- ``default_transaction_isolation``: ``'read committed'``, - ``default_transaction_isolation``: ``'read committed'`` by default,
or the value set in the connection options (see below),
- ``timezone``: ``'UTC'`` when :setting:`USE_TZ` is ``True``, value of - ``timezone``: ``'UTC'`` when :setting:`USE_TZ` is ``True``, value of
:setting:`TIME_ZONE` otherwise. :setting:`TIME_ZONE` otherwise.
@ -118,30 +118,16 @@ will do some additional queries to set these parameters.
.. _ALTER ROLE: http://www.postgresql.org/docs/current/interactive/sql-alterrole.html .. _ALTER ROLE: http://www.postgresql.org/docs/current/interactive/sql-alterrole.html
Transaction handling
---------------------
:doc:`By default </topics/db/transactions>`, Django runs with an open
transaction which it commits automatically when any built-in, data-altering
model function is called. The PostgreSQL backends normally operate the same as
any other Django backend in this respect.
.. _postgresql-autocommit-mode: .. _postgresql-autocommit-mode:
Autocommit mode Autocommit mode
~~~~~~~~~~~~~~~ ---------------
If your application is particularly read-heavy and doesn't make many .. versionchanged:: 1.6
database writes, the overhead of a constantly open transaction can
sometimes be noticeable. For those situations, you can configure Django In previous versions of Django, database-level autocommit could be enabled by
to use *"autocommit"* behavior for the connection, meaning that each database setting the ``autocommit`` key in the :setting:`OPTIONS` part of your database
operation will normally be in its own transaction, rather than having configuration in :setting:`DATABASES`::
the transaction extend over multiple operations. In this case, you can
still manually start a transaction if you're doing something that
requires consistency across multiple database operations. The
autocommit behavior is enabled by setting the ``autocommit`` key in
the :setting:`OPTIONS` part of your database configuration in
:setting:`DATABASES`::
DATABASES = { DATABASES = {
# ... # ...
@ -150,29 +136,11 @@ the :setting:`OPTIONS` part of your database configuration in
}, },
} }
In this configuration, Django still ensures that :ref:`delete() Since Django 1.6, autocommit is turned on by default. This configuration is
<topics-db-queries-delete>` and :ref:`update() <topics-db-queries-update>` ignored and can be safely removed.
queries run inside a single transaction, so that either all the affected
objects are changed or none of them are.
.. admonition:: This is database-level autocommit
This functionality is not the same as the :ref:`autocommit
<topics-db-transactions-autocommit>` decorator. That decorator is
a Django-level implementation that commits automatically after
data changing operations. The feature enabled using the
:setting:`OPTIONS` option provides autocommit behavior at the
database adapter level. It commits after *every* operation.
If you are using this feature and performing an operation akin to delete or
updating that requires multiple operations, you are strongly recommended to
wrap you operations in manual transaction handling to ensure data consistency.
You should also audit your existing code for any instances of this behavior
before enabling this feature. It's faster, but it provides less automatic
protection for multi-call operations.
Isolation level Isolation level
~~~~~~~~~~~~~~~ ---------------
.. versionadded:: 1.6 .. versionadded:: 1.6
@ -200,7 +168,7 @@ such as ``REPEATABLE READ`` or ``SERIALIZABLE``, set it in the
.. _postgresql-isolation-levels: http://www.postgresql.org/docs/devel/static/transaction-iso.html .. _postgresql-isolation-levels: http://www.postgresql.org/docs/devel/static/transaction-iso.html
Indexes for ``varchar`` and ``text`` columns Indexes for ``varchar`` and ``text`` columns
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ --------------------------------------------
When specifying ``db_index=True`` on your model fields, Django typically When specifying ``db_index=True`` on your model fields, Django typically
outputs a single ``CREATE INDEX`` statement. However, if the database type outputs a single ``CREATE INDEX`` statement. However, if the database type
@ -456,8 +424,7 @@ Savepoints
Both the Django ORM and MySQL (when using the InnoDB :ref:`storage engine Both the Django ORM and MySQL (when using the InnoDB :ref:`storage engine
<mysql-storage-engines>`) support database :ref:`savepoints <mysql-storage-engines>`) support database :ref:`savepoints
<topics-db-transactions-savepoints>`, but this feature wasn't available in <topics-db-transactions-savepoints>`.
Django until version 1.4 when such supports was added.
If you use the MyISAM storage engine please be aware of the fact that you will If you use the MyISAM storage engine please be aware of the fact that you will
receive database-generated errors if you try to use the :ref:`savepoint-related receive database-generated errors if you try to use the :ref:`savepoint-related

View File

@ -205,6 +205,10 @@ Transaction middleware
.. class:: TransactionMiddleware .. class:: TransactionMiddleware
.. versionchanged:: 1.6
``TransactionMiddleware`` is deprecated. The documentation of transactions
contains :ref:`upgrade instructions <transactions-upgrading-from-1.5>`.
Binds commit and rollback of the default database to the request/response Binds commit and rollback of the default database to the request/response
phase. If a view function runs successfully, a commit is done. If it fails with phase. If a view function runs successfully, a commit is done. If it fails with
an exception, a rollback is done. an exception, a rollback is done.

View File

@ -814,8 +814,8 @@ generating large CSV files.
.. admonition:: Performance considerations .. admonition:: Performance considerations
Django is designed for short-lived requests. Streaming responses will tie Django is designed for short-lived requests. Streaming responses will tie
a worker process and keep a database connection idle in transaction for a worker process for the entire duration of the response. This may result
the entire duration of the response. This may result in poor performance. in poor performance.
Generally speaking, you should perform expensive tasks outside of the Generally speaking, you should perform expensive tasks outside of the
request-response cycle, rather than resorting to a streamed response. request-response cycle, rather than resorting to a streamed response.

View File

@ -408,6 +408,30 @@ SQLite. This can be configured using the following::
For other database backends, or more complex SQLite configurations, other options For other database backends, or more complex SQLite configurations, other options
will be required. The following inner options are available. will be required. The following inner options are available.
.. setting:: DATABASE-ATOMIC_REQUESTS
ATOMIC_REQUESTS
~~~~~~~~~~~~~~~
.. versionadded:: 1.6
Default: ``False``
Set this to ``True`` to wrap each HTTP request in a transaction on this
database. See :ref:`tying-transactions-to-http-requests`.
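As a sketch, enabling per-request transactions for the default alias (the engine and database name are placeholders)::

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'mydatabase',        # placeholder
            'ATOMIC_REQUESTS': True,
        },
    }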
.. setting:: DATABASE-AUTOCOMMIT
AUTOCOMMIT
~~~~~~~~~~
.. versionadded:: 1.6
Default: ``True``
Set this to ``False`` if you want to :ref:`disable Django's transaction
management <deactivate-transaction-management>` and implement your own.
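As a sketch, opting out of Django's transaction management for one alias (the engine and database name are placeholders)::

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'mydatabase',        # placeholder
            'AUTOCOMMIT': False,
        },
    }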
.. setting:: DATABASE-ENGINE .. setting:: DATABASE-ENGINE
ENGINE ENGINE
@ -1807,6 +1831,12 @@ to ensure your processes are running in the correct environment.
TRANSACTIONS_MANAGED TRANSACTIONS_MANAGED
-------------------- --------------------
.. deprecated:: 1.6
This setting was deprecated because its name is very misleading. Use the
:setting:`AUTOCOMMIT <DATABASE-AUTOCOMMIT>` key in :setting:`DATABASES`
entries instead.
Default: ``False`` Default: ``False``
Set this to ``True`` if you want to :ref:`disable Django's transaction Set this to ``True`` if you want to :ref:`disable Django's transaction

View File

@ -105,16 +105,14 @@ you just won't get any of the nice new unittest2 features.
Transaction context managers Transaction context managers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Users of Python 2.5 and above may now use :ref:`transaction management functions Users of Python 2.5 and above may now use transaction management functions as
<transaction-management-functions>` as `context managers`_. For example:: `context managers`_. For example::
with transaction.autocommit(): with transaction.autocommit():
# ... # ...
.. _context managers: http://docs.python.org/glossary.html#term-context-manager .. _context managers: http://docs.python.org/glossary.html#term-context-manager
For more information, see :ref:`transaction-management-functions`.
Configurable delete-cascade Configurable delete-cascade
~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -148,16 +148,14 @@ you just won't get any of the nice new unittest2 features.
Transaction context managers Transaction context managers
~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Users of Python 2.5 and above may now use :ref:`transaction management functions Users of Python 2.5 and above may now use transaction management functions as
<transaction-management-functions>` as `context managers`_. For example:: `context managers`_. For example::
with transaction.autocommit(): with transaction.autocommit():
# ... # ...
.. _context managers: http://docs.python.org/glossary.html#term-context-manager .. _context managers: http://docs.python.org/glossary.html#term-context-manager
For more information, see :ref:`transaction-management-functions`.
Configurable delete-cascade Configurable delete-cascade
~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -30,6 +30,18 @@ prevention <clickjacking-prevention>` are turned on.
If the default templates don't suit your tastes, you can use :ref:`custom If the default templates don't suit your tastes, you can use :ref:`custom
project and app templates <custom-app-and-project-templates>`. project and app templates <custom-app-and-project-templates>`.
Improved transaction management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Django's transaction management was overhauled. Database-level autocommit is
now turned on by default. This makes transaction handling more explicit and
should improve performance. The existing APIs were deprecated, and new APIs
were introduced, as described in :doc:`/topics/db/transactions`.
Please review carefully the list of :ref:`known backwards-incompatibilities
<transactions-upgrading-from-1.5>` to determine if you need to make changes in
your code.
Persistent database connections Persistent database connections
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -148,6 +160,16 @@ Backwards incompatible changes in 1.6
deprecation timeline for a given feature, its removal may appear as a deprecation timeline for a given feature, its removal may appear as a
backwards incompatible change. backwards incompatible change.
* Database-level autocommit is enabled by default in Django 1.6. While this
doesn't change the general spirit of Django's transaction management, there
are a few known backwards-incompatibilities, described in the :ref:`transaction
management docs <transactions-upgrading-from-1.5>`. You should review your code
to determine if you're affected.
* In previous versions, database-level autocommit was only an option for
PostgreSQL, and it was disabled by default. This option is now
:ref:`ignored <postgresql-autocommit-mode>`.
* The ``django.db.models.query.EmptyQuerySet`` can't be instantiated any more - * The ``django.db.models.query.EmptyQuerySet`` can't be instantiated any more -
it is only usable as a marker class for checking if it is only usable as a marker class for checking if
:meth:`~django.db.models.query.QuerySet.none` has been called: :meth:`~django.db.models.query.QuerySet.none` has been called:
@ -234,6 +256,21 @@ Backwards incompatible changes in 1.6
Features deprecated in 1.6 Features deprecated in 1.6
========================== ==========================
Transaction management APIs
~~~~~~~~~~~~~~~~~~~~~~~~~~~
Transaction management was completely overhauled in Django 1.6, and the
current APIs are deprecated:
- ``django.middleware.transaction.TransactionMiddleware``
- ``django.db.transaction.autocommit``
- ``django.db.transaction.commit_on_success``
- ``django.db.transaction.commit_manually``
- the ``TRANSACTIONS_MANAGED`` setting
The reasons for this change and the upgrade path are described in the
:ref:`transactions documentation <transactions-upgrading-from-1.5>`.
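As a sketch of the most common upgrade, a view decorated with ``commit_on_success`` usually translates directly to ``atomic`` (the view name and body are placeholders)::

    from django.db import transaction

    # Django 1.5 and earlier (now deprecated):
    @transaction.commit_on_success
    def pay_invoice(request, invoice_id):
        ...

    # Django 1.6 replacement:
    @transaction.atomic
    def pay_invoice(request, invoice_id):
        ...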
Changes to :ttag:`cycle` and :ttag:`firstof` Changes to :ttag:`cycle` and :ttag:`firstof`
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

View File

@ -201,31 +201,32 @@ perform queries that don't map cleanly to models, or directly execute
In these cases, you can always access the database directly, routing around In these cases, you can always access the database directly, routing around
the model layer entirely. the model layer entirely.
The object ``django.db.connection`` represents the The object ``django.db.connection`` represents the default database
default database connection, and ``django.db.transaction`` represents the connection. To use the database connection, call ``connection.cursor()`` to
default database transaction. To use the database connection, call get a cursor object. Then, call ``cursor.execute(sql, [params])`` to execute
``connection.cursor()`` to get a cursor object. Then, call the SQL and ``cursor.fetchone()`` or ``cursor.fetchall()`` to return the
``cursor.execute(sql, [params])`` to execute the SQL and ``cursor.fetchone()`` resulting rows.
or ``cursor.fetchall()`` to return the resulting rows. After performing a data
changing operation, you should then call For example::
``transaction.commit_unless_managed()`` to ensure your changes are committed
to the database. If your query is purely a data retrieval operation, no commit from django.db import connection
is required. For example::
def my_custom_sql(): def my_custom_sql():
from django.db import connection, transaction
cursor = connection.cursor() cursor = connection.cursor()
# Data modifying operation - commit required
cursor.execute("UPDATE bar SET foo = 1 WHERE baz = %s", [self.baz]) cursor.execute("UPDATE bar SET foo = 1 WHERE baz = %s", [self.baz])
transaction.commit_unless_managed()
# Data retrieval operation - no commit required
cursor.execute("SELECT foo FROM bar WHERE baz = %s", [self.baz]) cursor.execute("SELECT foo FROM bar WHERE baz = %s", [self.baz])
row = cursor.fetchone() row = cursor.fetchone()
return row return row
.. versionchanged:: 1.6
In Django 1.5 and earlier, after performing a data changing operation, you
had to call ``transaction.commit_unless_managed()`` to ensure your changes
were committed to the database. Since Django now defaults to database-level
autocommit, this isn't necessary any longer.
If you are using :doc:`more than one database </topics/db/multi-db>`, you can If you are using :doc:`more than one database </topics/db/multi-db>`, you can
use ``django.db.connections`` to obtain the connection (and cursor) for a use ``django.db.connections`` to obtain the connection (and cursor) for a
specific database. ``django.db.connections`` is a dictionary-like specific database. ``django.db.connections`` is a dictionary-like
@ -235,7 +236,6 @@ alias::
from django.db import connections from django.db import connections
cursor = connections['my_db_alias'].cursor() cursor = connections['my_db_alias'].cursor()
# Your code here... # Your code here...
transaction.commit_unless_managed(using='my_db_alias')
By default, the Python DB API will return results without their field By default, the Python DB API will return results without their field
names, which means you end up with a ``list`` of values, rather than a names, which means you end up with a ``list`` of values, rather than a
@ -260,27 +260,18 @@ Here is an example of the difference between the two::
>>> dictfetchall(cursor) >>> dictfetchall(cursor)
[{'parent_id': None, 'id': 54360982L}, {'parent_id': None, 'id': 54360880L}] [{'parent_id': None, 'id': 54360982L}, {'parent_id': None, 'id': 54360880L}]
.. _transactions-and-raw-sql:
Transactions and raw SQL
------------------------
When you make a raw SQL call, Django will automatically mark the
current transaction as dirty. You must then ensure that the
transaction containing those calls is closed correctly. See :ref:`the
notes on the requirements of Django's transaction handling
<topics-db-transactions-requirements>` for more details.
Connections and cursors Connections and cursors
----------------------- -----------------------
``connection`` and ``cursor`` mostly implement the standard Python DB-API ``connection`` and ``cursor`` mostly implement the standard Python DB-API
described in :pep:`249` (except when it comes to :doc:`transaction handling described in :pep:`249` — except when it comes to :doc:`transaction handling
</topics/db/transactions>`). If you're not familiar with the Python DB-API, note </topics/db/transactions>`.
that the SQL statement in ``cursor.execute()`` uses placeholders, ``"%s"``,
rather than adding parameters directly within the SQL. If you use this If you're not familiar with the Python DB-API, note that the SQL statement in
technique, the underlying database library will automatically add quotes and ``cursor.execute()`` uses placeholders, ``"%s"``, rather than adding
escaping to your parameter(s) as necessary. (Also note that Django expects the parameters directly within the SQL. If you use this technique, the underlying
``"%s"`` placeholder, *not* the ``"?"`` placeholder, which is used by the SQLite database library will automatically escape your parameters as necessary.
Python bindings. This is for the sake of consistency and sanity.)
Also note that Django expects the ``"%s"`` placeholder, *not* the ``"?"``
placeholder, which is used by the SQLite Python bindings. This is for the sake
of consistency and sanity.
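A short sketch of the placeholder style described above (the table and column names are made up)::

    from django.db import connection

    cursor = connection.cursor()
    # Correct: the database library quotes and escapes the parameter.
    cursor.execute("SELECT foo FROM bar WHERE baz = %s", ["some value"])
    # Wrong: never interpolate parameters into the SQL string yourself.
    # cursor.execute("SELECT foo FROM bar WHERE baz = '%s'" % "some value")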

View File

@ -1,286 +1,375 @@
============================== =====================
Managing database transactions Database transactions
============================== =====================
.. module:: django.db.transaction .. module:: django.db.transaction
Django gives you a few ways to control how database transactions are managed, Django gives you a few ways to control how database transactions are managed.
if you're using a database that supports transactions.
Managing database transactions
==============================
Django's default transaction behavior Django's default transaction behavior
===================================== -------------------------------------
Django's default behavior is to run with an open transaction which it Django's default behavior is to run in autocommit mode. Each query is
commits automatically when any built-in, data-altering model function is immediately committed to the database. :ref:`See below for details
called. For example, if you call ``model.save()`` or ``model.delete()``, the <autocommit-details>`.
change will be committed immediately.
This is much like the auto-commit setting for most databases. As soon as you Django uses transactions or savepoints automatically to guarantee the
perform an action that needs to write to the database, Django produces the integrity of ORM operations that require multiple queries, especially
``INSERT``/``UPDATE``/``DELETE`` statements and then does the ``COMMIT``. :ref:`delete() <topics-db-queries-delete>` and :ref:`update()
There's no implicit ``ROLLBACK``. <topics-db-queries-update>` queries.
.. versionchanged:: 1.6
Previous version of Django featured :ref:`a more complicated default
behavior <transactions-upgrading-from-1.5>`.
.. _tying-transactions-to-http-requests:
Tying transactions to HTTP requests Tying transactions to HTTP requests
=================================== -----------------------------------
The recommended way to handle transactions in Web requests is to tie them to A common way to handle transactions on the web is to wrap each request in a
the request and response phases via Django's ``TransactionMiddleware``. transaction. Set :setting:`ATOMIC_REQUESTS <DATABASE-ATOMIC_REQUESTS>` to
``True`` in the configuration of each database for which you want to enable
this behavior.
It works like this: When a request starts, Django starts a transaction. If the It works like this. When a request starts, Django starts a transaction. If the
response is produced without problems, Django commits any pending transactions. response is produced without problems, Django commits the transaction. If the
If the view function produces an exception, Django rolls back any pending view function produces an exception, Django rolls back the transaction.
transactions. Middleware always runs outside of this transaction.
To activate this feature, just add the ``TransactionMiddleware`` middleware to You may perform partial commits and rollbacks in your view code, typically with
your :setting:`MIDDLEWARE_CLASSES` setting:: the :func:`atomic` context manager. However, at the end of the view, either
all the changes will be committed, or none of them.
MIDDLEWARE_CLASSES = ( To disable this behavior for a specific view, you must set the
'django.middleware.cache.UpdateCacheMiddleware', ``transactions_per_request`` attribute of the view function itself to
'django.contrib.sessions.middleware.SessionMiddleware', ``False``, like this::
'django.middleware.common.CommonMiddleware',
'django.middleware.transaction.TransactionMiddleware',
'django.middleware.cache.FetchFromCacheMiddleware',
)
The order is quite important. The transaction middleware applies not only to def my_view(request):
view functions, but also for all middleware modules that come after it. So if do_stuff()
you use the session middleware after the transaction middleware, session my_view.transactions_per_request = False
creation will be part of the transaction.
The various cache middlewares are an exception: .. warning::
``CacheMiddleware``, :class:`~django.middleware.cache.UpdateCacheMiddleware`,
and :class:`~django.middleware.cache.FetchFromCacheMiddleware` are never
affected. Even when using database caching, Django's cache backend uses its own
database cursor (which is mapped to its own database connection internally).
.. note:: While the simplicity of this transaction model is appealing, it also makes it
inefficient when traffic increases. Opening a transaction for every view has
some overhead. The impact on performance depends on the query patterns of your
application and on how well your database handles locking.
The ``TransactionMiddleware`` only affects the database aliased .. admonition:: Per-request transactions and streaming responses
as "default" within your :setting:`DATABASES` setting. If you are using
multiple databases and want transaction control over databases other than
"default", you will need to write your own transaction middleware.
.. _transaction-management-functions: When a view returns a :class:`~django.http.StreamingHttpResponse`, reading
the contents of the response will often execute code to generate the
content. Since the view has already returned, such code runs outside of
the transaction.
Controlling transaction management in views Generally speaking, it isn't advisable to write to the database while
=========================================== generating a streaming response, since there's no sensible way to handle
errors after starting to send the response.
For most people, implicit request-based transactions work wonderfully. However, In practice, this feature simply wraps every view function in the :func:`atomic`
if you need more fine-grained control over how transactions are managed, you can decorator described below.
use a set of functions in ``django.db.transaction`` to control transactions on a
per-function or per-code-block basis.
These functions, described in detail below, can be used in two different ways: Note that only the execution of your view is enclosed in the transaction.
Middleware runs outside of the transaction, and so does the rendering of
template responses.
* As a decorator_ on a particular function. For example:: .. versionchanged:: 1.6
Django used to provide this feature via ``TransactionMiddleware``, which is
now deprecated.
from django.db import transaction Controlling transactions explicitly
-----------------------------------
@transaction.commit_on_success .. versionadded:: 1.6
def viewfunc(request):
# ...
# this code executes inside a transaction
# ...
* As a `context manager`_ around a particular block of code:: Django provides a single API to control database transactions.
from django.db import transaction .. function:: atomic(using=None, savepoint=True)
def viewfunc(request): This function creates an atomic block for writes to the database.
(Atomicity is the defining property of database transactions.)

When the block completes successfully, the changes are committed to the
database. When it raises an exception, the changes are rolled back.

``atomic`` can be nested. In this case, when an inner block completes
successfully, its effects can still be rolled back if an exception is
raised in the outer block at a later point.

``atomic`` takes a ``using`` argument which should be the name of a
database. If this argument isn't provided, Django uses the ``"default"``
database.

``atomic`` is usable both as a `decorator`_::

    from django.db import transaction

    @transaction.atomic
    def viewfunc(request):
        # This code executes inside a transaction.
        do_stuff()

and as a `context manager`_::

    from django.db import transaction

    def viewfunc(request):
        # This code executes in autocommit mode (Django's default).
        do_stuff()

        with transaction.atomic():
            # This code executes inside a transaction.
            do_more_stuff()

.. _decorator: http://docs.python.org/glossary.html#term-decorator
.. _context manager: http://docs.python.org/glossary.html#term-context-manager

Wrapping ``atomic`` in a try/except block allows for natural handling of
integrity errors::

    from django.db import IntegrityError, transaction

    @transaction.atomic
    def viewfunc(request):
        do_stuff()

        try:
            with transaction.atomic():
                do_stuff_that_could_fail()
        except IntegrityError:
            handle_exception()

        do_more_stuff()

In this example, even if ``do_stuff_that_could_fail()`` causes a database
error by breaking an integrity constraint, you can execute queries in
``do_more_stuff()``, and the changes from ``do_stuff()`` are still there.

In order to guarantee atomicity, ``atomic`` disables some APIs. Attempting
to commit, roll back, or change the autocommit state of the database
connection within an ``atomic`` block will raise an exception.

``atomic`` can only be used in autocommit mode. It will raise an exception
if autocommit is turned off.

Under the hood, Django's transaction management code:

- opens a transaction when entering the outermost ``atomic`` block;
- creates a savepoint when entering an inner ``atomic`` block;
- releases or rolls back to the savepoint when exiting an inner block;
- commits or rolls back the transaction when exiting the outermost block.
You can disable the creation of savepoints for inner blocks by setting the
``savepoint`` argument to ``False``. If an exception occurs, Django will
perform the rollback when exiting the first parent block with a savepoint
if there is one, and the outermost block otherwise. Atomicity is still
guaranteed by the outer transaction. This option should only be used if
the overhead of savepoints is noticeable. It has the drawback of breaking
the error handling described above.
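The sketch below illustrates this trade-off; the model instances and the
failing write are hypothetical, and any ORM operations would behave the same
way. Because the inner block creates no savepoint, an error inside it cannot
be contained, and the outer block rolls everything back::

    from django.db import IntegrityError, transaction

    def create_parent_and_child(parent, child):
        with transaction.atomic():                         # outer block
            parent.save()
            try:
                with transaction.atomic(savepoint=False):  # inner block, no savepoint
                    child.save()                           # suppose this breaks a constraint
            except IntegrityError:
                pass
            # Catching the exception doesn't help here: with no savepoint to
            # roll back to, the transaction is doomed, and parent.save() is
            # rolled back too when the outer block exits.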
.. admonition:: Performance considerations
Open transactions have a performance cost for your database server. To
minimize this overhead, keep your transactions as short as possible. This
is especially important if you're using :func:`atomic` in long-running
processes, outside of Django's request / response cycle.
Autocommit
==========
.. _autocommit-details:
Why Django uses autocommit
--------------------------
In the SQL standards, each SQL query starts a transaction, unless one is
already in progress. Such transactions must then be committed or rolled back.
This isn't always convenient for application developers. To alleviate this
problem, most databases provide an autocommit mode. When autocommit is turned
on, each SQL query is wrapped in its own transaction. In other words, the
transaction is not only automatically started, but also automatically
committed.
:pep:`249`, the Python Database API Specification v2.0, requires autocommit to
be initially turned off. Django overrides this default and turns autocommit
on.
To avoid this, you can :ref:`deactivate the transaction management
<deactivate-transaction-management>`, but it isn't recommended.
.. versionchanged:: 1.6
Before Django 1.6, autocommit was turned off, and it was emulated by
forcing a commit after write operations in the ORM.
.. warning::
If you're using the database API directly — for instance, you're running
SQL queries with ``cursor.execute()`` — be aware that autocommit is on,
and consider wrapping your operations in a transaction, with
:func:`atomic`, to ensure consistency.
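For instance, a pair of raw SQL statements that must succeed or fail together
could be wrapped like this; the table and column names are invented for the
example::

    from django.db import connection, transaction

    def transfer(amount, source_id, target_id):
        with transaction.atomic():
            cursor = connection.cursor()
            cursor.execute("UPDATE ledger SET balance = balance - %s WHERE id = %s",
                           [amount, source_id])
            cursor.execute("UPDATE ledger SET balance = balance + %s WHERE id = %s",
                           [amount, target_id])
        # Both statements commit together when the block exits; in autocommit
        # mode, each execute() would otherwise be committed on its own.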
.. _deactivate-transaction-management:

Deactivating transaction management
-----------------------------------

You can totally disable Django's transaction management for a given database
by setting :setting:`AUTOCOMMIT <DATABASE-AUTOCOMMIT>` to ``False`` in its
configuration. If you do this, Django won't enable autocommit, and won't
perform any commits. You'll get the regular behavior of the underlying
database library.

This requires you to commit every transaction explicitly, even those started
by Django or by third-party libraries. Thus, this is best used in situations
where you want to run your own transaction-controlling middleware or do
something really strange.

.. versionchanged:: 1.6
    This used to be controlled by the ``TRANSACTIONS_MANAGED`` setting.

Low-level APIs
==============
.. warning::
Always prefer :func:`atomic` if at all possible. It accounts for the
idiosyncrasies of each database and prevents invalid operations.
The low-level APIs are only useful if you're implementing your own
transaction management.
.. _managing-autocommit:
Autocommit
----------
.. versionadded:: 1.6
Django provides a straightforward API to manage the autocommit state of each
database connection, if you need to.
.. function:: get_autocommit(using=None)
.. function:: set_autocommit(autocommit, using=None)
These functions take a ``using`` argument which should be the name of a
database. If it isn't provided, Django uses the ``"default"`` database.
Autocommit is initially turned on. If you turn it off, it's your
responsibility to restore it.
Once you turn autocommit off, you get the default behavior of your database
adapter, and Django won't help you. Although that behavior is specified in
:pep:`249`, implementations of adapters aren't always consistent with one
another. Review the documentation of the adapter you're using carefully.
You must ensure that no transaction is active, usually by issuing a
:func:`commit` or a :func:`rollback`, before turning autocommit back on.
:func:`atomic` requires autocommit to be turned on; it will raise an exception
if autocommit is off. Django will also refuse to turn autocommit off when an
:func:`atomic` block is active, because that would break atomicity.
Transactions
------------
A transaction is an atomic set of database queries. Even if your program
crashes, the database guarantees that either all the changes will be applied,
or none of them.
Django doesn't provide an API to start a transaction. The expected way to
start a transaction is to disable autocommit with :func:`set_autocommit`.
Once you're in a transaction, you can choose either to apply the changes
you've performed until this point with :func:`commit`, or to cancel them with
:func:`rollback`.
.. function:: commit(using=None)
.. function:: rollback(using=None)
These functions take a ``using`` argument which should be the name of a
database. If it isn't provided, Django uses the ``"default"`` database.
Django will refuse to commit or to rollback when an :func:`atomic` block is
active, because that would break atomicity.
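Putting these pieces together, a manual transaction might look like the
following sketch, where ``save_row()`` stands in for whatever database writes
you need to perform::

    from django.db import transaction

    def import_rows(rows):
        transaction.set_autocommit(False)
        try:
            for row in rows:
                save_row(row)
            transaction.commit()      # apply all the changes at once
        except Exception:
            transaction.rollback()    # cancel everything performed so far
            raise
        finally:
            transaction.set_autocommit(True)   # restore the initial state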
.. _topics-db-transactions-savepoints:

Savepoints
----------

A savepoint is a marker within a transaction that enables you to roll back
part of a transaction, rather than the full transaction. Savepoints are
available with the SQLite (≥ 3.6.8), PostgreSQL, Oracle and MySQL (when using
the InnoDB storage engine) backends. Other backends provide the savepoint
functions, but they're empty operations -- they don't actually do anything.

Savepoints aren't especially useful if you are using autocommit, the default
behavior of Django. However, once you open a transaction with :func:`atomic`,
you build up a series of database operations awaiting a commit or rollback. If
you issue a rollback, the entire transaction is rolled back. Savepoints
provide the ability to perform a fine-grained rollback, rather than the full
rollback that would be performed by ``transaction.rollback()``.

.. versionchanged:: 1.6

When the :func:`atomic` decorator is nested, it creates a savepoint to allow
partial commit or rollback. You're strongly encouraged to use :func:`atomic`
rather than the functions described below, but they're still part of the
public API, and there's no plan to deprecate them.

Each of these functions takes a ``using`` argument which should be the name of
a database for which the behavior applies. If no ``using`` argument is
provided then the ``"default"`` database is used.

Savepoints are controlled by three functions in :mod:`django.db.transaction`:

.. function:: savepoint(using=None)

    Creates a new savepoint. This marks a point in the transaction that
    is known to be in a "good" state.

    Returns the savepoint ID (``sid``).

.. function:: savepoint_commit(sid, using=None)

    Releases savepoint ``sid``. The changes performed since the savepoint was
    created become part of the transaction.

.. function:: savepoint_rollback(sid, using=None)

    Rolls back the transaction to savepoint ``sid``.

These functions do nothing if savepoints aren't supported or if the database
is in autocommit mode.

In addition, there's a utility function:

.. function:: clean_savepoints(using=None)

    Resets the counter used to generate unique savepoint IDs.

The following example demonstrates the use of savepoints::

    from django.db import transaction

    # open a transaction
    @transaction.atomic
    def viewfunc(request):

        a.save()
        # transaction now contains a.save()

        sid = transaction.savepoint()

        b.save()
        # transaction now contains a.save() and b.save()

        if want_to_keep_b:
            transaction.savepoint_commit(sid)
@ -289,10 +378,25 @@ The following example demonstrates the use of savepoints::
            transaction.savepoint_rollback(sid)
            # open transaction now contains only a.save()

Database-specific notes
=======================
Savepoints in SQLite
--------------------
While SQLite ≥ 3.6.8 supports savepoints, a flaw in the design of the
:mod:`sqlite3` module makes them hardly usable.

When autocommit is enabled, savepoints don't make sense. When it's disabled,
:mod:`sqlite3` commits implicitly before savepoint-related statements. (It
commits before any statement other than ``SELECT``, ``INSERT``, ``UPDATE``,
``DELETE`` and ``REPLACE``.)

As a consequence, savepoints are only usable inside a transaction, i.e. inside
an :func:`atomic` block.
Transactions in MySQL
---------------------

If you're using MySQL, your tables may or may not support transactions; it
depends on your MySQL version and the table types you're using. (By
@ -301,14 +405,14 @@ peculiarities are outside the scope of this article, but the MySQL site has
`information on MySQL transactions`_.

If your MySQL setup does *not* support transactions, then Django will function
in autocommit mode: Statements will be executed and committed as soon as
they're called. If your MySQL setup *does* support transactions, Django will
handle transactions as explained in this document.

.. _information on MySQL transactions: http://dev.mysql.com/doc/refman/5.0/en/sql-syntax-transactions.html
Handling exceptions within PostgreSQL transactions
--------------------------------------------------

When a call to a PostgreSQL cursor raises an exception (typically
``IntegrityError``), all subsequent SQL in the same transaction will fail with
@ -321,7 +425,7 @@ force_insert/force_update flag, or invoking custom SQL.
There are several ways to recover from this sort of error.

Transaction rollback
~~~~~~~~~~~~~~~~~~~~

The first option is to roll back the entire transaction. For example::
@ -338,13 +442,13 @@ made by ``a.save()`` would be lost, even though that operation raised no error
itself.
Savepoint rollback
~~~~~~~~~~~~~~~~~~

You can use :ref:`savepoints <topics-db-transactions-savepoints>` to control
the extent of a rollback. Before performing a database operation that could
fail, you can set or update the savepoint; that way, if the operation fails,
you can roll back the single offending operation, rather than the entire
transaction. For example::

    a.save() # Succeeds, and never undone by savepoint rollback
    try:
@ -358,25 +462,227 @@ offending operation, rather than the entire transaction. For example::
In this example, ``a.save()`` will not be undone in the case where
``b.save()`` raises an exception.
.. _transactions-upgrading-from-1.5:

Changes from Django 1.5 and earlier
===================================

The features described below were deprecated in Django 1.6 and will be removed
in Django 1.8. They're documented in order to ease the migration to the new
transaction management APIs.
Legacy APIs
-----------
The following functions, defined in ``django.db.transaction``, provide a way
to control transactions on a per-function or per-code-block basis. They can be
used as decorators or as context managers, and they accept a ``using``
argument, exactly like :func:`atomic`.
.. function:: autocommit
Enable Django's default autocommit behavior.
Transactions will be committed as soon as you call ``model.save()``,
``model.delete()``, or any other function that writes to the database.
.. function:: commit_on_success
Use a single transaction for all the work done in a function.
If the function returns successfully, then Django will commit all work done
within the function at that point. If the function raises an exception,
though, Django will roll back the transaction.
.. function:: commit_manually
Tells Django you'll be managing the transaction on your own.
Whether you are writing or simply reading from the database, you must
``commit()`` or ``rollback()`` explicitly or Django will raise a
:exc:`TransactionManagementError` exception. This is required when reading
from the database because ``SELECT`` statements may call functions which
modify tables, and thus it is impossible to know if any data has been
modified.
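For reference, the legacy decorators were applied like this; ``do_stuff()`` is
a placeholder for any database work::

    from django.db import transaction

    # Deprecated since Django 1.6; shown only to ease migration.
    @transaction.commit_on_success
    def legacy_viewfunc(request):
        # Committed as a whole on success, rolled back on exception.
        # The modern equivalent is @transaction.atomic.
        do_stuff()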
.. _transaction-states:
Transaction states
------------------
The three functions described above rely on a concept called "transaction
states". This mechanism was deprecated in Django 1.6, but it's still
available until Django 1.8.
At any time, each database connection is in one of these two states:
- **auto mode**: autocommit is enabled;
- **managed mode**: autocommit is disabled.
Django starts in auto mode. ``TransactionMiddleware``,
:func:`commit_on_success` and :func:`commit_manually` activate managed mode;
:func:`autocommit` activates auto mode.
Internally, Django keeps a stack of states. Activations and deactivations must
be balanced.
For example, ``commit_on_success`` switches to managed mode when entering the
block of code it controls; when exiting the block, it commits or rolls back,
and switches back to auto mode.
So :func:`commit_on_success` really has two effects: it changes the
transaction state and it defines a transaction block. Nesting will give the
expected results in terms of transaction state, but not in terms of
transaction semantics. Most often, the inner block will commit, breaking the
atomicity of the outer block.
:func:`autocommit` and :func:`commit_manually` have similar limitations.
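A sketch of this nesting pitfall, where the ``create_*`` helpers are
hypothetical ORM writes::

    from django.db import transaction

    @transaction.commit_on_success
    def inner():
        create_child()

    @transaction.commit_on_success
    def outer(request):
        create_parent()
        inner()                  # commits the transaction, including create_parent()
        raise Exception("boom")  # only work done after that commit is rolled back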
API changes
-----------
Transaction middleware
~~~~~~~~~~~~~~~~~~~~~~
In Django 1.6, ``TransactionMiddleware`` is deprecated and replaced by
:setting:`ATOMIC_REQUESTS <DATABASE-ATOMIC_REQUESTS>`. While the general
behavior is the same, there are a few differences.
With the transaction middleware, it was still possible to switch to autocommit
or to commit explicitly in a view. Since :func:`atomic` guarantees atomicity,
this isn't allowed any longer.
To avoid wrapping a particular view in a transaction, instead of::
@transaction.autocommit
def my_view(request):
do_stuff()
you must now use this pattern::
def my_view(request):
do_stuff()
my_view.transactions_per_request = False
The transaction middleware applied not only to view functions, but also to
middleware modules that come after it. For instance, if you used the session
middleware after the transaction middleware, session creation was part of the
transaction. :setting:`ATOMIC_REQUESTS <DATABASE-ATOMIC_REQUESTS>` only
applies to the view itself.
Managing transactions
~~~~~~~~~~~~~~~~~~~~~
Starting with Django 1.6, :func:`atomic` is the only supported API for
defining a transaction. Unlike the deprecated APIs, it's nestable and always
guarantees atomicity.
In most cases, it will be a drop-in replacement for :func:`commit_on_success`.
During the deprecation period, it's possible to use :func:`atomic` within
:func:`autocommit`, :func:`commit_on_success` or :func:`commit_manually`.
However, the reverse is forbidden, because nesting the old decorators /
context managers breaks atomicity.
If you enter :func:`atomic` while you're in managed mode, it will trigger a
commit to start from a clean slate.
Managing autocommit
~~~~~~~~~~~~~~~~~~~
Django 1.6 introduces an explicit :ref:`API for managing autocommit
<managing-autocommit>`.
To disable autocommit temporarily, instead of::
with transaction.commit_manually():
# do stuff
you should now use::
transaction.set_autocommit(False)
try: try:
b.save() # Could throw exception # do stuff
except IntegrityError: finally:
pass transaction.set_autocommit(True)
c.save() # succeeds
.. note:: To enable autocommit temporarily, instead of::
This is not the same as the :ref:`autocommit decorator with transaction.autocommit():
<topics-db-transactions-autocommit>`. When using database level autocommit # do stuff
there is no database transaction at all. The ``autocommit`` decorator
still uses transactions, automatically committing each transaction when you should now use::
a database modifying operation occurs.
transaction.set_autocommit(True)
try:
# do stuff
finally:
transaction.set_autocommit(False)
Disabling transaction management
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Instead of setting ``TRANSACTIONS_MANAGED = True``, set the ``AUTOCOMMIT`` key
to ``False`` in the configuration of each database, as explained in
:ref:`deactivate-transaction-management`.
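For example, the relevant database entry in ``settings.py`` might look like
this minimal sketch; the engine and database name are placeholders::

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'mydb',
            'AUTOCOMMIT': False,  # replaces TRANSACTIONS_MANAGED = True
        },
    }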
Backwards incompatibilities
---------------------------
Since version 1.6, Django uses database-level autocommit in auto mode.
Previously, it implemented application-level autocommit by triggering a commit
after each ORM write.
As a consequence, each database query (for instance, an ORM read) started a
transaction that lasted until the next ORM write. Such "automatic
transactions" no longer exist in Django 1.6.
There are four known scenarios where this is backwards-incompatible.
Note that managed mode isn't affected at all. This section assumes auto mode.
See the :ref:`description of modes <transaction-states>` above.
Sequences of custom SQL queries
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you're executing several :ref:`custom SQL queries <executing-custom-sql>`
in a row, each one now runs in its own transaction, instead of sharing the
same "automatic transaction". If you need to enforce atomicity, you must wrap
the sequence of queries in :func:`commit_on_success`.
To check for this problem, look for calls to ``cursor.execute()``. They're
usually followed by a call to ``transaction.commit_unless_managed``, which
isn't necessary any more and should be removed.
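A minimal sketch of that migration, using :func:`atomic` as the
non-deprecated way to keep the statements together; the table names are
invented::

    from django.db import connection, transaction

    def reset_counters():
        cursor = connection.cursor()
        # Before 1.6:
        #     cursor.execute("UPDATE counters SET value = 0")
        #     cursor.execute("UPDATE totals SET value = 0")
        #     transaction.commit_unless_managed()
        # After 1.6, each statement commits on its own unless wrapped:
        with transaction.atomic():
            cursor.execute("UPDATE counters SET value = 0")
            cursor.execute("UPDATE totals SET value = 0")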
Select for update
~~~~~~~~~~~~~~~~~
If you were relying on "automatic transactions" to provide locking between
:meth:`~django.db.models.query.QuerySet.select_for_update` and a subsequent
write operation — an extremely fragile design, but nonetheless possible — you
must wrap the relevant code in :func:`atomic`.
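For instance, assuming a hypothetical ``Seat`` model, the row lock acquired by
``select_for_update()`` only lasts as long as the enclosing transaction, so
the read and the write must share one::

    from django.db import transaction

    def reserve_seat(seat_id, customer):
        with transaction.atomic():
            seat = Seat.objects.select_for_update().get(pk=seat_id)
            if seat.reserved_by is None:
                seat.reserved_by = customer
                seat.save()
            return seat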
Using a high isolation level
~~~~~~~~~~~~~~~~~~~~~~~~~~~~
If you were using the "repeatable read" isolation level or higher, and if you
relied on "automatic transactions" to guarantee consistency between successive
reads, the new behavior might be backwards-incompatible. To enforce
consistency, you must wrap such sequences in :func:`atomic`.
MySQL defaults to "repeatable read" and SQLite to "serializable"; they may be
affected by this problem.
At the "read committed" isolation level or lower, "automatic transactions"
have no effect on the semantics of any sequence of ORM operations.
PostgreSQL and Oracle default to "read committed" and aren't affected, unless
you changed the isolation level.
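For example, under "repeatable read" or stricter, two reads that must be
consistent with each other can be grouped like this; ``Account`` and ``Entry``
are hypothetical models::

    from django.db import transaction

    def audit_balance(account_id):
        with transaction.atomic():
            balance = Account.objects.get(pk=account_id).balance
            total = sum(e.amount for e in Entry.objects.filter(account_id=account_id))
        return balance, total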
Using unsupported database features
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
With triggers, views, or functions, it's possible to make ORM reads result in
database modifications. Django 1.5 and earlier don't deal with this case, and
it's theoretically possible to observe a different behavior after upgrading to
Django 1.6 or later. If in doubt, use :func:`atomic` to enforce integrity.

View File

@ -302,8 +302,8 @@ class PostgresNewConnectionTest(TestCase):
transaction is rolled back. transaction is rolled back.
""" """
@unittest.skipUnless( @unittest.skipUnless(
connection.vendor == 'postgresql' and connection.isolation_level > 0, connection.vendor == 'postgresql',
"This test applies only to PostgreSQL without autocommit") "This test applies only to PostgreSQL")
def test_connect_and_rollback(self): def test_connect_and_rollback(self):
new_connections = ConnectionHandler(settings.DATABASES) new_connections = ConnectionHandler(settings.DATABASES)
new_connection = new_connections[DEFAULT_DB_ALIAS] new_connection = new_connections[DEFAULT_DB_ALIAS]
@ -522,7 +522,8 @@ class FkConstraintsTests(TransactionTestCase):
""" """
When constraint checks are disabled, should be able to write bad data without IntegrityErrors. When constraint checks are disabled, should be able to write bad data without IntegrityErrors.
""" """
with transaction.commit_manually(): transaction.set_autocommit(False)
try:
# Create an Article. # Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r) models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrive it from the DB # Retrive it from the DB
@ -536,12 +537,15 @@ class FkConstraintsTests(TransactionTestCase):
self.fail("IntegrityError should not have occurred.") self.fail("IntegrityError should not have occurred.")
finally: finally:
transaction.rollback() transaction.rollback()
finally:
transaction.set_autocommit(True)
def test_disable_constraint_checks_context_manager(self): def test_disable_constraint_checks_context_manager(self):
""" """
When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors. When constraint checks are disabled (using context manager), should be able to write bad data without IntegrityErrors.
""" """
with transaction.commit_manually(): transaction.set_autocommit(False)
try:
# Create an Article. # Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r) models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrive it from the DB # Retrive it from the DB
@ -554,12 +558,15 @@ class FkConstraintsTests(TransactionTestCase):
self.fail("IntegrityError should not have occurred.") self.fail("IntegrityError should not have occurred.")
finally: finally:
transaction.rollback() transaction.rollback()
finally:
transaction.set_autocommit(True)
def test_check_constraints(self): def test_check_constraints(self):
""" """
Constraint checks should raise an IntegrityError when bad data is in the DB. Constraint checks should raise an IntegrityError when bad data is in the DB.
""" """
with transaction.commit_manually(): try:
transaction.set_autocommit(False)
# Create an Article. # Create an Article.
models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r) models.Article.objects.create(headline="Test article", pub_date=datetime.datetime(2010, 9, 4), reporter=self.r)
# Retrive it from the DB # Retrive it from the DB
@ -572,6 +579,8 @@ class FkConstraintsTests(TransactionTestCase):
connection.check_constraints() connection.check_constraints()
finally: finally:
transaction.rollback() transaction.rollback()
finally:
transaction.set_autocommit(True)
class ThreadTests(TestCase): class ThreadTests(TestCase):

View File

@ -22,9 +22,7 @@ class DeleteLockingTest(TransactionTestCase):
self.conn2 = new_connections[DEFAULT_DB_ALIAS] self.conn2 = new_connections[DEFAULT_DB_ALIAS]
# Put both DB connections into managed transaction mode # Put both DB connections into managed transaction mode
transaction.enter_transaction_management() transaction.enter_transaction_management()
transaction.managed(True)
self.conn2.enter_transaction_management() self.conn2.enter_transaction_management()
self.conn2.managed(True)
def tearDown(self): def tearDown(self):
# Close down the second connection. # Close down the second connection.

View File

@ -25,7 +25,8 @@ class SampleTestCase(TestCase):
class TestNoInitialDataLoading(TransactionTestCase): class TestNoInitialDataLoading(TransactionTestCase):
def test_syncdb(self): def test_syncdb(self):
with transaction.commit_manually(): transaction.set_autocommit(False)
try:
Book.objects.all().delete() Book.objects.all().delete()
management.call_command( management.call_command(
@ -35,6 +36,9 @@ class TestNoInitialDataLoading(TransactionTestCase):
) )
self.assertQuerysetEqual(Book.objects.all(), []) self.assertQuerysetEqual(Book.objects.all(), [])
transaction.rollback() transaction.rollback()
finally:
transaction.set_autocommit(True)
def test_flush(self): def test_flush(self):
# Test presence of fixture (flush called by TransactionTestCase) # Test presence of fixture (flush called by TransactionTestCase)
@ -45,7 +49,8 @@ class TestNoInitialDataLoading(TransactionTestCase):
lambda a: a.name lambda a: a.name
) )
with transaction.commit_manually(): transaction.set_autocommit(False)
try:
management.call_command( management.call_command(
'flush', 'flush',
verbosity=0, verbosity=0,
@ -55,6 +60,8 @@ class TestNoInitialDataLoading(TransactionTestCase):
) )
self.assertQuerysetEqual(Book.objects.all(), []) self.assertQuerysetEqual(Book.objects.all(), [])
transaction.rollback() transaction.rollback()
finally:
transaction.set_autocommit(True)
class FixtureTestCase(TestCase): class FixtureTestCase(TestCase):

View File

@ -684,5 +684,8 @@ class TestTicket11101(TransactionTestCase):
@skipUnlessDBFeature('supports_transactions') @skipUnlessDBFeature('supports_transactions')
def test_ticket_11101(self): def test_ticket_11101(self):
"""Test that fixtures can be rolled back (ticket #11101).""" """Test that fixtures can be rolled back (ticket #11101)."""
ticket_11101 = transaction.commit_manually(self.ticket_11101) transaction.set_autocommit(False)
ticket_11101() try:
self.ticket_11101()
finally:
transaction.set_autocommit(True)

View File

@ -1,9 +1,8 @@
from django.core.handlers.wsgi import WSGIHandler from django.core.handlers.wsgi import WSGIHandler
from django.core.signals import request_started, request_finished from django.core.signals import request_started, request_finished
from django.db import close_old_connections from django.db import close_old_connections, connection
from django.test import RequestFactory, TestCase from django.test import RequestFactory, TestCase, TransactionTestCase
from django.test.utils import override_settings from django.test.utils import override_settings
from django.utils import six
class HandlerTests(TestCase): class HandlerTests(TestCase):
@ -37,6 +36,31 @@ class HandlerTests(TestCase):
self.assertEqual(response.status_code, 400) self.assertEqual(response.status_code, 400)
class TransactionsPerRequestTests(TransactionTestCase):
urls = 'handlers.urls'
def test_no_transaction(self):
response = self.client.get('/in_transaction/')
self.assertContains(response, 'False')
def test_auto_transaction(self):
old_atomic_requests = connection.settings_dict['ATOMIC_REQUESTS']
try:
connection.settings_dict['ATOMIC_REQUESTS'] = True
response = self.client.get('/in_transaction/')
finally:
connection.settings_dict['ATOMIC_REQUESTS'] = old_atomic_requests
self.assertContains(response, 'True')
def test_no_auto_transaction(self):
old_atomic_requests = connection.settings_dict['ATOMIC_REQUESTS']
try:
connection.settings_dict['ATOMIC_REQUESTS'] = True
response = self.client.get('/not_in_transaction/')
finally:
connection.settings_dict['ATOMIC_REQUESTS'] = old_atomic_requests
self.assertContains(response, 'False')
class SignalsTests(TestCase): class SignalsTests(TestCase):
urls = 'handlers.urls' urls = 'handlers.urls'

View File

@ -1,9 +1,12 @@
from __future__ import unicode_literals from __future__ import unicode_literals
from django.conf.urls import patterns, url from django.conf.urls import patterns, url
from django.http import HttpResponse, StreamingHttpResponse
from . import views
urlpatterns = patterns('', urlpatterns = patterns('',
url(r'^regular/$', lambda request: HttpResponse(b"regular content")), url(r'^regular/$', views.regular),
url(r'^streaming/$', lambda request: StreamingHttpResponse([b"streaming", b" ", b"content"])), url(r'^streaming/$', views.streaming),
url(r'^in_transaction/$', views.in_transaction),
url(r'^not_in_transaction/$', views.not_in_transaction),
) )

17
tests/handlers/views.py Normal file
View File

@ -0,0 +1,17 @@
from __future__ import unicode_literals
from django.db import connection
from django.http import HttpResponse, StreamingHttpResponse
def regular(request):
return HttpResponse(b"regular content")
def streaming(request):
return StreamingHttpResponse([b"streaming", b" ", b"content"])
def in_transaction(request):
return HttpResponse(str(connection.in_atomic_block))
def not_in_transaction(request):
return HttpResponse(str(connection.in_atomic_block))
not_in_transaction.transactions_per_request = False

View File

@ -22,6 +22,9 @@ from django.test.utils import override_settings
from django.utils import six from django.utils import six
from django.utils.encoding import force_str from django.utils.encoding import force_str
from django.utils.six.moves import xrange from django.utils.six.moves import xrange
from django.utils.unittest import expectedFailure
from transactions.tests import IgnorePendingDeprecationWarningsMixin
from .models import Band from .models import Band
@ -669,11 +672,12 @@ class ETagGZipMiddlewareTest(TestCase):
self.assertNotEqual(gzip_etag, nogzip_etag) self.assertNotEqual(gzip_etag, nogzip_etag)
class TransactionMiddlewareTest(TransactionTestCase): class TransactionMiddlewareTest(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
""" """
Test the transaction middleware. Test the transaction middleware.
""" """
def setUp(self): def setUp(self):
super(TransactionMiddlewareTest, self).setUp()
self.request = HttpRequest() self.request = HttpRequest()
self.request.META = { self.request.META = {
'SERVER_NAME': 'testserver', 'SERVER_NAME': 'testserver',
@ -685,33 +689,22 @@ class TransactionMiddlewareTest(TransactionTestCase):
def tearDown(self): def tearDown(self):
transaction.abort() transaction.abort()
super(TransactionMiddlewareTest, self).tearDown()
def test_request(self): def test_request(self):
TransactionMiddleware().process_request(self.request) TransactionMiddleware().process_request(self.request)
self.assertTrue(transaction.is_managed()) self.assertFalse(transaction.get_autocommit())
def test_managed_response(self): def test_managed_response(self):
transaction.enter_transaction_management() transaction.enter_transaction_management()
transaction.managed(True)
Band.objects.create(name='The Beatles') Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty()) self.assertTrue(transaction.is_dirty())
TransactionMiddleware().process_response(self.request, self.response) TransactionMiddleware().process_response(self.request, self.response)
self.assertFalse(transaction.is_dirty()) self.assertFalse(transaction.is_dirty())
self.assertEqual(Band.objects.count(), 1) self.assertEqual(Band.objects.count(), 1)
def test_unmanaged_response(self):
transaction.enter_transaction_management()
transaction.managed(False)
self.assertEqual(Band.objects.count(), 0)
TransactionMiddleware().process_response(self.request, self.response)
self.assertFalse(transaction.is_managed())
# The transaction middleware doesn't commit/rollback if management
# has been disabled.
self.assertTrue(transaction.is_dirty())
def test_exception(self): def test_exception(self):
transaction.enter_transaction_management() transaction.enter_transaction_management()
transaction.managed(True)
Band.objects.create(name='The Beatles') Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty()) self.assertTrue(transaction.is_dirty())
TransactionMiddleware().process_exception(self.request, None) TransactionMiddleware().process_exception(self.request, None)
@ -726,7 +719,6 @@ class TransactionMiddlewareTest(TransactionTestCase):
raise IntegrityError() raise IntegrityError()
connections[DEFAULT_DB_ALIAS].commit = raise_exception connections[DEFAULT_DB_ALIAS].commit = raise_exception
transaction.enter_transaction_management() transaction.enter_transaction_management()
transaction.managed(True)
Band.objects.create(name='The Beatles') Band.objects.create(name='The Beatles')
self.assertTrue(transaction.is_dirty()) self.assertTrue(transaction.is_dirty())
with self.assertRaises(IntegrityError): with self.assertRaises(IntegrityError):

View File

@ -576,7 +576,6 @@ class DatabaseConnectionHandlingTests(TransactionTestCase):
# Make sure there is an open connection # Make sure there is an open connection
connection.cursor() connection.cursor()
connection.enter_transaction_management() connection.enter_transaction_management()
connection.managed(True)
signals.request_finished.send(sender=response._handler_class) signals.request_finished.send(sender=response._handler_class)
self.assertEqual(len(connection.transaction_state), 0) self.assertEqual(len(connection.transaction_state), 0)
@ -585,7 +584,6 @@ class DatabaseConnectionHandlingTests(TransactionTestCase):
connection.settings_dict['CONN_MAX_AGE'] = 0 connection.settings_dict['CONN_MAX_AGE'] = 0
connection.enter_transaction_management() connection.enter_transaction_management()
connection.managed(True)
connection.set_dirty() connection.set_dirty()
# Test that the rollback doesn't succeed (for example network failure # Test that the rollback doesn't succeed (for example network failure
# could cause this). # could cause this).

View File

@ -25,7 +25,6 @@ class SelectForUpdateTests(TransactionTestCase):
def setUp(self): def setUp(self):
transaction.enter_transaction_management() transaction.enter_transaction_management()
transaction.managed(True)
self.person = Person.objects.create(name='Reinhardt') self.person = Person.objects.create(name='Reinhardt')
# We have to commit here so that code in run_select_for_update can # We have to commit here so that code in run_select_for_update can
@ -37,7 +36,6 @@ class SelectForUpdateTests(TransactionTestCase):
new_connections = ConnectionHandler(settings.DATABASES) new_connections = ConnectionHandler(settings.DATABASES)
self.new_connection = new_connections[DEFAULT_DB_ALIAS] self.new_connection = new_connections[DEFAULT_DB_ALIAS]
self.new_connection.enter_transaction_management() self.new_connection.enter_transaction_management()
self.new_connection.managed(True)
# We need to set settings.DEBUG to True so we can capture # We need to set settings.DEBUG to True so we can capture
# the output SQL to examine. # the output SQL to examine.
@ -162,7 +160,6 @@ class SelectForUpdateTests(TransactionTestCase):
# We need to enter transaction management again, as this is done on # We need to enter transaction management again, as this is done on
# per-thread basis # per-thread basis
transaction.enter_transaction_management() transaction.enter_transaction_management()
transaction.managed(True)
people = list( people = list(
Person.objects.all().select_for_update(nowait=nowait) Person.objects.all().select_for_update(nowait=nowait)
) )

View File

@ -268,7 +268,6 @@ class SerializersTransactionTestBase(object):
# within a transaction in order to test forward reference # within a transaction in order to test forward reference
# handling. # handling.
transaction.enter_transaction_management() transaction.enter_transaction_management()
transaction.managed(True)
objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str) objs = serializers.deserialize(self.serializer_name, self.fwd_ref_str)
with connection.constraint_checks_disabled(): with connection.constraint_checks_disabled():
for obj in objs: for obj in objs:

View File

@ -22,4 +22,4 @@ class Reporter(models.Model):
ordering = ('first_name', 'last_name') ordering = ('first_name', 'last_name')
def __str__(self): def __str__(self):
return "%s %s" % (self.first_name, self.last_name) return ("%s %s" % (self.first_name, self.last_name)).strip()

View File

@ -1,12 +1,320 @@
from __future__ import absolute_import from __future__ import absolute_import
import sys
import warnings
from django.db import connection, transaction, IntegrityError from django.db import connection, transaction, IntegrityError
from django.test import TransactionTestCase, skipUnlessDBFeature from django.test import TransactionTestCase, skipUnlessDBFeature
from django.utils import six
from django.utils.unittest import skipUnless
from .models import Reporter from .models import Reporter
class TransactionTests(TransactionTestCase): @skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicTests(TransactionTestCase):
"""
Tests for the atomic decorator and context manager.
The tests make assertions on internal attributes because there isn't a
robust way to ask the database for its current transaction state.
Since the decorator syntax is converted into a context manager (see the
implementation), there are only a few basic tests with the decorator
syntax and the bulk of the tests use the context manager syntax.
"""
def test_decorator_syntax_commit(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_decorator_syntax_rollback(self):
@transaction.atomic
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_alternate_decorator_syntax_commit(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Tintin")
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_alternate_decorator_syntax_rollback(self):
@transaction.atomic()
def make_reporter():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
with six.assertRaisesRegex(self, Exception, "Oops"):
make_reporter()
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_nested_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_nested_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic():
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_nested_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_commit_commit(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_merged_commit_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# Writes in the outer block are rolled back too.
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_commit(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_rollback_rollback(self):
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic():
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_commit_commit(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with atomic:
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
self.assertQuerysetEqual(Reporter.objects.all(),
['<Reporter: Archibald Haddock>', '<Reporter: Tintin>'])
def test_reuse_commit_rollback(self):
atomic = transaction.atomic()
with atomic:
Reporter.objects.create(first_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_reuse_rollback_commit(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with atomic:
Reporter.objects.create(last_name="Haddock")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_reuse_rollback_rollback(self):
atomic = transaction.atomic()
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(last_name="Tintin")
with six.assertRaisesRegex(self, Exception, "Oops"):
with atomic:
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
raise Exception("Oops, that's his first name")
self.assertQuerysetEqual(Reporter.objects.all(), [])
class AtomicInsideTransactionTests(AtomicTests):
"""All basic tests for atomic should also pass within an existing transaction."""
def setUp(self):
self.atomic = transaction.atomic()
self.atomic.__enter__()
def tearDown(self):
self.atomic.__exit__(*sys.exc_info())
class AtomicInsideLegacyTransactionManagementTests(AtomicTests):
def setUp(self):
transaction.enter_transaction_management()
def tearDown(self):
# The tests access the database after exercising 'atomic', making the
# connection dirty; a rollback is required to make it clean.
transaction.rollback()
transaction.leave_transaction_management()
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicMergeTests(TransactionTestCase):
"""Test merging transactions with savepoint=False."""
def test_merged_outer_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Tournesol")
raise Exception("Oops, that's his last name")
# It wasn't possible to roll back
self.assertEqual(Reporter.objects.count(), 3)
# It wasn't possible to roll back
self.assertEqual(Reporter.objects.count(), 3)
# The outer block must roll back
self.assertQuerysetEqual(Reporter.objects.all(), [])
def test_merged_inner_savepoint_rollback(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
with transaction.atomic():
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Tournesol")
raise Exception("Oops, that's his last name")
# It wasn't possible to roll back
self.assertEqual(Reporter.objects.count(), 3)
# The first block with a savepoint must roll back
self.assertEqual(Reporter.objects.count(), 1)
self.assertQuerysetEqual(Reporter.objects.all(), ['<Reporter: Tintin>'])
def test_merged_outer_rollback_after_inner_failure_and_inner_success(self):
with transaction.atomic():
Reporter.objects.create(first_name="Tintin")
# Inner block without a savepoint fails
with six.assertRaisesRegex(self, Exception, "Oops"):
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Haddock")
raise Exception("Oops, that's his last name")
# It wasn't possible to roll back
self.assertEqual(Reporter.objects.count(), 2)
# Inner block with a savepoint succeeds
with transaction.atomic(savepoint=False):
Reporter.objects.create(first_name="Archibald", last_name="Haddock")
# It still wasn't possible to roll back
self.assertEqual(Reporter.objects.count(), 3)
# The outer block must rollback
self.assertQuerysetEqual(Reporter.objects.all(), [])
@skipUnless(connection.features.uses_savepoints,
"'atomic' requires transactions and savepoints.")
class AtomicErrorsTests(TransactionTestCase):
def test_atomic_requires_autocommit(self):
transaction.set_autocommit(False)
try:
with self.assertRaises(transaction.TransactionManagementError):
with transaction.atomic():
pass
finally:
transaction.set_autocommit(True)
def test_atomic_prevents_disabling_autocommit(self):
autocommit = transaction.get_autocommit()
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.set_autocommit(not autocommit)
# Make sure autocommit wasn't changed.
self.assertEqual(connection.autocommit, autocommit)
def test_atomic_prevents_calling_transaction_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.commit()
with self.assertRaises(transaction.TransactionManagementError):
transaction.rollback()
def test_atomic_prevents_calling_transaction_management_methods(self):
with transaction.atomic():
with self.assertRaises(transaction.TransactionManagementError):
transaction.enter_transaction_management()
with self.assertRaises(transaction.TransactionManagementError):
transaction.leave_transaction_management()
class IgnorePendingDeprecationWarningsMixin(object):
def setUp(self):
super(IgnorePendingDeprecationWarningsMixin, self).setUp()
self.catch_warnings = warnings.catch_warnings()
self.catch_warnings.__enter__()
warnings.filterwarnings("ignore", category=PendingDeprecationWarning)
def tearDown(self):
self.catch_warnings.__exit__(*sys.exc_info())
super(IgnorePendingDeprecationWarningsMixin, self).tearDown()
class TransactionTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
def create_a_reporter_then_fail(self, first, last): def create_a_reporter_then_fail(self, first, last):
a = Reporter(first_name=first, last_name=last) a = Reporter(first_name=first, last_name=last)
a.save() a.save()
@ -161,7 +469,7 @@ class TransactionTests(TransactionTestCase):
) )
class TransactionRollbackTests(TransactionTestCase): class TransactionRollbackTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
def execute_bad_sql(self): def execute_bad_sql(self):
cursor = connection.cursor() cursor = connection.cursor()
cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');") cursor.execute("INSERT INTO transactions_reporter (first_name, last_name) VALUES ('Douglas', 'Adams');")
@ -178,7 +486,7 @@ class TransactionRollbackTests(TransactionTestCase):
self.assertRaises(IntegrityError, execute_bad_sql) self.assertRaises(IntegrityError, execute_bad_sql)
transaction.rollback() transaction.rollback()
class TransactionContextManagerTests(TransactionTestCase): class TransactionContextManagerTests(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
def create_reporter_and_fail(self): def create_reporter_and_fail(self):
Reporter.objects.create(first_name="Bob", last_name="Holtzman") Reporter.objects.create(first_name="Bob", last_name="Holtzman")
raise Exception raise Exception

View File

@ -6,10 +6,12 @@ from django.test import TransactionTestCase, skipUnlessDBFeature
from django.test.utils import override_settings from django.test.utils import override_settings
from django.utils.unittest import skipIf, skipUnless from django.utils.unittest import skipIf, skipUnless
from transactions.tests import IgnorePendingDeprecationWarningsMixin
from .models import Mod, M2mA, M2mB from .models import Mod, M2mA, M2mB
class TestTransactionClosing(TransactionTestCase): class TestTransactionClosing(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
""" """
Tests to make sure that transactions are properly closed Tests to make sure that transactions are properly closed
when they should be, and aren't left pending after operations when they should be, and aren't left pending after operations
@@ -166,17 +168,13 @@ class TestTransactionClosing(TransactionTestCase):
        (connection.settings_dict['NAME'] == ':memory:' or
         not connection.settings_dict['NAME']),
        'Test uses multiple connections, but in-memory sqlite does not support this')
-class TestNewConnection(TransactionTestCase):
+class TestNewConnection(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
    """
    Check that new connections don't have special behaviour.
    """
    def setUp(self):
        self._old_backend = connections[DEFAULT_DB_ALIAS]
        settings = self._old_backend.settings_dict.copy()
-        opts = settings['OPTIONS'].copy()
-        if 'autocommit' in opts:
-            opts['autocommit'] = False
-        settings['OPTIONS'] = opts
        new_backend = self._old_backend.__class__(settings, DEFAULT_DB_ALIAS)
        connections[DEFAULT_DB_ALIAS] = new_backend
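The removed lines date from when autocommit had to be requested (or suppressed) per backend through OPTIONS; after this branch, connections are created in autocommit mode by default, so the test no longer needs to force the flag off when it rebuilds the connection. A project that really wants the old always-in-a-transaction behaviour configures it on the database entry instead — a sketch, assuming the Django 1.6-style AUTOCOMMIT key and a hypothetical database name:

    # settings.py (sketch; 'AUTOCOMMIT' defaults to True and is rarely changed)
    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'mydb',        # hypothetical database name
            'AUTOCOMMIT': False,   # opt back out of database-level autocommit
        }
    }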
@@ -193,16 +191,20 @@ class TestNewConnection(TransactionTestCase):
        """
        Users are allowed to commit and rollback connections.
        """
-        # The starting value is False, not None.
-        self.assertIs(connection._dirty, False)
-        list(Mod.objects.all())
-        self.assertTrue(connection.is_dirty())
-        connection.commit()
-        self.assertFalse(connection.is_dirty())
-        list(Mod.objects.all())
-        self.assertTrue(connection.is_dirty())
-        connection.rollback()
-        self.assertFalse(connection.is_dirty())
+        connection.set_autocommit(False)
+        try:
+            # The starting value is False, not None.
+            self.assertIs(connection._dirty, False)
+            list(Mod.objects.all())
+            self.assertTrue(connection.is_dirty())
+            connection.commit()
+            self.assertFalse(connection.is_dirty())
+            list(Mod.objects.all())
+            self.assertTrue(connection.is_dirty())
+            connection.rollback()
+            self.assertFalse(connection.is_dirty())
+        finally:
+            connection.set_autocommit(True)
    def test_enter_exit_management(self):
        orig_dirty = connection._dirty
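Because connections now start in autocommit, the rewritten test has to switch autocommit off explicitly before it can observe the dirty/commit/rollback behaviour, and it restores the default in a finally block. The same pattern works for any code that wants a manually managed transaction without the legacy API — a minimal sketch (the table name is illustrative, not from the diff):

    from django.db import connection


    def manual_transaction_demo():
        connection.set_autocommit(False)
        try:
            cursor = connection.cursor()
            cursor.execute("UPDATE demo_counter SET value = value + 1")  # hypothetical table
            connection.commit()    # nothing is visible to other sessions until this point
        except Exception:
            connection.rollback()  # undo the whole unit of work on any error
            raise
        finally:
            connection.set_autocommit(True)  # always restore the default state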
@@ -210,39 +212,10 @@ class TestNewConnection(TransactionTestCase):
        connection.leave_transaction_management()
        self.assertEqual(orig_dirty, connection._dirty)
-    def test_commit_unless_managed(self):
-        cursor = connection.cursor()
-        cursor.execute("INSERT into transactions_regress_mod (fld) values (2)")
-        connection.commit_unless_managed()
-        self.assertFalse(connection.is_dirty())
-        self.assertEqual(len(Mod.objects.all()), 1)
-        self.assertTrue(connection.is_dirty())
-        connection.commit_unless_managed()
-        self.assertFalse(connection.is_dirty())
-    def test_commit_unless_managed_in_managed(self):
-        cursor = connection.cursor()
-        connection.enter_transaction_management()
-        transaction.managed(True)
-        cursor.execute("INSERT into transactions_regress_mod (fld) values (2)")
-        connection.commit_unless_managed()
-        self.assertTrue(connection.is_dirty())
-        connection.rollback()
-        self.assertFalse(connection.is_dirty())
-        self.assertEqual(len(Mod.objects.all()), 0)
-        connection.commit()
-        connection.leave_transaction_management()
-        self.assertFalse(connection.is_dirty())
-        self.assertEqual(len(Mod.objects.all()), 0)
-        self.assertTrue(connection.is_dirty())
-        connection.commit_unless_managed()
-        self.assertFalse(connection.is_dirty())
-        self.assertEqual(len(Mod.objects.all()), 0)
@skipUnless(connection.vendor == 'postgresql',
            "This test only valid for PostgreSQL")
-class TestPostgresAutocommitAndIsolation(TransactionTestCase):
+class TestPostgresAutocommitAndIsolation(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
    """
    Tests to make sure psycopg2's autocommit mode and isolation level
    is restored after entering and leaving transaction management.
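The deleted tests covered commit_unless_managed(), which only made sense while Django kept connections in a pending transaction outside of managed blocks: it committed when nothing was managing the transaction and did nothing otherwise. With database-level autocommit the "unless managed" case commits by itself, so the call and its tests go away. A hedged sketch of the before/after for the same raw SQL:

    from django.db import connection, transaction

    # Old style (removed above): explicitly flush the implicit transaction.
    #     cursor = connection.cursor()
    #     cursor.execute("INSERT INTO transactions_regress_mod (fld) VALUES (2)")
    #     connection.commit_unless_managed()

    # New style: under autocommit the statement is committed as soon as it runs;
    # wrap it in atomic() only when it must be grouped with other statements.
    def insert_mod_value():
        with transaction.atomic():
            cursor = connection.cursor()
            cursor.execute("INSERT INTO transactions_regress_mod (fld) VALUES (2)")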
@@ -261,7 +234,6 @@ class TestPostgresAutocommitAndIsolation(TransactionTestCase):
        self._old_backend = connections[DEFAULT_DB_ALIAS]
        settings = self._old_backend.settings_dict.copy()
        opts = settings['OPTIONS'].copy()
-        opts['autocommit'] = True
        opts['isolation_level'] = ISOLATION_LEVEL_SERIALIZABLE
        settings['OPTIONS'] = opts
        new_backend = self._old_backend.__class__(settings, DEFAULT_DB_ALIAS)
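Only the forced opts['autocommit'] = True line goes away here: the PostgreSQL backend now enables autocommit for every new connection on its own, while the isolation_level option still works as before. A sketch of the equivalent settings this setUp ends up exercising — values taken from the hunk, the database name is hypothetical:

    from psycopg2.extensions import ISOLATION_LEVEL_SERIALIZABLE

    DATABASES = {
        'default': {
            'ENGINE': 'django.db.backends.postgresql_psycopg2',
            'NAME': 'test_db',  # hypothetical
            # 'autocommit' is gone -- only the isolation level remains in OPTIONS.
            'OPTIONS': {'isolation_level': ISOLATION_LEVEL_SERIALIZABLE},
        }
    }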
@@ -275,40 +247,42 @@ class TestPostgresAutocommitAndIsolation(TransactionTestCase):
        connections[DEFAULT_DB_ALIAS] = self._old_backend
    def test_initial_autocommit_state(self):
-        self.assertTrue(connection.features.uses_autocommit)
-        self.assertEqual(connection.isolation_level, self._autocommit)
+        # Autocommit is activated when the connection is created.
+        connection.cursor().close()
+        self.assertTrue(connection.autocommit)
    def test_transaction_management(self):
        transaction.enter_transaction_management()
-        transaction.managed(True)
+        self.assertFalse(connection.autocommit)
        self.assertEqual(connection.isolation_level, self._serializable)
        transaction.leave_transaction_management()
-        self.assertEqual(connection.isolation_level, self._autocommit)
+        self.assertTrue(connection.autocommit)
    def test_transaction_stacking(self):
        transaction.enter_transaction_management()
-        transaction.managed(True)
+        self.assertFalse(connection.autocommit)
        self.assertEqual(connection.isolation_level, self._serializable)
        transaction.enter_transaction_management()
+        self.assertFalse(connection.autocommit)
        self.assertEqual(connection.isolation_level, self._serializable)
        transaction.leave_transaction_management()
+        self.assertFalse(connection.autocommit)
        self.assertEqual(connection.isolation_level, self._serializable)
        transaction.leave_transaction_management()
-        self.assertEqual(connection.isolation_level, self._autocommit)
+        self.assertTrue(connection.autocommit)
    def test_enter_autocommit(self):
        transaction.enter_transaction_management()
-        transaction.managed(True)
+        self.assertFalse(connection.autocommit)
        self.assertEqual(connection.isolation_level, self._serializable)
        list(Mod.objects.all())
        self.assertTrue(transaction.is_dirty())
        # Enter autocommit mode again.
        transaction.enter_transaction_management(False)
-        transaction.managed(False)
        self.assertFalse(transaction.is_dirty())
        self.assertEqual(
            connection.connection.get_transaction_status(),
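The rewritten assertions track Django's own connection.autocommit flag instead of inferring the mode from psycopg2 isolation levels, and the transaction.managed(True) calls vanish because enter_transaction_management() itself now switches autocommit off, with leave_transaction_management() switching it back on once the outermost level exits. Roughly the state machine these tests verify — a sketch mirroring the assertions above, not the backend implementation:

    from django.db import connection, transaction


    def show_autocommit_transitions():
        connection.cursor().close()                 # force the lazy connection open
        assert connection.autocommit is True        # baseline after connecting
        transaction.enter_transaction_management()
        assert connection.autocommit is False       # managed block: autocommit off
        transaction.enter_transaction_management()  # nesting keeps it off
        assert connection.autocommit is False
        transaction.leave_transaction_management()
        assert connection.autocommit is False       # still inside the outer level
        transaction.leave_transaction_management()
        assert connection.autocommit is True        # restored at the outermost exit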
@@ -316,12 +290,13 @@ class TestPostgresAutocommitAndIsolation(TransactionTestCase):
        list(Mod.objects.all())
        self.assertFalse(transaction.is_dirty())
        transaction.leave_transaction_management()
+        self.assertFalse(connection.autocommit)
        self.assertEqual(connection.isolation_level, self._serializable)
        transaction.leave_transaction_management()
-        self.assertEqual(connection.isolation_level, self._autocommit)
+        self.assertTrue(connection.autocommit)
-class TestManyToManyAddTransaction(TransactionTestCase):
+class TestManyToManyAddTransaction(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
    def test_manyrelated_add_commit(self):
        "Test for https://code.djangoproject.com/ticket/16818"
        a = M2mA.objects.create()
@@ -336,8 +311,10 @@ class TestManyToManyAddTransaction(TransactionTestCase):
        self.assertEqual(a.others.count(), 1)
-class SavepointTest(TransactionTestCase):
+class SavepointTest(IgnorePendingDeprecationWarningsMixin, TransactionTestCase):
+    @skipIf(connection.vendor == 'sqlite',
+            "SQLite doesn't support savepoints in managed mode")
    @skipUnlessDBFeature('uses_savepoints')
    def test_savepoint_commit(self):
        @commit_manually
@@ -353,6 +330,8 @@ class SavepointTest(TransactionTestCase):
        work()
+    @skipIf(connection.vendor == 'sqlite',
+            "SQLite doesn't support savepoints in managed mode")
    @skipIf(connection.vendor == 'mysql' and
            connection.features._mysql_storage_engine == 'MyISAM',
            "MyISAM MySQL storage engine doesn't support savepoints")