Merge pull request #376 from andrewgodwin/schema-alteration

Schema alteration
Marc Tamlyn 2013-08-23 04:41:23 -07:00
commit 9aa358cedd
111 changed files with 6334 additions and 403 deletions

View File

@@ -609,3 +609,10 @@ STATICFILES_FINDERS = (
'django.contrib.staticfiles.finders.AppDirectoriesFinder',
# 'django.contrib.staticfiles.finders.DefaultStorageFinder',
)
##############
# MIGRATIONS #
##############
# Migration module overrides for apps, by app label.
MIGRATION_MODULES = {}
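The new setting maps an app label to the dotted path of the module holding that app's migrations, overriding the default "<app>.migrations" package. A minimal sketch (the "blog" label and module path are hypothetical):

    MIGRATION_MODULES = {
        "blog": "blog.db_migrations",  # load migrations from here instead of blog/migrations/
    }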

View File

@@ -65,7 +65,7 @@ def create_permissions(app, created_models, verbosity, db=DEFAULT_DB_ALIAS, **kw
except UnavailableApp:
return
if not router.allow_syncdb(db, auth_app.Permission):
if not router.allow_migrate(db, auth_app.Permission):
return
from django.contrib.contenttypes.models import ContentType
@@ -188,7 +188,7 @@ def get_default_username(check_db=True):
return ''
return default_username
signals.post_syncdb.connect(create_permissions,
signals.post_migrate.connect(create_permissions,
dispatch_uid="django.contrib.auth.management.create_permissions")
signals.post_syncdb.connect(create_superuser,
signals.post_migrate.connect(create_superuser,
sender=auth_app, dispatch_uid="django.contrib.auth.management.create_superuser")
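The auth handlers now attach to the renamed post_migrate signal. A minimal receiver sketch, mirroring the keyword arguments that emit_post_migrate_signal (later in this commit) sends; the "myapp" dispatch_uid is hypothetical:

    from django.db.models import signals

    def install_defaults(app, created_models, verbosity, db, **kwargs):
        # Runs once per application module after its tables exist;
        # created_models holds the model classes whose tables were just created.
        pass

    signals.post_migrate.connect(install_defaults,
        dispatch_uid="myapp.install_defaults")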

View File

@@ -16,7 +16,7 @@ def update_contenttypes(app, created_models, verbosity=2, db=DEFAULT_DB_ALIAS, *
except UnavailableApp:
return
if not router.allow_syncdb(db, ContentType):
if not router.allow_migrate(db, ContentType):
return
ContentType.objects.clear_cache()
@@ -88,7 +88,7 @@ def update_all_contenttypes(verbosity=2, **kwargs):
for app in get_apps():
update_contenttypes(app, None, verbosity, **kwargs)
signals.post_syncdb.connect(update_contenttypes)
signals.post_migrate.connect(update_contenttypes)
if __name__ == "__main__":
update_all_contenttypes()

View File

@@ -47,7 +47,7 @@ class SpatiaLiteCreation(DatabaseCreation):
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# test fixtures, or autogenerated from post_migrate triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',

View File

@@ -311,7 +311,7 @@ class OtherRouter(object):
def allow_relation(self, obj1, obj2, **hints):
return None
def allow_syncdb(self, db, model):
def allow_migrate(self, db, model):
return True
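The router hook is renamed from allow_syncdb to allow_migrate but keeps the same contract: return True or False to decide, or None to express no opinion. A sketch of a router under that contract (the "legacy" database alias and app label are hypothetical):

    class LegacyRouter(object):
        def allow_migrate(self, db, model):
            if model._meta.app_label == "legacy":
                return db == "legacy"  # keep legacy tables on their own database
            return None                # no opinion; defer to other routers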

View File

@@ -11,7 +11,7 @@ from django.core.management.color import no_style
def create_default_site(app, created_models, verbosity, db, **kwargs):
# Only create the default sites in databases where Django created the table
if Site in created_models and router.allow_syncdb(db, Site) :
if Site in created_models and router.allow_migrate(db, Site) :
# The default settings set SITE_ID = 1, and some tests in Django's test
# suite rely on this value. However, if database sequences are reused
# (e.g. in the test suite after flush/syncdb), it isn't guaranteed that
@@ -33,4 +33,4 @@ def create_default_site(app, created_models, verbosity, db, **kwargs):
Site.objects.clear_cache()
signals.post_syncdb.connect(create_default_site, sender=site_app)
signals.post_migrate.connect(create_default_site, sender=site_app)

View File

@@ -24,7 +24,7 @@ class Command(LabelCommand):
def handle_label(self, tablename, **options):
db = options.get('database')
cache = BaseDatabaseCache(tablename, {})
if not router.allow_syncdb(db, cache.cache_model_class):
if not router.allow_migrate(db, cache.cache_model_class):
return
connection = connections[db]
fields = (

View File

@@ -118,7 +118,7 @@ class Command(BaseCommand):
for model in sort_dependencies(app_list.items()):
if model in excluded_models:
continue
if not model._meta.proxy and router.allow_syncdb(using, model):
if not model._meta.proxy and router.allow_migrate(using, model):
if use_base_manager:
objects = model._base_manager
else:

View File

@@ -7,7 +7,7 @@ from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_flush, emit_post_sync_signal
from django.core.management.sql import sql_flush, emit_post_migrate_signal
from django.utils.six.moves import input
from django.utils import six
@@ -23,8 +23,8 @@ class Command(NoArgsCommand):
help='Tells Django not to load any initial data after database synchronization.'),
)
help = ('Returns the database to the state it was in immediately after '
'syncdb was executed. This means that all data will be removed '
'from the database, any post-synchronization handlers will be '
'migrate was first executed. This means that all data will be removed '
'from the database, any post-migration handlers will be '
're-executed, and the initial_data fixture will be re-installed.')
def handle_noargs(self, **options):
@@ -35,7 +35,7 @@ class Command(NoArgsCommand):
# The following are stealth options used by Django's internals.
reset_sequences = options.get('reset_sequences', True)
allow_cascade = options.get('allow_cascade', False)
inhibit_post_syncdb = options.get('inhibit_post_syncdb', False)
inhibit_post_migrate = options.get('inhibit_post_migrate', False)
self.style = no_style()
@@ -54,7 +54,7 @@ class Command(NoArgsCommand):
if interactive:
confirm = input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
and return each table to a fresh state.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
@@ -77,8 +77,8 @@ Are you sure you want to do this?
"The full error: %s") % (connection.settings_dict['NAME'], e)
six.reraise(CommandError, CommandError(new_msg), sys.exc_info()[2])
if not inhibit_post_syncdb:
self.emit_post_syncdb(verbosity, interactive, db)
if not inhibit_post_migrate:
self.emit_post_migrate(verbosity, interactive, db)
# Reinstall the initial_data fixture.
if options.get('load_initial_data'):
@@ -89,13 +89,13 @@ Are you sure you want to do this?
self.stdout.write("Flush cancelled.\n")
@staticmethod
def emit_post_syncdb(verbosity, interactive, database):
# Emit the post sync signal. This allows individual applications to
# respond as if the database had been sync'd from scratch.
def emit_post_migrate(verbosity, interactive, database):
# Emit the post migrate signal. This allows individual applications to
# respond as if the database had been migrated from scratch.
all_models = []
for app in models.get_apps():
all_models.extend([
m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(database, m)
if router.allow_migrate(database, m)
])
emit_post_sync_signal(set(all_models), verbosity, interactive, database)
emit_post_migrate_signal(set(all_models), verbosity, interactive, database)
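The stealth option keeps its old behaviour under the new name. A sketch of driving the command from test or setup code, using only the options defined above:

    from django.core.management import call_command

    # Flush without prompting, without re-firing post_migrate handlers,
    # and without reloading the initial_data fixture.
    call_command("flush", interactive=False, inhibit_post_migrate=True,
                 load_initial_data=False)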

View File

@@ -134,7 +134,7 @@ class Command(BaseCommand):
for obj in objects:
objects_in_fixture += 1
if router.allow_syncdb(self.using, obj.object.__class__):
if router.allow_migrate(self.using, obj.object.__class__):
loaded_objects_in_fixture += 1
self.models.add(obj.object.__class__)
try:

View File

@@ -0,0 +1,84 @@
import sys
import os
from optparse import make_option
from django.core.management.base import BaseCommand
from django.core.exceptions import ImproperlyConfigured
from django.db import connections, DEFAULT_DB_ALIAS
from django.db.migrations.loader import MigrationLoader
from django.db.migrations.autodetector import MigrationAutodetector, InteractiveMigrationQuestioner
from django.db.migrations.state import ProjectState
from django.db.migrations.writer import MigrationWriter
from django.db.models.loading import cache
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--empty', action='store_true', dest='empty', default=False,
help='Make a blank migration.'),
)
help = "Creates new migration(s) for apps."
usage_str = "Usage: ./manage.py makemigrations [--empty] [app [app ...]]"
def handle(self, *app_labels, **options):
self.verbosity = int(options.get('verbosity'))
self.interactive = options.get('interactive')
# Make sure the app they asked for exists
app_labels = set(app_labels)
bad_app_labels = set()
for app_label in app_labels:
try:
cache.get_app(app_label)
except ImproperlyConfigured:
bad_app_labels.add(app_label)
if bad_app_labels:
for app_label in bad_app_labels:
self.stderr.write("App '%s' could not be found. Is it in INSTALLED_APPS?" % app_label)
sys.exit(2)
# Load the current graph state. Takes a connection, but it's not used
# (makemigrations doesn't look at the database state).
loader = MigrationLoader(connections[DEFAULT_DB_ALIAS])
# Detect changes
autodetector = MigrationAutodetector(
loader.graph.project_state(),
ProjectState.from_app_cache(cache),
InteractiveMigrationQuestioner(specified_apps=app_labels),
)
changes = autodetector.changes(graph=loader.graph, trim_to_apps=app_labels or None)
# No changes? Tell them.
if not changes:
if len(app_labels) == 1:
self.stdout.write("No changes detected in app '%s'" % app_labels.pop())
elif len(app_labels) > 1:
self.stdout.write("No changes detected in apps '%s'" % ("', '".join(app_labels)))
else:
self.stdout.write("No changes detected")
return
directory_created = {}
for app_label, migrations in changes.items():
self.stdout.write(self.style.MIGRATE_HEADING("Migrations for '%s':" % app_label) + "\n")
for migration in migrations:
# Describe the migration
writer = MigrationWriter(migration)
self.stdout.write(" %s:\n" % (self.style.MIGRATE_LABEL(writer.filename),))
for operation in migration.operations:
self.stdout.write(" - %s\n" % operation.describe())
# Write it
migrations_directory = os.path.dirname(writer.path)
if not directory_created.get(app_label, False):
if not os.path.isdir(migrations_directory):
os.mkdir(migrations_directory)
init_path = os.path.join(migrations_directory, "__init__.py")
if not os.path.isfile(init_path):
open(init_path, "w").close()
# We just do this once per app
directory_created[app_label] = True
with open(writer.path, "w") as fh:
fh.write(writer.as_string())
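Given the option and argument handling above, the command can be driven from manage.py or programmatically; the "polls" app label is hypothetical:

    from django.core.management import call_command

    call_command("makemigrations", "polls")              # autodetect changes in one app
    call_command("makemigrations", "polls", empty=True)  # write a blank migration to edit by hand
    call_command("makemigrations")                       # scan every installed app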

View File

@@ -0,0 +1,245 @@
from optparse import make_option
from collections import OrderedDict
from importlib import import_module
import itertools
import traceback
from django.conf import settings
from django.core.management import call_command
from django.core.management.base import BaseCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_migrate_signal, emit_pre_migrate_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.db.migrations.executor import MigrationExecutor
from django.db.migrations.loader import AmbiguityError
from django.utils.module_loading import module_has_submodule
class Command(BaseCommand):
option_list = BaseCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--no-initial-data', action='store_false', dest='load_initial_data', default=True,
help='Tells Django not to load any initial data after database synchronization.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
make_option('--fake', action='store_true', dest='fake', default=False,
help='Mark migrations as run without actually running them'),
)
help = "Updates database schema. Manages both apps with migrations and those without."
def handle(self, *args, **options):
self.verbosity = int(options.get('verbosity'))
self.interactive = options.get('interactive')
self.show_traceback = options.get('traceback')
self.load_initial_data = options.get('load_initial_data')
self.test_database = options.get('test_database', False)
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
if module_has_submodule(import_module(app_name), "management"):
import_module('.management', app_name)
# Get the database we're operating from
db = options.get('database')
connection = connections[db]
# Work out which apps have migrations and which do not
executor = MigrationExecutor(connection, self.migration_progress_callback)
# If they supplied command line arguments, work out what they mean.
run_syncdb = False
target_app_labels_only = True
if len(args) > 2:
raise CommandError("Too many command-line arguments (expecting 'appname' or 'appname migrationname')")
elif len(args) == 2:
app_label, migration_name = args
if app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations (you cannot selectively sync unmigrated apps)" % app_label)
if migration_name == "zero":
targets = [(app_label, None)]
else:
try:
migration = executor.loader.get_migration_by_prefix(app_label, migration_name)
except AmbiguityError:
raise CommandError("More than one migration matches '%s' in app '%s'. Please be more specific." % (app_label, migration_name))
except KeyError:
raise CommandError("Cannot find a migration matching '%s' from app '%s'. Is it in INSTALLED_APPS?" % (app_label, migration_name))
targets = [(app_label, migration.name)]
target_app_labels_only = False
elif len(args) == 1:
app_label = args[0]
if app_label not in executor.loader.migrated_apps:
raise CommandError("App '%s' does not have migrations (you cannot selectively sync unmigrated apps)" % app_label)
targets = [key for key in executor.loader.graph.leaf_nodes() if key[0] == app_label]
else:
targets = executor.loader.graph.leaf_nodes()
run_syncdb = True
plan = executor.migration_plan(targets)
# Print some useful info
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Operations to perform:"))
if run_syncdb:
self.stdout.write(self.style.MIGRATE_LABEL(" Synchronize unmigrated apps: ") + (", ".join(executor.loader.unmigrated_apps) or "(none)"))
if target_app_labels_only:
self.stdout.write(self.style.MIGRATE_LABEL(" Apply all migrations: ") + (", ".join(set(a for a, n in targets)) or "(none)"))
else:
if targets[0][1] is None:
self.stdout.write(self.style.MIGRATE_LABEL(" Unapply all migrations: ") + "%s" % (targets[0][0], ))
else:
self.stdout.write(self.style.MIGRATE_LABEL(" Target specific migration: ") + "%s, from %s" % (targets[0][1], targets[0][0]))
# Run the syncdb phase.
# If you ever manage to get rid of this, I owe you many, many drinks.
# Note that pre_migrate is called from inside here, as it needs
# the list of models about to be installed.
if run_syncdb:
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Synchronizing apps without migrations:"))
created_models = self.sync_apps(connection, executor.loader.unmigrated_apps)
else:
created_models = []
# Migrate!
if self.verbosity >= 1:
self.stdout.write(self.style.MIGRATE_HEADING("Running migrations:"))
if not plan:
if self.verbosity >= 1:
self.stdout.write(" No migrations needed.")
else:
executor.migrate(targets, plan, fake=options.get("fake", False))
# Send the post_migrate signal, so individual apps can do whatever they need
# to do at this point.
emit_post_migrate_signal(created_models, self.verbosity, self.interactive, connection.alias)
def migration_progress_callback(self, action, migration):
if self.verbosity >= 1:
if action == "apply_start":
self.stdout.write(" Applying %s..." % migration, ending="")
self.stdout.flush()
elif action == "apply_success":
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
elif action == "unapply_start":
self.stdout.write(" Unapplying %s..." % migration, ending="")
self.stdout.flush()
elif action == "unapply_success":
self.stdout.write(self.style.MIGRATE_SUCCESS(" OK"))
def sync_apps(self, connection, apps):
"Runs the old syncdb-style operation on a list of apps."
cursor = connection.cursor()
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names()
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app.__name__.split('.')[-2],
[
m for m in models.get_models(app, include_auto_created=True)
if router.allow_migrate(connection.alias, m)
])
for app in models.get_apps() if app.__name__.split('.')[-2] in apps
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
# Note that if a model is unmanaged we short-circuit and never try to install it
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = OrderedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
create_models = set([x for x in itertools.chain(*manifest.values())])
emit_pre_migrate_signal(create_models, self.verbosity, self.interactive, connection.alias)
# Create the tables for each model
if self.verbosity >= 1:
self.stdout.write(" Creating tables...\n")
with transaction.atomic(using=connection.alias, savepoint=False):
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if self.verbosity >= 3:
self.stdout.write(" Processing %s.%s model\n" % (app_name, model._meta.object_name))
sql, references = connection.creation.sql_create_model(model, no_style(), seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, no_style(), pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, no_style(), pending_references))
if self.verbosity >= 1 and sql:
self.stdout.write(" Creating table %s\n" % model._meta.db_table)
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
# We force a commit here, as that was the previous behaviour.
# If you can prove we don't need this, remove it.
transaction.set_dirty(using=connection.alias)
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
# Install custom SQL for the app (but only if this
# is a model we've just created)
if self.verbosity >= 1:
self.stdout.write(" Installing custom SQL...\n")
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, no_style(), connection)
if custom_sql:
if self.verbosity >= 2:
self.stdout.write(" Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=connection.alias):
for sql in custom_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write(" Failed to install custom SQL for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
if self.show_traceback:
traceback.print_exc()
else:
if self.verbosity >= 3:
self.stdout.write(" No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
if self.verbosity >= 1:
self.stdout.write(" Installing indexes...\n")
# Install SQL indices for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, no_style())
if index_sql:
if self.verbosity >= 2:
self.stdout.write(" Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=connection.alias):
for sql in index_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write(" Failed to install index for %s.%s model: %s\n" % (app_name, model._meta.object_name, e))
# Load initial_data fixtures (unless that has been disabled)
if self.load_initial_data:
call_command('loaddata', 'initial_data', verbosity=self.verbosity, database=connection.alias, skip_validation=True)
return created_models
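A usage sketch matching the argument handling above (the "polls" app label and migration names are hypothetical):

    from django.core.management import call_command

    call_command("migrate")                   # apply all leaf migrations, sync unmigrated apps
    call_command("migrate", "polls")          # apply everything for a single app
    call_command("migrate", "polls", "0002")  # forwards or backwards to a prefix-matched migration
    call_command("migrate", "polls", "zero")  # unapply all of the app's migrations
    call_command("migrate", fake=True)        # mark the plan as applied without running it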

View File

@@ -1,15 +1,8 @@
from collections import OrderedDict
from importlib import import_module
import warnings
from optparse import make_option
import itertools
import traceback
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand
from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model, emit_post_sync_signal, emit_pre_sync_signal
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
class Command(NoArgsCommand):
@@ -22,141 +15,8 @@ class Command(NoArgsCommand):
default=DEFAULT_DB_ALIAS, help='Nominates a database to synchronize. '
'Defaults to the "default" database.'),
)
help = "Create the database tables for all apps in INSTALLED_APPS whose tables haven't already been created."
help = "Deprecated - use 'migrate' instead."
def handle_noargs(self, **options):
verbosity = int(options.get('verbosity'))
interactive = options.get('interactive')
show_traceback = options.get('traceback')
load_initial_data = options.get('load_initial_data')
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError as exc:
# This is slightly hackish. We want to ignore ImportErrors
# if the "management" module itself is missing -- but we don't
# want to ignore the exception if the management module exists
# but raises an ImportError for some reason. The only way we
# can do this is to check the text of the exception. Note that
# we're a bit broad in how we check the text, because different
# Python implementations may not use the same text.
# CPython uses the text "No module named management"
# PyPy uses "No module named myproject.myapp.management"
msg = exc.args[0]
if not msg.startswith('No module named') or 'management' not in msg:
raise
db = options.get('database')
connection = connections[db]
cursor = connection.cursor()
# Get a list of already installed *models* so that references work right.
tables = connection.introspection.table_names()
seen_models = connection.introspection.installed_models(tables)
created_models = set()
pending_references = {}
# Build the manifest of apps and models that are to be synchronized
all_models = [
(app.__name__.split('.')[-2],
[m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)])
for app in models.get_apps()
]
def model_installed(model):
opts = model._meta
converter = connection.introspection.table_name_converter
return not ((converter(opts.db_table) in tables) or
(opts.auto_created and converter(opts.auto_created._meta.db_table) in tables))
manifest = OrderedDict(
(app_name, list(filter(model_installed, model_list)))
for app_name, model_list in all_models
)
create_models = set([x for x in itertools.chain(*manifest.values())])
emit_pre_sync_signal(create_models, verbosity, interactive, db)
# Create the tables for each model
if verbosity >= 1:
self.stdout.write("Creating tables ...\n")
with transaction.commit_on_success_unless_managed(using=db):
for app_name, model_list in manifest.items():
for model in model_list:
# Create the model's database table, if it doesn't already exist.
if verbosity >= 3:
self.stdout.write("Processing %s.%s model\n" % (app_name, model._meta.object_name))
sql, references = connection.creation.sql_create_model(model, self.style, seen_models)
seen_models.add(model)
created_models.add(model)
for refto, refs in references.items():
pending_references.setdefault(refto, []).extend(refs)
if refto in seen_models:
sql.extend(connection.creation.sql_for_pending_references(refto, self.style, pending_references))
sql.extend(connection.creation.sql_for_pending_references(model, self.style, pending_references))
if verbosity >= 1 and sql:
self.stdout.write("Creating table %s\n" % model._meta.db_table)
for statement in sql:
cursor.execute(statement)
tables.append(connection.introspection.table_name_converter(model._meta.db_table))
# Send the post_syncdb signal, so individual apps can do whatever they need
# to do at this point.
emit_post_sync_signal(created_models, verbosity, interactive, db)
# The connection may have been closed by a syncdb handler.
cursor = connection.cursor()
# Install custom SQL for the app (but only if this
# is a model we've just created)
if verbosity >= 1:
self.stdout.write("Installing custom SQL ...\n")
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
custom_sql = custom_sql_for_model(model, self.style, connection)
if custom_sql:
if verbosity >= 2:
self.stdout.write("Installing custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=db):
for sql in custom_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write("Failed to install custom SQL for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
if show_traceback:
traceback.print_exc()
else:
if verbosity >= 3:
self.stdout.write("No custom SQL for %s.%s model\n" % (app_name, model._meta.object_name))
if verbosity >= 1:
self.stdout.write("Installing indexes ...\n")
# Install SQL indices for all newly created models
for app_name, model_list in manifest.items():
for model in model_list:
if model in created_models:
index_sql = connection.creation.sql_indexes_for_model(model, self.style)
if index_sql:
if verbosity >= 2:
self.stdout.write("Installing index for %s.%s model\n" % (app_name, model._meta.object_name))
try:
with transaction.commit_on_success_unless_managed(using=db):
for sql in index_sql:
cursor.execute(sql)
except Exception as e:
self.stderr.write("Failed to install index for %s.%s model: %s\n" % \
(app_name, model._meta.object_name, e))
# Load initial_data fixtures (unless that has been disabled)
if load_initial_data:
call_command('loaddata', 'initial_data', verbosity=verbosity,
database=db, skip_validation=True)
warnings.warn("The syncdb command will be removed in Django 1.9", PendingDeprecationWarning)
call_command("migrate", **options)

View File

@@ -206,25 +206,25 @@ def custom_sql_for_model(model, style, connection):
return output
def emit_pre_sync_signal(create_models, verbosity, interactive, db):
# Emit the pre_sync signal for every application.
def emit_pre_migrate_signal(create_models, verbosity, interactive, db):
# Emit the pre_migrate signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print("Running pre-sync handlers for application %s" % app_name)
models.signals.pre_syncdb.send(sender=app, app=app,
print("Running pre-migrate handlers for application %s" % app_name)
models.signals.pre_migrate.send(sender=app, app=app,
create_models=create_models,
verbosity=verbosity,
interactive=interactive,
db=db)
def emit_post_sync_signal(created_models, verbosity, interactive, db):
# Emit the post_sync signal for every application.
def emit_post_migrate_signal(created_models, verbosity, interactive, db):
# Emit the post_migrate signal for every application.
for app in models.get_apps():
app_name = app.__name__.split('.')[-2]
if verbosity >= 2:
print("Running post-sync handlers for application %s" % app_name)
models.signals.post_syncdb.send(sender=app, app=app,
print("Running post-migrate handlers for application %s" % app_name)
models.signals.post_migrate.send(sender=app, app=app,
created_models=created_models, verbosity=verbosity,
interactive=interactive, db=db)
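These helpers fix the receiver signature for the renamed signals: pre_migrate passes create_models (tables about to be created), while post_migrate passes created_models. A minimal pre_migrate receiver sketch:

    from django.db import models

    def audit_new_tables(app, create_models, verbosity, interactive, db, **kwargs):
        # create_models lists the model classes whose tables are about to be made.
        if verbosity >= 2:
            print("Will create %d tables on %s" % (len(create_models), db))

    models.signals.pre_migrate.connect(audit_new_tables)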

View File

@@ -521,6 +521,10 @@ class BaseDatabaseWrapper(object):
"""
raise NotImplementedError
def schema_editor(self):
"Returns a new instance of this backend's SchemaEditor"
raise NotImplementedError()
class BaseDatabaseFeatures(object):
allows_group_by_pk = False
@@ -630,11 +634,32 @@ class BaseDatabaseFeatures(object):
# when autocommit is disabled? http://bugs.python.org/issue8145#msg109965
autocommits_when_autocommit_is_off = False
# Can we roll back DDL in a transaction?
can_rollback_ddl = False
# Can we issue more than one ALTER COLUMN clause in an ALTER TABLE?
supports_combined_alters = False
# What's the maximum length for index names?
max_index_name_length = 63
# Does it support foreign keys?
supports_foreign_keys = True
# Does it support CHECK constraints?
supports_check_constraints = True
# Does the backend support 'pyformat' style ("... %(name)s ...", {'name': value})
# parameter passing? Note this can be provided by the backend even if not
# supported by the Python driver
supports_paramstyle_pyformat = True
# Does the backend require literal defaults, rather than parameterised ones?
requires_literal_defaults = False
# Does the backend require a connection reset after each material schema change?
connection_persists_old_columns = False
def __init__(self, connection):
self.connection = connection
@@ -1227,7 +1252,7 @@ class BaseDatabaseIntrospection(object):
for model in models.get_models(app):
if not model._meta.managed:
continue
if not router.allow_syncdb(self.connection.alias, model):
if not router.allow_migrate(self.connection.alias, model):
continue
tables.add(model._meta.db_table)
tables.update([f.m2m_db_table() for f in model._meta.local_many_to_many])
@@ -1247,7 +1272,7 @@ class BaseDatabaseIntrospection(object):
all_models = []
for app in models.get_apps():
for model in models.get_models(app):
if router.allow_syncdb(self.connection.alias, model):
if router.allow_migrate(self.connection.alias, model):
all_models.append(model)
tables = list(map(self.table_name_converter, tables))
return set([
@@ -1268,7 +1293,7 @@ class BaseDatabaseIntrospection(object):
continue
if model._meta.swapped:
continue
if not router.allow_syncdb(self.connection.alias, model):
if not router.allow_migrate(self.connection.alias, model):
continue
for f in model._meta.local_fields:
if isinstance(f, models.AutoField):
@@ -1310,6 +1335,25 @@ class BaseDatabaseIntrospection(object):
"""
raise NotImplementedError
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index)
across one or more columns.
Returns a dict mapping constraint names to their attributes,
where attributes is a dict with keys:
* columns: List of columns this covers
* primary_key: True if primary key, False otherwise
* unique: True if this is a unique constraint, False otherwise
* foreign_key: (table, column) of target, or None
* check: True if check constraint, False otherwise
* index: True if index, False otherwise.
Some backends may return special constraint names that don't exist
if they don't name constraints of a certain type (e.g. SQLite)
"""
raise NotImplementedError
class BaseDatabaseClient(object):
"""

View File

@@ -23,11 +23,13 @@ class BaseDatabaseCreation(object):
destruction of test databases.
"""
data_types = {}
data_type_check_constraints = {}
def __init__(self, connection):
self.connection = connection
def _digest(self, *args):
@classmethod
def _digest(cls, *args):
"""
Generates a 32-bit digest of a set of arguments that can be used to
shorten identifying names.
@@ -330,18 +332,19 @@ class BaseDatabaseCreation(object):
settings.DATABASES[self.connection.alias]["NAME"] = test_database_name
self.connection.settings_dict["NAME"] = test_database_name
# Report syncdb messages at one level lower than that requested.
# Report migrate messages at one level lower than that requested.
# This ensures we don't get flooded with messages during testing
# (unless you really ask to be flooded)
call_command('syncdb',
call_command('migrate',
verbosity=max(verbosity - 1, 0),
interactive=False,
database=self.connection.alias,
load_initial_data=False)
load_initial_data=False,
test_database=True)
# We need to then do a flush to ensure that any data installed by
# custom SQL has been removed. The only test data should come from
# test fixtures, or autogenerated from post_syncdb triggers.
# test fixtures, or autogenerated from post_migrate triggers.
# This has the side effect of loading initial data (which was
# intentionally skipped in the syncdb).
call_command('flush',

View File

@@ -44,6 +44,9 @@ from django.db.backends.mysql.creation import DatabaseCreation
from django.db.backends.mysql.introspection import DatabaseIntrospection
from django.db.backends.mysql.validation import DatabaseValidation
from django.utils.encoding import force_str, force_text
from django.db.backends.mysql.schema import DatabaseSchemaEditor
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeBytes, SafeText
from django.utils import six
from django.utils import timezone
@@ -171,6 +174,7 @@ class DatabaseFeatures(BaseDatabaseFeatures):
requires_explicit_null_ordering_when_grouping = True
allows_primary_key_0 = False
uses_savepoints = True
supports_check_constraints = False
def __init__(self, connection):
super(DatabaseFeatures, self).__init__(connection)
@@ -514,6 +518,10 @@ class DatabaseWrapper(BaseDatabaseWrapper):
table_name, column_name, bad_row[1],
referenced_table_name, referenced_column_name))
def schema_editor(self):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self)
def is_usable(self):
try:
self.connection.ping()

View File

@@ -1,6 +1,6 @@
import re
from .base import FIELD_TYPE
from django.utils.datastructures import OrderedSet
from django.db.backends import BaseDatabaseIntrospection, FieldInfo
from django.utils.encoding import force_text
@@ -115,5 +115,71 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
for row in rows:
if row[2] in multicol_indexes:
continue
indexes[row[4]] = {'primary_key': (row[2] == 'PRIMARY'), 'unique': not bool(row[1])}
if row[4] not in indexes:
indexes[row[4]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[2] == 'PRIMARY':
indexes[row[4]]['primary_key'] = True
if not bool(row[1]):
indexes[row[4]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the actual constraint names and columns
name_query = """
SELECT kc.`constraint_name`, kc.`column_name`,
kc.`referenced_table_name`, kc.`referenced_column_name`
FROM information_schema.key_column_usage AS kc
WHERE
kc.table_schema = %s AND
kc.table_name = %s
"""
cursor.execute(name_query, [self.connection.settings_dict['NAME'], table_name])
for constraint, column, ref_table, ref_column in cursor.fetchall():
if constraint not in constraints:
constraints[constraint] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': False,
'check': False,
'foreign_key': (ref_table, ref_column) if ref_column else None,
}
constraints[constraint]['columns'].add(column)
# Now get the constraint types
type_query = """
SELECT c.constraint_name, c.constraint_type
FROM information_schema.table_constraints AS c
WHERE
c.table_schema = %s AND
c.table_name = %s
"""
cursor.execute(type_query, [self.connection.settings_dict['NAME'], table_name])
for constraint, kind in cursor.fetchall():
if kind.lower() == "primary key":
constraints[constraint]['primary_key'] = True
constraints[constraint]['unique'] = True
elif kind.lower() == "unique":
constraints[constraint]['unique'] = True
# Now add in the indexes
cursor.execute("SHOW INDEX FROM %s" % self.connection.ops.quote_name(table_name))
for table, non_unique, index, colseq, column in [x[:5] for x in cursor.fetchall()]:
if index not in constraints:
constraints[index] = {
'columns': OrderedSet(),
'primary_key': False,
'unique': False,
'index': True,
'check': False,
'foreign_key': None,
}
constraints[index]['index'] = True
constraints[index]['columns'].add(column)
# Convert the sorted sets to lists
for constraint in constraints.values():
constraint['columns'] = list(constraint['columns'])
return constraints
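For a hypothetical polls_question table, the dictionaries built above come out shaped per the base-class contract, e.g.:

    {
        "PRIMARY": {
            "columns": ["id"],
            "primary_key": True,
            "unique": True,
            "index": True,   # SHOW INDEX lists PRIMARY too
            "check": False,
            "foreign_key": None,
        },
        "polls_question_poll_id_fk": {   # hypothetical constraint name
            "columns": ["poll_id"],
            "primary_key": False,
            "unique": False,
            "index": True,   # InnoDB backs foreign keys with an index
            "check": False,
            "foreign_key": ("polls_poll", "id"),
        },
    }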

View File

@@ -0,0 +1,26 @@
from django.db.backends.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_rename_table = "RENAME TABLE %(old_table)s TO %(new_table)s"
sql_alter_column_null = "MODIFY %(column)s %(type)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s %(type)s NOT NULL"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_rename_column = "ALTER TABLE %(table)s CHANGE %(old_column)s %(new_column)s %(type)s"
sql_delete_unique = "ALTER TABLE %(table)s DROP INDEX %(name)s"
sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s)"
sql_delete_fk = "ALTER TABLE %(table)s DROP FOREIGN KEY %(name)s"
sql_delete_index = "DROP INDEX %(name)s ON %(table)s"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"
alter_string_set_null = 'MODIFY %(column)s %(type)s NULL;'
alter_string_drop_null = 'MODIFY %(column)s %(type)s NOT NULL;'
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP PRIMARY KEY"

View File

@@ -55,6 +55,7 @@ from django.db.backends import *
from django.db.backends.oracle.client import DatabaseClient
from django.db.backends.oracle.creation import DatabaseCreation
from django.db.backends.oracle.introspection import DatabaseIntrospection
from django.db.backends.oracle.schema import DatabaseSchemaEditor
from django.utils.encoding import force_bytes, force_text
@@ -90,6 +91,11 @@ class DatabaseFeatures(BaseDatabaseFeatures):
has_bulk_insert = True
supports_tablespaces = True
supports_sequence_reset = False
supports_combined_alters = False
max_index_name_length = 30
nulls_order_largest = True
requires_literal_defaults = True
connection_persists_old_columns = True
nulls_order_largest = True
@@ -621,6 +627,10 @@ class DatabaseWrapper(BaseDatabaseWrapper):
and x.code == 2091 and 'ORA-02291' in x.message:
six.reraise(utils.IntegrityError, utils.IntegrityError(*tuple(e.args)), sys.exc_info()[2])
raise
def schema_editor(self):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self)
# Oracle doesn't support savepoint commits. Ignore them.
def _savepoint_commit(self, sid):

View File

@@ -22,7 +22,7 @@ class DatabaseCreation(BaseDatabaseCreation):
data_types = {
'AutoField': 'NUMBER(11)',
'BinaryField': 'BLOB',
'BooleanField': 'NUMBER(1) CHECK (%(qn_column)s IN (0,1))',
'BooleanField': 'NUMBER(1)',
'CharField': 'NVARCHAR2(%(max_length)s)',
'CommaSeparatedIntegerField': 'VARCHAR2(%(max_length)s)',
'DateField': 'DATE',
@@ -35,10 +35,10 @@ class DatabaseCreation(BaseDatabaseCreation):
'BigIntegerField': 'NUMBER(19)',
'IPAddressField': 'VARCHAR2(15)',
'GenericIPAddressField': 'VARCHAR2(39)',
'NullBooleanField': 'NUMBER(1) CHECK ((%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL))',
'NullBooleanField': 'NUMBER(1)',
'OneToOneField': 'NUMBER(11)',
'PositiveIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'PositiveSmallIntegerField': 'NUMBER(11) CHECK (%(qn_column)s >= 0)',
'PositiveIntegerField': 'NUMBER(11)',
'PositiveSmallIntegerField': 'NUMBER(11)',
'SlugField': 'NVARCHAR2(%(max_length)s)',
'SmallIntegerField': 'NUMBER(11)',
'TextField': 'NCLOB',
@@ -46,6 +46,13 @@ class DatabaseCreation(BaseDatabaseCreation):
'URLField': 'VARCHAR2(%(max_length)s)',
}
data_type_check_constraints = {
'BooleanField': '%(qn_column)s IN (0,1)',
'NullBooleanField': '(%(qn_column)s IN (0,1)) OR (%(qn_column)s IS NULL)',
'PositiveIntegerField': '%(qn_column)s >= 0',
'PositiveSmallIntegerField': '%(qn_column)s >= 0',
}
def __init__(self, connection):
super(DatabaseCreation, self).__init__(connection)
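With the CHECK clauses split out of data_types, the new schema editor recombines the two mappings when it builds column SQL (create_model in the new schema.py appends " CHECK (%s)" to the column definition). A self-contained sketch of that recombination, with a hypothetical quoted column name:

    data_types = {'PositiveIntegerField': 'NUMBER(11)'}                 # as above
    check_constraints = {'PositiveIntegerField': '%(qn_column)s >= 0'}  # as above

    definition = '%s CHECK (%s)' % (
        data_types['PositiveIntegerField'],
        check_constraints['PositiveIntegerField'] % {'qn_column': '"RATING"'},
    )
    print(definition)  # NUMBER(11) CHECK ("RATING" >= 0)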

View File

@@ -134,3 +134,143 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
indexes[row[0]] = {'primary_key': bool(row[1]),
'unique': bool(row[2])}
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the constraints, getting PKs and uniques
cursor.execute("""
SELECT
user_constraints.constraint_name,
LOWER(cols.column_name) AS column_name,
CASE user_constraints.constraint_type
WHEN 'P' THEN 1
ELSE 0
END AS is_primary_key,
CASE user_indexes.uniqueness
WHEN 'UNIQUE' THEN 1
ELSE 0
END AS is_unique,
CASE user_constraints.constraint_type
WHEN 'C' THEN 1
ELSE 0
END AS is_check_constraint
FROM
user_constraints
INNER JOIN
user_indexes ON user_indexes.index_name = user_constraints.index_name
LEFT OUTER JOIN
user_cons_columns cols ON user_constraints.constraint_name = cols.constraint_name
WHERE
(
user_constraints.constraint_type = 'P' OR
user_constraints.constraint_type = 'U'
)
AND user_constraints.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, pk, unique, check in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": pk,
"unique": unique,
"foreign_key": None,
"check": check,
"index": True, # All P and U come with index, see inner join above
}
# Record the details
constraints[constraint]['columns'].append(column)
# Check constraints
cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name
FROM
user_constraints cons
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'C' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Foreign key constraints
cursor.execute("""
SELECT
cons.constraint_name,
LOWER(cols.column_name) AS column_name,
LOWER(rcons.table_name),
LOWER(rcols.column_name)
FROM
user_constraints cons
INNER JOIN
user_constraints rcons ON cons.r_constraint_name = rcons.constraint_name
INNER JOIN
user_cons_columns rcols ON rcols.constraint_name = rcons.constraint_name
LEFT OUTER JOIN
user_cons_columns cols ON cons.constraint_name = cols.constraint_name
WHERE
cons.constraint_type = 'R' AND
cons.table_name = UPPER(%s)
ORDER BY cols.position
""", [table_name])
for constraint, column, other_table, other_column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": (other_table, other_column),
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
index_name,
LOWER(column_name)
FROM
user_ind_columns cols
WHERE
table_name = UPPER(%s) AND
NOT EXISTS (
SELECT 1
FROM user_constraints cons
WHERE cols.index_name = cons.index_name
)
ORDER BY cols.column_position
""", [table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": False,
"index": True,
}
# Record the details
constraints[constraint]['columns'].append(column)
return constraints

View File

@@ -0,0 +1,103 @@
import copy
import datetime
from django.utils import six
from django.db.backends.schema import BaseDatabaseSchemaEditor
from django.db.utils import DatabaseError
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_create_column = "ALTER TABLE %(table)s ADD %(column)s %(definition)s"
sql_alter_column_type = "MODIFY %(column)s %(type)s"
sql_alter_column_null = "MODIFY %(column)s NULL"
sql_alter_column_not_null = "MODIFY %(column)s NOT NULL"
sql_alter_column_default = "MODIFY %(column)s DEFAULT %(default)s"
sql_alter_column_no_default = "MODIFY %(column)s DEFAULT NULL"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE CONSTRAINTS"
def delete_model(self, model):
# Run superclass action
super(DatabaseSchemaEditor, self).delete_model(model)
# Clean up any autoincrement trigger
self.execute("""
DECLARE
i INTEGER;
BEGIN
SELECT COUNT(*) INTO i FROM USER_CATALOG
WHERE TABLE_NAME = '%(sq_name)s' AND TABLE_TYPE = 'SEQUENCE';
IF i = 1 THEN
EXECUTE IMMEDIATE 'DROP SEQUENCE "%(sq_name)s"';
END IF;
END;
/""" % {'sq_name': self.connection.ops._get_sequence_name(model._meta.db_table)})
def alter_field(self, model, old_field, new_field, strict=False):
try:
# Run superclass action
super(DatabaseSchemaEditor, self).alter_field(model, old_field, new_field, strict)
except DatabaseError as e:
description = str(e)
# If we're changing to/from LOB fields, we need to do a
# SQLite-ish workaround
if 'ORA-22858' in description or 'ORA-22859' in description:
self._alter_field_lob_workaround(model, old_field, new_field)
else:
raise
def _alter_field_lob_workaround(self, model, old_field, new_field):
"""
Oracle refuses to change a column type from/to LOB to/from a regular
column. In Django, this shows up when the field is changed from/to
a TextField.
What we need to do instead is:
- Add the desired field with a temporary name
- Update the table to transfer values from old to new
- Drop old column
- Rename the new column
"""
# Make a new field that's like the new one but with a temporary
# column name.
new_temp_field = copy.deepcopy(new_field)
new_temp_field.column = self._generate_temp_name(new_field.column)
# Add it
self.add_field(model, new_temp_field)
# Transfer values across
self.execute("UPDATE %s set %s=%s" % (
self.quote_name(model._meta.db_table),
self.quote_name(new_temp_field.column),
self.quote_name(old_field.column),
))
# Drop the old field
self.remove_field(model, old_field)
# Rename the new field
self.alter_field(model, new_temp_field, new_field)
# Close the connection to force cx_Oracle to get column types right
# on a new cursor
self.connection.close()
def normalize_name(self, name):
"""
Get the properly shortened and uppercased identifier as returned by quote_name(), but without the actual quotes.
"""
nn = self.quote_name(name)
if nn[0] == '"' and nn[-1] == '"':
nn = nn[1:-1]
return nn
def _generate_temp_name(self, for_name):
"""
Generates temporary names for workarounds that need temp columns
"""
suffix = hex(hash(for_name)).upper()[1:]
return self.normalize_name(for_name + "_" + suffix)
def prepare_default(self, value):
if isinstance(value, (datetime.date, datetime.time, datetime.datetime)):
return "'%s'" % value
elif isinstance(value, six.string_types):
return repr(value)
elif isinstance(value, bool):
return "1" if value else "0"
else:
return str(value)
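Because Oracle sets requires_literal_defaults, defaults are inlined through prepare_default instead of being passed as bind parameters. A quick behavioural sketch; since __init__ on the base editor only stores the connection, None suffices for this method:

    import datetime
    from django.db.backends.oracle.schema import DatabaseSchemaEditor

    editor = DatabaseSchemaEditor(connection=None)
    assert editor.prepare_default(True) == "1"
    assert editor.prepare_default(datetime.date(2013, 8, 23)) == "'2013-08-23'"
    assert editor.prepare_default("NEW") == repr("NEW")  # strings are quoted via repr()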

View File

@@ -14,6 +14,7 @@ from django.db.backends.postgresql_psycopg2.client import DatabaseClient
from django.db.backends.postgresql_psycopg2.creation import DatabaseCreation
from django.db.backends.postgresql_psycopg2.version import get_version
from django.db.backends.postgresql_psycopg2.introspection import DatabaseIntrospection
from django.db.backends.postgresql_psycopg2.schema import DatabaseSchemaEditor
from django.utils.encoding import force_str
from django.utils.functional import cached_property
from django.utils.safestring import SafeText, SafeBytes
@@ -55,6 +56,8 @@ class DatabaseFeatures(BaseDatabaseFeatures):
supports_tablespaces = True
supports_transactions = True
can_distinct_on_fields = True
can_rollback_ddl = True
supports_combined_alters = True
nulls_order_largest = True
@@ -202,6 +205,10 @@ class DatabaseWrapper(BaseDatabaseWrapper):
else:
return True
def schema_editor(self):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self)
@cached_property
def psycopg2_version(self):
version = psycopg2.__version__.split(' ', 1)[0]

View File

@@ -25,14 +25,19 @@ class DatabaseCreation(BaseDatabaseCreation):
'GenericIPAddressField': 'inet',
'NullBooleanField': 'boolean',
'OneToOneField': 'integer',
'PositiveIntegerField': 'integer CHECK ("%(column)s" >= 0)',
'PositiveSmallIntegerField': 'smallint CHECK ("%(column)s" >= 0)',
'PositiveIntegerField': 'integer',
'PositiveSmallIntegerField': 'smallint',
'SlugField': 'varchar(%(max_length)s)',
'SmallIntegerField': 'smallint',
'TextField': 'text',
'TimeField': 'time',
}
data_type_check_constraints = {
'PositiveIntegerField': '"%(column)s" >= 0',
'PositiveSmallIntegerField': '"%(column)s" >= 0',
}
def sql_table_creation_suffix(self):
assert self.connection.settings_dict['TEST_COLLATION'] is None, "PostgreSQL does not support collation setting at database creation time."
if self.connection.settings_dict['TEST_CHARSET']:

View File

@@ -107,5 +107,100 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
# Here, we skip any indexes across multiple fields.
if ' ' in row[1]:
continue
indexes[row[0]] = {'primary_key': row[3], 'unique': row[2]}
if row[0] not in indexes:
indexes[row[0]] = {'primary_key': False, 'unique': False}
# It's possible to have the unique and PK constraints in separate indexes.
if row[3]:
indexes[row[0]]['primary_key'] = True
if row[2]:
indexes[row[0]]['unique'] = True
return indexes
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Loop over the key table, collecting things as constraints
# This will get PKs, FKs, and uniques, but not CHECK
cursor.execute("""
SELECT
kc.constraint_name,
kc.column_name,
c.constraint_type,
array(SELECT table_name::text || '.' || column_name::text FROM information_schema.constraint_column_usage WHERE constraint_name = kc.constraint_name)
FROM information_schema.key_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column, kind, used_cols in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": kind.lower() == "primary key",
"unique": kind.lower() in ["primary key", "unique"],
"foreign_key": tuple(used_cols[0].split(".", 1)) if kind.lower() == "foreign key" else None,
"check": False,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get CHECK constraint columns
cursor.execute("""
SELECT kc.constraint_name, kc.column_name
FROM information_schema.constraint_column_usage AS kc
JOIN information_schema.table_constraints AS c ON
kc.table_schema = c.table_schema AND
kc.table_name = c.table_name AND
kc.constraint_name = c.constraint_name
WHERE
c.constraint_type = 'CHECK' AND
kc.table_schema = %s AND
kc.table_name = %s
""", ["public", table_name])
for constraint, column in cursor.fetchall():
# If we're the first column, make the record
if constraint not in constraints:
constraints[constraint] = {
"columns": [],
"primary_key": False,
"unique": False,
"foreign_key": None,
"check": True,
"index": False,
}
# Record the details
constraints[constraint]['columns'].append(column)
# Now get indexes
cursor.execute("""
SELECT
c2.relname,
ARRAY(
SELECT (SELECT attname FROM pg_catalog.pg_attribute WHERE attnum = i AND attrelid = c.oid)
FROM unnest(idx.indkey) i
),
idx.indisunique,
idx.indisprimary
FROM pg_catalog.pg_class c, pg_catalog.pg_class c2,
pg_catalog.pg_index idx
WHERE c.oid = idx.indrelid
AND idx.indexrelid = c2.oid
AND c.relname = %s
""", [table_name])
for index, columns, unique, primary in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": list(columns),
"primary_key": primary,
"unique": unique,
"foreign_key": None,
"check": False,
"index": True,
}
return constraints

View File

@@ -0,0 +1,5 @@
from django.db.backends.schema import BaseDatabaseSchemaEditor
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
pass
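Every backend now exposes schema_editor(); together with the context-manager protocol on BaseDatabaseSchemaEditor in the next file, typical usage looks like this sketch (myapp.models.Author is hypothetical):

    from django.db import connection
    from myapp.models import Author  # hypothetical app and model

    with connection.schema_editor() as editor:
        editor.create_model(Author)  # CREATE TABLE now; index/FK SQL is deferred
    # deferred SQL runs when the block exits without an exception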

View File

@@ -0,0 +1,729 @@
import hashlib
import operator
import sys
from django.db.backends.creation import BaseDatabaseCreation
from django.db.backends.util import truncate_name
from django.db.models.fields.related import ManyToManyField
from django.db.transaction import atomic
from django.utils.log import getLogger
from django.utils.six.moves import reduce
logger = getLogger('django.db.backends.schema')
class BaseDatabaseSchemaEditor(object):
"""
This class (and its subclasses) is responsible for emitting schema-changing
statements to the databases - model creation/removal/alteration, field
renaming, index fiddling, and so on.
It is intended to eventually completely replace DatabaseCreation.
This class should be used by creating an instance for each set of schema
changes (e.g. a syncdb run, a migration file) and using it as a context
manager around the relevant actions. This is necessary to allow things
like circular foreign key references - deferred SQL such as FK creation
only runs when the context exits.
"""
# Overrideable SQL templates
sql_create_table = "CREATE TABLE %(table)s (%(definition)s)"
sql_create_table_unique = "UNIQUE (%(columns)s)"
sql_rename_table = "ALTER TABLE %(old_table)s RENAME TO %(new_table)s"
sql_retablespace_table = "ALTER TABLE %(table)s SET TABLESPACE %(new_tablespace)s"
sql_delete_table = "DROP TABLE %(table)s CASCADE"
sql_create_column = "ALTER TABLE %(table)s ADD COLUMN %(column)s %(definition)s"
sql_alter_column = "ALTER TABLE %(table)s %(changes)s"
sql_alter_column_type = "ALTER COLUMN %(column)s TYPE %(type)s"
sql_alter_column_null = "ALTER COLUMN %(column)s DROP NOT NULL"
sql_alter_column_not_null = "ALTER COLUMN %(column)s SET NOT NULL"
sql_alter_column_default = "ALTER COLUMN %(column)s SET DEFAULT %(default)s"
sql_alter_column_no_default = "ALTER COLUMN %(column)s DROP DEFAULT"
sql_delete_column = "ALTER TABLE %(table)s DROP COLUMN %(column)s CASCADE"
sql_rename_column = "ALTER TABLE %(table)s RENAME COLUMN %(old_column)s TO %(new_column)s"
sql_create_check = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s CHECK (%(check)s)"
sql_delete_check = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_unique = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s UNIQUE (%(columns)s)"
sql_delete_unique = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_fk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s FOREIGN KEY (%(column)s) REFERENCES %(to_table)s (%(to_column)s) DEFERRABLE INITIALLY DEFERRED"
sql_delete_fk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
sql_create_index = "CREATE INDEX %(name)s ON %(table)s (%(columns)s)%(extra)s;"
sql_delete_index = "DROP INDEX %(name)s"
sql_create_pk = "ALTER TABLE %(table)s ADD CONSTRAINT %(name)s PRIMARY KEY (%(columns)s)"
sql_delete_pk = "ALTER TABLE %(table)s DROP CONSTRAINT %(name)s"
def __init__(self, connection):
self.connection = connection
# State-managing methods
def __enter__(self):
self.deferred_sql = []
atomic(self.connection.alias, self.connection.features.can_rollback_ddl).__enter__()
return self
def __exit__(self, exc_type, exc_value, traceback):
if exc_type is None:
for sql in self.deferred_sql:
self.execute(sql)
atomic(self.connection.alias, self.connection.features.can_rollback_ddl).__exit__(None, None, None)
else:
# Continue propagating exception
return None
# Core utility functions
def execute(self, sql, params=[]):
"""
Executes the given SQL statement, with optional parameters.
"""
# Get the cursor
cursor = self.connection.cursor()
# Log the command we're running, then run it
logger.debug("%s; (params %r)" % (sql, params))
cursor.execute(sql, params)
def quote_name(self, name):
return self.connection.ops.quote_name(name)
# Field <-> database mapping functions
def column_sql(self, model, field, include_default=False):
"""
Takes a field and returns its column definition.
The field must already have had set_attributes_from_name called.
"""
# Get the column's type and use that as the basis of the SQL
db_params = field.db_parameters(connection=self.connection)
sql = db_params['type']
params = []
# Check for fields that aren't actually columns (e.g. M2M)
if sql is None:
return None
# Optionally add the tablespace if it's an implicitly indexed column
tablespace = field.db_tablespace or model._meta.db_tablespace
if tablespace and self.connection.features.supports_tablespaces and field.unique:
sql += " %s" % self.connection.ops.tablespace_sql(tablespace, inline=True)
# Work out nullability
null = field.null
# If we were told to include a default value, do so
default_value = self.effective_default(field)
if include_default and default_value is not None:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
sql += " DEFAULT %s" % self.prepare_default(default_value)
else:
sql += " DEFAULT %s"
params += [default_value]
# Oracle treats the empty string ('') as null, so coerce the null
# option whenever '' is a possible value.
if (field.empty_strings_allowed and not field.primary_key and
self.connection.features.interprets_empty_strings_as_nulls):
null = True
if null:
sql += " NULL"
else:
sql += " NOT NULL"
# Primary key/unique outputs
if field.primary_key:
sql += " PRIMARY KEY"
elif field.unique:
sql += " UNIQUE"
# Return the sql
return sql, params
def prepare_default(self, value):
"""
Only used for backends which have requires_literal_defaults feature
"""
raise NotImplementedError()
def effective_default(self, field):
"""
Returns a field's effective database default value
"""
if field.has_default():
default = field.get_default()
elif not field.null and field.blank and field.empty_strings_allowed:
default = ""
else:
default = None
# If it's a callable, call it
if callable(default):
default = default()
return default
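# Behaviour sketch for effective_default (hypothetical fields):
#
#     models.CharField(max_length=10, blank=True)    # -> ""  (empty strings allowed)
#     models.IntegerField(default=0)                 # -> 0
#     models.IntegerField(null=True)                 # -> None
#     models.DateField(default=datetime.date.today)  # -> today's date (callable is called)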
# Actions
def create_model(self, model):
"""
Takes a model and creates a table for it in the database.
Will also create any accompanying indexes or unique constraints.
"""
# Create column SQL, add FK deferreds if needed
column_sqls = []
params = []
for field in model._meta.local_fields:
# SQL
definition, extra_params = self.column_sql(model, field)
if definition is None:
continue
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Add the SQL to our big list
column_sqls.append("%s %s" % (
self.quote_name(field.column),
definition,
))
params.extend(extra_params)
# Indexes
if field.db_index and not field.unique:
self.deferred_sql.append(
self.sql_create_index % {
"name": self._create_index_name(model, [field.column], suffix=""),
"table": self.quote_name(model._meta.db_table),
"columns": self.quote_name(field.column),
"extra": "",
}
)
# FK
if field.rel and self.connection.features.supports_foreign_keys:
to_table = field.rel.to._meta.db_table
to_column = field.rel.to._meta.get_field(field.rel.field_name).column
self.deferred_sql.append(
self.sql_create_fk % {
"name": self._create_index_name(model, [field.column], suffix="_fk_%s_%s" % (to_table, to_column)),
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
)
# Autoincrement SQL
if field.get_internal_type() == "AutoField":
autoinc_sql = self.connection.ops.autoinc_sql(model._meta.db_table, field.column)
if autoinc_sql:
self.deferred_sql.extend(autoinc_sql)
# Add any unique_togethers
for fields in model._meta.unique_together:
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
column_sqls.append(self.sql_create_table_unique % {
"columns": ", ".join(self.quote_name(column) for column in columns),
})
# Make the table
sql = self.sql_create_table % {
"table": model._meta.db_table,
"definition": ", ".join(column_sqls)
}
self.execute(sql, params)
# Add any index_togethers
for fields in model._meta.index_together:
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_idx"),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": "",
})
# Make M2M tables
for field in model._meta.local_many_to_many:
self.create_model(field.rel.through)
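# SQL sketch for create_model on a hypothetical Book model with a
# ForeignKey to Author, using the templates above (names and column
# types are illustrative):
#
#     CREATE TABLE "myapp_book" ("id" serial NOT NULL PRIMARY KEY,
#         "title" varchar(100) NOT NULL, "author_id" integer NOT NULL)
#     -- deferred until the editor exits:
#     CREATE INDEX "myapp_book_..." ON "myapp_book" ("author_id")
#     ALTER TABLE "myapp_book" ADD CONSTRAINT "..." FOREIGN KEY ("author_id")
#         REFERENCES "myapp_author" ("id") DEFERRABLE INITIALLY DEFERRED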
def delete_model(self, model):
"""
Deletes a model from the database.
"""
# Delete the table
self.execute(self.sql_delete_table % {
"table": self.quote_name(model._meta.db_table),
})
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_unique_together)
news = set(tuple(fields) for fields in new_unique_together)
# Deleted uniques
for fields in olds.difference(news):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
constraint_names = self._constraint_names(model, columns, unique=True)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(
self.sql_delete_unique % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_names[0],
},
)
# Created uniques
for fields in news.difference(olds):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_uniq"),
"columns": ", ".join(self.quote_name(column) for column in columns),
})
def alter_index_together(self, model, old_index_together, new_index_together):
"""
Deals with a model changing its index_together.
Note: The input index_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
olds = set(tuple(fields) for fields in old_index_together)
news = set(tuple(fields) for fields in new_index_together)
# Deleted indexes
for fields in olds.difference(news):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
constraint_names = self._constraint_names(model, list(columns), index=True)
if len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of constraints for %s(%s)" % (
len(constraint_names),
model._meta.db_table,
", ".join(columns),
))
self.execute(
self.sql_delete_index % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_names[0],
},
)
# Created indexes
for fields in news.difference(olds):
columns = [model._meta.get_field_by_name(field)[0].column for field in fields]
self.execute(self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, columns, suffix="_idx"),
"columns": ", ".join(self.quote_name(column) for column in columns),
"extra": "",
})
def alter_db_table(self, model, old_db_table, new_db_table):
"""
Renames the table a model points to.
"""
self.execute(self.sql_rename_table % {
"old_table": self.quote_name(old_db_table),
"new_table": self.quote_name(new_db_table),
})
def alter_db_tablespace(self, model, old_db_tablespace, new_db_tablespace):
"""
Moves a model's table between tablespaces
"""
self.execute(self.sql_retablespace_table % {
"table": self.quote_name(model._meta.db_table),
"old_tablespace": self.quote_name(old_db_tablespace),
"new_tablespace": self.quote_name(new_db_tablespace),
})
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.create_model(field.rel.through)
# Get the column's definition
definition, params = self.column_sql(model, field, include_default=True)
# It might not actually have a column behind it
if definition is None:
return
# Check constraints can go on the column SQL here
db_params = field.db_parameters(connection=self.connection)
if db_params['check']:
definition += " CHECK (%s)" % db_params['check']
# Build the SQL and run it
sql = self.sql_create_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"definition": definition,
}
self.execute(sql, params)
# Drop the default if we need to
# (Django usually does not use in-database defaults)
if field.default is not None:
sql = self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": self.sql_alter_column_no_default % {
"column": self.quote_name(field.column),
}
}
self.execute(sql)
# Add an index, if required
if field.db_index and not field.unique:
self.deferred_sql.append(
self.sql_create_index % {
"name": self._create_index_name(model, [field.column], suffix=""),
"table": self.quote_name(model._meta.db_table),
"columns": self.quote_name(field.column),
"extra": "",
}
)
# Add any FK constraints later
if field.rel and self.connection.features.supports_foreign_keys:
to_table = field.rel.to._meta.db_table
to_column = field.rel.to._meta.get_field(field.rel.field_name).column
self.deferred_sql.append(
self.sql_create_fk % {
"name": '%s_refs_%s_%x' % (
field.column,
to_column,
abs(hash((model._meta.db_table, to_table)))
),
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
"to_table": self.quote_name(to_table),
"to_column": self.quote_name(to_column),
}
)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.delete_model(field.rel.through)
# It might not actually have a column behind it
if field.db_parameters(connection=self.connection)['type'] is None:
return
# Get the column's definition
definition, params = self.column_sql(model, field)
# Delete the column
sql = self.sql_delete_column % {
"table": self.quote_name(model._meta.db_table),
"column": self.quote_name(field.column),
}
self.execute(sql)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
# Ensure this field is even column-based
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if old_type is None and new_type is None and (old_field.rel.through and new_field.rel.through and old_field.rel.through._meta.auto_created and new_field.rel.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None or new_type is None:
raise ValueError("Cannot alter field %s into %s - they are not compatible types (probably means only one is an M2M with implicit through model)" % (
old_field,
new_field,
))
# Has unique been removed?
if old_field.unique and (not new_field.unique or (not old_field.primary_key and new_field.primary_key)):
# Find the unique constraint for this field
constraint_names = self._constraint_names(model, [old_field.column], unique=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of unique constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_unique % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
},
)
# Removed an index?
if old_field.db_index and not new_field.db_index and not old_field.unique and not (not new_field.unique and old_field.unique):
# Find the index for this field
index_names = self._constraint_names(model, [old_field.column], index=True)
if strict and len(index_names) != 1:
raise ValueError("Found wrong number (%s) of indexes for %s.%s" % (
len(index_names),
model._meta.db_table,
old_field.column,
))
for index_name in index_names:
self.execute(
self.sql_delete_index % {
"table": self.quote_name(model._meta.db_table),
"name": index_name,
}
)
# Drop any FK constraints, we'll remake them later
if old_field.rel:
fk_names = self._constraint_names(model, [old_field.column], foreign_key=True)
if strict and len(fk_names) != 1:
raise ValueError("Found wrong number (%s) of foreign key constraints for %s.%s" % (
len(fk_names),
model._meta.db_table,
old_field.column,
))
for fk_name in fk_names:
self.execute(
self.sql_delete_fk % {
"table": self.quote_name(model._meta.db_table),
"name": fk_name,
}
)
# Change check constraints?
if old_db_params['check'] != new_db_params['check'] and old_db_params['check']:
constraint_names = self._constraint_names(model, [old_field.column], check=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of check constraints for %s.%s" % (
len(constraint_names),
model._meta.db_table,
old_field.column,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_check % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
}
)
# Have they renamed the column?
if old_field.column != new_field.column:
self.execute(self.sql_rename_column % {
"table": self.quote_name(model._meta.db_table),
"old_column": self.quote_name(old_field.column),
"new_column": self.quote_name(new_field.column),
"type": new_type,
})
# Next, start accumulating actions to do
actions = []
# Type change?
if old_type != new_type:
actions.append((
self.sql_alter_column_type % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
# Default change?
old_default = self.effective_default(old_field)
new_default = self.effective_default(new_field)
if old_default != new_default:
if new_default is None:
actions.append((
self.sql_alter_column_no_default % {
"column": self.quote_name(new_field.column),
},
[],
))
else:
if self.connection.features.requires_literal_defaults:
# Some databases can't take defaults as a parameter (oracle)
# If this is the case, the individual schema backend should
# implement prepare_default
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": self.prepare_default(new_default),
},
[],
))
else:
actions.append((
self.sql_alter_column_default % {
"column": self.quote_name(new_field.column),
"default": "%s",
},
[new_default],
))
# Nullability change?
if old_field.null != new_field.null:
if new_field.null:
actions.append((
self.sql_alter_column_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
else:
actions.append((
self.sql_alter_column_not_null % {
"column": self.quote_name(new_field.column),
"type": new_type,
},
[],
))
if actions:
# Combine actions together if we can (e.g. postgres)
if self.connection.features.supports_combined_alters:
sql, params = tuple(zip(*actions))
actions = [(", ".join(sql), reduce(operator.add, params))]
# Apply those actions
for sql, params in actions:
self.execute(
self.sql_alter_column % {
"table": self.quote_name(model._meta.db_table),
"changes": sql,
},
params,
)
# Added a unique?
if not old_field.unique and new_field.unique:
self.execute(
self.sql_create_unique % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_uniq"),
"columns": self.quote_name(new_field.column),
}
)
# Added an index?
if not old_field.db_index and new_field.db_index and not new_field.unique and not (not old_field.unique and new_field.unique):
self.execute(
self.sql_create_index % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_uniq"),
"columns": self.quote_name(new_field.column),
"extra": "",
}
)
# Changed to become primary key?
# Note that we don't detect unsetting of a PK, as we assume another field
# will always come along and replace it.
if not old_field.primary_key and new_field.primary_key:
# First, drop the old PK
constraint_names = self._constraint_names(model, primary_key=True)
if strict and len(constraint_names) != 1:
raise ValueError("Found wrong number (%s) of PK constraints for %s" % (
len(constraint_names),
model._meta.db_table,
))
for constraint_name in constraint_names:
self.execute(
self.sql_delete_pk % {
"table": self.quote_name(model._meta.db_table),
"name": constraint_name,
},
)
# Make the new one
self.execute(
self.sql_create_pk % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_pk"),
"columns": self.quote_name(new_field.column),
}
)
# Does it have a foreign key?
if new_field.rel:
self.execute(
self.sql_create_fk % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_fk"),
"column": self.quote_name(new_field.column),
"to_table": self.quote_name(new_field.rel.to._meta.db_table),
"to_column": self.quote_name(new_field.rel.get_related_field().column),
}
)
# Does it have check constraints we need to add?
if old_db_params['check'] != new_db_params['check'] and new_db_params['check']:
self.execute(
self.sql_create_check % {
"table": self.quote_name(model._meta.db_table),
"name": self._create_index_name(model, [new_field.column], suffix="_check"),
"column": self.quote_name(new_field.column),
"check": new_db_params['check'],
}
)
# Reset connection if required
if self.connection.features.connection_persists_old_columns:
self.connection.close()
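# Usage sketch: making a hypothetical Author.age column NOT NULL. Both
# field copies must have had set_attributes_from_name called:
#
#     old_field = Author._meta.get_field("age")   # IntegerField(null=True)
#     new_field = models.IntegerField(default=0)
#     new_field.set_attributes_from_name("age")
#     with connection.schema_editor() as editor:
#         editor.alter_field(Author, old_field, new_field)
#     # roughly: ALTER TABLE "myapp_author" ALTER COLUMN "age" SET DEFAULT 0,
#     #                                     ALTER COLUMN "age" SET NOT NULL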
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Rename the through table
self.alter_db_table(old_field.rel.through, old_field.rel.through._meta.db_table, new_field.rel.through._meta.db_table)
# Repoint the FK to the other side
self.alter_field(
new_field.rel.through,
# We need the field that points to the target model, so we can tell alter_field to change it -
# this is m2m_reverse_field_name() (as opposed to m2m_field_name, which points to our model)
old_field.rel.through._meta.get_field_by_name(old_field.m2m_reverse_field_name())[0],
new_field.rel.through._meta.get_field_by_name(new_field.m2m_reverse_field_name())[0],
)
def _create_index_name(self, model, column_names, suffix=""):
"""
Generates a unique name for an index/unique constraint.
"""
# If there is just one column in the index, use a default algorithm from Django
if len(column_names) == 1 and not suffix:
return truncate_name(
'%s_%s' % (model._meta.db_table, BaseDatabaseCreation._digest(column_names[0])),
self.connection.ops.max_name_length()
)
# Else generate the name for the index using a different algorithm
table_name = model._meta.db_table.replace('"', '').replace('.', '_')
index_unique_name = '_%x' % abs(hash((table_name, ','.join(column_names))))
# If the index name is too long, truncate it
index_name = ('%s_%s%s%s' % (table_name, column_names[0], index_unique_name, suffix)).replace('"', '').replace('.', '_')
if len(index_name) > self.connection.features.max_index_name_length:
part = ('_%s%s%s' % (column_names[0], index_unique_name, suffix))
index_name = '%s%s' % (table_name[:(self.connection.features.max_index_name_length - len(part))], part)
# It shouldn't start with an underscore (Oracle hates this)
if index_name[0] == "_":
index_name = index_name[1:]
# If it's STILL too long, just hash it down
if len(index_name) > self.connection.features.max_index_name_length:
index_name = hashlib.md5(index_name.encode()).hexdigest()[:self.connection.features.max_index_name_length]
# It can't start with a number on Oracle, so prepend D if we need to
if index_name[0].isdigit():
index_name = "D%s" % index_name[:-1]
return index_name
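# Naming sketch for a model whose db_table is "myapp_book":
#
#     _create_index_name(model, ["title"])           # -> "myapp_book_<digest>"
#     _create_index_name(model, ["a", "b"], "_idx")  # -> "myapp_book_a_<hash>_idx"
#
# Overlong results are truncated or hashed down to max_index_name_length,
# and leading underscores/digits are adjusted for Oracle.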
def _constraint_names(self, model, column_names=None, unique=None, primary_key=None, index=None, foreign_key=None, check=None):
"""
Returns all constraint names matching the columns and conditions
"""
column_names = list(column_names) if column_names else None
constraints = self.connection.introspection.get_constraints(self.connection.cursor(), model._meta.db_table)
result = []
for name, infodict in constraints.items():
if column_names is None or column_names == infodict['columns']:
if unique is not None and infodict['unique'] != unique:
continue
if primary_key is not None and infodict['primary_key'] != primary_key:
continue
if index is not None and infodict['index'] != index:
continue
if check is not None and infodict['check'] != check:
continue
if foreign_key is not None and not infodict['foreign_key']:
continue
result.append(name)
return result

View File

@ -18,6 +18,7 @@ from django.db.backends import (util, BaseDatabaseFeatures,
from django.db.backends.sqlite3.client import DatabaseClient
from django.db.backends.sqlite3.creation import DatabaseCreation
from django.db.backends.sqlite3.introspection import DatabaseIntrospection
from django.db.backends.sqlite3.schema import DatabaseSchemaEditor
from django.db.models import fields
from django.db.models.sql import aggregates
from django.utils.dateparse import parse_date, parse_datetime, parse_time
@ -100,6 +101,8 @@ class DatabaseFeatures(BaseDatabaseFeatures):
supports_mixed_date_datetime_comparisons = False
has_bulk_insert = True
can_combine_inserts_with_and_without_auto_increment_pk = False
supports_foreign_keys = False
supports_check_constraints = False
autocommits_when_autocommit_is_off = True
supports_paramstyle_pyformat = False
@ -432,6 +435,10 @@ class DatabaseWrapper(BaseDatabaseWrapper):
"""
self.cursor().execute("BEGIN")
def schema_editor(self):
"Returns a new instance of this backend's SchemaEditor"
return DatabaseSchemaEditor(self)
FORMAT_QMARK_REGEX = re.compile(r'(?<!%)%s')

View File

@ -158,7 +158,7 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
if len(info) != 1:
continue
name = info[0][2] # seqno, cid, name
indexes[name] = {'primary_key': False,
indexes[name] = {'primary_key': indexes.get(name, {}).get("primary_key", False),
'unique': unique}
return indexes
@ -168,7 +168,10 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
"""
# Don't use PRAGMA because that causes issues with some transactions
cursor.execute("SELECT sql FROM sqlite_master WHERE tbl_name = %s AND type = %s", [table_name, "table"])
results = cursor.fetchone()[0].strip()
row = cursor.fetchone()
if row is None:
raise ValueError("Table %s does not exist" % table_name)
results = row[0].strip()
results = results[results.index('(') + 1:results.rindex(')')]
for field_desc in results.split(','):
field_desc = field_desc.strip()
@ -186,3 +189,41 @@ class DatabaseIntrospection(BaseDatabaseIntrospection):
'null_ok': not field[3],
'pk': field[5] # undocumented
} for field in cursor.fetchall()]
def get_constraints(self, cursor, table_name):
"""
Retrieves any constraints or keys (unique, pk, fk, check, index) across one or more columns.
"""
constraints = {}
# Get the index info
cursor.execute("PRAGMA index_list(%s)" % self.connection.ops.quote_name(table_name))
for number, index, unique in cursor.fetchall():
# Get the index info for that index
cursor.execute('PRAGMA index_info(%s)' % self.connection.ops.quote_name(index))
for index_rank, column_rank, column in cursor.fetchall():
if index not in constraints:
constraints[index] = {
"columns": [],
"primary_key": False,
"unique": bool(unique),
"foreign_key": False,
"check": False,
"index": True,
}
constraints[index]['columns'].append(column)
# Get the PK
pk_column = self.get_primary_key_column(cursor, table_name)
if pk_column:
# SQLite doesn't actually give a name to the PK constraint,
# so we invent one. This is fine, as the SQLite backend never
# deletes PK constraints by name, as you can't delete constraints
# in SQLite; we remake the table with a new PK instead.
constraints["__primary__"] = {
"columns": [pk_column],
"primary_key": True,
"unique": False, # It's not actually a unique constraint.
"foreign_key": False,
"check": False,
"index": False,
}
return constraints
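# Shape sketch of the returned dict for a table with an indexed "title"
# column and an integer primary key (constraint names are illustrative):
#
#     {
#         "myapp_book_title_idx": {
#             "columns": ["title"], "primary_key": False, "unique": False,
#             "foreign_key": False, "check": False, "index": True,
#         },
#         "__primary__": {
#             "columns": ["id"], "primary_key": True, "unique": False,
#             "foreign_key": False, "check": False, "index": False,
#         },
#     }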

View File

@ -0,0 +1,155 @@
from django.db.backends.schema import BaseDatabaseSchemaEditor
from django.db.models.fields.related import ManyToManyField
from django.db.models.loading import BaseAppCache
class DatabaseSchemaEditor(BaseDatabaseSchemaEditor):
sql_delete_table = "DROP TABLE %(table)s"
def _remake_table(self, model, create_fields=[], delete_fields=[], alter_fields=[], rename_fields=[], override_uniques=None):
"""
Shortcut to transform a model from old_model into new_model
"""
# Work out the new fields dict / mapping
body = dict((f.name, f) for f in model._meta.local_fields)
mapping = dict((f.column, f.column) for f in model._meta.local_fields)
# If any of the new or altered fields is introducing a new PK,
# remove the old one
restore_pk_field = None
if any(f.primary_key for f in create_fields) or any(n.primary_key for o, n in alter_fields):
for name, field in list(body.items()):
if field.primary_key:
field.primary_key = False
restore_pk_field = field
if field.auto_created:
del body[name]
del mapping[field.column]
# Add in any created fields
for field in create_fields:
body[field.name] = field
# Add in any altered fields
for (old_field, new_field) in alter_fields:
del body[old_field.name]
del mapping[old_field.column]
body[new_field.name] = new_field
mapping[new_field.column] = old_field.column
# Remove any deleted fields
for field in delete_fields:
del body[field.name]
del mapping[field.column]
# Work inside a new AppCache
app_cache = BaseAppCache()
# Construct a new model for the new state
meta_contents = {
'app_label': model._meta.app_label,
'db_table': model._meta.db_table + "__new",
'unique_together': model._meta.unique_together if override_uniques is None else override_uniques,
'app_cache': app_cache,
}
meta = type("Meta", tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = model.__module__
temp_model = type(model._meta.object_name, model.__bases__, body)
# Create a new table with that format
self.create_model(temp_model)
# Copy data from the old table
field_maps = list(mapping.items())
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s;" % (
self.quote_name(temp_model._meta.db_table),
', '.join([x for x, y in field_maps]),
', '.join([y for x, y in field_maps]),
self.quote_name(model._meta.db_table),
))
# Delete the old table
self.delete_model(model)
# Rename the new to the old
self.alter_db_table(model, temp_model._meta.db_table, model._meta.db_table)
# Run deferred SQL on correct table
for sql in self.deferred_sql:
self.execute(sql.replace(temp_model._meta.db_table, model._meta.db_table))
self.deferred_sql = []
# Fix any PK-removed field
if restore_pk_field:
restore_pk_field.primary_key = True
def add_field(self, model, field):
"""
Creates a field on a model.
Usually involves adding a column, but may involve adding a
table instead (for M2M fields)
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.create_model(field.rel.through)
# Detect bad field combinations
if (not field.null and
(not field.has_default() or field.get_default() is None) and
not field.empty_strings_allowed):
raise ValueError("You cannot add a null=False column without a default value on SQLite.")
self._remake_table(model, create_fields=[field])
def remove_field(self, model, field):
"""
Removes a field from a model. Usually involves deleting a column,
but for M2Ms may involve deleting a table.
"""
# Special-case implicit M2M tables
if isinstance(field, ManyToManyField) and field.rel.through._meta.auto_created:
return self.delete_model(field.rel.through)
# For everything else, remake.
self._remake_table(model, delete_fields=[field])
def alter_field(self, model, old_field, new_field, strict=False):
"""
Allows a field's type, uniqueness, nullability, default, column,
constraints etc. to be modified.
Requires a copy of the old field as well so we can only perform
changes that are required.
If strict is true, raises errors if the old column does not match old_field precisely.
"""
old_db_params = old_field.db_parameters(connection=self.connection)
old_type = old_db_params['type']
new_db_params = new_field.db_parameters(connection=self.connection)
new_type = new_db_params['type']
if old_type is None and new_type is None and (old_field.rel.through and new_field.rel.through and old_field.rel.through._meta.auto_created and new_field.rel.through._meta.auto_created):
return self._alter_many_to_many(model, old_field, new_field, strict)
elif old_type is None or new_type is None:
raise ValueError("Cannot alter field %s into %s - they are not compatible types (probably means only one is an M2M with implicit through model)" % (
old_field,
new_field,
))
# Alter by remaking table
self._remake_table(model, alter_fields=[(old_field, new_field)])
def alter_unique_together(self, model, old_unique_together, new_unique_together):
"""
Deals with a model changing its unique_together.
Note: The input unique_togethers must be doubly-nested, not the single-
nested ["foo", "bar"] format.
"""
self._remake_table(model, override_uniques=new_unique_together)
def _alter_many_to_many(self, model, old_field, new_field, strict):
"""
Alters M2Ms to repoint their to= endpoints.
"""
# Make a new through table
self.create_model(new_field.rel.through)
# Copy the data across
self.execute("INSERT INTO %s (%s) SELECT %s FROM %s;" % (
self.quote_name(new_field.rel.through._meta.db_table),
', '.join([
"id",
new_field.m2m_column_name(),
new_field.m2m_reverse_name(),
]),
', '.join([
"id",
old_field.m2m_column_name(),
old_field.m2m_reverse_name(),
]),
self.quote_name(old_field.rel.through._meta.db_table),
))
# Delete the old through table
self.delete_model(old_field.rel.through)
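# Flow sketch: on SQLite every column-level change is a table rebuild, so
# for a hypothetical add_field(Book, new_field) the editor roughly runs:
#
#     CREATE TABLE "myapp_book__new" (... new schema ...)
#     INSERT INTO "myapp_book__new" (cols) SELECT cols FROM "myapp_book"
#     DROP TABLE "myapp_book"
#     ALTER TABLE "myapp_book__new" RENAME TO "myapp_book"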

View File

@ -0,0 +1,2 @@
from .migration import Migration
from .operations import *

View File

@ -0,0 +1,440 @@
import re
import sys
from django.utils import datetime_safe
from django.utils.six.moves import input
from django.db.migrations import operations
from django.db.migrations.migration import Migration
from django.db.models.loading import cache
class MigrationAutodetector(object):
"""
Takes a pair of ProjectStates, and compares them to see what the
first would need doing to make it match the second (the second
usually being the project's current state).
Note that this naturally operates on entire projects at a time,
as it's likely that changes interact (for example, you can't
add a ForeignKey without having a migration to add the table it
depends on first). A user interface may offer single-app usage
if it wishes, with the caveat that it may not always be possible.
"""
def __init__(self, from_state, to_state, questioner=None):
self.from_state = from_state
self.to_state = to_state
self.questioner = questioner or MigrationQuestioner()
def changes(self, graph, trim_to_apps=None):
"""
Main entry point to produce a list of applicable changes.
Takes a graph to base names on and an optional set of apps
to try and restrict to (restriction is not guaranteed)
"""
changes = self._detect_changes()
changes = self._arrange_for_graph(changes, graph)
if trim_to_apps:
changes = self._trim_to_apps(changes, trim_to_apps)
return changes
def _detect_changes(self):
"""
Returns a dict of migration plans which will achieve the
change from from_state to to_state. The dict has app labels
as keys and a list of migrations as values.
The resulting migrations aren't specially named, but the names
do matter for dependencies inside the set.
"""
# We'll store migrations as lists by app names for now
self.migrations = {}
old_app_cache = self.from_state.render()
new_app_cache = self.to_state.render()
# Adding models. Phase 1 is adding models with no outward relationships.
added_models = set(self.to_state.models.keys()) - set(self.from_state.models.keys())
pending_add = {}
for app_label, model_name in added_models:
model_state = self.to_state.models[app_label, model_name]
# Are there any relationships out from this model? If so, punt it to the next phase.
related_fields = []
for field in new_app_cache.get_model(app_label, model_name)._meta.fields:
if field.rel:
if field.rel.to:
related_fields.append((field.name, field.rel.to._meta.app_label.lower(), field.rel.to._meta.object_name.lower()))
if hasattr(field.rel, "through") and not field.rel.through._meta.auto_created:
related_fields.append((field.name, field.rel.through._meta.app_label.lower(), field.rel.through._meta.object_name.lower()))
if related_fields:
pending_add[app_label, model_name] = related_fields
else:
self.add_to_migration(
app_label,
operations.CreateModel(
name = model_state.name,
fields = model_state.fields,
options = model_state.options,
bases = model_state.bases,
)
)
# Phase 2 is progressively adding pending models, splitting up into two
# migrations if required.
pending_new_fks = []
while pending_add:
# Is there one we can add that has all dependencies satisfied?
satisfied = [(m, rf) for m, rf in pending_add.items() if all((al, mn) not in pending_add for f, al, mn in rf)]
if satisfied:
(app_label, model_name), related_fields = sorted(satisfied)[0]
model_state = self.to_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.CreateModel(
name = model_state.name,
fields = model_state.fields,
options = model_state.options,
bases = model_state.bases,
)
)
for field_name, other_app_label, other_model_name in related_fields:
self.add_dependency(app_label, other_app_label)
del pending_add[app_label, model_name]
# Ah well, we'll need to split one. Pick deterministically.
else:
(app_label, model_name), related_fields = sorted(pending_add.items())[0]
model_state = self.to_state.models[app_label, model_name]
# Work out the fields that need splitting out
bad_fields = dict((f, (al, mn)) for f, al, mn in related_fields if (al, mn) in pending_add)
# Create the model, without those
self.add_to_migration(
app_label,
operations.CreateModel(
name = model_state.name,
fields = [(n, f) for n, f in model_state.fields if n not in bad_fields],
options = model_state.options,
bases = model_state.bases,
)
)
# Add the bad fields to be made in a phase 3
for field_name, (other_app_label, other_model_name) in bad_fields.items():
pending_new_fks.append((app_label, model_name, field_name, other_app_label))
del pending_add[app_label, model_name]
# Phase 3 is adding the final set of FKs as separate new migrations
for app_label, model_name, field_name, other_app_label in pending_new_fks:
model_state = self.to_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.AddField(
model_name = model_name,
name = field_name,
field = model_state.get_field_by_name(field_name),
),
new = True,
)
self.add_dependency(app_label, other_app_label)
# Removing models
removed_models = set(self.from_state.models.keys()) - set(self.to_state.models.keys())
for app_label, model_name in removed_models:
model_state = self.from_state.models[app_label, model_name]
self.add_to_migration(
app_label,
operations.DeleteModel(
model_state.name,
)
)
# Changes within models
kept_models = set(self.from_state.models.keys()).intersection(self.to_state.models.keys())
for app_label, model_name in kept_models:
old_model_state = self.from_state.models[app_label, model_name]
new_model_state = self.to_state.models[app_label, model_name]
# New fields
old_field_names = set([x for x, y in old_model_state.fields])
new_field_names = set([x for x, y in new_model_state.fields])
for field_name in new_field_names - old_field_names:
field = new_model_state.get_field_by_name(field_name)
# Scan to see if this is actually a rename!
field_dec = field.deconstruct()[1:]
found_rename = False
for removed_field_name in (old_field_names - new_field_names):
if old_model_state.get_field_by_name(removed_field_name).deconstruct()[1:] == field_dec:
if self.questioner.ask_rename(model_name, removed_field_name, field_name, field):
self.add_to_migration(
app_label,
operations.RenameField(
model_name = model_name,
old_name = removed_field_name,
new_name = field_name,
)
)
old_field_names.remove(removed_field_name)
new_field_names.remove(field_name)
found_rename = True
break
if found_rename:
continue
# You can't just add NOT NULL fields with no default
if not field.null and not field.has_default():
field.default = self.questioner.ask_not_null_addition(field_name, model_name)
self.add_to_migration(
app_label,
operations.AddField(
model_name = model_name,
name = field_name,
field = field,
)
)
# Old fields
for field_name in old_field_names - new_field_names:
self.add_to_migration(
app_label,
operations.RemoveField(
model_name = model_name,
name = field_name,
)
)
# The same fields
for field_name in old_field_names.intersection(new_field_names):
# Did the field change?
old_field_dec = old_model_state.get_field_by_name(field_name).deconstruct()
new_field_dec = new_model_state.get_field_by_name(field_name).deconstruct()
if old_field_dec != new_field_dec:
self.add_to_migration(
app_label,
operations.AlterField(
model_name = model_name,
name = field_name,
field = new_model_state.get_field_by_name(field_name),
)
)
# unique_together changes
if old_model_state.options.get("unique_together", set()) != new_model_state.options.get("unique_together", set()):
self.add_to_migration(
app_label,
operations.AlterUniqueTogether(
name = model_name,
unique_together = new_model_state.options.get("unique_together", set()),
)
)
# Alright, now add internal dependencies
for app_label, migrations in self.migrations.items():
for m1, m2 in zip(migrations, migrations[1:]):
m2.dependencies.append((app_label, m1.name))
# Clean up dependencies
for app_label, migrations in self.migrations.items():
for migration in migrations:
migration.dependencies = list(set(migration.dependencies))
return self.migrations
def add_to_migration(self, app_label, operation, new=False):
migrations = self.migrations.setdefault(app_label, [])
if not migrations or new:
subclass = type("Migration", (Migration,), {"operations": [], "dependencies": []})
instance = subclass("auto_%i" % (len(migrations) + 1), app_label)
migrations.append(instance)
migrations[-1].operations.append(operation)
def add_dependency(self, app_label, other_app_label):
"""
Adds a dependency to app_label's newest migration on
other_app_label's latest migration.
"""
if self.migrations.get(other_app_label, []):
dependency = (other_app_label, self.migrations[other_app_label][-1].name)
else:
dependency = (other_app_label, "__first__")
self.migrations[app_label][-1].dependencies.append(dependency)
def _arrange_for_graph(self, changes, graph):
"""
Takes in a result from changes() and a MigrationGraph,
and fixes the names and dependencies of the changes so they
extend the graph from the leaf nodes for each app.
"""
leaves = graph.leaf_nodes()
name_map = {}
for app_label, migrations in list(changes.items()):
if not migrations:
continue
# Find the app label's current leaf node
app_leaf = None
for leaf in leaves:
if leaf[0] == app_label:
app_leaf = leaf
break
# Do they want an initial migration for this app?
if app_leaf is None and not self.questioner.ask_initial(app_label):
# They don't.
for migration in migrations:
name_map[(app_label, migration.name)] = (app_label, "__first__")
del changes[app_label]
continue
# Work out the next number in the sequence
if app_leaf is None:
next_number = 1
else:
next_number = (self.parse_number(app_leaf[1]) or 0) + 1
# Name each migration
for i, migration in enumerate(migrations):
if i == 0 and app_leaf:
migration.dependencies.append(app_leaf)
if i == 0 and not app_leaf:
new_name = "0001_initial"
else:
new_name = "%04i_%s" % (next_number, self.suggest_name(migration.operations))
name_map[(app_label, migration.name)] = (app_label, new_name)
migration.name = new_name
# Now fix dependencies
for app_label, migrations in changes.items():
for migration in migrations:
migration.dependencies = [name_map.get(d, d) for d in migration.dependencies]
return changes
def _trim_to_apps(self, changes, app_labels):
"""
Takes changes from arrange_for_graph and set of app labels and
returns a modified set of changes which trims out as many migrations
that are not in app_labels as possible.
Note that some other migrations may still be present, as they may be
required dependencies.
"""
# Gather other app dependencies in a first pass
app_dependencies = {}
for app_label, migrations in changes.items():
for migration in migrations:
for dep_app_label, name in migration.dependencies:
app_dependencies.setdefault(app_label, set()).add(dep_app_label)
required_apps = set(app_labels)
# Keep resolving till there's no change
old_required_apps = None
while old_required_apps != required_apps:
old_required_apps = set(required_apps)
for app_label in list(required_apps):
required_apps.update(app_dependencies.get(app_label, set()))
# Remove all migrations that aren't needed
for app_label in list(changes.keys()):
if app_label not in required_apps:
del changes[app_label]
return changes
@classmethod
def suggest_name(cls, ops):
"""
Given a set of operations, suggests a name for the migration
they might represent. Names are not guaranteed to be unique; they
must be prefixed by a number or date.
"""
if len(ops) == 1:
if isinstance(ops[0], operations.CreateModel):
return ops[0].name.lower()
elif isinstance(ops[0], operations.DeleteModel):
return "delete_%s" % ops[0].name.lower()
elif isinstance(ops[0], operations.AddField):
return "%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif isinstance(ops[0], operations.RemoveField):
return "remove_%s_%s" % (ops[0].model_name.lower(), ops[0].name.lower())
elif all(isinstance(o, operations.CreateModel) for o in ops):
return "_".join(sorted(o.name.lower() for o in ops))
return "auto"
@classmethod
def parse_number(cls, name):
"""
Given a migration name, tries to extract a number from the
beginning of it. If no number found, returns None.
"""
if re.match(r"^\d+_", name):
return int(name.split("_")[0])
return None
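# End-to-end sketch (names are hypothetical; from_state usually comes from
# the migration graph, to_state from the current models):
#
#     autodetector = MigrationAutodetector(
#         loader.graph.project_state(),   # historical state from migrations
#         current_state,                  # e.g. a ProjectState built from models
#     )
#     changes = autodetector.changes(loader.graph)
#     # e.g. {"myapp": [<Migration myapp.0002_book_title>]}
#
# Helper behaviour:
#     MigrationAutodetector.parse_number("0002_remove_author_age")  # -> 2
#     MigrationAutodetector.parse_number("initial")                 # -> None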
class MigrationQuestioner(object):
"""
Gives the autodetector responses to questions it might have.
This base class has a built-in noninteractive mode, but the
interactive subclass is what the command-line arguments will use.
"""
def __init__(self, defaults=None):
self.defaults = defaults or {}
def ask_initial(self, app_label):
"Should we create an initial migration for the app?"
return self.defaults.get("ask_initial", False)
def ask_not_null_addition(self, field_name, model_name):
"Adding a NOT NULL field to a model"
# None means quit
return None
def ask_rename(self, model_name, old_name, new_name, field_instance):
"Was this field really renamed?"
return self.defaults.get("ask_rename", False)
class InteractiveMigrationQuestioner(MigrationQuestioner):
def __init__(self, specified_apps=set()):
self.specified_apps = specified_apps
def _boolean_input(self, question, default=None):
result = input("%s " % question)
if not result and default is not None:
return default
while len(result) < 1 or result[0].lower() not in "yn":
result = input("Please answer yes or no: ")
return result[0].lower() == "y"
def _choice_input(self, question, choices):
print(question)
for i, choice in enumerate(choices):
print(" %s) %s" % (i + 1, choice))
result = input("Select an option: ")
while True:
try:
value = int(result)
if 0 < value <= len(choices):
return value
except ValueError:
pass
result = input("Please select a valid option: ")
def ask_initial(self, app_label):
"Should we create an initial migration for the app?"
# Don't ask for django.contrib apps
app = cache.get_app(app_label)
if app.__name__.startswith("django.contrib"):
return False
# If it was specified on the command line, definitely true
if app_label in self.specified_apps:
return True
# Now ask
return self._boolean_input("Do you want to enable migrations for app '%s'? [y/N]" % app_label, False)
def ask_not_null_addition(self, field_name, model_name):
"Adding a NOT NULL field to a model"
choice = self._choice_input(
"You are trying to add a non-nullable field '%s' to %s without a default;\n" % (field_name, model_name) +
"this is not possible. Please select a fix:",
[
"Provide a one-off default now (will be set on all existing rows)",
"Quit, and let me add a default in models.py",
]
)
if choice == 2:
sys.exit(3)
else:
print("Please enter the default value now, as valid Python")
print("The datetime module is available, so you can do e.g. datetime.date.today()")
while True:
code = input(">>> ")
if not code:
print("Please enter some code, or 'exit' (with no quotes) to exit.")
elif code == "exit":
sys.exit(1)
else:
try:
return eval(code, {}, {"datetime": datetime_safe})
except (SyntaxError, NameError) as e:
print("Invalid input: %s" % e)
else:
break
def ask_rename(self, model_name, old_name, new_name, field_instance):
"Was this field really renamed?"
return self._boolean_input("Did you rename %s.%s to %s.%s (a %s)? [y/N]" % (model_name, old_name, model_name, new_name, field_instance.__class__.__name__), False)
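# Sketch: non-interactive callers can pre-seed answers through the base
# class instead of prompting, e.g.:
#
#     questioner = MigrationQuestioner(defaults={"ask_initial": True,
#                                                "ask_rename": False})
#     autodetector = MigrationAutodetector(from_state, to_state, questioner)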

View File

@ -0,0 +1,90 @@
from .loader import MigrationLoader
from .recorder import MigrationRecorder
class MigrationExecutor(object):
"""
End-to-end migration execution - loads migrations, and runs them
up or down to a specified set of targets.
"""
def __init__(self, connection, progress_callback=None):
self.connection = connection
self.loader = MigrationLoader(self.connection)
self.loader.load_disk()
self.recorder = MigrationRecorder(self.connection)
self.progress_callback = progress_callback
def migration_plan(self, targets):
"""
Given a set of targets, returns a list of (Migration instance, backwards?).
"""
plan = []
applied = self.recorder.applied_migrations()
for target in targets:
# If the target is (appname, None), that means unmigrate everything
if target[1] is None:
for root in self.loader.graph.root_nodes():
if root[0] == target[0]:
for migration in self.loader.graph.backwards_plan(root):
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
# If the migration is already applied, do backwards mode,
# otherwise do forwards mode.
elif target in applied:
backwards_plan = self.loader.graph.backwards_plan(target)[:-1]
# We only do this if the migration is not the most recent one
# in its app - that is, another migration with the same app
# label is in the backwards plan
if any(node[0] == target[0] for node in backwards_plan):
for migration in backwards_plan:
if migration in applied:
plan.append((self.loader.graph.nodes[migration], True))
applied.remove(migration)
else:
for migration in self.loader.graph.forwards_plan(target):
if migration not in applied:
plan.append((self.loader.graph.nodes[migration], False))
applied.add(migration)
return plan
def migrate(self, targets, plan=None, fake=False):
"""
Migrates the database up to the given targets.
"""
if plan is None:
plan = self.migration_plan(targets)
for migration, backwards in plan:
if not backwards:
self.apply_migration(migration, fake=fake)
else:
self.unapply_migration(migration, fake=fake)
def apply_migration(self, migration, fake=False):
"""
Runs a migration forwards.
"""
if self.progress_callback:
self.progress_callback("apply_start", migration)
if not fake:
with self.connection.schema_editor() as schema_editor:
project_state = self.loader.graph.project_state((migration.app_label, migration.name), at_end=False)
migration.apply(project_state, schema_editor)
self.recorder.record_applied(migration.app_label, migration.name)
if self.progress_callback:
self.progress_callback("apply_success", migration)
def unapply_migration(self, migration, fake=False):
"""
Runs a migration backwards.
"""
if self.progress_callback:
self.progress_callback("unapply_start", migration)
if not fake:
with self.connection.schema_editor() as schema_editor:
project_state = self.loader.graph.project_state((migration.app_label, migration.name), at_end=False)
migration.unapply(project_state, schema_editor)
self.recorder.record_unapplied(migration.app_label, migration.name)
if self.progress_callback:
self.progress_callback("unapply_success", migration)

View File

@ -0,0 +1,152 @@
from django.utils.datastructures import OrderedSet
from django.db.migrations.state import ProjectState
class MigrationGraph(object):
"""
Represents the digraph of all migrations in a project.
Each migration is a node, and each dependency is an edge. There are
no implicit dependencies between numbered migrations - the numbering is
merely a convention to aid file listing. Every new numbered migration
has a declared dependency to the previous number, meaning that VCS
branch merges can be detected and resolved.
Migrations files can be marked as replacing another set of migrations -
this is to support the "squash" feature. The graph handler isn't responsible
for these; instead, the code to load them in here should examine the
migration files and if the replaced migrations are all either unapplied
or not present, it should ignore the replaced ones, load in just the
replacing migration, and repoint any dependencies that pointed to the
replaced migrations to point to the replacing one.
A node should be a tuple: (app_path, migration_name). The graph special-cases
things within an app - namely, root nodes and leaf nodes ignore dependencies
to other apps.
"""
def __init__(self):
self.nodes = {}
self.dependencies = {}
self.dependents = {}
def add_node(self, node, implementation):
self.nodes[node] = implementation
def add_dependency(self, child, parent):
if child not in self.nodes:
raise KeyError("Dependency references nonexistent child node %r" % (child,))
if parent not in self.nodes:
raise KeyError("Dependency references nonexistent parent node %r" % (parent,))
self.dependencies.setdefault(child, set()).add(parent)
self.dependents.setdefault(parent, set()).add(child)
def forwards_plan(self, node):
"""
Given a node, returns a list of which previous nodes (dependencies)
must be applied, ending with the node itself.
This is the list you would follow if applying the migrations to
a database.
"""
if node not in self.nodes:
raise ValueError("Node %r not a valid node" % (node, ))
return self.dfs(node, lambda x: self.dependencies.get(x, set()))
def backwards_plan(self, node):
"""
Given a node, returns a list of which dependent nodes (dependents)
must be unapplied, ending with the node itself.
This is the list you would follow if removing the migrations from
a database.
"""
if node not in self.nodes:
raise ValueError("Node %r not a valid node" % (node, ))
return self.dfs(node, lambda x: self.dependents.get(x, set()))
def root_nodes(self):
"""
Returns all root nodes - that is, nodes with no dependencies inside
their app. These are the starting point for an app.
"""
roots = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.dependencies.get(node, set())):
roots.add(node)
return roots
def leaf_nodes(self):
"""
Returns all leaf nodes - that is, nodes with no dependents in their app.
These are the "most current" version of an app's schema.
Having more than one per app is technically an error, but one that
gets handled further up, in the interactive command - it's usually the
result of a VCS merge and needs some user input.
"""
leaves = set()
for node in self.nodes:
if not any(key[0] == node[0] for key in self.dependents.get(node, set())):
leaves.add(node)
return leaves
def dfs(self, start, get_children):
"""
Dynamic-programming-based depth-first search, for finding dependencies.
"""
cache = {}
def _dfs(start, get_children, path):
# If we already computed this, use that (dynamic programming)
if (start, get_children) in cache:
return cache[(start, get_children)]
# If we've traversed here before, that's a circular dep
if start in path:
raise CircularDependencyError(path[path.index(start):] + [start])
# Build our own results list, starting with us
results = []
results.append(start)
# We need to add to results all the migrations this one depends on
children = sorted(get_children(start))
path.append(start)
for n in children:
results = _dfs(n, get_children, path) + results
path.pop()
# Use OrderedSet to ensure only one instance of each result
results = list(OrderedSet(results))
# Populate DP cache
cache[(start, get_children)] = results
# Done!
return results
return _dfs(start, get_children, [])
def __str__(self):
return "Graph: %s nodes, %s edges" % (len(self.nodes), sum(len(x) for x in self.dependencies.values()))
def project_state(self, nodes=None, at_end=True):
"""
Given a migration node or nodes, returns a complete ProjectState for it.
If at_end is False, returns the state before the migration has run.
If nodes is not provided, returns the overall most current project state.
"""
if nodes is None:
nodes = list(self.leaf_nodes())
if len(nodes) == 0:
return ProjectState()
if not isinstance(nodes[0], tuple):
nodes = [nodes]
plan = []
for node in nodes:
for migration in self.forwards_plan(node):
if migration not in plan:
if not at_end and migration in nodes:
continue
plan.append(migration)
project_state = ProjectState()
for node in plan:
project_state = self.nodes[node].mutate_state(project_state)
return project_state
class CircularDependencyError(Exception):
"""
Raised when there's an impossible-to-resolve circular dependency.
"""
pass
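# Minimal sketch of the graph API above:
#
#     graph = MigrationGraph()
#     graph.add_node(("app", "0001_initial"), None)
#     graph.add_node(("app", "0002_change"), None)
#     graph.add_dependency(("app", "0002_change"), ("app", "0001_initial"))
#     graph.forwards_plan(("app", "0002_change"))
#     # -> [("app", "0001_initial"), ("app", "0002_change")]
#     graph.backwards_plan(("app", "0001_initial"))
#     # -> [("app", "0002_change"), ("app", "0001_initial")]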

View File

@ -0,0 +1,167 @@
import os
from importlib import import_module
from django.utils.functional import cached_property
from django.db.models.loading import cache
from django.db.migrations.recorder import MigrationRecorder
from django.db.migrations.graph import MigrationGraph
from django.conf import settings
class MigrationLoader(object):
"""
Loads migration files from disk, and their status from the database.
Migration files are expected to live in the "migrations" directory of
an app. Their names are entirely unimportant from a code perspective,
but will probably follow the 1234_name.py convention.
On initialisation, this class will scan those directories, and open and
read the python files, looking for a class called Migration, which should
inherit from django.db.migrations.Migration. See
django.db.migrations.migration for what that looks like.
Some migrations will be marked as "replacing" another set of migrations.
These are loaded into a separate set of migrations away from the main ones.
If all the migrations they replace are either unapplied or missing from
disk, then they are injected into the main set, replacing the named migrations.
Any dependency pointers to the replaced migrations are re-pointed to the
new migration.
This does mean that this class MUST also talk to the database as well as
to disk, but this is probably fine. We're already not just operating
in memory.
"""
def __init__(self, connection):
self.connection = connection
self.disk_migrations = None
self.applied_migrations = None
@classmethod
def migrations_module(cls, app_label):
if app_label in settings.MIGRATION_MODULES:
return settings.MIGRATION_MODULES[app_label]
app = cache.get_app(app_label)
return ".".join(app.__name__.split(".")[:-1] + ["migrations"])
def load_disk(self):
"""
Loads the migrations from all INSTALLED_APPS from disk.
"""
self.disk_migrations = {}
self.unmigrated_apps = set()
self.migrated_apps = set()
for app in cache.get_apps():
# Get the migrations module directory
app_label = app.__name__.split(".")[-2]
module_name = self.migrations_module(app_label)
try:
module = import_module(module_name)
except ImportError as e:
# I hate doing this, but I don't want to squash other import errors.
# Might be better to try a directory check directly.
if "No module named" in str(e) and "migrations" in str(e):
self.unmigrated_apps.add(app_label)
continue
raise
self.migrated_apps.add(app_label)
directory = os.path.dirname(module.__file__)
# Scan for .py[c|o] files
migration_names = set()
for name in os.listdir(directory):
if name.endswith(".py") or name.endswith(".pyc") or name.endswith(".pyo"):
import_name = name.rsplit(".", 1)[0]
if import_name[0] not in "_.~":
migration_names.add(import_name)
# Load them
for migration_name in migration_names:
migration_module = import_module("%s.%s" % (module_name, migration_name))
if not hasattr(migration_module, "Migration"):
raise BadMigrationError("Migration %s in app %s has no Migration class" % (migration_name, app_label))
self.disk_migrations[app_label, migration_name] = migration_module.Migration(migration_name, app_label)
def get_migration_by_prefix(self, app_label, name_prefix):
"Returns the migration(s) which match the given app label and name _prefix_"
# Make sure we have the disk data
if self.disk_migrations is None:
self.load_disk()
# Do the search
results = []
for l, n in self.disk_migrations:
if l == app_label and n.startswith(name_prefix):
results.append((l, n))
if len(results) > 1:
raise AmbiguityError("There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix))
elif len(results) == 0:
raise KeyError("There are no migrations for '%s' with the prefix '%s'" % (app_label, name_prefix))
else:
return self.disk_migrations[results[0]]
@cached_property
def graph(self):
"""
Builds a migration dependency graph using both the disk and database.
"""
# Make sure we have the disk data
if self.disk_migrations is None:
self.load_disk()
# And the database data
if self.applied_migrations is None:
recorder = MigrationRecorder(self.connection)
self.applied_migrations = recorder.applied_migrations()
# Do a first pass to separate out replacing and non-replacing migrations
normal = {}
replacing = {}
for key, migration in self.disk_migrations.items():
if migration.replaces:
replacing[key] = migration
else:
normal[key] = migration
# Calculate reverse dependencies - i.e., for each migration, what depends on it?
# This is just for dependency re-pointing when applying replacements,
# so we ignore run_before here.
reverse_dependencies = {}
for key, migration in normal.items():
for parent in migration.dependencies:
reverse_dependencies.setdefault(parent, set()).add(key)
# Carry out replacements if we can - that is, if all replaced migrations
# are either unapplied or missing.
for key, migration in replacing.items():
# Do the check
can_replace = True
for target in migration.replaces:
if target in self.applied_migrations:
can_replace = False
break
if not can_replace:
continue
# Alright, time to replace. Step through the replaced migrations
# and remove, repointing dependencies if needs be.
for replaced in migration.replaces:
if replaced in normal:
del normal[replaced]
for child_key in reverse_dependencies.get(replaced, set()):
normal[child_key].dependencies.remove(replaced)
normal[child_key].dependencies.append(key)
normal[key] = migration
# Finally, make a graph and load everything into it
graph = MigrationGraph()
for key, migration in normal.items():
graph.add_node(key, migration)
for key, migration in normal.items():
for parent in migration.dependencies:
graph.add_dependency(key, parent)
return graph
class BadMigrationError(Exception):
"""
Raised when there's a bad migration (unreadable/bad format/etc.)
"""
pass
class AmbiguityError(Exception):
"""
Raised when more than one migration matches a name prefix
"""
pass
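A minimal usage sketch (the "books" app label and "0002" prefix are hypothetical; assumes a configured default database connection):

from django.db import connection
from django.db.migrations.loader import MigrationLoader

loader = MigrationLoader(connection)
loader.load_disk()  # scan INSTALLED_APPS for migration files
migration = loader.get_migration_by_prefix("books", "0002")
graph = loader.graph  # dependency graph combining disk and database state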

View File

@ -0,0 +1,101 @@
class Migration(object):
"""
The base class for all migrations.
Migration files will import this from django.db.migrations.Migration
and subclass it as a class called Migration. It will have one or more
of the following attributes:
- operations: A list of Operation instances, probably from django.db.migrations.operations
- dependencies: A list of tuples of (app_path, migration_name)
- run_before: A list of tuples of (app_path, migration_name)
- replaces: A list of migration_names
    Note that all migrations come out of migration files and into the Loader or
    Graph as instances, having been initialised with their app label and name.
"""
# Operations to apply during this migration, in order.
operations = []
# Other migrations that should be run before this migration.
# Should be a list of (app, migration_name).
dependencies = []
# Other migrations that should be run after this one (i.e. have
# this migration added to their dependencies). Useful to make third-party
# apps' migrations run after your AUTH_USER replacement, for example.
run_before = []
# Migration names in this app that this migration replaces. If this is
# non-empty, this migration will only be applied if all these migrations
# are not applied.
replaces = []
def __init__(self, name, app_label):
self.name = name
self.app_label = app_label
def __eq__(self, other):
if not isinstance(other, Migration):
return False
return (self.name == other.name) and (self.app_label == other.app_label)
def __ne__(self, other):
return not (self == other)
def __repr__(self):
return "<Migration %s.%s>" % (self.app_label, self.name)
def __str__(self):
return "%s.%s" % (self.app_label, self.name)
def __hash__(self):
return hash("%s.%s" % (self.app_label, self.name))
def mutate_state(self, project_state):
"""
Takes a ProjectState and returns a new one with the migration's
operations applied to it.
"""
new_state = project_state.clone()
for operation in self.operations:
operation.state_forwards(self.app_label, new_state)
return new_state
def apply(self, project_state, schema_editor):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a forwards order.
Returns the resulting project state for efficient re-use by following
Migrations.
"""
for operation in self.operations:
# Get the state after the operation has run
new_state = project_state.clone()
operation.state_forwards(self.app_label, new_state)
# Run the operation
operation.database_forwards(self.app_label, schema_editor, project_state, new_state)
# Switch states
project_state = new_state
return project_state
def unapply(self, project_state, schema_editor):
"""
Takes a project_state representing all migrations prior to this one
and a schema_editor for a live database and applies the migration
in a reverse order.
"""
# We need to pre-calculate the stack of project states
to_run = []
for operation in self.operations:
new_state = project_state.clone()
operation.state_forwards(self.app_label, new_state)
to_run.append((operation, project_state, new_state))
project_state = new_state
# Now run them in reverse
to_run.reverse()
for operation, to_state, from_state in to_run:
operation.database_backwards(self.app_label, schema_editor, from_state, to_state)
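For illustration, a hand-written migration file using this class might look like the following (the "books" app and "author" model are hypothetical):

from django.db import migrations, models

class Migration(migrations.Migration):

    dependencies = [
        ("books", "0001_initial"),
    ]

    operations = [
        migrations.AddField("author", "age", models.IntegerField(null=True)),
    ]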

View File

@ -0,0 +1,2 @@
from .models import CreateModel, DeleteModel, AlterModelTable, AlterUniqueTogether, AlterIndexTogether
from .fields import AddField, RemoveField, AlterField, RenameField

View File

@ -0,0 +1,62 @@
class Operation(object):
"""
Base class for migration operations.
It's responsible for both mutating the in-memory model state
(see db/migrations/state.py) to represent what it performs, as well
as actually performing it against a live database.
Note that some operations won't modify memory state at all (e.g. data
copying operations), and some will need their modifications to be
optionally specified by the user (e.g. custom Python code snippets)
"""
# If this migration can be run in reverse.
# Some operations are impossible to reverse, like deleting data.
reversible = True
def __new__(cls, *args, **kwargs):
# We capture the arguments to make returning them trivial
self = object.__new__(cls)
self._constructor_args = (args, kwargs)
return self
def deconstruct(self):
"""
Returns a 3-tuple of class import path (or just name if it lives
under django.db.migrations), positional arguments, and keyword
arguments.
"""
return (
self.__class__.__name__,
self._constructor_args[0],
self._constructor_args[1],
)
def state_forwards(self, app_label, state):
"""
Takes the state from the previous migration, and mutates it
so that it matches what this migration would perform.
"""
raise NotImplementedError()
def database_forwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the normal
(forwards) direction.
"""
raise NotImplementedError()
def database_backwards(self, app_label, schema_editor, from_state, to_state):
"""
Performs the mutation on the database schema in the reverse
direction - e.g. if this were CreateModel, it would in fact
drop the model's table.
"""
raise NotImplementedError()
def describe(self):
"""
Outputs a brief summary of what the action does.
"""
return "%s: %s" % (self.__class__.__name__, self._constructor_args)

View File

@ -0,0 +1,132 @@
from django.db import router
from .base import Operation
class AddField(Operation):
"""
Adds a field to a model.
"""
def __init__(self, model_name, name, field):
self.model_name = model_name.lower()
self.name = name
self.field = field
def state_forwards(self, app_label, state):
state.models[app_label, self.model_name.lower()].fields.append((self.name, self.field))
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
to_model = to_state.render().get_model(app_label, self.model_name)
if router.allow_migrate(schema_editor.connection.alias, to_model):
schema_editor.add_field(from_model, to_model._meta.get_field_by_name(self.name)[0])
def database_backwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
if router.allow_migrate(schema_editor.connection.alias, from_model):
schema_editor.remove_field(from_model, from_model._meta.get_field_by_name(self.name)[0])
def describe(self):
return "Add field %s to %s" % (self.name, self.model_name)
class RemoveField(Operation):
"""
Removes a field from a model.
"""
def __init__(self, model_name, name):
self.model_name = model_name.lower()
self.name = name
def state_forwards(self, app_label, state):
new_fields = []
for name, instance in state.models[app_label, self.model_name.lower()].fields:
if name != self.name:
new_fields.append((name, instance))
state.models[app_label, self.model_name.lower()].fields = new_fields
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
if router.allow_migrate(schema_editor.connection.alias, from_model):
schema_editor.remove_field(from_model, from_model._meta.get_field_by_name(self.name)[0])
def database_backwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
to_model = to_state.render().get_model(app_label, self.model_name)
if router.allow_migrate(schema_editor.connection.alias, to_model):
schema_editor.add_field(from_model, to_model._meta.get_field_by_name(self.name)[0])
def describe(self):
return "Remove field %s from %s" % (self.name, self.model_name)
class AlterField(Operation):
"""
Alters a field's database column (e.g. null, max_length) to the provided new field
"""
def __init__(self, model_name, name, field):
self.model_name = model_name.lower()
self.name = name
self.field = field
def state_forwards(self, app_label, state):
state.models[app_label, self.model_name.lower()].fields = [
(n, self.field if n == self.name else f) for n, f in state.models[app_label, self.model_name.lower()].fields
]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
to_model = to_state.render().get_model(app_label, self.model_name)
if router.allow_migrate(schema_editor.connection.alias, to_model):
schema_editor.alter_field(
from_model,
from_model._meta.get_field_by_name(self.name)[0],
to_model._meta.get_field_by_name(self.name)[0],
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Alter field %s on %s" % (self.name, self.model_name)
class RenameField(Operation):
"""
Renames a field on the model. Might affect db_column too.
"""
def __init__(self, model_name, old_name, new_name):
self.model_name = model_name.lower()
self.old_name = old_name
self.new_name = new_name
def state_forwards(self, app_label, state):
state.models[app_label, self.model_name.lower()].fields = [
(self.new_name if n == self.old_name else n, f) for n, f in state.models[app_label, self.model_name.lower()].fields
]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
to_model = to_state.render().get_model(app_label, self.model_name)
if router.allow_migrate(schema_editor.connection.alias, to_model):
schema_editor.alter_field(
from_model,
from_model._meta.get_field_by_name(self.old_name)[0],
to_model._meta.get_field_by_name(self.new_name)[0],
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
from_model = from_state.render().get_model(app_label, self.model_name)
to_model = to_state.render().get_model(app_label, self.model_name)
if router.allow_migrate(schema_editor.connection.alias, to_model):
schema_editor.alter_field(
from_model,
from_model._meta.get_field_by_name(self.new_name)[0],
to_model._meta.get_field_by_name(self.old_name)[0],
)
def describe(self):
return "Rename field %s on %s to %s" % (self.old_name, self.model_name, self.new_name)

View File

@ -0,0 +1,157 @@
from .base import Operation
from django.db import models, router
from django.db.migrations.state import ModelState
class CreateModel(Operation):
"""
Create a model's table.
"""
def __init__(self, name, fields, options=None, bases=None):
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model,)
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()] = ModelState(app_label, self.name, self.fields, self.options, self.bases)
def database_forwards(self, app_label, schema_editor, from_state, to_state):
app_cache = to_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
app_cache = from_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def describe(self):
return "Create model %s" % (self.name, )
class DeleteModel(Operation):
"""
Drops a model's table.
"""
def __init__(self, name):
self.name = name
def state_forwards(self, app_label, state):
del state.models[app_label, self.name.lower()]
def database_forwards(self, app_label, schema_editor, from_state, to_state):
app_cache = from_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.delete_model(model)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
app_cache = to_state.render()
model = app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, model):
schema_editor.create_model(model)
def describe(self):
return "Delete model %s" % (self.name, )
class AlterModelTable(Operation):
"""
Renames a model's table
"""
def __init__(self, name, table):
self.name = name
self.table = table
def state_forwards(self, app_label, state):
state.models[app_label, self.name.lower()].options["db_table"] = self.table
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_db_table(
new_model,
old_model._meta.db_table,
new_model._meta.db_table,
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Rename table for %s to %s" % (self.name, self.table)
class AlterUniqueTogether(Operation):
"""
    Changes the value of unique_together to the target one.
Input value of unique_together must be a set of tuples.
"""
def __init__(self, name, unique_together):
self.name = name
self.unique_together = set(tuple(cons) for cons in unique_together)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options["unique_together"] = self.unique_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_unique_together(
new_model,
getattr(old_model._meta, "unique_together", set()),
getattr(new_model._meta, "unique_together", set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Alter unique_together for %s (%s constraints)" % (self.name, len(self.unique_together))
class AlterIndexTogether(Operation):
"""
Changes the value of index_together to the target one.
Input value of index_together must be a set of tuples.
"""
def __init__(self, name, index_together):
self.name = name
self.index_together = set(tuple(cons) for cons in index_together)
def state_forwards(self, app_label, state):
model_state = state.models[app_label, self.name.lower()]
model_state.options["index_together"] = self.index_together
def database_forwards(self, app_label, schema_editor, from_state, to_state):
old_app_cache = from_state.render()
new_app_cache = to_state.render()
old_model = old_app_cache.get_model(app_label, self.name)
new_model = new_app_cache.get_model(app_label, self.name)
if router.allow_migrate(schema_editor.connection.alias, new_model):
schema_editor.alter_index_together(
new_model,
getattr(old_model._meta, "index_together", set()),
getattr(new_model._meta, "index_together", set()),
)
def database_backwards(self, app_label, schema_editor, from_state, to_state):
return self.database_forwards(app_label, schema_editor, from_state, to_state)
def describe(self):
return "Alter index_together for %s (%s constraints)" % (self.name, len(self.index_together))

View File

@ -0,0 +1,69 @@
from django.db import models
from django.db.models.loading import BaseAppCache
from django.utils.timezone import now
class MigrationRecorder(object):
"""
Deals with storing migration records in the database.
Because this table is actually itself used for dealing with model
creation, it's the one thing we can't do normally via syncdb or migrations.
We manually handle table creation/schema updating (using schema backend)
and then have a floating model to do queries with.
If a migration is unapplied its row is removed from the table. Having
a row in the table always means a migration is applied.
"""
class Migration(models.Model):
app = models.CharField(max_length=255)
name = models.CharField(max_length=255)
applied = models.DateTimeField(default=now)
class Meta:
app_cache = BaseAppCache()
app_label = "migrations"
db_table = "django_migrations"
def __init__(self, connection):
self.connection = connection
def ensure_schema(self):
"""
Ensures the table exists and has the correct schema.
"""
# If the table's there, that's fine - we've never changed its schema
# in the codebase.
if self.Migration._meta.db_table in self.connection.introspection.get_table_list(self.connection.cursor()):
return
# Make the table
with self.connection.schema_editor() as editor:
editor.create_model(self.Migration)
def applied_migrations(self):
"""
Returns a set of (app, name) of applied migrations.
"""
self.ensure_schema()
return set(tuple(x) for x in self.Migration.objects.values_list("app", "name"))
def record_applied(self, app, name):
"""
Records that a migration was applied.
"""
self.ensure_schema()
self.Migration.objects.create(app=app, name=name)
def record_unapplied(self, app, name):
"""
Records that a migration was unapplied.
"""
self.ensure_schema()
self.Migration.objects.filter(app=app, name=name).delete()
@classmethod
def flush(cls):
"""
Deletes all migration records. Useful if you're testing migrations.
"""
cls.Migration.objects.all().delete()
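A usage sketch against the default connection (app and migration names hypothetical):

from django.db import connection
from django.db.migrations.recorder import MigrationRecorder

recorder = MigrationRecorder(connection)
recorder.record_applied("books", "0001_initial")
assert ("books", "0001_initial") in recorder.applied_migrations()
recorder.record_unapplied("books", "0001_initial")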

View File

@ -0,0 +1,142 @@
from django.db import models
from django.db.models.loading import BaseAppCache
from django.db.models.options import DEFAULT_NAMES
from django.utils.module_loading import import_by_path
class ProjectState(object):
"""
Represents the entire project's overall state.
This is the item that is passed around - we do it here rather than at the
app level so that cross-app FKs/etc. resolve properly.
"""
def __init__(self, models=None):
self.models = models or {}
self.app_cache = None
def add_model_state(self, model_state):
self.models[(model_state.app_label, model_state.name.lower())] = model_state
def clone(self):
"Returns an exact copy of this ProjectState"
return ProjectState(
models = dict((k, v.clone()) for k, v in self.models.items())
)
def render(self):
"Turns the project state into actual models in a new AppCache"
if self.app_cache is None:
self.app_cache = BaseAppCache()
for model in self.models.values():
model.render(self.app_cache)
return self.app_cache
@classmethod
def from_app_cache(cls, app_cache):
"Takes in an AppCache and returns a ProjectState matching it"
models = {}
for model in app_cache.get_models():
model_state = ModelState.from_model(model)
models[(model_state.app_label, model_state.name.lower())] = model_state
return cls(models)
class ModelState(object):
"""
Represents a Django Model. We don't use the actual Model class
as it's not designed to have its options changed - instead, we
mutate this one and then render it into a Model as required.
Note that while you are allowed to mutate .fields, you are not allowed
to mutate the Field instances inside there themselves - you must instead
assign new ones, as these are not detached during a clone.
"""
def __init__(self, app_label, name, fields, options=None, bases=None):
self.app_label = app_label
self.name = name
self.fields = fields
self.options = options or {}
self.bases = bases or (models.Model, )
# Sanity-check that fields is NOT a dict. It must be ordered.
if isinstance(self.fields, dict):
raise ValueError("ModelState.fields cannot be a dict - it must be a list of 2-tuples.")
@classmethod
def from_model(cls, model):
"""
Feed me a model, get a ModelState representing it out.
"""
# Deconstruct the fields
fields = []
for field in model._meta.fields:
name, path, args, kwargs = field.deconstruct()
field_class = import_by_path(path)
fields.append((name, field_class(*args, **kwargs)))
# Extract the options
options = {}
for name in DEFAULT_NAMES:
# Ignore some special options
if name in ["app_cache", "app_label"]:
continue
elif name in model._meta.original_attrs:
if name == "unique_together":
options[name] = set(model._meta.original_attrs["unique_together"])
else:
options[name] = model._meta.original_attrs[name]
# Make our record
bases = tuple(model for model in model.__bases__ if (not hasattr(model, "_meta") or not model._meta.abstract))
if not bases:
bases = (models.Model, )
return cls(
model._meta.app_label,
model._meta.object_name,
fields,
options,
bases,
)
def clone(self):
"Returns an exact copy of this ModelState"
# We deep-clone the fields using deconstruction
fields = []
for name, field in self.fields:
_, path, args, kwargs = field.deconstruct()
field_class = import_by_path(path)
fields.append((name, field_class(*args, **kwargs)))
# Now make a copy
return self.__class__(
app_label = self.app_label,
name = self.name,
fields = fields,
options = dict(self.options),
bases = self.bases,
)
def render(self, app_cache):
"Creates a Model object from our current state into the given app_cache"
# First, make a Meta object
meta_contents = {'app_label': self.app_label, "app_cache": app_cache}
meta_contents.update(self.options)
if "unique_together" in meta_contents:
meta_contents["unique_together"] = list(meta_contents["unique_together"])
meta = type("Meta", tuple(), meta_contents)
# Then, work out our bases
# TODO: Use the actual bases
# Turn fields into a dict for the body, add other bits
body = dict(self.fields)
body['Meta'] = meta
body['__module__'] = "__fake__"
# Then, make a Model object
return type(
self.name,
tuple(self.bases),
body,
)
def get_field_by_name(self, name):
for fname, field in self.fields:
if fname == name:
return field
raise ValueError("No field called %s on model %s" % (name, self.name))

View File

@ -0,0 +1,180 @@
from __future__ import unicode_literals
import datetime
import types
import os
from importlib import import_module
from django.utils import six
from django.db import models
from django.db.models.loading import cache
from django.db.migrations.loader import MigrationLoader
class MigrationWriter(object):
"""
Takes a Migration instance and is able to produce the contents
of the migration file from it.
"""
def __init__(self, migration):
self.migration = migration
def as_string(self):
"""
Returns a string of the file contents.
"""
items = {
"dependencies": repr(self.migration.dependencies),
}
imports = set()
# Deconstruct operations
operation_strings = []
for operation in self.migration.operations:
name, args, kwargs = operation.deconstruct()
arg_strings = []
for arg in args:
arg_string, arg_imports = self.serialize(arg)
arg_strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in kwargs.items():
arg_string, arg_imports = self.serialize(arg)
imports.update(arg_imports)
arg_strings.append("%s = %s" % (kw, arg_string))
operation_strings.append("migrations.%s(%s\n )" % (name, "".join("\n %s," % arg for arg in arg_strings)))
items["operations"] = "[%s\n ]" % "".join("\n %s," % s for s in operation_strings)
# Format imports nicely
imports.discard("from django.db import models")
if not imports:
items["imports"] = ""
else:
items["imports"] = "\n".join(imports) + "\n"
return (MIGRATION_TEMPLATE % items).encode("utf8")
@property
def filename(self):
return "%s.py" % self.migration.name
@property
def path(self):
migrations_module_name = MigrationLoader.migrations_module(self.migration.app_label)
app_module = cache.get_app(self.migration.app_label)
# See if we can import the migrations module directly
try:
migrations_module = import_module(migrations_module_name)
basedir = os.path.dirname(migrations_module.__file__)
except ImportError:
# Alright, see if it's a direct submodule of the app
oneup = ".".join(migrations_module_name.split(".")[:-1])
app_oneup = ".".join(app_module.__name__.split(".")[:-1])
if oneup == app_oneup:
basedir = os.path.join(os.path.dirname(app_module.__file__), migrations_module_name.split(".")[-1])
else:
raise ImportError("Cannot open migrations module %s for app %s" % (migrations_module_name, self.migration.app_label))
return os.path.join(basedir, self.filename)
@classmethod
def serialize(cls, value):
"""
Serializes the value to a string that's parsable by Python, along
with any needed imports to make that string work.
More advanced than repr() as it can encode things
like datetime.datetime.now.
"""
# Sequences
if isinstance(value, (list, set, tuple)):
imports = set()
strings = []
for item in value:
item_string, item_imports = cls.serialize(item)
imports.update(item_imports)
strings.append(item_string)
if isinstance(value, set):
format = "set([%s])"
elif isinstance(value, tuple):
format = "(%s,)"
else:
format = "[%s]"
return format % (", ".join(strings)), imports
# Dictionaries
elif isinstance(value, dict):
imports = set()
strings = []
for k, v in value.items():
k_string, k_imports = cls.serialize(k)
v_string, v_imports = cls.serialize(v)
imports.update(k_imports)
imports.update(v_imports)
strings.append((k_string, v_string))
return "{%s}" % (", ".join(["%s: %s" % (k, v) for k, v in strings])), imports
# Datetimes
elif isinstance(value, (datetime.datetime, datetime.date)):
return repr(value), set(["import datetime"])
# Simple types
elif isinstance(value, six.integer_types + (float, six.binary_type, six.text_type, bool, type(None))):
return repr(value), set()
# Django fields
elif isinstance(value, models.Field):
attr_name, path, args, kwargs = value.deconstruct()
module, name = path.rsplit(".", 1)
if module == "django.db.models":
imports = set(["from django.db import models"])
name = "models.%s" % name
else:
imports = set(["import %s" % module])
name = path
arg_strings = []
for arg in args:
arg_string, arg_imports = cls.serialize(arg)
arg_strings.append(arg_string)
imports.update(arg_imports)
for kw, arg in kwargs.items():
arg_string, arg_imports = cls.serialize(arg)
imports.update(arg_imports)
arg_strings.append("%s=%s" % (kw, arg_string))
return "%s(%s)" % (name, ", ".join(arg_strings)), imports
# Functions
elif isinstance(value, (types.FunctionType, types.BuiltinFunctionType)):
# Special-cases, as these don't have im_class
special_cases = [
(datetime.datetime.now, "datetime.datetime.now", ["import datetime"]),
(datetime.datetime.utcnow, "datetime.datetime.utcnow", ["import datetime"]),
(datetime.date.today, "datetime.date.today", ["import datetime"]),
]
for func, string, imports in special_cases:
if func == value: # For some reason "utcnow is not utcnow"
return string, set(imports)
# Method?
if hasattr(value, "im_class"):
klass = value.im_class
module = klass.__module__
return "%s.%s.%s" % (module, klass.__name__, value.__name__), set(["import %s" % module])
else:
module = value.__module__
if module is None:
raise ValueError("Cannot serialize function %r: No module" % value)
return "%s.%s" % (module, value.__name__), set(["import %s" % module])
# Classes
elif isinstance(value, type):
special_cases = [
(models.Model, "models.Model", []),
]
for case, string, imports in special_cases:
if case is value:
return string, set(imports)
if hasattr(value, "__module__"):
module = value.__module__
return "%s.%s" % (module, value.__name__), set(["import %s" % module])
# Uh oh.
else:
raise ValueError("Cannot serialize: %r" % value)
MIGRATION_TEMPLATE = """# encoding: utf8
from django.db import models, migrations
%(imports)s
class Migration(migrations.Migration):
dependencies = %(dependencies)s
operations = %(operations)s
"""

View File

@ -146,7 +146,7 @@ class ModelBase(type):
new_class._base_manager = new_class._base_manager._copy_to_model(new_class)
# Bail out early if we have already created this class.
m = get_model(new_class._meta.app_label, name,
m = new_class._meta.app_cache.get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
if m is not None:
return m
@ -264,13 +264,13 @@ class ModelBase(type):
return new_class
new_class._prepare()
register_models(new_class._meta.app_label, new_class)
new_class._meta.app_cache.register_models(new_class._meta.app_label, new_class)
# Because of the way imports happen (recursively), we may or may not be
# the first time this model tries to register with the framework. There
# should only be one class for each model, so we always return the
# registered version.
return get_model(new_class._meta.app_label, name,
return new_class._meta.app_cache.get_model(new_class._meta.app_label, name,
seed_cache=False, only_installed=False)
def copy_managers(cls, base_managers):

View File

@ -368,12 +368,32 @@ class Field(object):
# mapped to one of the built-in Django field types. In this case, you
# can implement db_type() instead of get_internal_type() to specify
# exactly which wacky database column type you want to use.
params = self.db_parameters(connection)
if params['type']:
if params['check']:
return "%s CHECK (%s)" % (params['type'], params['check'])
else:
return params['type']
return None
def db_parameters(self, connection):
"""
Replacement for db_type, providing a range of different return
values (type, checks)
"""
data = DictWrapper(self.__dict__, connection.ops.quote_name, "qn_")
try:
return (connection.creation.data_types[self.get_internal_type()]
% data)
type_string = connection.creation.data_types[self.get_internal_type()] % data
except KeyError:
return None
type_string = None
try:
check_string = connection.creation.data_type_check_constraints[self.get_internal_type()] % data
except KeyError:
check_string = None
return {
"type": type_string,
"check": check_string,
}
@property
def unique(self):

View File

@ -2,7 +2,7 @@ from operator import attrgetter
from django.db import connection, connections, router
from django.db.backends import util
from django.db.models import signals, get_model
from django.db.models import signals
from django.db.models.fields import (AutoField, Field, IntegerField,
PositiveIntegerField, PositiveSmallIntegerField, FieldDoesNotExist)
from django.db.models.related import RelatedObject, PathInfo
@ -18,8 +18,6 @@ from django import forms
RECURSIVE_RELATIONSHIP_CONSTANT = 'self'
pending_lookups = {}
def add_lazy_relation(cls, field, relation, operation):
"""
@ -70,14 +68,14 @@ def add_lazy_relation(cls, field, relation, operation):
# string right away. If get_model returns None, it means that the related
# model isn't loaded yet, so we need to pend the relation until the class
# is prepared.
model = get_model(app_label, model_name,
model = cls._meta.app_cache.get_model(app_label, model_name,
seed_cache=False, only_installed=False)
if model:
operation(field, model, cls)
else:
key = (app_label, model_name)
value = (cls, field, operation)
pending_lookups.setdefault(key, []).append(value)
cls._meta.app_cache.pending_lookups.setdefault(key, []).append(value)
def do_pending_lookups(sender, **kwargs):
@ -85,7 +83,7 @@ def do_pending_lookups(sender, **kwargs):
Handle any pending relations to the sending model. Sent from class_prepared.
"""
key = (sender._meta.app_label, sender.__name__)
for cls, field, operation in pending_lookups.pop(key, []):
for cls, field, operation in sender._meta.app_cache.pending_lookups.pop(key, []):
operation(field, sender, cls)
signals.class_prepared.connect(do_pending_lookups)
@ -941,6 +939,8 @@ class ForeignObject(RelatedField):
def resolve_related_fields(self):
if len(self.from_fields) < 1 or len(self.from_fields) != len(self.to_fields):
raise ValueError('Foreign Object from and to fields must be the same non-zero length')
if isinstance(self.rel.to, six.string_types):
            raise ValueError('Related model %r cannot be resolved' % self.rel.to)
related_fields = []
for index in range(len(self.from_fields)):
from_field_name = self.from_fields[index]
@ -1281,6 +1281,9 @@ class ForeignKey(ForeignObject):
return IntegerField().db_type(connection=connection)
return rel_field.db_type(connection=connection)
def db_parameters(self, connection):
return {"type": self.db_type(connection), "check": []}
class OneToOneField(ForeignKey):
"""
@ -1351,6 +1354,7 @@ def create_many_to_many_intermediary_model(field, klass):
'unique_together': (from_, to),
'verbose_name': '%(from)s-%(to)s relationship' % {'from': from_, 'to': to},
'verbose_name_plural': '%(from)s-%(to)s relationships' % {'from': from_, 'to': to},
'app_cache': field.model._meta.app_cache,
})
# Construct and return the new class.
return type(str(name), (models.Model,), {
@ -1561,3 +1565,11 @@ class ManyToManyField(RelatedField):
initial = initial()
defaults['initial'] = [i._get_pk_val() for i in initial]
return super(ManyToManyField, self).formfield(**defaults)
def db_type(self, connection):
# A ManyToManyField is not represented by a single column,
# so return None.
return None
def db_parameters(self, connection):
return {"type": None, "check": None}

View File

@ -31,28 +31,30 @@ class UnavailableApp(Exception):
pass
class AppCache(object):
def _initialize():
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
Returns a dictionary to be used as the initial value of the
[shared] state of the app cache.
"""
# Use the Borg pattern to share state between all instances. Details at
# http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/66531.
__shared_state = dict(
return dict(
# Keys of app_store are the model modules for each application.
app_store=ModelDict(),
# Mapping of installed app_labels to model modules for that app.
app_labels={},
app_labels = {},
# Mapping of app_labels to a dictionary of model names to model code.
# May contain apps that are not installed.
app_models=ModelDict(),
# Mapping of app_labels to errors raised when trying to import the app.
app_errors={},
app_errors = {},
# Pending lookups for lazy relations
pending_lookups = {},
# -- Everything below here is only used when populating the cache --
loads_installed = True,
loaded=False,
handled=set(),
postponed=[],
@ -61,10 +63,27 @@ class AppCache(object):
available_apps=None,
)
class BaseAppCache(object):
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
This provides the base (non-Borg) AppCache class - the AppCache
subclass adds borg-like behaviour for the few cases where it's needed,
and adds the code that auto-loads from INSTALLED_APPS.
"""
def __init__(self):
self.__dict__ = self.__shared_state
self.__dict__ = _initialize()
# This stops _populate loading from INSTALLED_APPS and ignores the
# only_installed arguments to get_model[s]
self.loads_installed = False
def _populate(self):
"""
Stub method - this base class does no auto-loading.
"""
"""
Fill in all the cache information. This method is threadsafe, in the
sense that every caller will see the same state upon return, and if the
@ -72,6 +91,9 @@ class AppCache(object):
"""
if self.loaded:
return
if not self.loads_installed:
self.loaded = True
return
# Note that we want to use the import lock here - the app loading is
# in many cases initiated implicitly by importing, and thus it is
# possible to end up in deadlock when one thread initiates loading
@ -233,12 +255,15 @@ class AppCache(object):
By default, models that aren't part of installed apps will *not*
be included in the list of models. However, if you specify
only_installed=False, they will be.
only_installed=False, they will be. If you're using a non-default
AppCache, this argument does nothing - all models will be included.
By default, models that have been swapped out will *not* be
included in the list of models. However, if you specify
include_swapped, they will be.
"""
if not self.loads_installed:
only_installed = False
cache_key = (app_mod, include_auto_created, include_deferred, only_installed, include_swapped)
model_list = None
try:
@ -287,6 +312,8 @@ class AppCache(object):
Raises UnavailableApp when set_available_apps() in in effect and
doesn't include app_label.
"""
if not self.loads_installed:
only_installed = False
if seed_cache:
self._populate()
if only_installed and app_label not in self.app_labels:
@ -332,8 +359,24 @@ class AppCache(object):
def unset_available_apps(self):
self.available_apps = None
class AppCache(BaseAppCache):
"""
A cache that stores installed applications and their models. Used to
provide reverse-relations and for app introspection (e.g. admin).
Borg version of the BaseAppCache class.
"""
__shared_state = _initialize()
def __init__(self):
self.__dict__ = self.__shared_state
cache = AppCache()
# These methods were always module level, so are kept that way for backwards
# compatibility.
get_apps = cache.get_apps

View File

@ -9,7 +9,7 @@ from django.conf import settings
from django.db.models.fields.related import ManyToManyRel
from django.db.models.fields import AutoField, FieldDoesNotExist
from django.db.models.fields.proxy import OrderWrt
from django.db.models.loading import get_models, app_cache_ready
from django.db.models.loading import app_cache_ready, cache
from django.utils import six
from django.utils.functional import cached_property
from django.utils.encoding import force_text, smart_text, python_2_unicode_compatible
@ -22,8 +22,7 @@ DEFAULT_NAMES = ('verbose_name', 'verbose_name_plural', 'db_table', 'ordering',
'unique_together', 'permissions', 'get_latest_by',
'order_with_respect_to', 'app_label', 'db_tablespace',
'abstract', 'managed', 'proxy', 'swappable', 'auto_created',
'index_together', 'default_permissions')
'index_together', 'app_cache', 'default_permissions')
@python_2_unicode_compatible
class Options(object):
@ -71,6 +70,9 @@ class Options(object):
# from *other* models. Needed for some admin checks. Internal use only.
self.related_fkey_lookups = []
# A custom AppCache to use, if you're making a separate model set.
self.app_cache = cache
def contribute_to_class(self, cls, name):
from django.db import connection
from django.db.backends.util import truncate_name
@ -83,6 +85,10 @@ class Options(object):
self.model_name = self.object_name.lower()
self.verbose_name = get_verbose_name(self.object_name)
# Store the original user-defined values for each option,
# for use when serializing the model definition
self.original_attrs = {}
# Next, apply any overridden values from 'class Meta'.
if self.meta:
meta_attrs = self.meta.__dict__.copy()
@ -95,8 +101,10 @@ class Options(object):
for attr_name in DEFAULT_NAMES:
if attr_name in meta_attrs:
setattr(self, attr_name, meta_attrs.pop(attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
elif hasattr(self.meta, attr_name):
setattr(self, attr_name, getattr(self.meta, attr_name))
self.original_attrs[attr_name] = getattr(self, attr_name)
# unique_together can be either a tuple of tuples, or a single
# tuple of two strings. Normalize it to a tuple of tuples, so that
@ -487,7 +495,7 @@ class Options(object):
cache[obj] = model
# Collect also objects which are in relation to some proxy child/parent of self.
proxy_cache = cache.copy()
for klass in get_models(include_auto_created=True, only_installed=False):
for klass in self.app_cache.get_models(include_auto_created=True, only_installed=False):
if not klass._meta.swapped:
for f in klass._meta.local_fields:
if f.rel and not isinstance(f.rel.to, six.string_types) and f.generate_reverse_relation:
@ -530,7 +538,7 @@ class Options(object):
cache[obj] = parent
else:
cache[obj] = model
for klass in get_models(only_installed=False):
for klass in self.app_cache.get_models(only_installed=False):
if not klass._meta.swapped:
for f in klass._meta.local_many_to_many:
if (f.rel

View File

@ -12,7 +12,9 @@ post_save = Signal(providing_args=["instance", "raw", "created", "using", "updat
pre_delete = Signal(providing_args=["instance", "using"], use_caching=True)
post_delete = Signal(providing_args=["instance", "using"], use_caching=True)
pre_syncdb = Signal(providing_args=["app", "create_models", "verbosity", "interactive", "db"])
post_syncdb = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive", "db"])
pre_migrate = Signal(providing_args=["app", "create_models", "verbosity", "interactive", "db"])
pre_syncdb = pre_migrate
post_migrate = Signal(providing_args=["class", "app", "created_models", "verbosity", "interactive", "db"])
post_syncdb = post_migrate
m2m_changed = Signal(providing_args=["action", "instance", "reverse", "model", "pk_set", "using"], use_caching=True)

View File

@ -262,10 +262,13 @@ class ConnectionRouter(object):
return allow
return obj1._state.db == obj2._state.db
def allow_syncdb(self, db, model):
def allow_migrate(self, db, model):
for router in self.routers:
try:
method = router.allow_syncdb
try:
method = router.allow_migrate
except AttributeError:
method = router.allow_syncdb
except AttributeError:
# If the router doesn't have a method, skip to the next one.
pass

View File

@ -718,7 +718,7 @@ class TransactionTestCase(SimpleTestCase):
"""Performs any pre-test setup. This includes:
* If the class has an 'available_apps' attribute, restricting the app
cache to these applications, then firing post_syncdb -- it must run
cache to these applications, then firing post_migrate -- it must run
with the correct set of applications for the test case.
* If the class has a 'fixtures' attribute, installing these fixtures.
"""
@ -726,8 +726,7 @@ class TransactionTestCase(SimpleTestCase):
if self.available_apps is not None:
cache.set_available_apps(self.available_apps)
for db_name in self._databases_names(include_mirrors=False):
flush.Command.emit_post_syncdb(
verbosity=0, interactive=False, database=db_name)
flush.Command.emit_post_migrate(verbosity=0, interactive=False, database=db_name)
try:
self._fixture_setup()
except Exception:
@ -772,7 +771,7 @@ class TransactionTestCase(SimpleTestCase):
"""Performs any post-test things. This includes:
* Flushing the contents of the database, to leave a clean slate. If
the class has an 'available_apps' attribute, post_syncdb isn't fired.
the class has an 'available_apps' attribute, post_migrate isn't fired.
* Force-closing the connection, so the next test gets a clean cursor.
"""
try:
@ -790,14 +789,14 @@ class TransactionTestCase(SimpleTestCase):
cache.unset_available_apps()
def _fixture_teardown(self):
# Allow TRUNCATE ... CASCADE and don't emit the post_syncdb signal
# Allow TRUNCATE ... CASCADE and don't emit the post_migrate signal
# when flushing only a subset of the apps
for db_name in self._databases_names(include_mirrors=False):
call_command('flush', verbosity=0, interactive=False,
database=db_name, skip_validation=True,
reset_sequences=False,
allow_cascade=self.available_apps is not None,
inhibit_post_syncdb=self.available_apps is not None)
inhibit_post_migrate=self.available_apps is not None)
def assertQuerysetEqual(self, qs, values, transform=repr, ordered=True):
items = six.moves.map(transform, qs)

View File

@ -1,5 +1,6 @@
import copy
import warnings
from collections import OrderedDict
from django.utils import six
class MergeDict(object):
@ -236,6 +237,36 @@ class SortedDict(dict):
super(SortedDict, self).clear()
self.keyOrder = []
class OrderedSet(object):
"""
A set which keeps the ordering of the inserted items.
Currently backs onto OrderedDict.
"""
def __init__(self, iterable=None):
self.dict = OrderedDict(((x, None) for x in iterable) if iterable else [])
def add(self, item):
self.dict[item] = None
def remove(self, item):
del self.dict[item]
def discard(self, item):
try:
self.remove(item)
except KeyError:
pass
def __iter__(self):
return iter(self.dict.keys())
def __contains__(self, item):
return item in self.dict
def __nonzero__(self):
return bool(self.dict)
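A usage sketch:

s = OrderedSet(["b", "a", "b"])
s.add("c")
list(s)   # ["b", "a", "c"] - insertion order kept, duplicates ignored
"a" in s  # True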
class MultiValueDictKeyError(KeyError):
pass

View File

@ -148,6 +148,11 @@ def lazy(func, *resultclasses):
else:
return func(*self.__args, **self.__kw)
def __ne__(self, other):
if isinstance(other, Promise):
other = other.__cast()
return self.__cast() != other
def __eq__(self, other):
if isinstance(other, Promise):
other = other.__cast()

View File

@ -86,6 +86,10 @@ PALETTES = {
'HTTP_BAD_REQUEST': {},
'HTTP_NOT_FOUND': {},
'HTTP_SERVER_ERROR': {},
'MIGRATE_HEADING': {},
'MIGRATE_LABEL': {},
'MIGRATE_SUCCESS': {},
'MIGRATE_FAILURE': {},
},
DARK_PALETTE: {
'ERROR': { 'fg': 'red', 'opts': ('bold',) },
@ -101,6 +105,10 @@ PALETTES = {
'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
'HTTP_NOT_FOUND': { 'fg': 'yellow' },
'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
'MIGRATE_HEADING': { 'fg': 'cyan', 'opts': ('bold',) },
'MIGRATE_LABEL': { 'opts': ('bold',) },
'MIGRATE_SUCCESS': { 'fg': 'green', 'opts': ('bold',) },
'MIGRATE_FAILURE': { 'fg': 'red', 'opts': ('bold',) },
},
LIGHT_PALETTE: {
'ERROR': { 'fg': 'red', 'opts': ('bold',) },
@ -116,6 +124,10 @@ PALETTES = {
'HTTP_BAD_REQUEST': { 'fg': 'red', 'opts': ('bold',) },
'HTTP_NOT_FOUND': { 'fg': 'red' },
'HTTP_SERVER_ERROR': { 'fg': 'magenta', 'opts': ('bold',) },
'MIGRATE_HEADING': { 'fg': 'cyan', 'opts': ('bold',) },
'MIGRATE_LABEL': { 'opts': ('bold',) },
'MIGRATE_SUCCESS': { 'fg': 'green', 'opts': ('bold',) },
'MIGRATE_FAILURE': { 'fg': 'red', 'opts': ('bold',) },
}
}
DEFAULT_PALETTE = DARK_PALETTE

View File

@ -81,10 +81,10 @@ access to your precious data on a model by model basis.
Install the core Django tables
==============================
Next, run the :djadmin:`syncdb` command to install any extra needed database
Next, run the :djadmin:`migrate` command to install any extra needed database
records such as admin permissions and content types::
python manage.py syncdb
python manage.py migrate
Test and tweak
==============

View File

@ -71,6 +71,9 @@ manipulating the data of your Web application. Learn more about it below:
:doc:`Instance methods <ref/models/instances>` |
:doc:`Accessing related objects <ref/models/relations>`
* **Migrations:**
:doc:`Introduction to Migrations<topics/migrations>`
* **Advanced:**
:doc:`Managers <topics/db/managers>` |
:doc:`Raw SQL <topics/db/sql>` |

View File

@ -165,9 +165,9 @@ __ http://sphinx.pocoo.org/markup/desc.html
* ``django-admin`` commands::
.. django-admin:: syncdb
.. django-admin:: migrate
To link, use ``:djadmin:`syncdb```.
To link, use ``:djadmin:`migrate```.
* ``django-admin`` command-line options::

View File

@ -419,6 +419,17 @@ these changes.
* ``django.utils.unittest`` will be removed.
* The ``syncdb`` command will be removed.
* ``django.db.models.signals.pre_syncdb`` and
``django.db.models.signals.post_syncdb`` will be removed, and
``django.db.models.signals.pre_migrate`` and
``django.db.models.signals.post_migrate`` will lose their
``create_models`` and ``created_models`` arguments.
* ``allow_syncdb`` on database routers will no longer automatically become
``allow_migrate``.
* If models are organized in a package, Django will no longer look for
:ref:`initial SQL data<initial-sql>` in ``myapp/models/sql/``. Move your
custom SQL files to ``myapp/sql/``.

View File

@ -53,10 +53,11 @@ automatically:
.. code-block:: bash
manage.py syncdb
manage.py migrate
The :djadmin:`syncdb` command looks at all your available models and creates
tables in your database for whichever tables don't already exist.
The :djadmin:`migrate` command looks at all your available models and creates
tables in your database for whichever tables don't already exist, as well as
optionally providing :doc:`much richer schema control </topics/migrations>`.
Enjoy the free API
==================

View File

@ -155,7 +155,7 @@ this. For a small app like polls, this process isn't too difficult.
url(r'^polls/', include('polls.urls')),
3. Run `python manage.py syncdb` to create the polls models.
3. Run `python manage.py migrate` to create the polls models.
4. Start the development server and visit http://127.0.0.1:8000/admin/
to create a poll (you'll need the Admin app enabled).

View File

@ -45,8 +45,7 @@ Outputs to standard output all data in the database associated with the named
application(s).
.TP
.BI flush
Returns the database to the state it was in immediately after syncdb was
executed.
Removes all data from the database and then re-installs any initial data.
.TP
.B inspectdb
Introspects the database tables in the database specified in settings.py and outputs a Django
@ -114,9 +113,9 @@ the current directory or the optional destination.
Creates a Django project directory structure for the given project name
in the current directory or the optional destination.
.TP
.BI syncdb
Creates the database tables for all apps in INSTALLED_APPS whose tables
haven't already been created.
.BI migrate
Runs migrations for apps containing migrations, and just creates missing tables
for apps without migrations.
.TP
.BI "test [" "\-\-verbosity" "] [" "\-\-failfast" "] [" "appname ..." "]"
Runs the test suite for the specified applications, or the entire project if

View File

@ -31,7 +31,7 @@ To get started using the ``comments`` app, follow these steps:
#. Install the comments framework by adding ``'django.contrib.comments'`` to
:setting:`INSTALLED_APPS`.
#. Run ``manage.py syncdb`` so that Django will create the comment tables.
#. Run ``manage.py migrate`` so that Django will create the comment tables.
#. Add the comment app's URLs to your project's ``urls.py``:

View File

@ -86,7 +86,7 @@ The ``ContentType`` model
Let's look at an example to see how this works. If you already have
the :mod:`~django.contrib.contenttypes` application installed, and then add
:mod:`the sites application <django.contrib.sites>` to your
:setting:`INSTALLED_APPS` setting and run ``manage.py syncdb`` to install it,
:setting:`INSTALLED_APPS` setting and run ``manage.py migrate`` to install it,
the model :class:`django.contrib.sites.models.Site` will be installed into
your database. Along with it a new instance of
:class:`~django.contrib.contenttypes.models.ContentType` will be

View File

@ -55,14 +55,14 @@ or:
3. Add ``'django.contrib.flatpages.middleware.FlatpageFallbackMiddleware'``
to your :setting:`MIDDLEWARE_CLASSES` setting.
4. Run the command :djadmin:`manage.py syncdb <syncdb>`.
4. Run the command :djadmin:`manage.py migrate <migrate>`.
.. currentmodule:: django.contrib.flatpages.middleware
How it works
============
``manage.py syncdb`` creates two tables in your database: ``django_flatpage``
``manage.py migrate`` creates two tables in your database: ``django_flatpage``
and ``django_flatpage_sites``. ``django_flatpage`` is a simple lookup table
that simply maps a URL to a title and bunch of text content.
``django_flatpage_sites`` associates a flatpage with a site.

View File

@ -15,7 +15,7 @@ those packages have.
For most of these add-ons -- specifically, the add-ons that include either
models or template tags -- you'll need to add the package name (e.g.,
``'django.contrib.admin'``) to your :setting:`INSTALLED_APPS` setting and
re-run ``manage.py syncdb``.
re-run ``manage.py migrate``.
.. _"batteries included" philosophy: http://docs.python.org/tutorial/stdlib.html#batteries-included

View File

@ -18,12 +18,12 @@ To install the redirects app, follow these steps:
2. Add ``'django.contrib.redirects'`` to your :setting:`INSTALLED_APPS` setting.
3. Add ``'django.contrib.redirects.middleware.RedirectFallbackMiddleware'``
to your :setting:`MIDDLEWARE_CLASSES` setting.
4. Run the command :djadmin:`manage.py syncdb <syncdb>`.
4. Run the command :djadmin:`manage.py migrate <migrate>`.
How it works
============
``manage.py syncdb`` creates a ``django_redirect`` table in your database. This
``manage.py migrate`` creates a ``django_redirect`` table in your database. This
is a simple lookup table with ``site_id``, ``old_path`` and ``new_path`` fields.
The ``RedirectFallbackMiddleware`` does all of the work. Each time any Django

View File

@ -264,10 +264,10 @@ To enable the sites framework, follow these steps:
SITE_ID = 1
3. Run :djadmin:`syncdb`.
3. Run :djadmin:`migrate`.
``django.contrib.sites`` registers a
:data:`~django.db.models.signals.post_syncdb` signal handler which creates a
:data:`~django.db.models.signals.post_migrate` signal handler which creates a
default site named ``example.com`` with the domain ``example.com``. This site
will also be created after Django creates the test database. To set the
correct name and domain for your project, you can use an :doc:`initial data

View File

@ -220,7 +220,7 @@ If you upgrade an existing project to MySQL 5.5.5 and subsequently add some
tables, ensure that your tables are using the same storage engine (i.e. MyISAM
vs. InnoDB). Specifically, if tables that have a ``ForeignKey`` between them
use different storage engines, you may see an error like the following when
running ``syncdb``::
running ``migrate``::
_mysql_exceptions.OperationalError: (
1005, "Can't create table '\\db_name\\.#sql-4a8_ab' (errno: 150)"
@ -659,7 +659,7 @@ required.
.. _`Oracle Database Server`: http://www.oracle.com/
.. _`cx_Oracle`: http://cx-oracle.sourceforge.net/
In order for the ``python manage.py syncdb`` command to work, your Oracle
In order for the ``python manage.py migrate`` command to work, your Oracle
database user must have privileges to run the following commands:
* CREATE TABLE
@ -748,7 +748,7 @@ Oracle imposes a name length limit of 30 characters. To accommodate this, the
backend truncates database identifiers to fit, replacing the final four
characters of the truncated name with a repeatable MD5 hash value.
When running syncdb, an ``ORA-06552`` error may be encountered if
When running ``migrate``, an ``ORA-06552`` error may be encountered if
certain Oracle keywords are used as the name of a model field or the
value of a ``db_column`` option. Django quotes all identifiers used
in queries to prevent most such problems, but this error can still

View File

@ -242,10 +242,8 @@ flush
.. django-admin:: flush
Returns the database to the state it was in immediately after :djadmin:`syncdb`
was executed. This means that all data will be removed from the database, any
post-synchronization handlers will be re-executed, and the ``initial_data``
fixture will be re-installed.
Removes all data from the database, re-executes any post-synchronization
handlers, and reinstalls any initial data fixtures.
The :djadminopt:`--noinput` option may be provided to suppress all user
prompts.
@ -568,6 +566,52 @@ Use the ``--keep-pot`` option to prevent django from deleting the temporary
.pot file it generates before creating the .po file. This is useful for
debugging errors which may prevent the final language files from being created.
makemigrations [<appname>]
--------------------------
.. django-admin:: makemigrations
.. versionadded:: 1.7
Creates new migrations based on changes detected in your models.
Migrations, their relationship with apps and more are covered in depth in
:doc:`the migrations documentation</topics/migrations>`.
Providing one or more app names as arguments will limit the migrations created
to the app(s) specified and any dependencies needed (the table at the other end
of a ``ForeignKey``, for example).
.. django-admin-option:: --empty
The ``--empty`` option will cause ``makemigrations`` to output an empty
migration for the specified apps, for manual editing. This option is only
for advanced users and should not be used unless you are familiar with
the migration format, migration operations, and the dependencies between
your migrations.
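Example usage (``yourappname`` is a placeholder)::

    django-admin.py makemigrations --empty yourappname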
migrate [<appname> [<migrationname>]]
-------------------------------------
.. django-admin:: migrate
.. versionadded:: 1.7
Synchronizes the database state with the current set of models and migrations.
Migrations, their relationship with apps and more are covered in depth in
:doc:`the migrations documentation</topics/migrations>`.
The behavior of this command changes depending on the arguments provided:
* No arguments: All migrated apps have all of their migrations run,
and all unmigrated apps are synchronized with the database.
* ``<appname>``: The specified app has its migrations run, up to the most
recent migration. This may involve running other apps' migrations too, due
to dependencies.
* ``<appname> <migrationname>``: Brings the database schema to a state where it
would have just run the given migration, but no further - this may involve
unapplying migrations if you have previously migrated past the named
migration. Use the name ``zero`` to unapply all migrations for an app.
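For example, with a hypothetical ``books`` app::

    django-admin.py migrate                # run all migrations, sync unmigrated apps
    django-admin.py migrate books          # migrate books up to its latest migration
    django-admin.py migrate books 0002     # move books to just after migration 0002
    django-admin.py migrate books zero     # unapply all of books' migrations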
runfcgi [options]
-----------------
@ -1102,45 +1146,13 @@ syncdb
.. django-admin:: syncdb
Creates the database tables for all apps in :setting:`INSTALLED_APPS` whose
tables have not already been created.
.. deprecated:: 1.7
Use this command when you've added new applications to your project and want to
install them in the database. This includes any apps shipped with Django that
might be in :setting:`INSTALLED_APPS` by default. When you start a new project,
run this command to install the default apps.
This command has been deprecated in favour of the :djadmin:`migrate`
command, which performs the old behaviour as well as executing
migrations. It is now just an alias to that command.
.. admonition:: Syncdb will not alter existing tables
``syncdb`` will only create tables for models which have not yet been
installed. It will *never* issue ``ALTER TABLE`` statements to match
changes made to a model class after installation. Changes to model classes
and database schemas often involve some form of ambiguity and, in those
cases, Django would have to guess at the correct changes to make. There is
a risk that critical data would be lost in the process.
If you have made changes to a model and wish to alter the database tables
to match, use the ``sql`` command to display the new SQL structure and
compare that to your existing table schema to work out the changes.
If you're installing the ``django.contrib.auth`` application, ``syncdb`` will
give you the option of creating a superuser immediately.
``syncdb`` will also search for and install any fixture named ``initial_data``
with an appropriate extension (e.g. ``json`` or ``xml``). See the
documentation for ``loaddata`` for details on the specification of fixture
data files.
The :djadminopt:`--noinput` option may be provided to suppress all user
prompts.
The :djadminopt:`--database` option can be used to specify the database to
synchronize.
``--no-initial-data``
~~~~~~~~~~~~~~~~~~~~~
Use ``--no-initial-data`` to avoid loading the initial_data fixture.
Alias for :djadmin:`migrate`.
test <app or test identifier>
-----------------------------
@ -1278,7 +1290,7 @@ This command is only available if Django's :doc:`authentication system
Creates a superuser account (a user who has all permissions). This is
useful if you need to create an initial superuser account but did not
do so during ``syncdb``, or if you need to programmatically generate
do so during the first ``migrate``, or if you need to programmatically generate
superuser accounts for your site(s).
When run interactively, this command will prompt for a password for
@ -1362,7 +1374,7 @@ allows for the following options:
Example usage::
django-admin.py syncdb --pythonpath='/home/djangoprojects/myproject'
django-admin.py migrate --pythonpath='/home/djangoprojects/myproject'
Adds the given filesystem path to the Python `import search path`_. If this
isn't provided, ``django-admin.py`` will use the ``PYTHONPATH`` environment
@ -1377,7 +1389,7 @@ setting the Python path for you.
Example usage::
django-admin.py syncdb --settings=mysite.settings
django-admin.py migrate --settings=mysite.settings
Explicitly specifies the settings module to use. The settings module should be
in Python package syntax, e.g. ``mysite.settings``. If this isn't provided,
@ -1391,7 +1403,7 @@ Note that this option is unnecessary in ``manage.py``, because it uses
Example usage::
django-admin.py syncdb --traceback
django-admin.py migrate --traceback
By default, ``django-admin.py`` will show a simple error message whenever an
:class:`~django.core.management.CommandError` occurs, but a full stack trace
@ -1407,7 +1419,7 @@ will also output a full stack trace when a ``CommandError`` is raised.
Example usage::
django-admin.py syncdb --verbosity 2
django-admin.py migrate --verbosity 2
Use ``--verbosity`` to specify the amount of notification and debug information
that ``django-admin.py`` should print to the console.

View File

@ -106,9 +106,9 @@ Django quotes column and table names behind the scenes.
.. attribute:: Options.managed
Defaults to ``True``, meaning Django will create the appropriate database
tables in :djadmin:`syncdb` and remove them as part of a :djadmin:`flush`
management command. That is, Django *manages* the database tables'
lifecycles.
tables in :djadmin:`migrate` or as part of migrations and remove them as
part of a :djadmin:`flush` management command. That is, Django
*manages* the database tables' lifecycles.
If ``False``, no database table creation or deletion operations will be
performed for this model. This is useful if the model represents an existing
@ -192,9 +192,9 @@ Django quotes column and table names behind the scenes.
.. admonition:: Changing order_with_respect_to
``order_with_respect_to`` adds an additional field/database column
named ``_order``, so be sure to handle that as you would any other
change to your models if you add or change ``order_with_respect_to``
after your initial :djadmin:`syncdb`.
named ``_order``, so be sure to make and apply the appropriate
migrations if you add or change ``order_with_respect_to``
after your initial :djadmin:`migrate`.
``ordering``
------------

View File

@ -356,40 +356,36 @@ Management signals
Signals sent by :doc:`django-admin </ref/django-admin>`.
pre_syncdb
----------
pre_migrate
-----------
.. data:: django.db.models.signals.pre_syncdb
.. data:: django.db.models.signals.pre_migrate
:module:
Sent by the :djadmin:`syncdb` command before it starts to install an
Sent by the :djadmin:`migrate` command before it starts to install an
application.
Any handlers that listen to this signal need to be written in a particular
place: a ``management`` module in one of your :setting:`INSTALLED_APPS`. If
handlers are registered anywhere else they may not be loaded by
:djadmin:`syncdb`.
:djadmin:`migrate`.
Arguments sent with this signal:
``sender``
The ``models`` module that was just installed. That is, if
:djadmin:`syncdb` just installed an app called ``"foo.bar.myapp"``,
``sender`` will be the ``foo.bar.myapp.models`` module.
The ``models`` module of the app about to be migrated/synced.
For example, if :djadmin:`migrate` is about to install
an app called ``"foo.bar.myapp"``, ``sender`` will be the
``foo.bar.myapp.models`` module.
``app``
Same as ``sender``.
``create_models``
A list of the model classes from any app which :djadmin:`syncdb` plans to
create.
``verbosity``
Indicates how much information manage.py is printing on screen. See
the :djadminopt:`--verbosity` flag for details.
Functions which listen for :data:`pre_syncdb` should adjust what they
Functions which listen for :data:`pre_migrate` should adjust what they
output to the screen based on the value of this argument.
``interactive``
@ -403,42 +399,55 @@ Arguments sent with this signal:
``db``
The alias of database on which a command will operate.
post_syncdb
-----------
pre_syncdb
----------
.. data:: django.db.models.signals.post_syncdb
.. data:: django.db.models.signals.pre_syncdb
:module:
Sent by the :djadmin:`syncdb` command after it installs an application, and the
.. deprecated:: 1.7
This signal has been renamed to :data:`~django.db.models.signals.pre_migrate`.
Alias of :data:`django.db.models.signals.pre_migrate`. As long as this alias
is present, for backwards compatibility this signal sends one extra argument:
``create_models``
A list of the model classes from any app which :djadmin:`migrate` is
going to create, **only if the app has no migrations**.
post_migrate
------------
.. data:: django.db.models.signals.post_migrate
:module:
Sent by the :djadmin:`migrate` command after it installs an application, and the
:djadmin:`flush` command.
Any handlers that listen to this signal need to be written in a particular
place: a ``management`` module in one of your :setting:`INSTALLED_APPS`. If
handlers are registered anywhere else they may not be loaded by
:djadmin:`syncdb`. It is important that handlers of this signal perform
:djadmin:`migrate`. It is important that handlers of this signal perform
idempotent changes (e.g. no database alterations) as this may cause the
:djadmin:`flush` management command to fail if it also ran during the
:djadmin:`syncdb` command.
:djadmin:`migrate` command.
Arguments sent with this signal:
``sender``
The ``models`` module that was just installed. That is, if
:djadmin:`syncdb` just installed an app called ``"foo.bar.myapp"``,
:djadmin:`migrate` just installed an app called ``"foo.bar.myapp"``,
``sender`` will be the ``foo.bar.myapp.models`` module.
``app``
Same as ``sender``.
``created_models``
A list of the model classes from any app which :djadmin:`syncdb` has
created so far.
``verbosity``
Indicates how much information manage.py is printing on screen. See
the :djadminopt:`--verbosity` flag for details.
Functions which listen for :data:`post_syncdb` should adjust what they
Functions which listen for :data:`post_migrate` should adjust what they
output to the screen based on the value of this argument.
``interactive``
@ -455,14 +464,31 @@ Arguments sent with this signal:
For example, ``yourapp/management/__init__.py`` could be written like::
from django.db.models.signals import post_syncdb
from django.db.models.signals import post_migrate
import yourapp.models
def my_callback(sender, **kwargs):
# Your specific logic here
pass
post_syncdb.connect(my_callback, sender=yourapp.models)
post_migrate.connect(my_callback, sender=yourapp.models)
post_syncdb
-----------
.. data:: django.db.models.signals.post_syncdb
:module:
.. deprecated:: 1.7
This signal has been renamed to :data:`~django.db.models.signals.post_migrate`.
Alias of :data:`django.db.models.signals.post_migrate`. As long as this alias
is present, for backwards compatibility this signal sends one extra argument:
``created_models``
A list of the model classes from any app which :djadmin:`migrate` has
created, **only if the app has no migrations**.
Request/response signals
========================

View File

@ -30,6 +30,61 @@ security support until the release of Django 1.8.
What's new in Django 1.7
========================
Schema migrations
~~~~~~~~~~~~~~~~~
Django now has built-in support for schema migrations. It allows models
to be added, changed, and deleted by creating migration files that represent
the model changes and which can be run on any development, staging or production
database.
Migrations are covered in :doc:`their own documentation</topics/migrations>`,
but a few of the key features are:
* ``syncdb`` has been deprecated and replaced by ``migrate``. Don't worry -
calls to ``syncdb`` will still work as before.
* A new ``makemigrations`` command provides an easy way to autodetect changes
to your models and make migrations for them.
* :data:`~django.db.models.signals.pre_syncdb` and
:data:`~django.db.models.signals.post_syncdb` have been renamed to
:data:`~django.db.models.signals.pre_migrate` and
:data:`~django.db.models.signals.post_migrate` respectively. The
``create_models``/``created_models`` argument has also been deprecated.
* The ``allow_syncdb`` method on database routers is now called ``allow_migrate``,
but still performs the same function. Routers with ``allow_syncdb`` methods
will still work, but that method name is deprecated and you should change
it as soon as possible (nothing more than renaming is required).
New method on Field subclasses
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
To help power both schema migrations and composite keys, the :class:`~django.db.models.Field` API now
has a new required method: ``deconstruct()``.
This method takes no arguments, and returns a tuple of four items:
* ``name``: The field's attribute name on its parent model, or ``None`` if it is not part of a model.
* ``path``: A dotted Python path to the class of this field, including the class name.
* ``args``: Positional arguments, as a list.
* ``kwargs``: Keyword arguments, as a dict.
These four values allow any field to be serialized into a file, as well as
allowing the field to be copied safely, both of which are essential parts of
these new features.
This change should not affect you unless you write custom Field subclasses;
if you do, you may need to reimplement the ``deconstruct()`` method if your
subclass changes the method signature of ``__init__`` in any way. If your
field just inherits from a built-in Django field and doesn't override ``__init__``,
no changes are necessary.
If you do need to override ``deconstruct()``, a good place to start is the
built-in Django fields (``django/db/models/fields/__init__.py``) as several
fields, including ``DecimalField`` and ``DateField``, override it and show how
to call the method on the superclass and simply add or remove extra arguments.
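As an illustrative sketch (``HandField`` and its hard-coded ``max_length``
are hypothetical, not part of Django), a subclass that forces an ``__init__``
argument might deconstruct like this::

    from django.db import models

    class HandField(models.CharField):
        # A made-up field that always stores exactly 104 characters.
        def __init__(self, *args, **kwargs):
            kwargs['max_length'] = 104
            super(HandField, self).__init__(*args, **kwargs)

        def deconstruct(self):
            name, path, args, kwargs = super(HandField, self).deconstruct()
            # max_length is set unconditionally in __init__, so there is
            # no need to serialize it into the migration file.
            del kwargs['max_length']
            return name, path, args, kwargs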
Calling custom ``QuerySet`` methods from the ``Manager``
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
@ -170,6 +225,18 @@ Backwards incompatible changes in 1.7
deprecation timeline for a given feature, its removal may appear as a
backwards incompatible change.
allow_syncdb/allow_migrate
~~~~~~~~~~~~~~~~~~~~~~~~~~
While Django will still look at ``allow_syncdb`` methods even though they
should be renamed to ``allow_migrate``, there is a subtle difference in which
models get passed to these methods.
For apps with migrations, ``allow_migrate`` will now get passed
:ref:`historical models <historical-models>`, which are special versioned models
without custom attributes, methods or managers. Make sure your ``allow_migrate``
methods are only referring to fields or other items in ``model._meta``.
Miscellaneous
~~~~~~~~~~~~~
@ -249,3 +316,10 @@ work until Django 1.9.
it will go through a regular deprecation path. This attribute was mostly used
by methods that bypassed ``ModelAdmin.get_fieldsets()`` but this was considered
a bug and has been addressed.
``syncdb``
~~~~~~~~~~
The ``syncdb`` command has been deprecated in favour of the new ``migrate``
command. ``migrate`` accepts the same arguments that ``syncdb`` did, plus a few
more, so it's safe to simply change the name of the command you're calling and
nothing else.

View File

@ -275,7 +275,7 @@ can or cannot do with Task instances, specific to your application::
)
The only thing this does is create those extra permissions when you run
:djadmin:`manage.py syncdb <syncdb>`. Your code is in charge of checking the
:djadmin:`manage.py migrate <migrate>`. Your code is in charge of checking the
value of these permissions when a user is trying to access the functionality
provided by the application (viewing tasks, changing the status of tasks,
closing tasks.) Continuing the above example, the following checks if a user may
@ -378,14 +378,12 @@ use as your User model.
Changing :setting:`AUTH_USER_MODEL` has a big effect on your database
structure. It changes the tables that are available, and it will affect the
construction of foreign keys and many-to-many relationships. If you intend
to set :setting:`AUTH_USER_MODEL`, you should set it before running
``manage.py syncdb`` for the first time.
to set :setting:`AUTH_USER_MODEL`, you should set it before creating
any migrations or running ``manage.py migrate`` for the first time.
If you have an existing project and you want to migrate to using a custom
User model, you may need to look into using a migration tool like South_
to ease the transition.
.. _South: http://south.aeracode.org
Changing this setting after you have tables created is not supported
by :djadmin:`makemigrations` and will result in you having to manually
write a set of migrations to fix your schema.
Referencing the User model
--------------------------

View File

@ -65,7 +65,7 @@ interactively <auth-admin>`.
Creating superusers
-------------------
:djadmin:`manage.py syncdb <syncdb>` prompts you to create a superuser the
:djadmin:`manage.py migrate <migrate>` prompts you to create a superuser the
first time you run it with ``'django.contrib.auth'`` in your
:setting:`INSTALLED_APPS`. If you need to create a superuser at a later date,
you can use a command line utility::
@ -190,13 +190,13 @@ setting, it will ensure that three default permissions -- add, change and
delete -- are created for each Django model defined in one of your installed
applications.
These permissions will be created when you run :djadmin:`manage.py syncdb
<syncdb>`; the first time you run ``syncdb`` after adding
These permissions will be created when you run :djadmin:`manage.py migrate
<migrate>`; the first time you run ``migrate`` after adding
``django.contrib.auth`` to :setting:`INSTALLED_APPS`, the default permissions
will be created for all previously-installed models, as well as for any new
models being installed at that time. Afterward, it will create default
permissions for new models each time you run :djadmin:`manage.py syncdb
<syncdb>`.
permissions for new models each time you run :djadmin:`manage.py migrate
<migrate>`.
Assuming you have an application with an
:attr:`~django.db.models.Options.app_label` ``foo`` and a model named ``Bar``,

View File

@ -67,7 +67,7 @@ and two items in your :setting:`MIDDLEWARE_CLASSES` setting:
2. :class:`~django.contrib.auth.middleware.AuthenticationMiddleware` associates
users with requests using sessions.
With these settings in place, running the command ``manage.py syncdb`` creates
With these settings in place, running the command ``manage.py migrate`` creates
the necessary database tables for auth related models, creates permissions for
any models defined in your installed apps, and prompts you to create
a superuser account the first time you run it.

View File

@ -219,8 +219,8 @@ operations to ``cache_slave``, and all write operations to
return 'cache_master'
return None
def allow_syncdb(self, db, model):
"Only synchronize the cache model on master"
def allow_migrate(self, db, model):
"Only install the cache model on master"
if model._meta.app_label in ('django_cache',):
return db == 'cache_master'
return None

View File

@ -77,7 +77,8 @@ application by the :djadmin:`manage.py startapp <startapp>` script),
)
When you add new apps to :setting:`INSTALLED_APPS`, be sure to run
:djadmin:`manage.py syncdb <syncdb>`.
:djadmin:`manage.py migrate <migrate>`, optionally making migrations
for them first with :djadmin:`manage.py makemigrations <makemigrations>`.
Fields
======
@ -956,7 +957,7 @@ The reverse name of the ``common.ChildA.m2m`` field will be
reverse name of the ``rare.ChildB.m2m`` field will be ``rare_childb_related``.
It is up to you how you use the ``'%(class)s'`` and ``'%(app_label)s'`` portions
to construct your related name, but if you forget to use them, Django will raise
errors when you validate your models (or run :djadmin:`syncdb`).
errors when you validate your models (or run :djadmin:`migrate`).
If you don't specify a :attr:`~django.db.models.ForeignKey.related_name`
attribute for a field in an abstract base class, the default reverse name will
@ -1049,7 +1050,7 @@ are putting those types of relations on a subclass of another model,
you **must** specify the
:attr:`~django.db.models.ForeignKey.related_name` attribute on each
such field. If you forget, Django will raise an error when you run
:djadmin:`validate` or :djadmin:`syncdb`.
:djadmin:`validate` or :djadmin:`migrate`.
For example, using the above ``Place`` class again, let's create another
subclass with a :class:`~django.db.models.ManyToManyField`::

View File

@ -155,14 +155,23 @@ A database Router is a class that provides up to four methods:
used by foreign key and many to many operations to determine if a
relation should be allowed between two objects.
.. method:: allow_syncdb(db, model)
.. method:: allow_migrate(db, model)
Determine if the ``model`` should be synchronized onto the
Determine if the ``model`` should have tables/indexes created in the
database with alias ``db``. Return True if the model should be
synchronized, False if it should not be synchronized, or None if
migrated, False if it should not be migrated, or None if
the router has no opinion. This method can be used to determine
the availability of a model on a given database.
Note that migrations will just silently not perform any operations
on a model for which this returns ``False``. This may result in broken
ForeignKeys, extra tables or missing tables if you change it once you
have applied some migrations.
The value passed for ``model`` may be a
:ref:`historical model <historical-models>`, and thus not have any
custom attributes, methods or managers. You should only rely on ``_meta``.
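For example, a minimal sketch of a router that decides purely from ``_meta``
(the app label and database alias here are illustrative)::

    class LegacyRouter(object):
        def allow_migrate(self, db, model):
            # Only inspect model._meta; the model may be a historical
            # version without its custom managers or methods.
            if model._meta.app_label == 'legacy':
                return db == 'legacy_db'
            return None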
A router doesn't have to provide *all* these methods -- it may omit one
or more of them. If one of the methods is omitted, Django will skip
that router when performing the relevant check.
@ -288,7 +297,7 @@ send queries for the ``auth`` app to ``auth_db``::
return True
return None
def allow_syncdb(self, db, model):
def allow_migrate(self, db, model):
"""
Make sure the auth app only appears in the 'auth_db'
database.
@ -328,7 +337,7 @@ from::
return True
return None
def allow_syncdb(self, db, model):
def allow_migrate(self, db, model):
"""
All non-auth models end up in this pool.
"""
be queried in the order they are listed in the
result, decisions concerning the models in ``auth`` are processed
before any other decision is made. If the :setting:`DATABASE_ROUTERS`
setting listed the two routers in the other order,
``MasterSlaveRouter.allow_syncdb()`` would be processed first. The
``MasterSlaveRouter.allow_migrate()`` would be processed first. The
catch-all nature of the ``MasterSlaveRouter`` implementation would mean
that all models would be available on all databases.

View File

@ -44,7 +44,7 @@ Using database-backed sessions
If you want to use a database-backed session, you need to add
``'django.contrib.sessions'`` to your :setting:`INSTALLED_APPS` setting.
Once you have configured your installation, run ``manage.py syncdb``
Once you have configured your installation, run ``manage.py migrate``
to install the single database table that stores session data.
.. _cached-sessions-backend:

View File

@ -12,6 +12,7 @@ Introductions to all the key parts of Django you'll need to know:
forms/index
templates
class-based-views/index
migrations
files
testing/index
auth/index

View File

@ -121,14 +121,12 @@ database bindings are installed.
* If you're using an unofficial 3rd party backend, please consult the
documentation provided for any additional requirements.
If you plan to use Django's ``manage.py syncdb`` command to automatically
If you plan to use Django's ``manage.py migrate`` command to automatically
create database tables for your models (after first installing Django and
creating a project), you'll need to ensure that Django has permission to create
and alter tables in the database you're using; if you plan to manually create
the tables, you can simply grant Django ``SELECT``, ``INSERT``, ``UPDATE`` and
``DELETE`` permissions. On some databases, Django will need ``ALTER TABLE``
privileges during ``syncdb`` but won't issue ``ALTER TABLE`` statements on a
table once ``syncdb`` has created it. After creating a database user with these
``DELETE`` permissions. After creating a database user with these
permissions, you'll specify the details in your project's settings file,
see :setting:`DATABASES` for details.

297
docs/topics/migrations.txt Normal file
View File

@ -0,0 +1,297 @@
==========
Migrations
==========
.. module:: django.db.migrations
:synopsis: Schema migration support for Django models
.. versionadded:: 1.7
Migrations are Django's way of propagating changes you make to your models
(adding a field, deleting a model, etc.) into your database schema. They're
designed to be mostly automatic, but you'll need to know when to make
migrations, when to run them, and the common problems you might run into.
A Brief History
---------------
Prior to version 1.7, Django only supported adding new models to the
database; it was not possible to alter or remove existing models via the
``syncdb`` command (the predecessor to ``migrate``).
Third-party tools, most notably `South <http://south.aeracode.org>`_,
provided support for these additional types of change, but it was considered
important enough that support was brought into core Django.
Two Commands
------------
There are two commands which you will use to interact with migrations
and Django's handling of database schema:
* :djadmin:`migrate`, which is responsible for applying migrations, as well as
unapplying and listing their status.
* :djadmin:`makemigrations`, which is responsible for creating new migrations
based on the changes you have made to your models.
It's worth noting that migrations are created and run on a per-app basis.
In particular, it's possible to have apps that *do not use migrations* (these
are referred to as "unmigrated" apps) - these apps will instead mimic the
legacy behaviour of just adding new models.
You should think of migrations as a version control system for your database
schema. ``makemigrations`` is responsible for packaging up your model changes
into individual migration files - analogous to commits - and ``migrate`` is
responsible for applying those to your database.
The migration files for each app live in a "migrations" directory inside
of that app, and are designed to be committed to, and distributed as part
of, its codebase. You should be making them once on your development machine
and then running the same migrations on your colleagues' machines, your
staging machines, and eventually your production machines.
Migrations will run the same way every time and produce consistent results,
meaning that what you see in development and staging is exactly what will
happen in production - no unexpected surprises.
Backend Support
---------------
Migrations are supported on all backends that Django ships with, as well
as any third-party backends if they have programmed in support for schema
alteration (done via the ``SchemaEditor`` class).
However, some databases are more capable than others when it comes to
schema migrations; some of the caveats are covered below.
PostgreSQL
~~~~~~~~~~
PostgreSQL is the most capable of all the databases here in terms of schema
support; the only caveat is that adding columns with default values will
lock a table for a time proportional to the number of rows in it.
For this reason, it's recommended you always create new columns with
``null=True``, as this way they will be added immediately.
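For example (model and field names are illustrative)::

    from django.db import models

    class Book(models.Model):
        title = models.CharField(max_length=200)
        # Added after the initial migration; null=True lets the new
        # column be added immediately, without the table lock that a
        # column with a default value would incur.
        subtitle = models.CharField(max_length=200, null=True)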
MySQL
~~~~~
MySQL lacks support for transactions around schema alteration operations,
meaning that if a migration fails to apply you will have to manually unpick
the changes in order to try again (it's impossible to roll back to an
earlier point).
In addition, MySQL will lock tables for almost every schema operation and
generally takes a time proportional to the number of rows in the table to
add or remove columns. On slower hardware this can be worse than a minute
per million rows - adding a few columns to a table with just a few million
rows could lock your site up for over ten minutes.
Finally, MySQL has reasonably small limits on name lengths for columns, tables
and indexes, as well as a limit on the combined size of all columns an index
covers. This means that indexes that are possible on other backends will
fail to be created under MySQL.
SQLite
~~~~~~
SQLite has very little built-in schema alteration support, and so Django
attempts to emulate it by:
* Creating a new table with the new schema
* Copying the data across
* Dropping the old table
* Renaming the new table to match the original name
This process generally works well, but it can be slow and occasionally
buggy. Running migrations against SQLite in a production environment is
not recommended unless you are very aware of the risks and its
limitations; the support Django ships with is designed to allow
developers to use SQLite on their local machines to develop less complex
Django projects without the need for a full database.
Workflow
--------
Working with migrations is simple. Make changes to your models - say, add
a field and remove a model - and then run :djadmin:`makemigrations`::
$ python manage.py makemigrations
Migrations for 'books':
0003_auto.py:
- Alter field author on book
Your models will be scanned and compared to the versions currently
contained in your migration files, and then a new set of migrations
will be written out. Make sure to read the output to see what
``makemigrations`` thinks you have changed - it's not perfect, and for
complex changes it might not be detecting what you expect.
Once you have your new migration files, you should apply them to your
database to make sure they work as expected::
$ python manage.py migrate
Operations to perform:
Synchronize unmigrated apps: sessions, admin, messages, auth, staticfiles, contenttypes
Apply all migrations: books
Synchronizing apps without migrations:
Creating tables...
Installing custom SQL...
Installing indexes...
Installed 0 object(s) from 0 fixture(s)
Running migrations:
Applying books.0003_auto... OK
The command runs in two stages; first, it synchronizes unmigrated apps
(performing the same functionality that ``syncdb`` used to provide), and
then it runs any migrations that have not yet been applied.
Once the migration is applied, commit the migration and the models change
to your version control system as a single commit - that way, when other
developers (or your production servers) check out the code, they'll
get both the changes to your models and the accompanying migration at the
same time.
Version control
~~~~~~~~~~~~~~~
Because migrations are stored in version control, you'll occasionally
come across situations where you and another developer have both committed
a migration to the same app at the same time, resulting in two migrations
with the same number.
Don't worry - the numbers are just there for developers' reference; Django
only cares that each migration has a different name. Each migration file
specifies which other migrations it depends on - including earlier migrations
in the same app - so it's possible to detect when there are two new migrations
for the same app that aren't ordered.
When this happens, Django will prompt you and give you some options. If it
thinks it's safe enough, it will offer to automatically linearize the two
migrations for you. If not, you'll have to go in and modify the migrations
yourself - don't worry, this isn't difficult, and is explained more in
:ref:`migration-files` below.
Dependencies
------------
While migrations are per-app, the tables and relationships implied by
your models are too complex to be created for just one app at a time. When
you make a migration that requires something else to run - for example,
you add a ForeignKey in your ``books`` app to your ``authors`` app - the
resulting migration will contain a dependency on a migration in ``authors``.
This means that when you run the migrations, the ``authors`` migration runs
first and creates the table the ``ForeignKey`` references, and then the migration
that makes the ``ForeignKey`` column runs afterwards and creates the constraint.
If this didn't happen, the migration would try to create the ``ForeignKey``
column before the table it references existed, and your database would
raise an error.
This dependency behaviour affects most migration operations where you
restrict to a single app. Restricting to a single app (either in
``makemigrations`` or ``migrate``) is a best-efforts promise, and not
a guarantee; any other apps that must be included to satisfy
dependencies will be included as well.
.. _migration-files:
Migration files
---------------
Migrations are stored as an on-disk format, referred to here as
"migration files". These files are actually just normal Python files with
an agreed-upon object layout, written in a declarative style.
A basic migration file looks like this::
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("migrations", "0001_initial")]
operations = [
migrations.DeleteModel("Tribble"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
]
What Django looks for when it loads a migration file (as a Python module) is
a subclass of ``django.db.migrations.Migration`` called ``Migration``. It then
inspects this object for four attributes, only two of which are used
most of the time:
* ``dependencies``, a list of migrations this one depends on.
* ``operations``, a list of Operation classes that define what this migration
does.
The operations are the key; they are a set of declarative instructions which
tell Django what schema changes need to be made. Django scans them and
builds an in-memory representation of all of the schema changes to all apps,
and uses this to generate the SQL which makes the schema changes.
That in-memory structure is also used to work out what the differences are
between your models and the current state of your migrations; Django runs
through all the changes, in order, on an in-memory set of models to come
up with the state of your models last time you ran ``makemigrations``. It
then uses these models to compare against the ones in your ``models.py`` files
to work out what you have changed.
You should rarely, if ever, need to edit migration files by hand, but
it's entirely possible to write them manually if you need to. Some of the
more complex operations are not autodetectable and are only available via
a hand-written migration, so don't be scared about editing them if you have to.
Adding migrations to apps
-------------------------
Adding migrations to new apps is straightforward - they come preconfigured to
accept migrations, and so just run :djadmin:`makemigrations` once you've made
some changes.
If your app already has models and database tables, and doesn't have migrations
yet (for example, you created it against a previous Django version), you'll
need to convert it to use migrations; this is a simple process::
python manage.py makemigrations --force yourappname
This will make a new initial migration for your app (the ``--force`` argument
is to override Django's default behaviour, as it thinks your app does not want
migrations). Now, when you run :djadmin:`migrate`, Django will detect that
you have an initial migration *and* that the tables it wants to create already
exist, and will mark the migration as already applied.
Note that this only works given two things:
* You have not changed your models since you made their tables. For migrations
to work, you must make the initial migration *first* and then make changes,
as Django compares changes against migration files, not the database.
* You have not manually edited your database - Django won't be able to detect
that your database doesn't match your models, you'll just get errors when
migrations try to modify those tables.
.. _historical-models:
Historical models
-----------------
When you run migrations, Django is working from historical versions of
your models stored in the migration files. If you write Python code
using the ``django.db.migrations.RunPython`` operation, or if you have
``allow_migrate`` methods on your database routers, you will be exposed
to these versions of your models.
Because it's impossible to serialize arbitrary Python code, these historical
models will not have any custom methods or managers that you have defined.
They will, however, have the same fields, relationships and ``Meta`` options
(also versioned, so they may be different from your current ones).
In addition, the base classes of the model are just stored as pointers,
so you must always keep base classes around for as long as there is a migration
that contains a reference to them. On the plus side, methods and managers
from these base classes inherit normally, so if you absolutely need access
to these you can opt to move them into a superclass.
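As an illustration only - assuming ``RunPython`` calls the supplied function
with the historical app registry and a schema editor (the exact signature may
differ; the ``books`` app, ``Author`` model and migration names below are
hypothetical) - a data migration using a historical model might look like::

    from django.db import migrations

    def set_default_rating(apps, schema_editor):
        # apps.get_model returns the historical version of the model:
        # same fields and Meta options, but no custom methods or managers.
        Author = apps.get_model("books", "Author")
        for author in Author.objects.all():
            author.rating = 0
            author.save()

    class Migration(migrations.Migration):

        dependencies = [("books", "0002_author_rating")]

        operations = [
            migrations.RunPython(set_default_rating),
        ]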

View File

@ -293,7 +293,7 @@ serialize an object that refers to a content type, then you need to have a way
to refer to that content type to begin with. Since ``ContentType`` objects are
automatically created by Django during the database synchronization process,
the primary key of a given content type isn't easy to predict; it will
depend on how and when :djadmin:`syncdb` was executed. This is true for all
depend on how and when :djadmin:`migrate` was executed. This is true for all
models which automatically generate objects, notably including
:class:`~django.contrib.auth.models.Permission`,
:class:`~django.contrib.auth.models.Group`, and

View File

@ -182,7 +182,7 @@ Advanced features of ``TransactionTestCase``
By default, ``available_apps`` is set to ``None``. After each test, Django
calls :djadmin:`flush` to reset the database state. This empties all tables
and emits the :data:`~django.db.models.signals.post_syncdb` signal, which
and emits the :data:`~django.db.models.signals.post_migrate` signal, which
re-creates one content type and three permissions for each model. This
operation gets expensive proportionally to the number of models.
@ -190,13 +190,13 @@ Advanced features of ``TransactionTestCase``
behave as if only the models from these applications were available. The
behavior of ``TransactionTestCase`` changes as follows:
- :data:`~django.db.models.signals.post_syncdb` is fired before each
- :data:`~django.db.models.signals.post_migrate` is fired before each
test to create the content types and permissions for each model in
available apps, in case they're missing.
- After each test, Django empties only tables corresponding to models in
available apps. However, at the database level, truncation may cascade to
related models in unavailable apps. Furthermore
:data:`~django.db.models.signals.post_syncdb` isn't fired; it will be
:data:`~django.db.models.signals.post_migrate` isn't fired; it will be
fired by the next ``TransactionTestCase``, after the correct set of
applications is selected.
@ -205,10 +205,10 @@ Advanced features of ``TransactionTestCase``
cause unrelated tests to fail. Be careful with tests that use sessions;
the default session engine stores them in the database.
Since :data:`~django.db.models.signals.post_syncdb` isn't emitted after
Since :data:`~django.db.models.signals.post_migrate` isn't emitted after
flushing the database, its state after a ``TransactionTestCase`` isn't the
same as after a ``TestCase``: it's missing the rows created by listeners
to :data:`~django.db.models.signals.post_syncdb`. Considering the
to :data:`~django.db.models.signals.post_migrate`. Considering the
:ref:`order in which tests are executed <order-of-tests>`, this isn't an
issue, provided either all ``TransactionTestCase`` in a given test suite
declare ``available_apps``, or none of them.
@ -276,7 +276,7 @@ testing behavior. This behavior involves:
#. Creating the test databases.
#. Running ``syncdb`` to install models and initial data into the test
#. Running ``migrate`` to install models and initial data into the test
databases.
#. Running the tests that were found.
@ -467,7 +467,7 @@ can be useful during testing.
.. function:: create_test_db([verbosity=1, autoclobber=False])
Creates a new test database and runs ``syncdb`` against it.
Creates a new test database and runs ``migrate`` against it.
``verbosity`` has the same behavior as in ``run_tests()``.

View File

@ -1170,9 +1170,9 @@ documentation<dumpdata>` for more details.
.. note::
If you've ever run :djadmin:`manage.py syncdb<syncdb>`, you've
If you've ever run :djadmin:`manage.py migrate<migrate>`, you've
already used a fixture without even knowing it! When you call
:djadmin:`syncdb` in the database for the first time, Django
:djadmin:`migrate` against the database for the first time, Django
installs a fixture called ``initial_data``. This gives you a way
of populating a new database with any initial data, such as a
default set of categories.

View File

17
tests/app_cache/models.py Normal file
View File

@ -0,0 +1,17 @@
from django.db import models
from django.db.models.loading import BaseAppCache
# We're testing app cache presence on load, so this is handy.
new_app_cache = BaseAppCache()
class TotallyNormal(models.Model):
name = models.CharField(max_length=255)
class SoAlternative(models.Model):
name = models.CharField(max_length=255)
class Meta:
app_cache = new_app_cache

44
tests/app_cache/tests.py Normal file
View File

@ -0,0 +1,44 @@
from __future__ import absolute_import
from django.test import TestCase
from django.db.models.loading import cache, BaseAppCache
from django.db import models
from .models import TotallyNormal, SoAlternative, new_app_cache
class AppCacheTests(TestCase):
"""
Tests the AppCache borg and non-borg versions
"""
def test_models_py(self):
"""
Tests that the models in the models.py file were loaded correctly.
"""
self.assertEqual(cache.get_model("app_cache", "TotallyNormal"), TotallyNormal)
self.assertEqual(cache.get_model("app_cache", "SoAlternative"), None)
self.assertEqual(new_app_cache.get_model("app_cache", "TotallyNormal"), None)
self.assertEqual(new_app_cache.get_model("app_cache", "SoAlternative"), SoAlternative)
def test_dynamic_load(self):
"""
Makes a new model at runtime and ensures it goes into the right place.
"""
old_models = cache.get_models(cache.get_app("app_cache"))
# Construct a new model in a new app cache
body = {}
new_app_cache = BaseAppCache()
meta_contents = {
'app_label': "app_cache",
'app_cache': new_app_cache,
}
meta = type("Meta", tuple(), meta_contents)
body['Meta'] = meta
body['__module__'] = TotallyNormal.__module__
temp_model = type("SouthPonies", (models.Model,), body)
# Make sure it appeared in the right place!
self.assertEqual(
old_models,
cache.get_models(cache.get_app("app_cache")),
)
self.assertEqual(new_app_cache.get_model("app_cache", "SouthPonies"), temp_model)

View File

@ -895,7 +895,7 @@ class DBCacheRouter(object):
if model._meta.app_label == 'django_cache':
return 'other'
def allow_syncdb(self, db, model):
def allow_migrate(self, db, model):
if model._meta.app_label == 'django_cache':
return db == 'other'

View File

View File

View File

@ -0,0 +1,274 @@
# encoding: utf8
from django.test import TestCase
from django.db.migrations.autodetector import MigrationAutodetector, MigrationQuestioner
from django.db.migrations.state import ProjectState, ModelState
from django.db.migrations.graph import MigrationGraph
from django.db import models
class AutodetectorTests(TestCase):
"""
Tests the migration autodetector.
"""
author_empty = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True))])
author_name = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200))])
author_name_longer = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=400))])
author_name_renamed = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("names", models.CharField(max_length=200))])
author_with_book = ModelState("testapp", "Author", [("id", models.AutoField(primary_key=True)), ("name", models.CharField(max_length=200)), ("book", models.ForeignKey("otherapp.Book"))])
other_pony = ModelState("otherapp", "Pony", [("id", models.AutoField(primary_key=True))])
other_stable = ModelState("otherapp", "Stable", [("id", models.AutoField(primary_key=True))])
third_thing = ModelState("thirdapp", "Thing", [("id", models.AutoField(primary_key=True))])
book = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))])
book_unique = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": [("author", "title")]})
book_unique_2 = ModelState("otherapp", "Book", [("id", models.AutoField(primary_key=True)), ("author", models.ForeignKey("testapp.Author")), ("title", models.CharField(max_length=200))], {"unique_together": [("title", "author")]})
edition = ModelState("thirdapp", "Edition", [("id", models.AutoField(primary_key=True)), ("book", models.ForeignKey("otherapp.Book"))])
def make_project_state(self, model_states):
"Shortcut to make ProjectStates from lists of predefined models"
project_state = ProjectState()
for model_state in model_states:
project_state.add_model_state(model_state.clone())
return project_state
def test_arrange_for_graph(self):
"Tests auto-naming of migrations for graph matching."
# Make a fake graph
graph = MigrationGraph()
graph.add_node(("testapp", "0001_initial"), None)
graph.add_node(("testapp", "0002_foobar"), None)
graph.add_node(("otherapp", "0001_initial"), None)
graph.add_dependency(("testapp", "0002_foobar"), ("testapp", "0001_initial"))
graph.add_dependency(("testapp", "0002_foobar"), ("otherapp", "0001_initial"))
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Run through arrange_for_graph
changes = autodetector._arrange_for_graph(changes, graph)
# Make sure there's a new name, deps match, etc.
self.assertEqual(changes["testapp"][0].name, "0003_author")
self.assertEqual(changes["testapp"][0].dependencies, [("testapp", "0002_foobar")])
self.assertEqual(changes["otherapp"][0].name, "0002_pony_stable")
self.assertEqual(changes["otherapp"][0].dependencies, [("otherapp", "0001_initial")])
def test_trim_apps(self):
"Tests that trim does not remove dependencies but does remove unwanted apps"
# Use project state to make a new migration change set
before = self.make_project_state([])
after = self.make_project_state([self.author_empty, self.other_pony, self.other_stable, self.third_thing])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_initial": True}))
changes = autodetector._detect_changes()
# Run through arrange_for_graph
graph = MigrationGraph()
changes = autodetector._arrange_for_graph(changes, graph)
changes["testapp"][0].dependencies.append(("otherapp", "0001_initial"))
changes = autodetector._trim_to_apps(changes, set(["testapp"]))
# Make sure there's the right set of migrations
self.assertEqual(changes["testapp"][0].name, "0001_initial")
self.assertEqual(changes["otherapp"][0].name, "0001_initial")
self.assertNotIn("thirdapp", changes)
def test_new_model(self):
"Tests autodetection of new models"
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(action.name, "Author")
def test_old_model(self):
"Tests deletion of old models"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "DeleteModel")
self.assertEqual(action.name, "Author")
def test_add_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_empty])
after = self.make_project_state([self.author_name])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "name")
def test_remove_field(self):
"Tests autodetection of removed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_empty])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RemoveField")
self.assertEqual(action.name, "name")
def test_alter_field(self):
"Tests autodetection of new fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_longer])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterField")
self.assertEqual(action.name, "name")
def test_rename_field(self):
"Tests autodetection of renamed fields"
# Make state
before = self.make_project_state([self.author_name])
after = self.make_project_state([self.author_name_renamed])
autodetector = MigrationAutodetector(before, after, MigrationQuestioner({"ask_rename": True}))
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
# Right number of actions?
migration = changes['testapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "RenameField")
self.assertEqual(action.old_name, "name")
self.assertEqual(action.new_name, "names")
def test_fk_dependency(self):
"Tests that having a ForeignKey automatically adds a dependency"
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_name, self.book, self.edition])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
self.assertEqual(len(changes['otherapp']), 1)
self.assertEqual(len(changes['thirdapp']), 1)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 1)
migration2 = changes['otherapp'][0]
self.assertEqual(len(migration2.operations), 1)
migration3 = changes['thirdapp'][0]
self.assertEqual(len(migration3.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration3.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
# Right dependencies?
self.assertEqual(migration1.dependencies, [])
self.assertEqual(migration2.dependencies, [("testapp", "auto_1")])
self.assertEqual(migration3.dependencies, [("otherapp", "auto_1")])
def test_circular_fk_dependency(self):
"""
Tests that having a circular ForeignKey dependency automatically
resolves the situation into 2 migrations on one side and 1 on the other.
"""
# Make state
before = self.make_project_state([])
after = self.make_project_state([self.author_with_book, self.book])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['testapp']), 1)
self.assertEqual(len(changes['otherapp']), 2)
# Right number of actions?
migration1 = changes['testapp'][0]
self.assertEqual(len(migration1.operations), 1)
migration2 = changes['otherapp'][0]
self.assertEqual(len(migration2.operations), 1)
migration3 = changes['otherapp'][1]
self.assertEqual(len(migration3.operations), 1)
# Right actions?
action = migration1.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
action = migration2.operations[0]
self.assertEqual(action.__class__.__name__, "CreateModel")
self.assertEqual(len(action.fields), 2)
action = migration3.operations[0]
self.assertEqual(action.__class__.__name__, "AddField")
self.assertEqual(action.name, "author")
# Right dependencies?
self.assertEqual(migration1.dependencies, [("otherapp", "auto_1")])
self.assertEqual(migration2.dependencies, [])
self.assertEqual(set(migration3.dependencies), set([("otherapp", "auto_1"), ("testapp", "auto_1")]))
def test_unique_together(self):
"Tests unique_together detection"
# Make state
before = self.make_project_state([self.author_empty, self.book])
after = self.make_project_state([self.author_empty, self.book_unique])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
self.assertEqual(action.name, "book")
self.assertEqual(action.unique_together, set([("author", "title")]))
def test_unique_together_ordering(self):
"Tests that unique_together also triggers on ordering changes"
# Make state
before = self.make_project_state([self.author_empty, self.book_unique])
after = self.make_project_state([self.author_empty, self.book_unique_2])
autodetector = MigrationAutodetector(before, after)
changes = autodetector._detect_changes()
# Right number of migrations?
self.assertEqual(len(changes['otherapp']), 1)
# Right number of actions?
migration = changes['otherapp'][0]
self.assertEqual(len(migration.operations), 1)
# Right action?
action = migration.operations[0]
self.assertEqual(action.__class__.__name__, "AlterUniqueTogether")
self.assertEqual(action.name, "book")
self.assertEqual(action.unique_together, set([("title", "author")]))

View File

@ -0,0 +1,41 @@
from django.test import TransactionTestCase
from django.db import connection
class MigrationTestBase(TransactionTestCase):
"""
Contains an extended set of asserts for testing migrations and schema operations.
"""
available_apps = ["migrations"]
def assertTableExists(self, table):
self.assertIn(table, connection.introspection.get_table_list(connection.cursor()))
def assertTableNotExists(self, table):
self.assertNotIn(table, connection.introspection.get_table_list(connection.cursor()))
def assertColumnExists(self, table, column):
self.assertIn(column, [c.name for c in connection.introspection.get_table_description(connection.cursor(), table)])
def assertColumnNotExists(self, table, column):
self.assertNotIn(column, [c.name for c in connection.introspection.get_table_description(connection.cursor(), table)])
def assertColumnNull(self, table, column):
self.assertEqual([c.null_ok for c in connection.introspection.get_table_description(connection.cursor(), table) if c.name == column][0], True)
def assertColumnNotNull(self, table, column):
self.assertEqual([c.null_ok for c in connection.introspection.get_table_description(connection.cursor(), table) if c.name == column][0], False)
def assertIndexExists(self, table, columns, value=True):
self.assertEqual(
value,
any(
c["index"]
for c in connection.introspection.get_constraints(connection.cursor(), table).values()
if c['columns'] == list(columns)
),
)
def assertIndexNotExists(self, table, columns):
return self.assertIndexExists(table, columns, False)
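A hypothetical subclass (not part of the diff) showing how these asserts read in practice; the table and column names are illustrative and assume the app's schema has already been created:

class ExampleSchemaTests(MigrationTestBase):
    def test_author_schema(self):
        # Each helper introspects the live test database via `connection`.
        self.assertTableExists("migrations_author")
        self.assertColumnExists("migrations_author", "name")
        self.assertColumnNull("migrations_author", "slug")
        self.assertIndexNotExists("migrations_author", ["name"])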

View File

@ -0,0 +1,37 @@
from django.core.management import call_command
from django.test.utils import override_settings
from .test_base import MigrationTestBase
class CommandTests(MigrationTestBase):
"""
Tests running the commands (migrate, makemigrations).
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_migrate(self):
"""
Tests basic usage of the migrate command.
"""
# Make sure no tables are created
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run the migrations to 0001 only
call_command("migrate", "migrations", "0001", verbosity=0)
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
# Run migrations all the way
call_command("migrate", verbosity=0)
# Make sure the right tables exist
self.assertTableExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableExists("migrations_book")
# Unmigrate everything
call_command("migrate", "migrations", "zero", verbosity=0)
# Make sure it's all gone
self.assertTableNotExists("migrations_author")
self.assertTableNotExists("migrations_tribble")
self.assertTableNotExists("migrations_book")
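For reference, the call_command() invocations in test_migrate correspond to these command-line equivalents (a sketch; output and prompts omitted):

# python manage.py migrate migrations 0001   -> forwards to 0001 only
# python manage.py migrate                   -> apply all migrations
# python manage.py migrate migrations zero   -> unapply the whole app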

View File

@ -0,0 +1,77 @@
from django.test import TransactionTestCase
from django.test.utils import override_settings
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
class ExecutorTests(TransactionTestCase):
"""
Tests the migration executor (full end-to-end migration running).
Bear in mind that if these tests are failing, you should fix the other
test failures first, as they may be propagating into these.
"""
available_apps = ["migrations", "django.contrib.sessions"]
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_run(self):
"""
Tests running a simple set of migrations.
"""
executor = MigrationExecutor(connection)
executor.recorder.flush()
# Let's look at the plan first and make sure it's up to scratch
plan = executor.migration_plan([("migrations", "0002_second")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
],
)
# Were the tables there before?
self.assertNotIn("migrations_author", connection.introspection.get_table_list(connection.cursor()))
self.assertNotIn("migrations_book", connection.introspection.get_table_list(connection.cursor()))
# Alright, let's try running it
executor.migrate([("migrations", "0002_second")])
# Are the tables there now?
self.assertIn("migrations_author", connection.introspection.get_table_list(connection.cursor()))
self.assertIn("migrations_book", connection.introspection.get_table_list(connection.cursor()))
# Alright, let's undo what we did
executor.migrate([("migrations", None)])
# Are the tables gone?
self.assertNotIn("migrations_author", connection.introspection.get_table_list(connection.cursor()))
self.assertNotIn("migrations_book", connection.introspection.get_table_list(connection.cursor()))
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations", "sessions": "migrations.test_migrations_2"})
def test_empty_plan(self):
"""
Tests that re-planning a full migration of a fully-migrated set doesn't
perform spurious unmigrations and remigrations.
There was previously a bug where the executor always performed the
backwards plan for applied migrations; even for the most recent
migration in an app, that plan can include other, dependent apps,
which were then being unmigrated.
"""
# Make the initial plan, check it
# We use 'sessions' here as the second app as it's always present
# in INSTALLED_APPS, so we can happily assign it test migrations.
executor = MigrationExecutor(connection)
plan = executor.migration_plan([("migrations", "0002_second"), ("sessions", "0001_initial")])
self.assertEqual(
plan,
[
(executor.loader.graph.nodes["migrations", "0001_initial"], False),
(executor.loader.graph.nodes["migrations", "0002_second"], False),
(executor.loader.graph.nodes["sessions", "0001_initial"], False),
],
)
# Fake-apply all migrations
executor.migrate([("migrations", "0002_second"), ("sessions", "0001_initial")], fake=True)
# Now plan a second time and make sure it's empty
plan = executor.migration_plan([("migrations", "0002_second"), ("sessions", "0001_initial")])
self.assertEqual(plan, [])
# Erase all the fake records
executor.recorder.flush()
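Pulling the executor API exercised above into one minimal sketch (targets are (app_label, migration_name) pairs; None means "unapply everything for that app"; the app and migration names are the test fixtures'):

from django.db import connection
from django.db.migrations.executor import MigrationExecutor

executor = MigrationExecutor(connection)
# Each plan entry is (Migration, backwards_flag); an empty plan means the
# target state is already reached (the regression test_empty_plan relies on this).
plan = executor.migration_plan([("migrations", "0002_second")])
if plan:
    executor.migrate([("migrations", "0002_second")])  # apply forwards
executor.migrate([("migrations", None)])               # then roll the app back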

View File

@ -0,0 +1,135 @@
from django.test import TestCase
from django.db.migrations.graph import MigrationGraph, CircularDependencyError
class GraphTests(TestCase):
"""
Tests the digraph structure.
"""
def test_simple_graph(self):
"""
Tests a basic dependency graph:
app_a:  0001 <-- 0002 <--- 0003 <-- 0004
                         /
app_b:  0001 <-- 0002 <-/
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_a", "0004"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency(("app_a", "0004"), ("app_a", "0003"))
graph.add_dependency(("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency(("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency(("app_a", "0003"), ("app_b", "0002"))
graph.add_dependency(("app_b", "0002"), ("app_b", "0001"))
# Test root migration case
self.assertEqual(
graph.forwards_plan(("app_a", "0001")),
[('app_a', '0001')],
)
# Test branch B only
self.assertEqual(
graph.forwards_plan(("app_b", "0002")),
[("app_b", "0001"), ("app_b", "0002")],
)
# Test whole graph
self.assertEqual(
graph.forwards_plan(("app_a", "0004")),
[('app_b', '0001'), ('app_b', '0002'), ('app_a', '0001'), ('app_a', '0002'), ('app_a', '0003'), ('app_a', '0004')],
)
# Test reverse to b:0002
self.assertEqual(
graph.backwards_plan(("app_b", "0002")),
[('app_a', '0004'), ('app_a', '0003'), ('app_b', '0002')],
)
# Test roots and leaves
self.assertEqual(
graph.root_nodes(),
set([('app_a', '0001'), ('app_b', '0001')]),
)
self.assertEqual(
graph.leaf_nodes(),
set([('app_a', '0004'), ('app_b', '0002')]),
)
def test_complex_graph(self):
"""
Tests a complex dependency graph:
app_a:  0001 <-- 0002 <--- 0003 <-- 0004
              \        \ /         /
app_b:  0001 <-\ 0002 <-X         /
                 \        \      /
app_c:            \ 0001 <-- 0002 <-
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_a", "0004"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_node(("app_c", "0001"), None)
graph.add_node(("app_c", "0002"), None)
graph.add_dependency(("app_a", "0004"), ("app_a", "0003"))
graph.add_dependency(("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency(("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency(("app_a", "0003"), ("app_b", "0002"))
graph.add_dependency(("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency(("app_a", "0004"), ("app_c", "0002"))
graph.add_dependency(("app_c", "0002"), ("app_c", "0001"))
graph.add_dependency(("app_c", "0001"), ("app_b", "0001"))
graph.add_dependency(("app_c", "0002"), ("app_a", "0002"))
# Test branch C only
self.assertEqual(
graph.forwards_plan(("app_c", "0002")),
[('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'), ('app_a', '0002'), ('app_c', '0002')],
)
# Test whole graph
self.assertEqual(
graph.forwards_plan(("app_a", "0004")),
[('app_b', '0001'), ('app_c', '0001'), ('app_a', '0001'), ('app_a', '0002'), ('app_c', '0002'), ('app_b', '0002'), ('app_a', '0003'), ('app_a', '0004')],
)
# Test reverse to b:0001
self.assertEqual(
graph.backwards_plan(("app_b", "0001")),
[('app_a', '0004'), ('app_c', '0002'), ('app_c', '0001'), ('app_a', '0003'), ('app_b', '0002'), ('app_b', '0001')],
)
# Test roots and leaves
self.assertEqual(
graph.root_nodes(),
set([('app_a', '0001'), ('app_b', '0001'), ('app_c', '0001')]),
)
self.assertEqual(
graph.leaf_nodes(),
set([('app_a', '0004'), ('app_b', '0002'), ('app_c', '0002')]),
)
def test_circular_graph(self):
"""
Tests a circular dependency graph.
"""
# Build graph
graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)
graph.add_node(("app_a", "0002"), None)
graph.add_node(("app_a", "0003"), None)
graph.add_node(("app_b", "0001"), None)
graph.add_node(("app_b", "0002"), None)
graph.add_dependency(("app_a", "0003"), ("app_a", "0002"))
graph.add_dependency(("app_a", "0002"), ("app_a", "0001"))
graph.add_dependency(("app_a", "0001"), ("app_b", "0002"))
graph.add_dependency(("app_b", "0002"), ("app_b", "0001"))
graph.add_dependency(("app_b", "0001"), ("app_a", "0003"))
# Test whole graph
self.assertRaises(
CircularDependencyError,
graph.forwards_plan, ("app_a", "0003"),
)
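The graph API used throughout these tests, reduced to a two-node sketch; note that add_dependency takes (child, parent), i.e. "0002 depends on 0001":

from django.db.migrations.graph import MigrationGraph

graph = MigrationGraph()
graph.add_node(("app_a", "0001"), None)  # second argument is the Migration instance
graph.add_node(("app_a", "0002"), None)
graph.add_dependency(("app_a", "0002"), ("app_a", "0001"))

# forwards_plan lists ancestors first; backwards_plan lists dependents first.
assert graph.forwards_plan(("app_a", "0002")) == [("app_a", "0001"), ("app_a", "0002")]
assert graph.backwards_plan(("app_a", "0001")) == [("app_a", "0002"), ("app_a", "0001")]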

View File

@ -0,0 +1,79 @@
from django.test import TestCase
from django.test.utils import override_settings
from django.db import connection
from django.db.migrations.loader import MigrationLoader, AmbiguityError
from django.db.migrations.recorder import MigrationRecorder
class RecorderTests(TestCase):
"""
Tests recording migrations as applied or not.
"""
def test_apply(self):
"""
Tests marking migrations as applied/unapplied.
"""
recorder = MigrationRecorder(connection)
self.assertEqual(
recorder.applied_migrations(),
set(),
)
recorder.record_applied("myapp", "0432_ponies")
self.assertEqual(
recorder.applied_migrations(),
set([("myapp", "0432_ponies")]),
)
recorder.record_unapplied("myapp", "0432_ponies")
self.assertEqual(
recorder.applied_migrations(),
set(),
)
class LoaderTests(TestCase):
"""
Tests the disk and database loader, and running through migrations
in memory.
"""
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_load(self):
"""
Makes sure the loader can load the migrations for the test apps,
and then render them out to a new AppCache.
"""
# Load and test the plan
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.graph.forwards_plan(("migrations", "0002_second")),
[("migrations", "0001_initial"), ("migrations", "0002_second")],
)
# Now render it out!
project_state = migration_loader.graph.project_state(("migrations", "0002_second"))
self.assertEqual(len(project_state.models), 2)
author_state = project_state.models["migrations", "author"]
self.assertEqual(
[x for x, y in author_state.fields],
["id", "name", "slug", "age", "rating"]
)
book_state = project_state.models["migrations", "book"]
self.assertEqual(
[x for x, y in book_state.fields],
["id", "author"]
)
@override_settings(MIGRATION_MODULES={"migrations": "migrations.test_migrations"})
def test_name_match(self):
"Tests prefix name matching"
migration_loader = MigrationLoader(connection)
self.assertEqual(
migration_loader.get_migration_by_prefix("migrations", "0001").name,
"0001_initial",
)
with self.assertRaises(AmbiguityError):
migration_loader.get_migration_by_prefix("migrations", "0")
with self.assertRaises(KeyError):
migration_loader.get_migration_by_prefix("migrations", "blarg")
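The recorder bookkeeping behind RecorderTests, as a standalone sketch (applied state is a set of (app_label, migration_name) pairs persisted by MigrationRecorder; the app and migration names are illustrative):

from django.db import connection
from django.db.migrations.recorder import MigrationRecorder

recorder = MigrationRecorder(connection)
recorder.record_applied("myapp", "0001_initial")
assert ("myapp", "0001_initial") in recorder.applied_migrations()
recorder.record_unapplied("myapp", "0001_initial")
assert recorder.applied_migrations() == set()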

View File

@ -0,0 +1,27 @@
from django.db import migrations, models
class Migration(migrations.Migration):
operations = [
migrations.CreateModel(
"Author",
[
("id", models.AutoField(primary_key=True)),
("name", models.CharField(max_length=255)),
("slug", models.SlugField(null=True)),
("age", models.IntegerField(default=0)),
("silly_field", models.BooleanField(default=False)),
],
),
migrations.CreateModel(
"Tribble",
[
("id", models.AutoField(primary_key=True)),
("fluffy", models.BooleanField(default=True)),
],
)
]
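This package is what the MIGRATION_MODULES overrides in the tests above point at; as a settings sketch (the mapping is exactly the value the tests pass to override_settings):

# Route the "migrations" app's migrations to this test package.
MIGRATION_MODULES = {
    "migrations": "migrations.test_migrations",
}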

View File

@ -0,0 +1,24 @@
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("migrations", "0001_initial")]
operations = [
migrations.DeleteModel("Tribble"),
migrations.RemoveField("Author", "silly_field"),
migrations.AddField("Author", "rating", models.IntegerField(default=0)),
migrations.CreateModel(
"Book",
[
("id", models.AutoField(primary_key=True)),
("author", models.ForeignKey("migrations.Author", null=True)),
],
)
]
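A hypothetical 0003 continuing this series (illustrative only, not part of the diff; it reuses the AddField signature shown above and depends on 0002_second):

from django.db import migrations, models

class Migration(migrations.Migration):
    dependencies = [("migrations", "0002_second")]
    operations = [
        # Hypothetical field; null=True keeps the addition backwards-compatible
        # with existing rows.
        migrations.AddField("Book", "title", models.CharField(max_length=255, null=True)),
    ]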

Some files were not shown because too many files have changed in this diff