Fixed #506 -- runtests.py now allows models to be tested individually. Thanks, Simon

git-svn-id: http://code.djangoproject.com/svn/django/trunk@646 bcc190cf-cafb-0310-a4f2-bffc1f526a37
This commit is contained in:
Adrian Holovaty 2005-09-19 01:18:04 +00:00
parent bcc2873c70
commit 09bd9d3ef6
1 changed file with 42 additions and 28 deletions

View File

@ -54,8 +54,9 @@ class DjangoDoctestOutputChecker(doctest.OutputChecker):
return ok return ok
class TestRunner: class TestRunner:
def __init__(self, verbosity_level=0): def __init__(self, verbosity_level=0, which_tests=None):
self.verbosity_level = verbosity_level self.verbosity_level = verbosity_level
self.which_tests = which_tests
def output(self, required_level, message): def output(self, required_level, message):
if self.verbosity_level > required_level - 1: if self.verbosity_level > required_level - 1:
@ -66,11 +67,22 @@ class TestRunner:
from django.core.db import db from django.core.db import db
from django.core import management, meta from django.core import management, meta
self.output(0, "Running tests with database %r" % settings.DATABASE_ENGINE)
# Manually set INSTALLED_APPS to point to the test app. # Manually set INSTALLED_APPS to point to the test app.
settings.INSTALLED_APPS = (APP_NAME,) settings.INSTALLED_APPS = (APP_NAME,)
# Determine which models we're going to test.
test_models = get_test_models()
if self.which_tests:
# Only run the specified tests.
bad_models = [m for m in self.which_tests if m not in test_models]
if bad_models:
sys.stderr.write("Models not found: %s\n" % bad_models)
sys.exit(1)
else:
test_models = self.which_tests
self.output(0, "Running tests with database %r" % settings.DATABASE_ENGINE)
# If we're using SQLite, it's more convenient to test against an # If we're using SQLite, it's more convenient to test against an
# in-memory database. # in-memory database.
if settings.DATABASE_ENGINE == "sqlite3": if settings.DATABASE_ENGINE == "sqlite3":
@ -107,7 +119,7 @@ class TestRunner:
# Run the tests for each test model. # Run the tests for each test model.
self.output(1, "Running app tests") self.output(1, "Running app tests")
for model_name in get_test_models(): for model_name in test_models:
self.output(1, "%s model: Importing" % model_name) self.output(1, "%s model: Importing" % model_name)
try: try:
mod = meta.get_app(model_name) mod = meta.get_app(model_name)
@ -132,6 +144,7 @@ class TestRunner:
# side effects on other tests. # side effects on other tests.
db.rollback() db.rollback()
if not self.which_tests:
# Run the non-model tests in the other tests dir # Run the non-model tests in the other tests dir
self.output(1, "Running other tests") self.output(1, "Running other tests")
other_tests_dir = os.path.join(os.path.dirname(__file__), OTHER_TESTS_DIR) other_tests_dir = os.path.join(os.path.dirname(__file__), OTHER_TESTS_DIR)
@ -147,10 +160,10 @@ class TestRunner:
p = doctest.DocTestParser() p = doctest.DocTestParser()
dtest = p.get_doctest(mod.__doc__, mod.__dict__, module, None, None) dtest = p.get_doctest(mod.__doc__, mod.__dict__, module, None, None)
runner = DjangoDoctestRunner(verbosity_level=verbosity_level, verbose=False) runner = DjangoDoctestRunner(verbosity_level=verbosity_level, verbose=False)
self.output(1, "%s module: runing tests" % module) self.output(1, "%s module: running tests" % module)
runner.run(dtest, clear_globs=True, out=sys.stdout.write) runner.run(dtest, clear_globs=True, out=sys.stdout.write)
if hasattr(mod, "run_tests") and callable(mod.run_tests): if hasattr(mod, "run_tests") and callable(mod.run_tests):
self.output(1, "%s module: runing tests" % module) self.output(1, "%s module: running tests" % module)
try: try:
mod.run_tests(verbosity_level) mod.run_tests(verbosity_level)
except Exception, e: except Exception, e:
@ -176,17 +189,18 @@ class TestRunner:
# Display output. # Display output.
if error_list: if error_list:
print "Got %s error%s:" % (len(error_list), len(error_list) != 1 and 's' or '')
for d in error_list: for d in error_list:
print print
print d['title'] print d['title']
print "=" * len(d['title']) print "=" * len(d['title'])
print d['description'] print d['description']
print "%s error%s:" % (len(error_list), len(error_list) != 1 and 's' or '')
else: else:
print "All tests passed." print "All tests passed."
if __name__ == "__main__": if __name__ == "__main__":
from optparse import OptionParser from optparse import OptionParser
usage = "%prog [options] [model model model ...]"
parser = OptionParser() parser = OptionParser()
parser.add_option('-v', help='How verbose should the output be? Choices are 0, 1 and 2, where 2 is most verbose. Default is 0.', parser.add_option('-v', help='How verbose should the output be? Choices are 0, 1 and 2, where 2 is most verbose. Default is 0.',
type='choice', choices=['0', '1', '2']) type='choice', choices=['0', '1', '2'])
@ -198,5 +212,5 @@ if __name__ == "__main__":
verbosity_level = int(options.v) verbosity_level = int(options.v)
if options.settings: if options.settings:
os.environ['DJANGO_SETTINGS_MODULE'] = options.settings os.environ['DJANGO_SETTINGS_MODULE'] = options.settings
t = TestRunner(verbosity_level) t = TestRunner(verbosity_level, args)
t.run_tests() t.run_tests()