import re
import unittest
from urlparse import urlsplit, urlunsplit
from xml.dom.minidom import parseString, Node

from django.conf import settings
from django.core import mail
from django.core.management import call_command
from django.core.urlresolvers import clear_url_caches
from django.db import transaction, connections, DEFAULT_DB_ALIAS
from django.http import QueryDict
from django.test import _doctest as doctest
from django.test.client import Client
from django.utils import simplejson
from django.utils.encoding import smart_str

try:
    all
except NameError:
    from django.utils.itercompat import all


normalize_long_ints = lambda s: re.sub(r'(?<![\w])(\d+)L(?![\w])', '\\1', s)
normalize_decimals = lambda s: re.sub(r"Decimal\('(\d+(\.\d*)?)'\)", lambda m: "Decimal(\"%s\")" % m.groups()[0], s)
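# Illustrative examples (not part of the original module) of what the two
# normalizers above do to doctest output before it is compared:
#
#   >>> normalize_long_ints('[1L, 2L, 30L]')
#   '[1, 2, 30]'
#   >>> normalize_decimals("Decimal('1.5')")
#   'Decimal("1.5")'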

def to_list(value):
    """
    Puts value into a list if it's not already one.
    Returns an empty list if value is None.
    """
    if value is None:
        value = []
    elif not isinstance(value, list):
        value = [value]
    return value
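# Illustrative behaviour (not part of the original module):
#
#   >>> to_list(None), to_list('base.html'), to_list(['a', 'b'])
#   ([], ['base.html'], ['a', 'b'])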

real_commit = transaction.commit
real_rollback = transaction.rollback
real_enter_transaction_management = transaction.enter_transaction_management
real_leave_transaction_management = transaction.leave_transaction_management
real_savepoint_commit = transaction.savepoint_commit
real_savepoint_rollback = transaction.savepoint_rollback
real_managed = transaction.managed

def nop(*args, **kwargs):
    return

def disable_transaction_methods():
    transaction.commit = nop
    transaction.rollback = nop
    transaction.savepoint_commit = nop
    transaction.savepoint_rollback = nop
    transaction.enter_transaction_management = nop
    transaction.leave_transaction_management = nop
    transaction.managed = nop

def restore_transaction_methods():
    transaction.commit = real_commit
    transaction.rollback = real_rollback
    transaction.savepoint_commit = real_savepoint_commit
    transaction.savepoint_rollback = real_savepoint_rollback
    transaction.enter_transaction_management = real_enter_transaction_management
    transaction.leave_transaction_management = real_leave_transaction_management
    transaction.managed = real_managed
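# Sketch (an assumption, not part of the original module) of how the two
# helpers above are meant to be paired: the monkey-patches are applied for
# the duration of a test and undone before the wrapping transaction is
# rolled back, e.g.:
#
#   disable_transaction_methods()
#   try:
#       pass  # run test code that must not commit or roll back
#   finally:
#       restore_transaction_methods()
#       real_rollback()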


class OutputChecker(doctest.OutputChecker):
    def check_output(self, want, got, optionflags):
        "The entry method for doctest output checking. Defers to a sequence of child checkers"
        checks = (self.check_output_default,
                  self.check_output_numeric,
                  self.check_output_xml,
                  self.check_output_json)
        for check in checks:
            if check(want, got, optionflags):
                return True
        return False

    def check_output_default(self, want, got, optionflags):
        "The default comparator provided by doctest - not perfect, but good for most purposes"
        return doctest.OutputChecker.check_output(self, want, got, optionflags)

    def check_output_numeric(self, want, got, optionflags):
        """Doctest does an exact string comparison of output, which means that
        some numerically equivalent values aren't equal. This check normalizes
         * long integers (22L) so that they equal normal integers (22).
         * Decimals so that they are comparable, regardless of the change
           made to __repr__ in Python 2.6.
        """
        return doctest.OutputChecker.check_output(self,
            normalize_decimals(normalize_long_ints(want)),
            normalize_decimals(normalize_long_ints(got)),
            optionflags)

    def check_output_xml(self, want, got, optionsflags):
        """Tries to do an 'xml-comparison' of want and got. Plain string
        comparison doesn't always work because, for example, attribute
        ordering should not be important.

        Based on http://codespeak.net/svn/lxml/trunk/src/lxml/doctestcompare.py
        """
        _norm_whitespace_re = re.compile(r'[ \t\n][ \t\n]+')
        def norm_whitespace(v):
            return _norm_whitespace_re.sub(' ', v)

        def child_text(element):
            return ''.join([c.data for c in element.childNodes
                            if c.nodeType == Node.TEXT_NODE])

        def children(element):
            return [c for c in element.childNodes
                    if c.nodeType == Node.ELEMENT_NODE]

        def norm_child_text(element):
            return norm_whitespace(child_text(element))

        def attrs_dict(element):
            return dict(element.attributes.items())

        def check_element(want_element, got_element):
            if want_element.tagName != got_element.tagName:
                return False
            if norm_child_text(want_element) != norm_child_text(got_element):
                return False
            if attrs_dict(want_element) != attrs_dict(got_element):
                return False
            want_children = children(want_element)
            got_children = children(got_element)
            if len(want_children) != len(got_children):
                return False
            for want, got in zip(want_children, got_children):
                if not check_element(want, got):
                    return False
            return True

        want, got = self._strip_quotes(want, got)
        want = want.replace('\\n', '\n')
        got = got.replace('\\n', '\n')

        # If the string is not a complete xml document, we may need to add a
        # root element. This allows us to compare fragments, like "<foo/><bar/>".
        if not want.startswith('<?xml'):
            wrapper = '<root>%s</root>'
            want = wrapper % want
            got = wrapper % got

        # Parse the want and got strings, and compare the parsings.
        try:
            want_root = parseString(want).firstChild
            got_root = parseString(got).firstChild
        except:
            return False
        return check_element(want_root, got_root)
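    # Illustrative comparison (not part of the original module): attribute
    # ordering is ignored and runs of whitespace in text are collapsed, so
    # these two fragments are treated as equal by check_output_xml:
    #
    #   '<a b="1" c="2">x</a>'
    #   '<a   c="2" b="1">x</a>'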

    def check_output_json(self, want, got, optionsflags):
        "Tries to compare want and got as if they were JSON-encoded data"
        want, got = self._strip_quotes(want, got)
        try:
            want_json = simplejson.loads(want)
            got_json = simplejson.loads(got)
        except:
            return False
        return want_json == got_json
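    # Illustrative comparison (not part of the original module): because both
    # sides are decoded before comparing, key order and whitespace don't
    # matter, e.g. '{"a": 1, "b": 2}' and '{"b":2,"a":1}' compare as equal.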

    def _strip_quotes(self, want, got):
        """
        Strip quotes of doctests output values:

        >>> o = OutputChecker()
        >>> o._strip_quotes("'foo'", "'bar'")
        ('foo', 'bar')
        >>> o._strip_quotes('"foo"', '"bar"')
        ('foo', 'bar')
        >>> o._strip_quotes("u'foo'", "u'bar'")
        ('foo', 'bar')
        >>> o._strip_quotes('u"foo"', 'u"bar"')
        ('foo', 'bar')
        """
        def is_quoted_string(s):
            s = s.strip()
            return (len(s) >= 2
                    and s[0] == s[-1]
                    and s[0] in ('"', "'"))

        def is_quoted_unicode(s):
            s = s.strip()
            return (len(s) >= 3
                    and s[0] == 'u'
                    and s[1] == s[-1]
                    and s[1] in ('"', "'"))

        if is_quoted_string(want) and is_quoted_string(got):
            want = want.strip()[1:-1]
            got = got.strip()[1:-1]
        elif is_quoted_unicode(want) and is_quoted_unicode(got):
            want = want.strip()[2:-1]
            got = got.strip()[2:-1]
        return want, got


class DocTestRunner(doctest.DocTestRunner):
    def __init__(self, *args, **kwargs):
        doctest.DocTestRunner.__init__(self, *args, **kwargs)
        self.optionflags = doctest.ELLIPSIS

    def report_unexpected_exception(self, out, test, example, exc_info):
        doctest.DocTestRunner.report_unexpected_exception(self, out, test,
                                                          example, exc_info)
        # Rollback, in case of database errors. Otherwise they'd have
        # side effects on other tests.
        for conn in connections:
            transaction.rollback_unless_managed(using=conn)


class TransactionTestCase(unittest.TestCase):
    def _pre_setup(self):
        """Performs any pre-test setup. This includes:

            * Flushing the database.
            * If the Test Case class has a 'fixtures' member, installing the
              named fixtures.
            * If the Test Case class has a 'urls' member, replacing the
              ROOT_URLCONF with it.
            * Clearing the mail test outbox.
        """
        self._fixture_setup()
        self._urlconf_setup()
        mail.outbox = []

    def _fixture_setup(self):
        # If the test case has a multi_db=True flag, flush all databases.
        # Otherwise, just flush default.
        if getattr(self, 'multi_db', False):
            databases = connections
        else:
            databases = [DEFAULT_DB_ALIAS]
        for db in databases:
            call_command('flush', verbosity=0, interactive=False, database=db)

            if hasattr(self, 'fixtures'):
                # We have to use this slightly awkward syntax due to the fact
                # that we're using *args and **kwargs together.
                call_command('loaddata', *self.fixtures, **{'verbosity': 0, 'database': db})

    def _urlconf_setup(self):
        if hasattr(self, 'urls'):
            self._old_root_urlconf = settings.ROOT_URLCONF
            settings.ROOT_URLCONF = self.urls
            clear_url_caches()

    def __call__(self, result=None):
        """
        Wrapper around default __call__ method to perform common Django test
        set up. This means that user-defined Test Cases aren't required to
        include a call to super().setUp().
        """
        self.client = Client()
        try:
            self._pre_setup()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            import sys
            result.addError(self, sys.exc_info())
            return
        super(TransactionTestCase, self).__call__(result)
        try:
            self._post_teardown()
        except (KeyboardInterrupt, SystemExit):
            raise
        except Exception:
            import sys
            result.addError(self, sys.exc_info())
            return

    def _post_teardown(self):
        """Performs any post-test things. This includes:

            * Putting back the original ROOT_URLCONF if it was changed.
        """
        self._fixture_teardown()
        self._urlconf_teardown()

    def _fixture_teardown(self):
        pass

    def _urlconf_teardown(self):
        if hasattr(self, '_old_root_urlconf'):
            settings.ROOT_URLCONF = self._old_root_urlconf
            clear_url_caches()

    def assertRedirects(self, response, expected_url, status_code=302,
                        target_status_code=200, host=None, msg_prefix=''):
        """Asserts that a response redirected to a specific URL, and that the
        redirect URL can be loaded.

        Note that assertRedirects won't work for external links since it uses
        TestClient to do a request.
        """
        if msg_prefix:
            msg_prefix += ": "

        if hasattr(response, 'redirect_chain'):
            # The request was a followed redirect
            self.failUnless(len(response.redirect_chain) > 0,
                msg_prefix + "Response didn't redirect as expected: Response"
                " code was %d (expected %d)" %
                (response.status_code, status_code))

            self.assertEqual(response.redirect_chain[0][1], status_code,
                msg_prefix + "Initial response didn't redirect as expected:"
                " Response code was %d (expected %d)" %
                (response.redirect_chain[0][1], status_code))

            url, status_code = response.redirect_chain[-1]

            self.assertEqual(response.status_code, target_status_code,
                msg_prefix + "Response didn't redirect as expected: Final"
                " Response code was %d (expected %d)" %
                (response.status_code, target_status_code))

        else:
            # Not a followed redirect
            self.assertEqual(response.status_code, status_code,
                msg_prefix + "Response didn't redirect as expected: Response"
                " code was %d (expected %d)" %
                (response.status_code, status_code))

            url = response['Location']
            scheme, netloc, path, query, fragment = urlsplit(url)

            # Get the redirection page, using the same client that was used
            # to obtain the original response.
            redirect_response = response.client.get(path, QueryDict(query))

            self.assertEqual(redirect_response.status_code, target_status_code,
                msg_prefix + "Couldn't retrieve redirection page '%s':"
                " response code was %d (expected %d)" %
                (path, redirect_response.status_code, target_status_code))

        e_scheme, e_netloc, e_path, e_query, e_fragment = urlsplit(expected_url)
        if not (e_scheme or e_netloc):
            expected_url = urlunsplit(('http', host or 'testserver', e_path,
                e_query, e_fragment))

        self.assertEqual(url, expected_url,
            msg_prefix + "Response redirected to '%s', expected '%s'" %
            (url, expected_url))
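    # Illustrative usage (an assumed example view and URL, not part of the
    # original module):
    #
    #   response = self.client.get('/accounts/profile/')
    #   self.assertRedirects(response,
    #       '/accounts/login/?next=/accounts/profile/')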

    def assertContains(self, response, text, count=None, status_code=200,
                       msg_prefix=''):
        """
        Asserts that a response indicates that a page was retrieved
        successfully (i.e., the HTTP status code was as expected), and that
        ``text`` occurs ``count`` times in the content of the response.
        If ``count`` is None, the count doesn't matter - the assertion is true
        if the text occurs at least once in the response.
        """
        if msg_prefix:
            msg_prefix += ": "

        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Couldn't retrieve page: Response code was %d"
            " (expected %d)" % (response.status_code, status_code))
        text = smart_str(text, response._charset)
        real_count = response.content.count(text)
        if count is not None:
            self.assertEqual(real_count, count,
                msg_prefix + "Found %d instances of '%s' in response"
                " (expected %d)" % (real_count, text, count))
        else:
            self.failUnless(real_count != 0,
                msg_prefix + "Couldn't find '%s' in response" % text)

    def assertNotContains(self, response, text, status_code=200,
                          msg_prefix=''):
        """
        Asserts that a response indicates that a page was retrieved
        successfully (i.e., the HTTP status code was as expected), and that
        ``text`` doesn't occur in the content of the response.
        """
        if msg_prefix:
            msg_prefix += ": "

        self.assertEqual(response.status_code, status_code,
            msg_prefix + "Couldn't retrieve page: Response code was %d"
            " (expected %d)" % (response.status_code, status_code))
        text = smart_str(text, response._charset)
        self.assertEqual(response.content.count(text), 0,
            msg_prefix + "Response should not contain '%s'" % text)

    def assertFormError(self, response, form, field, errors, msg_prefix=''):
        """
        Asserts that a form used to render the response has a specific field
        error.
        """
        if msg_prefix:
            msg_prefix += ": "

        # Put context(s) into a list to simplify processing.
        contexts = to_list(response.context)
        if not contexts:
            self.fail(msg_prefix + "Response did not use any contexts to "
                      "render the response")

        # Put error(s) into a list to simplify processing.
        errors = to_list(errors)

        # Search all contexts for the error.
        found_form = False
        for i, context in enumerate(contexts):
            if form not in context:
                continue
            found_form = True
            for err in errors:
                if field:
                    if field in context[form].errors:
                        field_errors = context[form].errors[field]
                        self.failUnless(err in field_errors,
                            msg_prefix + "The field '%s' on form '%s' in"
                            " context %d does not contain the error '%s'"
                            " (actual errors: %s)" %
                            (field, form, i, err, repr(field_errors)))
                    elif field in context[form].fields:
                        self.fail(msg_prefix + "The field '%s' on form '%s'"
                                  " in context %d contains no errors" %
                                  (field, form, i))
                    else:
                        self.fail(msg_prefix + "The form '%s' in context %d"
                                  " does not contain the field '%s'" %
                                  (form, i, field))
                else:
                    non_field_errors = context[form].non_field_errors()
                    self.failUnless(err in non_field_errors,
                        msg_prefix + "The form '%s' in context %d does not"
                        " contain the non-field error '%s'"
                        " (actual errors: %s)" %
                        (form, i, err, non_field_errors))
        if not found_form:
            self.fail(msg_prefix + "The form '%s' was not used to render the"
                      " response" % form)

    def assertTemplateUsed(self, response, template_name, msg_prefix=''):
        """
        Asserts that the template with the provided name was used in rendering
        the response.
        """
        if msg_prefix:
            msg_prefix += ": "

        template_names = [t.name for t in to_list(response.template)]
        if not template_names:
            self.fail(msg_prefix + "No templates used to render the response")
        self.failUnless(template_name in template_names,
            msg_prefix + "Template '%s' was not a template used to render"
            " the response. Actual template(s) used: %s" %
            (template_name, u', '.join(template_names)))

    def assertTemplateNotUsed(self, response, template_name, msg_prefix=''):
        """
        Asserts that the template with the provided name was NOT used in
        rendering the response.
        """
        if msg_prefix:
            msg_prefix += ": "

        template_names = [t.name for t in to_list(response.template)]
        self.failIf(template_name in template_names,
            msg_prefix + "Template '%s' was used unexpectedly in rendering"
            " the response" % template_name)


def connections_support_transactions():
    """
    Returns True if all connections support transactions. This is messy
    because Python 2.4 doesn't provide the ``any`` and ``all`` builtins.
    """
    return all(conn.settings_dict['SUPPORTS_TRANSACTIONS']
               for conn in connections.all())


class TestCase(TransactionTestCase):
    """
    Does basically the same as TransactionTestCase, but surrounds every test
    with a transaction, monkey-patches the real transaction management
    routines to do nothing, and rolls back the test transaction at the end of
    the test. You have to use TransactionTestCase if you need transaction
    management inside a test.
    """

    def _fixture_setup(self):
        if not connections_support_transactions():
            return super(TestCase, self)._fixture_setup()

        # If the test case has a multi_db=True flag, set up all databases.
        # Otherwise, just use default.
        if getattr(self, 'multi_db', False):
            databases = connections
        else:
            databases = [DEFAULT_DB_ALIAS]

        for db in databases:
            transaction.enter_transaction_management(using=db)
            transaction.managed(True, using=db)
        disable_transaction_methods()

        from django.contrib.sites.models import Site
        Site.objects.clear_cache()

        for db in databases:
            if hasattr(self, 'fixtures'):
                call_command('loaddata', *self.fixtures, **{
                    'verbosity': 0,
                    'commit': False,
                    'database': db
                })

    def _fixture_teardown(self):
        if not connections_support_transactions():
            return super(TestCase, self)._fixture_teardown()

        # If the test case has a multi_db=True flag, tear down all databases.
        # Otherwise, just tear down default.
        if getattr(self, 'multi_db', False):
            databases = connections
        else:
            databases = [DEFAULT_DB_ALIAS]

        restore_transaction_methods()
        for db in databases:
            transaction.rollback(using=db)
            transaction.leave_transaction_management(using=db)

        for connection in connections.all():
            connection.close()
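# Illustrative end-to-end usage (assumed app, fixture and URLconf names, not
# part of the original module): a project test might combine the hooks and
# assertions defined above like this:
#
#   from django.test import TestCase
#
#   class ProfileViewTests(TestCase):
#       fixtures = ['users.json']    # loaded by _fixture_setup()
#       urls = 'myapp.test_urls'     # swapped into ROOT_URLCONF by _urlconf_setup()
#       multi_db = True              # flush/load fixtures on all configured databases
#
#       def test_profile_page(self):
#           response = self.client.get('/profile/')
#           self.assertContains(response, 'Welcome', status_code=200)
#           self.assertTemplateUsed(response, 'profile.html')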