mirror of https://github.com/django/django.git
Fixed #27463 -- Fixed E741 flake8 warnings.
This commit is contained in:
parent c7bfcd2f37
commit 0a63ef3f61
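flake8's E741 check flags variables named "l", "O" or "I", which are easily confused with the digits 1 and 0 in many fonts; E743 is the same check applied to function names. Most hunks below simply rename the offending local to something descriptive. A minimal, hypothetical illustration of the pattern (not taken from this diff):

    # Before: flake8 reports "E741 ambiguous variable name 'l'"
    def first_word(text):
        l = text.split()
        return l[0] if l else ''

    # After: a descriptive name clears the warning without changing behaviour
    def first_word(text):
        words = text.split()
        return words[0] if words else ''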
AUTHORS
@@ -624,6 +624,7 @@ answer newbie questions, and generally made Django that much better:
     Radek Švarz <http://www.svarz.cz/translate/>
     Rajesh Dhawan <rajesh.dhawan@gmail.com>
     Ramez Ashraf <ramezashraf@gmail.com>
+    Ramin Farajpour Cami <ramin.blackhat@gmail.com>
     Ramiro Morales <ramiro@rmorales.net>
     Ram Rachum <ram@rachum.com>
     Randy Barlow <randy@electronsweatshop.com>
@@ -340,11 +340,11 @@ class BaseModelAdmin(six.with_metaclass(forms.MediaDefiningClass)):
         # Check FKey lookups that are allowed, so that popups produced by
         # ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
         # are allowed to work.
-        for l in model._meta.related_fkey_lookups:
+        for fk_lookup in model._meta.related_fkey_lookups:
             # As ``limit_choices_to`` can be a callable, invoke it here.
-            if callable(l):
-                l = l()
-            for k, v in widgets.url_params_from_lookup_dict(l).items():
+            if callable(fk_lookup):
+                fk_lookup = fk_lookup()
+            for k, v in widgets.url_params_from_lookup_dict(fk_lookup).items():
                 if k == lookup and v == value:
                     return True

@@ -100,16 +100,16 @@ class DataSource(GDALBase):
     def __getitem__(self, index):
         "Allows use of the index [] operator to get a layer at the index."
         if isinstance(index, six.string_types):
-            l = capi.get_layer_by_name(self.ptr, force_bytes(index))
-            if not l:
+            layer = capi.get_layer_by_name(self.ptr, force_bytes(index))
+            if not layer:
                 raise OGRIndexError('invalid OGR Layer name given: "%s"' % index)
         elif isinstance(index, int):
             if index < 0 or index >= self.layer_count:
                 raise OGRIndexError('index out of range')
-            l = capi.get_layer(self._ptr, index)
+            layer = capi.get_layer(self._ptr, index)
         else:
             raise TypeError('Invalid index type: %s' % type(index))
-        return Layer(l, self)
+        return Layer(layer, self)

     def __len__(self):
         "Returns the number of layers within the data source."
@@ -129,9 +129,9 @@ class MigrationLoader(object):
         "Returns the migration(s) which match the given app label and name _prefix_"
         # Do the search
         results = []
-        for l, n in self.disk_migrations:
-            if l == app_label and n.startswith(name_prefix):
-                results.append((l, n))
+        for migration_app_label, migration_name in self.disk_migrations:
+            if migration_app_label == app_label and migration_name.startswith(name_prefix):
+                results.append((migration_app_label, migration_name))
         if len(results) > 1:
             raise AmbiguityError(
                 "There is more than one migration for '%s' with the prefix '%s'" % (app_label, name_prefix)
@@ -152,15 +152,15 @@ class IfParser(object):

     def __init__(self, tokens):
         # Turn 'is','not' and 'not','in' into single tokens.
-        l = len(tokens)
+        num_tokens = len(tokens)
         mapped_tokens = []
         i = 0
-        while i < l:
+        while i < num_tokens:
             token = tokens[i]
-            if token == "is" and i + 1 < l and tokens[i + 1] == "not":
+            if token == "is" and i + 1 < num_tokens and tokens[i + 1] == "not":
                 token = "is not"
                 i += 1  # skip 'not'
-            elif token == "not" and i + 1 < l and tokens[i + 1] == "in":
+            elif token == "not" and i + 1 < num_tokens and tokens[i + 1] == "in":
                 token = "not in"
                 i += 1  # skip 'in'
             mapped_tokens.append(self.translate_token(token))
@@ -164,8 +164,8 @@ else:
         dklen = hlen
     if dklen > (2 ** 32 - 1) * hlen:
         raise OverflowError('dklen too big')
-    l = -(-dklen // hlen)
-    r = dklen - (l - 1) * hlen
+    L = -(-dklen // hlen)
+    r = dklen - (L - 1) * hlen

     hex_format_string = "%%0%ix" % (hlen * 2)

@@ -187,5 +187,5 @@ else:
             result ^= _bin_to_long(u)
         return _long_to_bin(result, hex_format_string)

-    T = [F(x) for x in range(1, l)]
-    return b''.join(T) + F(l)[:r]
+    T = [F(x) for x in range(1, L)]
+    return b''.join(T) + F(L)[:r]
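Note on the two hunks above: the pure-Python pbkdf2() fallback appears to keep the variable names of the PBKDF2 specification (RFC 2898), where l is the number of hLen-sized blocks and r is the size of the final block, which would explain why the ambiguous lowercase l is promoted to uppercase L rather than renamed to something longer; E741 only flags lowercase l, uppercase O and uppercase I.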
@@ -130,7 +130,7 @@ class TimeFormat(Formatter):
         "Minutes; i.e. '00' to '59'"
         return '%02d' % self.data.minute

-    def O(self):
+    def O(self):  # NOQA: E743
         """
         Difference to Greenwich time in hours; e.g. '+0200', '-0430'.
@@ -247,7 +247,7 @@ class DateFormat(TimeFormat):
         "Month, textual, long; e.g. 'January'"
         return MONTHS[self.data.month]

-    def I(self):
+    def I(self):  # NOQA: E743
         "'1' if Daylight Savings Time, '0' otherwise."
         try:
             if self.timezone and self.timezone.dst(self.data):
@@ -264,7 +264,7 @@ class DateFormat(TimeFormat):
         "Day of the month without leading zeros; i.e. '1' to '31'"
         return self.data.day

-    def l(self):
+    def l(self):  # NOQA: E743
         "Day of the week, textual, long; e.g. 'Friday'"
         return WEEKDAYS[self.data.weekday()]

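The three dateformat hunks above suppress the warning with # NOQA: E743 instead of renaming, because the method names O, I and l are the date-format characters themselves: the formatter looks each alphabetic character in the format string up as a method, so the names cannot change without breaking the format spec. A rough, self-contained sketch of that dispatch pattern (illustrative only, not the actual Django source):

    class TinyFormatter:
        def l(self):  # NOQA: E743 -- must keep the format character's name
            return 'Friday'

        def format(self, formatstr):
            # Look each alphabetic format character up as a method name.
            return ''.join(
                getattr(self, ch)() if ch.isalpha() else ch
                for ch in formatstr
            )

    print(TinyFormatter().format('l'))  # -> 'Friday'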
@@ -309,10 +309,14 @@ class DjangoStandaloneHTMLBuilder(StandaloneHTMLBuilder):
         self.info(bold("writing templatebuiltins.js..."))
         xrefs = self.env.domaindata["std"]["objects"]
         templatebuiltins = {
-            "ttags": [n for ((t, n), (l, a)) in xrefs.items()
-                      if t == "templatetag" and l == "ref/templates/builtins"],
-            "tfilters": [n for ((t, n), (l, a)) in xrefs.items()
-                         if t == "templatefilter" and l == "ref/templates/builtins"],
+            "ttags": [
+                n for ((t, n), (k, a)) in xrefs.items()
+                if t == "templatetag" and k == "ref/templates/builtins"
+            ],
+            "tfilters": [
+                n for ((t, n), (k, a)) in xrefs.items()
+                if t == "templatefilter" and k == "ref/templates/builtins"
+            ],
         }
         outfilename = os.path.join(self.outdir, "templatebuiltins.js")
         with open(outfilename, 'w') as fp:
@@ -194,15 +194,15 @@ class GenericRelationTests(TestCase):
         HasLinkThing.objects.create()
         b = Board.objects.create(name=str(hs1.pk))
         Link.objects.create(content_object=hs2)
-        l = Link.objects.create(content_object=hs1)
+        link = Link.objects.create(content_object=hs1)
         Link.objects.create(content_object=b)
         qs = HasLinkThing.objects.annotate(Sum('links')).filter(pk=hs1.pk)
         # If content_type restriction isn't in the query's join condition,
         # then wrong results are produced here as the link to b will also match
         # (b and hs1 have equal pks).
         self.assertEqual(qs.count(), 1)
-        self.assertEqual(qs[0].links__sum, l.id)
-        l.delete()
+        self.assertEqual(qs[0].links__sum, link.id)
+        link.delete()
         # Now if we don't have proper left join, we will not produce any
         # results at all here.
         # clear cached results
@@ -217,9 +217,9 @@ class GenericRelationTests(TestCase):
     def test_filter_targets_related_pk(self):
         HasLinkThing.objects.create()
         hs2 = HasLinkThing.objects.create()
-        l = Link.objects.create(content_object=hs2)
-        self.assertNotEqual(l.object_id, l.pk)
-        self.assertSequenceEqual(HasLinkThing.objects.filter(links=l.pk), [hs2])
+        link = Link.objects.create(content_object=hs2)
+        self.assertNotEqual(link.object_id, link.pk)
+        self.assertSequenceEqual(HasLinkThing.objects.filter(links=link.pk), [hs2])

     def test_editable_generic_rel(self):
         GenericRelationForm = modelform_factory(HasLinkThing, fields='__all__')
@@ -234,10 +234,10 @@ class SpecializedFieldTest(SimpleTestCase):

     def test_linestringfield(self):
         class LineStringForm(forms.Form):
-            l = forms.LineStringField()
+            f = forms.LineStringField()

         geom = self.geometries['linestring']
-        form = LineStringForm(data={'l': geom})
+        form = LineStringForm(data={'f': geom})
         self.assertTextarea(geom, form.as_p())
         self.assertMapWidget(form)
         self.assertFalse(LineStringForm().is_valid())
@@ -247,10 +247,10 @@ class SpecializedFieldTest(SimpleTestCase):

     def test_multilinestringfield(self):
         class LineStringForm(forms.Form):
-            l = forms.MultiLineStringField()
+            f = forms.MultiLineStringField()

         geom = self.geometries['multilinestring']
-        form = LineStringForm(data={'l': geom})
+        form = LineStringForm(data={'f': geom})
         self.assertTextarea(geom, form.as_p())
         self.assertMapWidget(form)
         self.assertFalse(LineStringForm().is_valid())
@@ -347,18 +347,18 @@ class HttpResponseTests(unittest.TestCase):
         r = HttpResponse()
         del r['Content-Type']
         r['foo'] = 'bar'
-        l = list(r.items())
-        self.assertEqual(len(l), 1)
-        self.assertEqual(l[0], ('foo', 'bar'))
-        self.assertIsInstance(l[0][0], str)
+        headers = list(r.items())
+        self.assertEqual(len(headers), 1)
+        self.assertEqual(headers[0], ('foo', 'bar'))
+        self.assertIsInstance(headers[0][0], str)

         r = HttpResponse()
         del r['Content-Type']
         r[b'foo'] = 'bar'
-        l = list(r.items())
-        self.assertEqual(len(l), 1)
-        self.assertEqual(l[0], ('foo', 'bar'))
-        self.assertIsInstance(l[0][0], str)
+        headers = list(r.items())
+        self.assertEqual(len(headers), 1)
+        self.assertEqual(headers[0], ('foo', 'bar'))
+        self.assertIsInstance(headers[0][0], str)

         r = HttpResponse()
         with self.assertRaises(UnicodeError):
@@ -544,14 +544,14 @@ class FormattingTests(SimpleTestCase):
         self.d = datetime.date(2009, 12, 31)
         self.dt = datetime.datetime(2009, 12, 31, 20, 50)
         self.t = datetime.time(10, 15, 48)
-        self.l = 10000 if PY3 else long(10000)  # NOQA: long undefined on PY3
+        self.long = 10000 if PY3 else long(10000)  # NOQA: long undefined on PY3
         self.ctxt = Context({
             'n': self.n,
             't': self.t,
             'd': self.d,
             'dt': self.dt,
             'f': self.f,
-            'l': self.l,
+            'l': self.long,
         })

     def test_locale_independent(self):
@@ -574,7 +574,7 @@ class FormattingTests(SimpleTestCase):
         )
         self.assertEqual('-66666.6', nformat(-66666.666, decimal_sep='.', decimal_pos=1))
         self.assertEqual('-66666.0', nformat(int('-66666'), decimal_sep='.', decimal_pos=1))
-        self.assertEqual('10000.0', nformat(self.l, decimal_sep='.', decimal_pos=1))
+        self.assertEqual('10000.0', nformat(self.long, decimal_sep='.', decimal_pos=1))
         self.assertEqual(
             '10,00,00,000.00',
             nformat(100000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, 0), thousand_sep=',')
@@ -588,8 +588,10 @@ class FormattingTests(SimpleTestCase):
             nformat(1000000000.00, decimal_sep='.', decimal_pos=2, grouping=(3, 2, -1), thousand_sep=',')
         )
         # This unusual grouping/force_grouping combination may be triggered by the intcomma filter (#17414)
-        self.assertEqual('10000', nformat(self.l, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True))
-
+        self.assertEqual(
+            '10000',
+            nformat(self.long, decimal_sep='.', decimal_pos=0, grouping=0, force_grouping=True)
+        )
         # date filter
         self.assertEqual('31.12.2009 в 20:50', Template('{{ dt|date:"d.m.Y в H:i" }}').render(self.ctxt))
         self.assertEqual('⌚ 10:15', Template('{{ t|time:"⌚ H:i" }}').render(self.ctxt))
@@ -612,7 +614,7 @@ class FormattingTests(SimpleTestCase):
             self.assertEqual('No localizable', localize('No localizable'))
             self.assertEqual('66666.666', localize(self.n))
             self.assertEqual('99999.999', localize(self.f))
-            self.assertEqual('10000', localize(self.l))
+            self.assertEqual('10000', localize(self.long))
             self.assertEqual('des. 31, 2009', localize(self.d))
             self.assertEqual('des. 31, 2009, 8:50 p.m.', localize(self.dt))
             self.assertEqual('66666.666', Template('{{ n }}').render(self.ctxt))
@@ -757,13 +759,13 @@ class FormattingTests(SimpleTestCase):
             with self.settings(USE_THOUSAND_SEPARATOR=True):
                 self.assertEqual('66.666,666', localize(self.n))
                 self.assertEqual('99.999,999', localize(self.f))
-                self.assertEqual('10.000', localize(self.l))
+                self.assertEqual('10.000', localize(self.long))
                 self.assertEqual('True', localize(True))

             with self.settings(USE_THOUSAND_SEPARATOR=False):
                 self.assertEqual('66666,666', localize(self.n))
                 self.assertEqual('99999,999', localize(self.f))
-                self.assertEqual('10000', localize(self.l))
+                self.assertEqual('10000', localize(self.long))
                 self.assertEqual('31 de desembre de 2009', localize(self.d))
                 self.assertEqual('31 de desembre de 2009 a les 20:50', localize(self.dt))

@@ -976,12 +978,12 @@ class FormattingTests(SimpleTestCase):
             with self.settings(USE_THOUSAND_SEPARATOR=True):
                 self.assertEqual('66,666.666', localize(self.n))
                 self.assertEqual('99,999.999', localize(self.f))
-                self.assertEqual('10,000', localize(self.l))
+                self.assertEqual('10,000', localize(self.long))

             with self.settings(USE_THOUSAND_SEPARATOR=False):
                 self.assertEqual('66666.666', localize(self.n))
                 self.assertEqual('99999.999', localize(self.f))
-                self.assertEqual('10000', localize(self.l))
+                self.assertEqual('10000', localize(self.long))
                 self.assertEqual('Dec. 31, 2009', localize(self.d))
                 self.assertEqual('Dec. 31, 2009, 8:50 p.m.', localize(self.dt))

@@ -181,15 +181,15 @@ class WriterTests(SimpleTestCase):
         """

     def safe_exec(self, string, value=None):
-        l = {}
+        d = {}
         try:
-            exec(force_str(string), globals(), l)
+            exec(force_str(string), globals(), d)
         except Exception as e:
             if value:
                 self.fail("Could not exec %r (from value %r): %s" % (string.strip(), value, e))
             else:
                 self.fail("Could not exec %r: %s" % (string.strip(), e))
-        return l
+        return d

     def serialize_round_trip(self, value):
         string, imports = MigrationWriter.serialize(value)
@@ -947,7 +947,7 @@ class MultiTableInheritanceTest(TestCase):
     @override_settings(DEBUG=True)
     def test_child_link_prefetch(self):
         with self.assertNumQueries(2):
-            l = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]
+            authors = [a.authorwithage for a in Author.objects.prefetch_related('authorwithage')]

         # Regression for #18090: the prefetching query must include an IN clause.
         # Note that on Oracle the table name is upper case in the generated SQL,
@@ -955,7 +955,7 @@ class MultiTableInheritanceTest(TestCase):
         self.assertIn('authorwithage', connection.queries[-1]['sql'].lower())
         self.assertIn(' IN ', connection.queries[-1]['sql'])

-        self.assertEqual(l, [a.authorwithage for a in Author.objects.all()])
+        self.assertEqual(authors, [a.authorwithage for a in Author.objects.all()])


 class ForeignKeyToFieldTest(TestCase):
@@ -564,14 +564,14 @@ class Queries1Tests(TestCase):
         self.assertEqual(d, {'a': 'one', 'b': 'two'})

         # Order by the number of tags attached to an item.
-        l = (
+        qs = (
             Item.objects
             .extra(select={
                 'count': 'select count(*) from queries_item_tags where queries_item_tags.item_id = queries_item.id'
             })
             .order_by('-count')
         )
-        self.assertEqual([o.count for o in l], [2, 2, 1, 0])
+        self.assertEqual([o.count for o in qs], [2, 2, 1, 0])

     def test_ticket6154(self):
         # Multiple filter statements are joined using "AND" all the time.
@@ -76,29 +76,29 @@ class ParserTests(TestCase):
             Variable({})

     def test_filter_args_count(self):
-        p = Parser("")
-        l = Library()
+        parser = Parser("")
+        register = Library()

-        @l.filter
+        @register.filter
         def no_arguments(value):
             pass

-        @l.filter
+        @register.filter
         def one_argument(value, arg):
             pass

-        @l.filter
+        @register.filter
         def one_opt_argument(value, arg=False):
             pass

-        @l.filter
+        @register.filter
         def two_arguments(value, arg, arg2):
             pass

-        @l.filter
+        @register.filter
         def two_one_opt_arg(value, arg, arg2=False):
             pass
-        p.add_library(l)
+        parser.add_library(register)
         for expr in (
             '1|no_arguments:"1"',
             '1|two_arguments',
@@ -106,7 +106,7 @@ class ParserTests(TestCase):
             '1|two_one_opt_arg',
         ):
             with self.assertRaises(TemplateSyntaxError):
-                FilterExpression(expr, p)
+                FilterExpression(expr, parser)
         for expr in (
             # Correct number of arguments
             '1|no_arguments',
@@ -117,4 +117,4 @@ class ParserTests(TestCase):
             # Not supplying all
             '1|two_one_opt_arg:"1"',
         ):
-            FilterExpression(expr, p)
+            FilterExpression(expr, parser)
@@ -956,10 +956,10 @@ class ContextTests(TestDataMixin, TestCase):
         c2.update({'goodbye': 'world', 'python': 'rocks'})
         c2.update({'goodbye': 'dolly'})

-        l = ContextList([c1, c2])
+        k = ContextList([c1, c2])
         # None, True and False are builtins of BaseContext, and present
         # in every Context without needing to be added.
-        self.assertEqual({'None', 'True', 'False', 'hello', 'goodbye', 'python', 'dolly'}, l.keys())
+        self.assertEqual({'None', 'True', 'False', 'hello', 'goodbye', 'python', 'dolly'}, k.keys())

     def test_15368(self):
         # Need to insert a context processor that assumes certain things about
@@ -199,10 +199,10 @@ class LazyObjectTestCase(TestCase):

     def test_copy_list(self):
         # Copying a list works and returns the correct objects.
-        l = [1, 2, 3]
+        lst = [1, 2, 3]

-        obj = self.lazy_wrap(l)
-        len(l)  # forces evaluation
+        obj = self.lazy_wrap(lst)
+        len(lst)  # forces evaluation
         obj2 = copy.copy(obj)

         self.assertIsNot(obj, obj2)
@@ -211,9 +211,9 @@ class LazyObjectTestCase(TestCase):

     def test_copy_list_no_evaluation(self):
         # Copying a list doesn't force evaluation.
-        l = [1, 2, 3]
+        lst = [1, 2, 3]

-        obj = self.lazy_wrap(l)
+        obj = self.lazy_wrap(lst)
         obj2 = copy.copy(obj)

         self.assertIsNot(obj, obj2)
@@ -245,10 +245,10 @@ class LazyObjectTestCase(TestCase):

     def test_deepcopy_list(self):
         # Deep copying a list works and returns the correct objects.
-        l = [1, 2, 3]
+        lst = [1, 2, 3]

-        obj = self.lazy_wrap(l)
-        len(l)  # forces evaluation
+        obj = self.lazy_wrap(lst)
+        len(lst)  # forces evaluation
         obj2 = copy.deepcopy(obj)

         self.assertIsNot(obj, obj2)
@@ -257,9 +257,9 @@ class LazyObjectTestCase(TestCase):

     def test_deepcopy_list_no_evaluation(self):
         # Deep copying doesn't force evaluation.
-        l = [1, 2, 3]
+        lst = [1, 2, 3]

-        obj = self.lazy_wrap(l)
+        obj = self.lazy_wrap(lst)
         obj2 = copy.deepcopy(obj)

         self.assertIsNot(obj, obj2)