run pyupgrade
parent d41119ed04
commit 9aacb4635e
@@ -455,7 +455,7 @@ class ExceptionInfo(object):
         """
         __tracebackhide__ = True
         if not re.search(regexp, str(self.value)):
-            assert 0, "Pattern '{0!s}' not found in '{1!s}'".format(
+            assert 0, "Pattern '{!s}' not found in '{!s}'".format(
                 regexp, self.value)
         return True
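
Note: pyupgrade replaces explicitly numbered str.format fields ('{0}', '{1}') with the auto-numbered form ('{}') available since Python 2.7 and 3.1. When the indices are already sequential, as in every hunk of this commit, the two spellings produce identical output. A minimal sketch, with made-up arguments standing in for regexp and self.value:

    old = "Pattern '{0!s}' not found in '{1!s}'".format('foo', 'bar')
    new = "Pattern '{!s}' not found in '{!s}'".format('foo', 'bar')
    assert old == new == "Pattern 'foo' not found in 'bar'"
    # Mixing both forms in one string is rejected:
    # "{} and {1}".format(1, 2) raises ValueError.
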
@@ -904,7 +904,7 @@ class AssertionRewriter(ast.NodeVisitor):
         self.push_format_context()
         left_res, left_expl = self.visit(comp.left)
         if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
-            left_expl = "({0})".format(left_expl)
+            left_expl = "({})".format(left_expl)
         res_variables = [self.variable() for i in range(len(comp.ops))]
         load_names = [ast.Name(v, ast.Load()) for v in res_variables]
         store_names = [ast.Name(v, ast.Store()) for v in res_variables]

@@ -915,7 +915,7 @@ class AssertionRewriter(ast.NodeVisitor):
         for i, op, next_operand in it:
             next_res, next_expl = self.visit(next_operand)
             if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
-                next_expl = "({0})".format(next_expl)
+                next_expl = "({})".format(next_expl)
             results.append(next_res)
             sym = binop_map[op.__class__]
             syms.append(ast.Str(sym))

@@ -69,10 +69,10 @@ def _truncate_explanation(input_lines, max_lines=None, max_chars=None):
         truncated_line_count += 1  # Account for the part-truncated final line
     msg = '...Full output truncated'
     if truncated_line_count == 1:
-        msg += ' ({0} line hidden)'.format(truncated_line_count)
+        msg += ' ({} line hidden)'.format(truncated_line_count)
     else:
-        msg += ' ({0} lines hidden)'.format(truncated_line_count)
-    msg += ", {0}" .format(USAGE_MSG)
+        msg += ' ({} lines hidden)'.format(truncated_line_count)
+    msg += ", {}" .format(USAGE_MSG)
     truncated_explanation.extend([
         six.text_type(""),
         six.text_type(msg),

@@ -275,14 +275,14 @@ def _compare_eq_set(left, right, verbose=False):
 def _compare_eq_dict(left, right, verbose=False):
     explanation = []
     common = set(left).intersection(set(right))
-    same = dict((k, left[k]) for k in common if left[k] == right[k])
+    same = {k: left[k] for k in common if left[k] == right[k]}
     if same and verbose < 2:
         explanation += [u('Omitting %s identical items, use -vv to show') %
                         len(same)]
     elif same:
         explanation += [u('Common items:')]
         explanation += pprint.pformat(same).splitlines()
-    diff = set(k for k in common if left[k] != right[k])
+    diff = {k for k in common if left[k] != right[k]}
     if diff:
         explanation += [u('Differing items:')]
         for k in diff:
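
Note: dict((k, v) for ...) and the dict comprehension pyupgrade emits here are equivalent; the comprehension skips the extra name lookup and call of dict(). A runnable sketch with hypothetical inputs mirroring _compare_eq_dict:

    left = {'a': 0, 'b': 1}
    right = {'a': 0, 'b': 2}
    common = set(left).intersection(set(right))
    same_old = dict((k, left[k]) for k in common if left[k] == right[k])
    same_new = {k: left[k] for k in common if left[k] == right[k]}
    assert same_old == same_new == {'a': 0}
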
@@ -292,12 +292,12 @@ def _compare_eq_dict(left, right, verbose=False):
     if extra_left:
         explanation.append(u('Left contains more items:'))
         explanation.extend(pprint.pformat(
-            dict((k, left[k]) for k in extra_left)).splitlines())
+            {k: left[k] for k in extra_left}).splitlines())
     extra_right = set(right) - set(left)
     if extra_right:
         explanation.append(u('Right contains more items:'))
         explanation.extend(pprint.pformat(
-            dict((k, right[k]) for k in extra_right)).splitlines())
+            {k: right[k] for k in extra_right}).splitlines())
     return explanation

@@ -184,12 +184,12 @@ capture_fixtures = {'capfd', 'capfdbinary', 'capsys', 'capsysbinary'}
 
 
 def _ensure_only_one_capture_fixture(request, name):
-    fixtures = set(request.fixturenames) & capture_fixtures - set((name,))
+    fixtures = set(request.fixturenames) & capture_fixtures - {name}
     if fixtures:
         fixtures = sorted(fixtures)
         fixtures = fixtures[0] if len(fixtures) == 1 else fixtures
         raise request.raiseerror(
-            "cannot use {0} and {1} at the same time".format(
+            "cannot use {} and {} at the same time".format(
                 fixtures, name,
             ),
         )
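
Note: set((name,)) builds a one-element tuple and then a set from it; the set literal {name} that pyupgrade substitutes yields the same value directly. A one-line sketch with a hypothetical fixture name:

    name = 'capsys'
    assert set((name,)) == {name} == {'capsys'}
    # Caveat: {} is an empty dict, so empty sets keep the set() spelling.
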
@@ -89,7 +89,7 @@ def filename_arg(path, optname):
     :optname: name of the option
     """
     if os.path.isdir(path):
-        raise UsageError("{0} must be a filename, given: {1}".format(optname, path))
+        raise UsageError("{} must be a filename, given: {}".format(optname, path))
     return path
 
 
@@ -100,7 +100,7 @@ def directory_arg(path, optname):
     :optname: name of the option
     """
     if not os.path.isdir(path):
-        raise UsageError("{0} must be a directory, given: {1}".format(optname, path))
+        raise UsageError("{} must be a directory, given: {}".format(optname, path))
     return path
 
 
@@ -253,7 +253,7 @@ class PytestPluginManager(PluginManager):
 
     def register(self, plugin, name=None):
         if name in ['pytest_catchlog', 'pytest_capturelog']:
-            self._warn('{0} plugin has been merged into the core, '
+            self._warn('{} plugin has been merged into the core, '
                        'please remove it from your requirements.'.format(
                            name.replace('_', '-')))
             return

@@ -735,7 +735,7 @@ class Argument(object):
             args += ['type: ' + repr(self.type)]
         if hasattr(self, 'default'):
             args += ['default: ' + repr(self.default)]
-        return 'Argument({0})'.format(', '.join(args))
+        return 'Argument({})'.format(', '.join(args))
 
 
 class OptionGroup(object):

@@ -493,8 +493,8 @@ class FixtureRequest(FuncargnamesCompatAttr):
             source_path = source_path.relto(funcitem.config.rootdir)
         msg = (
             "The requested fixture has no parameter defined for the "
-            "current test.\n\nRequested fixture '{0}' defined in:\n{1}"
-            "\n\nRequested here:\n{2}:{3}".format(
+            "current test.\n\nRequested fixture '{}' defined in:\n{}"
+            "\n\nRequested here:\n{}:{}".format(
                 fixturedef.argname,
                 getlocation(fixturedef.func, funcitem.config.rootdir),
                 source_path,

@@ -612,8 +612,8 @@ def scope2index(scope, descr, where=None):
         return scopes.index(scope)
     except ValueError:
         raise ValueError(
-            "{0} {1}has an unsupported scope value '{2}'".format(
-                descr, 'from {0} '.format(where) if where else '',
+            "{} {}has an unsupported scope value '{}'".format(
+                descr, 'from {} '.format(where) if where else '',
                 scope)
         )

@@ -681,10 +681,10 @@ class FixtureLookupErrorRepr(TerminalRepr):
             tw.line(tbline.rstrip())
         lines = self.errorstring.split("\n")
         if lines:
-            tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker,
+            tw.line('{} {}'.format(FormattedExcinfo.fail_marker,
                                      lines[0].strip()), red=True)
             for line in lines[1:]:
-                tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker,
+                tw.line('{} {}'.format(FormattedExcinfo.flow_marker,
                                          line.strip()), red=True)
         tw.line()
         tw.line("%s:%d" % (self.filename, self.firstlineno + 1))

@@ -732,7 +732,7 @@ class FixtureDef(object):
         self.scope = scope
         self.scopenum = scope2index(
             scope or "function",
-            descr='fixture {0}'.format(func.__name__),
+            descr='fixture {}'.format(func.__name__),
             where=baseid
         )
         self.params = params

@@ -318,8 +318,8 @@ def get_actual_log_level(config, *setting_names):
     except ValueError:
         # Python logging does not recognise this as a logging level
         raise pytest.UsageError(
-            "'{0}' is not recognized as a logging level name for "
-            "'{1}'. Please consider passing the "
+            "'{}' is not recognized as a logging level name for "
+            "'{}'. Please consider passing the "
             "logging level num instead.".format(
                 log_level,
                 setting_name))

@@ -112,7 +112,7 @@ def wrap_session(config, doit):
     except KeyboardInterrupt:
         excinfo = _pytest._code.ExceptionInfo()
         if initstate < 2 and isinstance(excinfo.value, exit.Exception):
-            sys.stderr.write('{0}: {1}\n'.format(
+            sys.stderr.write('{}: {}\n'.format(
                 excinfo.typename, excinfo.value.msg))
         config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
         session.exitstatus = EXIT_INTERRUPTED

@@ -17,7 +17,7 @@ class MarkMapping(object):
 
     @classmethod
     def from_item(cls, item):
-        mark_names = set(mark.name for mark in item.iter_markers())
+        mark_names = {mark.name for mark in item.iter_markers()}
         return cls(mark_names)
 
     def __getitem__(self, name):

@@ -296,7 +296,7 @@ class MarkInfo(object):
         return cls([mark])
 
     def __repr__(self):
-        return "<MarkInfo {0!r}>".format(self.combined)
+        return "<MarkInfo {!r}>".format(self.combined)
 
     def add_mark(self, mark):
         """ add a MarkInfo with the given args and kwargs. """

@@ -75,7 +75,7 @@ def skip(msg="", **kwargs):
     allow_module_level = kwargs.pop('allow_module_level', False)
     if kwargs:
         keys = [k for k in kwargs.keys()]
-        raise TypeError('unexpected keyword arguments: {0}'.format(keys))
+        raise TypeError('unexpected keyword arguments: {}'.format(keys))
     raise Skipped(msg=msg, allow_module_level=allow_module_level)
 
 
@@ -97,7 +97,7 @@ class LsofFdLeakChecker(object):
         gc.collect()
         lines2 = self.get_open_files()
 
-        new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+        new_fds = {t[0] for t in lines2} - {t[0] for t in lines1}
         leaked_files = [t for t in lines2 if t[0] in new_fds]
         if leaked_files:
             error = []
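
Note: the same pass turns set([...]) and set(<genexpr>) into set literals and set comprehensions, which avoid materialising an intermediate list. A sketch with made-up (fd, filename) tuples standing in for the lsof snapshots lines1/lines2:

    lines1 = [('5', 'a.txt'), ('7', 'b.txt')]
    lines2 = [('5', 'a.txt'), ('7', 'b.txt'), ('9', 'c.txt')]
    old = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
    new = {t[0] for t in lines2} - {t[0] for t in lines1}
    assert old == new == {'9'}
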
@@ -813,7 +813,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
         if scope is None:
             scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)
 
-        scopenum = scope2index(scope, descr='call to {0}'.format(self.parametrize))
+        scopenum = scope2index(scope, descr='call to {}'.format(self.parametrize))
         valtypes = {}
         for arg in argnames:
             if arg not in self.fixturenames:

@@ -858,8 +858,8 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
         for a_id, param, param_index in elements:
             if len(param.values) != len(argnames):
                 raise ValueError(
-                    'In "parametrize" the number of values ({0}) must be '
-                    'equal to the number of names ({1})'.format(
+                    'In "parametrize" the number of values ({}) must be '
+                    'equal to the number of names ({})'.format(
                         param.values, argnames))
             newcallspec = callspec.copy(self)
             newcallspec.setmulti2(valtypes, argnames, param.values, a_id,

@@ -1016,7 +1016,7 @@ def _show_fixtures_per_test(config, session):
             return
         if verbose > 0:
             bestrel = get_best_relpath(fixture_def.func)
-            funcargspec = "{0} -- {1}".format(argname, bestrel)
+            funcargspec = "{} -- {}".format(argname, bestrel)
         else:
             funcargspec = argname
         tw.line(funcargspec, green=True)

@@ -1036,8 +1036,8 @@ def _show_fixtures_per_test(config, session):
             # this test item does not use any fixtures
             return
         tw.line()
-        tw.sep('-', 'fixtures used by {0}'.format(item.name))
-        tw.sep('-', '({0})'.format(get_best_relpath(item.function)))
+        tw.sep('-', 'fixtures used by {}'.format(item.name))
+        tw.sep('-', '({})'.format(get_best_relpath(item.function)))
         # dict key not used in loop but needed for sorting
         for _, fixturedefs in sorted(info.name2fixturedefs.items()):
             assert fixturedefs is not None

@@ -79,7 +79,7 @@ class ApproxNumpy(ApproxBase):
         # shape of the array...
         import numpy as np
 
-        return "approx({0!r})".format(list(
+        return "approx({!r})".format(list(
             self._approx_scalar(x) for x in np.asarray(self.expected)))
 
     if sys.version_info[0] == 2:

@@ -94,7 +94,7 @@ class ApproxNumpy(ApproxBase):
         try:
             actual = np.asarray(actual)
         except: # noqa
-            raise TypeError("cannot compare '{0}' to numpy.ndarray".format(actual))
+            raise TypeError("cannot compare '{}' to numpy.ndarray".format(actual))
 
         if not np.isscalar(actual) and actual.shape != self.expected.shape:
             return False

@@ -123,9 +123,9 @@ class ApproxMapping(ApproxBase):
     """
 
     def __repr__(self):
-        return "approx({0!r})".format(dict(
-            (k, self._approx_scalar(v))
-            for k, v in self.expected.items()))
+        return "approx({!r})".format({
+            k: self._approx_scalar(v)
+            for k, v in self.expected.items()})
 
     def __eq__(self, actual):
         if set(actual.keys()) != set(self.expected.keys()):

@@ -147,7 +147,7 @@ class ApproxSequence(ApproxBase):
         seq_type = type(self.expected)
         if seq_type not in (tuple, list, set):
             seq_type = list
-        return "approx({0!r})".format(seq_type(
+        return "approx({!r})".format(seq_type(
             self._approx_scalar(x) for x in self.expected))
 
     def __eq__(self, actual):

@@ -189,9 +189,9 @@ class ApproxScalar(ApproxBase):
             vetted_tolerance = '???'
 
         if sys.version_info[0] == 2:
-            return '{0} +- {1}'.format(self.expected, vetted_tolerance)
+            return '{} +- {}'.format(self.expected, vetted_tolerance)
         else:
-            return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)
+            return u'{} \u00b1 {}'.format(self.expected, vetted_tolerance)
 
     def __eq__(self, actual):
         """

@@ -591,7 +591,7 @@ def raises(expected_exception, *args, **kwargs):
                         " derived from BaseException, not %s")
                 raise TypeError(msg % type(exc))
 
-    message = "DID NOT RAISE {0}".format(expected_exception)
+    message = "DID NOT RAISE {}".format(expected_exception)
     match_expr = None
 
     if not args:

@@ -217,8 +217,8 @@ class WarningsChecker(WarningsRecorder):
             if not any(issubclass(r.category, self.expected_warning)
                        for r in self):
                 __tracebackhide__ = True
-                fail("DID NOT WARN. No warnings of type {0} was emitted. "
-                     "The list of emitted warnings is: {1}.".format(
+                fail("DID NOT WARN. No warnings of type {} was emitted. "
+                     "The list of emitted warnings is: {}.".format(
                          self.expected_warning,
                          [each.message for each in self]))
             elif self.match_expr is not None:

@@ -227,7 +227,7 @@ class WarningsChecker(WarningsRecorder):
                     if re.compile(self.match_expr).search(str(r.message)):
                         break
                 else:
-                    fail("DID NOT WARN. No warnings of type {0} matching"
-                         " ('{1}') was emitted. The list of emitted warnings"
-                         " is: {2}.".format(self.expected_warning, self.match_expr,
+                    fail("DID NOT WARN. No warnings of type {} matching"
+                         " ('{}') was emitted. The list of emitted warnings"
+                         " is: {}.".format(self.expected_warning, self.match_expr,
                                             [each.message for each in self]))

@@ -95,7 +95,7 @@ def show_test_item(item):
     tw.write(item._nodeid)
     used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
     if used_fixtures:
-        tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures)))
+        tw.write(' (fixtures used: {})'.format(', '.join(used_fixtures)))
 
 
 def pytest_runtest_setup(item):

@@ -133,7 +133,7 @@ def _update_current_test_var(item, when):
     """
     var_name = 'PYTEST_CURRENT_TEST'
    if when:
-        value = '{0} ({1})'.format(item.nodeid, when)
+        value = '{} ({})'.format(item.nodeid, when)
         # don't allow null bytes on environment variables (see #2644, #2957)
         value = value.replace('\x00', '(null)')
         os.environ[var_name] = value

@@ -297,7 +297,7 @@ class BaseReport(object):
 def pytest_runtest_makereport(item, call):
     when = call.when
     duration = call.stop - call.start
-    keywords = dict([(x, 1) for x in item.keywords])
+    keywords = {x: 1 for x in item.keywords}
     excinfo = call.excinfo
     sections = []
     if not call.excinfo:
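
Note: dict([(x, 1) for x in ...]) first builds a full list of pairs and then a dict from it; the comprehension form builds the dict in one step. A sketch with a made-up keywords list standing in for item.keywords:

    keywords = ['smoke', 'slow']
    assert dict([(x, 1) for x in keywords]) == {x: 1 for x in keywords}
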
@@ -57,10 +57,10 @@ def _show_fixture_action(fixturedef, msg):
     if msg == 'SETUP':
         deps = sorted(arg for arg in fixturedef.argnames if arg != 'request')
         if deps:
-            tw.write(' (fixtures used: {0})'.format(', '.join(deps)))
+            tw.write(' (fixtures used: {})'.format(', '.join(deps)))
 
     if hasattr(fixturedef, 'cached_param'):
-        tw.write('[{0}]'.format(fixturedef.cached_param))
+        tw.write('[{}]'.format(fixturedef.cached_param))
 
     if capman:
         capman.resume_global_capture()

@@ -116,7 +116,7 @@ def pytest_runtest_makereport(item, call):
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
         from _pytest.compat import _is_unittest_unexpected_success_a_failure
         if item._unexpectedsuccess:
-            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
+            rep.longrepr = "Unexpected success: {}".format(item._unexpectedsuccess)
         else:
             rep.longrepr = "Unexpected success"
         if _is_unittest_unexpected_success_a_failure():

@@ -143,7 +143,7 @@ def pytest_runtest_makereport(item, call):
             explanation = evalxfail.getexplanation()
             if is_strict_xfail:
                 rep.outcome = "failed"
-                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
+                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
             else:
                 rep.outcome = "passed"
                 rep.wasxfail = explanation

@@ -70,7 +70,7 @@ class TestSpecialisedExplanations(object):
         assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
 
     def test_eq_set(self):
-        assert set([0, 10, 11, 12]) == set([0, 20, 21])
+        assert {0, 10, 11, 12} == {0, 20, 21}
 
     def test_eq_longer_list(self):
         assert [1,2] == [1,2,3]

@@ -599,8 +599,8 @@ class TestInvocationVariants(object):
                 "__import__('pkg_resources').declare_namespace(__name__)")
             lib = ns.mkdir(dirname)
             lib.ensure("__init__.py")
-            lib.join("test_{0}.py".format(dirname)). \
-                write("def test_{0}(): pass\n"
+            lib.join("test_{}.py".format(dirname)). \
+                write("def test_{}(): pass\n"
                       "def test_other():pass".format(dirname))
 
         # The structure of the test directory is now:

@@ -203,8 +203,8 @@ class TestTraceback_f_g_h(object):
         excinfo = pytest.raises(ValueError, h)
         traceback = excinfo.traceback
         ntraceback = traceback.filter()
-        print('old: {0!r}'.format(traceback))
-        print('new: {0!r}'.format(ntraceback))
+        print('old: {!r}'.format(traceback))
+        print('new: {!r}'.format(ntraceback))
 
         if matching:
             assert len(ntraceback) == len(traceback) - 2

@@ -126,7 +126,7 @@ def test_terminal_reporter_writer_attr(pytestconfig):
 def test_pytest_catchlog_deprecated(testdir, plugin):
     testdir.makepyfile("""
         def test_func(pytestconfig):
-            pytestconfig.pluginmanager.register(None, 'pytest_{0}')
+            pytestconfig.pluginmanager.register(None, 'pytest_{}')
     """.format(plugin))
     res = testdir.runpytest()
     assert res.ret == 0

@@ -596,7 +596,7 @@ def test_log_file_cli(testdir):
 
     log_file = testdir.tmpdir.join('pytest.log').strpath
 
-    result = testdir.runpytest('-s', '--log-file={0}'.format(log_file), '--log-file-level=WARNING')
+    result = testdir.runpytest('-s', '--log-file={}'.format(log_file), '--log-file-level=WARNING')
 
     # fnmatch_lines does an assertion internally
     result.stdout.fnmatch_lines([

@@ -628,7 +628,7 @@ def test_log_file_cli_level(testdir):
     log_file = testdir.tmpdir.join('pytest.log').strpath
 
     result = testdir.runpytest('-s',
-                               '--log-file={0}'.format(log_file),
+                               '--log-file={}'.format(log_file),
                                '--log-file-level=INFO')
 
     # fnmatch_lines does an assertion internally

@@ -661,7 +661,7 @@ def test_log_file_ini(testdir):
     testdir.makeini(
         """
         [pytest]
-        log_file={0}
+        log_file={}
        log_file_level=WARNING
         """.format(log_file))
     testdir.makepyfile('''

@@ -697,7 +697,7 @@ def test_log_file_ini_level(testdir):
     testdir.makeini(
         """
         [pytest]
-        log_file={0}
+        log_file={}
         log_file_level = INFO
         """.format(log_file))
     testdir.makepyfile('''

@@ -375,7 +375,7 @@ class TestApprox(object):
         expected = '4.0e-06'
         result = testdir.runpytest()
         result.stdout.fnmatch_lines([
-            '*At index 0 diff: 3 != 4 * {0}'.format(expected),
+            '*At index 0 diff: 3 != 4 * {}'.format(expected),
             '=* 1 failed in *=',
         ])

@@ -1749,7 +1749,7 @@ class TestAutouseManagement(object):
                 def test_2(self):
                     pass
         """)
-        confcut = "--confcutdir={0}".format(testdir.tmpdir)
+        confcut = "--confcutdir={}".format(testdir.tmpdir)
         reprec = testdir.inline_run("-v", "-s", confcut)
         reprec.assertoutcome(passed=8)
         config = reprec.getcalls("pytest_unconfigure")[0].config

@@ -3132,9 +3132,9 @@ class TestParameterizedSubRequest(object):
             E*Failed: The requested fixture has no parameter defined for the current test.
             E*
             E*Requested fixture 'fix_with_param' defined in:
-            E*{0}:4
+            E*{}:4
             E*Requested here:
-            E*{1}:9
+            E*{}:9
             *1 error*
             """.format(testfile.basename, testfile.basename))

@@ -3154,9 +3154,9 @@ class TestParameterizedSubRequest(object):
             E*Failed: The requested fixture has no parameter defined for the current test.
             E*
             E*Requested fixture 'fix_with_param' defined in:
-            E*{0}:4
+            E*{}:4
             E*Requested here:
-            E*{1}:8
+            E*{}:8
             *1 failed*
             """.format(testfile.basename, testfile.basename))

@@ -3178,9 +3178,9 @@ class TestParameterizedSubRequest(object):
             E*Failed: The requested fixture has no parameter defined for the current test.
             E*
             E*Requested fixture 'fix_with_param' defined in:
-            E*{0}:4
+            E*{}:4
             E*Requested here:
-            E*{1}:2
+            E*{}:2
             *1 failed*
             """.format(conffile.basename, testfile.basename))

@@ -3211,9 +3211,9 @@ class TestParameterizedSubRequest(object):
             E*Failed: The requested fixture has no parameter defined for the current test.
             E*
             E*Requested fixture 'fix_with_param' defined in:
-            E*{0}:5
+            E*{}:5
             E*Requested here:
-            E*{1}:5
+            E*{}:5
             *1 failed*
             """.format(fixfile.strpath, testfile.basename))

@@ -306,7 +306,7 @@ class TestMetafunc(object):
         pytest.param(re.compile('foo'), re.compile('bar')),
         pytest.param(str, int),
         pytest.param(list("six"), [66, 66]),
-        pytest.param(set([7]), set("seven")),
+        pytest.param({7}, set("seven")),
         pytest.param(tuple("eight"), (8, -8, 8)),
         pytest.param(b'\xc3\xb4', b"name"),
         pytest.param(b'\xc3\xb4', totext("other")),

@@ -1214,14 +1214,14 @@ class TestMetafuncFunctional(object):
         testdir.makepyfile("""
             import pytest
 
-            @pytest.mark.{0}("x", range(2))
+            @pytest.mark.{}("x", range(2))
             def test_foo(x):
                 pass
         """.format(attr))
         reprec = testdir.inline_run('--collectonly')
         failures = reprec.getfailures()
         assert len(failures) == 1
-        expectederror = "MarkerError: test_foo has '{0}', spelling should be 'parametrize'".format(attr)
+        expectederror = "MarkerError: test_foo has '{}', spelling should be 'parametrize'".format(attr)
         assert expectederror in failures[0].longrepr.reprcrash.message

@@ -75,7 +75,7 @@ class TestRaises(object):
         try:
             pytest.raises(ValueError, int, '0')
         except pytest.raises.Exception as e:
-            assert e.msg == "DID NOT RAISE {0}".format(repr(ValueError))
+            assert e.msg == "DID NOT RAISE {}".format(repr(ValueError))
         else:
             assert False, "Expected pytest.raises.Exception"

@@ -83,7 +83,7 @@ class TestRaises(object):
             with pytest.raises(ValueError):
                 pass
         except pytest.raises.Exception as e:
-            assert e.msg == "DID NOT RAISE {0}".format(repr(ValueError))
+            assert e.msg == "DID NOT RAISE {}".format(repr(ValueError))
         else:
             assert False, "Expected pytest.raises.Exception"

@@ -134,7 +134,7 @@ class TestRaises(object):
             int('asdf')
 
         msg = "with base 16"
-        expr = r"Pattern '{0}' not found in 'invalid literal for int\(\) with base 10: 'asdf''".format(msg)
+        expr = r"Pattern '{}' not found in 'invalid literal for int\(\) with base 10: 'asdf''".format(msg)
         with pytest.raises(AssertionError, match=expr):
             with pytest.raises(ValueError, match=msg):
                 int('asdf', base=10)

@@ -359,7 +359,7 @@ class TestAssert_reprcompare(object):
             + {0: 2}
             ?     ^
             """),
-            (set([0, 1]), set([0, 2]), """
+            ({0, 1}, {0, 2}, """
             Full diff:
             - set([0, 1])
             ?       ^

@@ -417,11 +417,11 @@ class TestAssert_reprcompare(object):
         assert lines[2] == "{'b': 1}"
 
     def test_set(self):
-        expl = callequal(set([0, 1]), set([0, 2]))
+        expl = callequal({0, 1}, {0, 2})
         assert len(expl) > 1
 
     def test_frozenzet(self):
-        expl = callequal(frozenset([0, 1]), set([0, 2]))
+        expl = callequal(frozenset([0, 1]), {0, 2})
         assert len(expl) > 1
 
     def test_Sequence(self):

@@ -971,7 +971,7 @@ def test_set_with_unsortable_elements():
             raise RuntimeError()
 
         def __repr__(self):
-            return 'repr({0})'.format(self.name)
+            return 'repr({})'.format(self.name)
 
         def __eq__(self, other):
             return self.name == other.name

@@ -979,8 +979,8 @@ def test_set_with_unsortable_elements():
        def __hash__(self):
             return hash(self.name)
 
-    left_set = set(UnsortableKey(str(i)) for i in range(1, 3))
-    right_set = set(UnsortableKey(str(i)) for i in range(2, 4))
+    left_set = {UnsortableKey(str(i)) for i in range(1, 3)}
+    right_set = {UnsortableKey(str(i)) for i in range(2, 4)}
     expl = callequal(left_set, right_set, verbose=True)
     # skip first line because it contains the "construction" of the set, which does not have a guaranteed order
     expl = expl[1:]

@@ -624,7 +624,7 @@ def test_load_initial_conftest_last_ordering(testdir):
 def test_get_plugin_specs_as_list():
     from _pytest.config import _get_plugin_specs_as_list
     with pytest.raises(pytest.UsageError):
-        _get_plugin_specs_as_list(set(['foo']))
+        _get_plugin_specs_as_list({'foo'})
     with pytest.raises(pytest.UsageError):
         _get_plugin_specs_as_list(dict())

@@ -117,8 +117,8 @@ class TestDoctests(object):
             >>> 1
             1
         """)
-        expected = set(['xdoc.txt', 'test.foo', 'test_normal.txt'])
-        assert set(x.basename for x in testdir.tmpdir.listdir()) == expected
+        expected = {'xdoc.txt', 'test.foo', 'test_normal.txt'}
+        assert {x.basename for x in testdir.tmpdir.listdir()} == expected
         args = ["--doctest-glob=xdoc*.txt", "--doctest-glob=*.foo"]
         result = testdir.runpytest(*args)
         result.stdout.fnmatch_lines([

@@ -145,11 +145,11 @@ class TestDoctests(object):
         """
         testdir.makeini("""
             [pytest]
-            doctest_encoding={0}
+            doctest_encoding={}
         """.format(encoding))
         doctest = u"""
-            >>> u"{0}"
-            {1}
+            >>> u"{}"
+            {}
         """.format(test_string, repr(test_string))
         testdir._makefile(".txt", [doctest], {}, encoding=encoding)

@@ -23,8 +23,8 @@ def assert_attr(node, **kwargs):
         if anode is not None:
             return anode.value
 
-    expected = dict((name, str(value)) for name, value in kwargs.items())
-    on_node = dict((name, nodeval(node, name)) for name in expected)
+    expected = {name: str(value) for name, value in kwargs.items()}
+    on_node = {name: nodeval(node, name) for name in expected}
     assert on_node == expected

@@ -1079,7 +1079,7 @@ def test_set_suite_name(testdir, suite_name):
     if suite_name:
         testdir.makeini("""
             [pytest]
-            junit_suite_name={0}
+            junit_suite_name={}
         """.format(suite_name))
         expected = suite_name
     else:

@@ -744,11 +744,11 @@ class TestFunctional(object):
         to other modules.
         """
         from _pytest.mark import MarkInfo
-        items = dict((x.name, x) for x in items)
+        items = {x.name: x for x in items}
         for name, expected_markers in expected.items():
             markers = items[name].keywords._markers
-            marker_names = set([name for (name, v) in markers.items()
-                                if isinstance(v, MarkInfo)])
+            marker_names = {name for (name, v) in markers.items()
+                            if isinstance(v, MarkInfo)}
             assert marker_names == set(expected_markers)
 
     @pytest.mark.issue1540

@@ -90,7 +90,7 @@ class TestParser(object):
         group.addoption("--option1", "--option-1", action="store_true")
         with pytest.raises(ValueError) as err:
             group.addoption("--option1", "--option-one", action="store_true")
-        assert str(set(["--option1"])) in str(err.value)
+        assert str({"--option1"}) in str(err.value)
 
     def test_group_shortopt_lowercase(self, parser):
         group = parser.getgroup("hello")