py36+: pyupgrade: py36+
parent b1bcb9fba8
commit 66bd44c13a
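The first hunks below bump pre-commit hook versions and switch reorder-python-imports and pyupgrade from --py3-plus to --py36-plus; the remaining hunks are the mechanical str.format() -> f-string rewrite that pyupgrade performs once Python 3.6+ is the minimum version. A minimal sketch of that rewrite, using a line taken from the release-script hunk further down (the version value is only a placeholder for illustration):

    version = "9.9.9"  # placeholder, not a real release number
    # pre-3.6 spelling, removed throughout this diff:
    old_msg = "Prepare release version {}".format(version)
    # f-string spelling, added by `pyupgrade --py36-plus` (run here via pre-commit):
    new_msg = f"Prepare release version {version}"
    assert old_msg == new_msg  # the rewrite is purely syntactic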
@@ -5,12 +5,12 @@ repos:
 - id: black
 args: [--safe, --quiet]
 - repo: https://github.com/asottile/blacken-docs
-rev: v1.7.0
+rev: v1.8.0
 hooks:
 - id: blacken-docs
 additional_dependencies: [black==19.10b0]
 - repo: https://github.com/pre-commit/pre-commit-hooks
-rev: v3.1.0
+rev: v3.2.0
 hooks:
 - id: trailing-whitespace
 - id: end-of-file-fixer
@@ -21,7 +21,7 @@ repos:
 exclude: _pytest/(debugging|hookspec).py
 language_version: python3
 - repo: https://gitlab.com/pycqa/flake8
-rev: 3.8.2
+rev: 3.8.3
 hooks:
 - id: flake8
 language_version: python3
@@ -29,23 +29,23 @@ repos:
 - flake8-typing-imports==1.9.0
 - flake8-docstrings==1.5.0
 - repo: https://github.com/asottile/reorder_python_imports
-rev: v2.3.0
+rev: v2.3.5
 hooks:
 - id: reorder-python-imports
-args: ['--application-directories=.:src', --py3-plus]
+args: ['--application-directories=.:src', --py36-plus]
 - repo: https://github.com/asottile/pyupgrade
-rev: v2.4.4
+rev: v2.7.2
 hooks:
 - id: pyupgrade
-args: [--py3-plus]
+args: [--py36-plus]
 - repo: https://github.com/asottile/setup-cfg-fmt
-rev: v1.9.0
+rev: v1.11.0
 hooks:
 - id: setup-cfg-fmt
 # TODO: when upgrading setup-cfg-fmt this can be removed
 args: [--max-py-version=3.9]
 - repo: https://github.com/pre-commit/mirrors-mypy
-rev: v0.780 # NOTE: keep this in sync with setup.cfg.
+rev: v0.782 # NOTE: keep this in sync with setup.cfg.
 hooks:
 - id: mypy
 files: ^(src/|testing/)
@@ -176,7 +176,7 @@ class TestRaises:
 
 def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
 items = [1, 2, 3]
-print("items is {!r}".format(items))
+print(f"items is {items!r}")
 a, b = items.pop()
 
 def test_some_error(self):
@@ -11,4 +11,4 @@ def pytest_runtest_setup(item):
 return
 mod = item.getparent(pytest.Module).obj
 if hasattr(mod, "hello"):
-print("mod.hello {!r}".format(mod.hello))
+print(f"mod.hello {mod.hello!r}")
@@ -26,7 +26,7 @@ class Python:
 def __init__(self, version, picklefile):
 self.pythonpath = shutil.which(version)
 if not self.pythonpath:
-pytest.skip("{!r} not found".format(version))
+pytest.skip(f"{version!r} not found")
 self.picklefile = picklefile
 
 def dumps(self, obj):
@@ -69,4 +69,4 @@ class Python:
 @pytest.mark.parametrize("obj", [42, {}, {1: 3}])
 def test_basic_objects(python1, python2, obj):
 python1.dumps(obj)
-python2.load_and_is_true("obj == {}".format(obj))
+python2.load_and_is_true(f"obj == {obj}")
@@ -40,7 +40,7 @@ class YamlItem(pytest.Item):
 )
 
 def reportinfo(self):
-return self.fspath, 0, "usecase: {}".format(self.name)
+return self.fspath, 0, f"usecase: {self.name}"
 
 
 class YamlException(Exception):
@@ -17,9 +17,7 @@ def announce(version):
 stdout = stdout.decode("utf-8")
 last_version = stdout.strip()
 
-stdout = check_output(
-["git", "log", "{}..HEAD".format(last_version), "--format=%aN"]
-)
+stdout = check_output(["git", "log", f"{last_version}..HEAD", "--format=%aN"])
 stdout = stdout.decode("utf-8")
 
 contributors = set(stdout.splitlines())
@@ -31,14 +29,10 @@ def announce(version):
 Path(__file__).parent.joinpath(template_name).read_text(encoding="UTF-8")
 )
 
-contributors_text = (
-"\n".join("* {}".format(name) for name in sorted(contributors)) + "\n"
-)
+contributors_text = "\n".join(f"* {name}" for name in sorted(contributors)) + "\n"
 text = template_text.format(version=version, contributors=contributors_text)
 
-target = Path(__file__).parent.joinpath(
-"../doc/en/announce/release-{}.rst".format(version)
-)
+target = Path(__file__).parent.joinpath(f"../doc/en/announce/release-{version}.rst")
 target.write_text(text, encoding="UTF-8")
 print(f"{Fore.CYAN}[generate.announce] {Fore.RESET}Generated {target.name}")
 
@@ -47,7 +41,7 @@ def announce(version):
 lines = index_path.read_text(encoding="UTF-8").splitlines()
 indent = " "
 for index, line in enumerate(lines):
-if line.startswith("{}release-".format(indent)):
+if line.startswith(f"{indent}release-"):
 new_line = indent + target.stem
 if line != new_line:
 lines.insert(index, new_line)
@@ -96,7 +90,7 @@ def pre_release(version, *, skip_check_links):
 if not skip_check_links:
 check_links()
 
-msg = "Prepare release version {}".format(version)
+msg = f"Prepare release version {version}"
 check_call(["git", "commit", "-a", "-m", msg])
 
 print()
@@ -58,7 +58,7 @@ class Code:
 if not hasattr(rawcode, "co_filename"):
 rawcode = getrawcode(rawcode)
 if not isinstance(rawcode, CodeType):
-raise TypeError("not a code object: {!r}".format(rawcode))
+raise TypeError(f"not a code object: {rawcode!r}")
 self.filename = rawcode.co_filename
 self.firstlineno = rawcode.co_firstlineno - 1
 self.name = rawcode.co_name
@@ -747,7 +747,7 @@ class FormattedExcinfo:
 else:
 str_repr = safeformat(value)
 # if len(str_repr) < 70 or not isinstance(value, (list, tuple, dict)):
-lines.append("{:<10} = {}".format(name, str_repr))
+lines.append(f"{name:<10} = {str_repr}")
 # else:
 # self._line("%-10s =\\" % (name,))
 # # XXX
@@ -1056,7 +1056,7 @@ class ReprEntry(TerminalRepr):
 # separate indents and source lines that are not failures: we want to
 # highlight the code but not the indentation, which may contain markers
 # such as "> assert 0"
-fail_marker = "{} ".format(FormattedExcinfo.fail_marker)
+fail_marker = f"{FormattedExcinfo.fail_marker} "
 indent_size = len(fail_marker)
 indents = [] # type: List[str]
 source_lines = [] # type: List[str]
@@ -1122,7 +1122,7 @@ class ReprFileLocation(TerminalRepr):
 if i != -1:
 msg = msg[:i]
 tw.write(self.path, bold=True, red=True)
-tw.line(":{}: {}".format(self.lineno, msg))
+tw.line(f":{self.lineno}: {msg}")
 
 
 @attr.s(eq=False)
@@ -1142,7 +1142,7 @@ class ReprFuncArgs(TerminalRepr):
 if self.args:
 linesofar = ""
 for name, value in self.args:
-ns = "{} = {}".format(name, value)
+ns = f"{name} = {value}"
 if len(ns) + len(linesofar) + 2 > tw.fullwidth:
 if linesofar:
 tw.line(linesofar)
@@ -97,7 +97,7 @@ class TerminalWriter:
 def markup(self, text: str, **markup: bool) -> str:
 for name in markup:
 if name not in self._esctable:
-raise ValueError("unknown markup: {!r}".format(name))
+raise ValueError(f"unknown markup: {name!r}")
 if self.hasmarkup:
 esc = [self._esctable[name] for name, on in markup.items() if on]
 if esc:
@@ -128,7 +128,7 @@ class TerminalWriter:
 # N <= (fullwidth - len(title) - 2) // (2*len(sepchar))
 N = max((fullwidth - len(title) - 2) // (2 * len(sepchar)), 1)
 fill = sepchar * N
-line = "{} {} {}".format(fill, title, fill)
+line = f"{fill} {title} {fill}"
 else:
 # we want len(sepchar)*N <= fullwidth
 # i.e. N <= fullwidth // len(sepchar)
@@ -48,7 +48,7 @@ assertstate_key = StoreKey["AssertionState"]()
 
 
 # pytest caches rewritten pycs in pycache dirs
-PYTEST_TAG = "{}-pytest-{}".format(sys.implementation.cache_tag, version)
+PYTEST_TAG = f"{sys.implementation.cache_tag}-pytest-{version}"
 PYC_EXT = ".py" + (__debug__ and "c" or "o")
 PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
 
@@ -149,7 +149,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
 ok = try_makedirs(cache_dir)
 if not ok:
 write = False
-state.trace("read only directory: {}".format(cache_dir))
+state.trace(f"read only directory: {cache_dir}")
 
 cache_name = fn.name[:-3] + PYC_TAIL
 pyc = cache_dir / cache_name
@@ -157,7 +157,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
 # to check for a cached pyc. This may not be optimal...
 co = _read_pyc(fn, pyc, state.trace)
 if co is None:
-state.trace("rewriting {!r}".format(fn))
+state.trace(f"rewriting {fn!r}")
 source_stat, co = _rewrite_test(fn, self.config)
 if write:
 self._writing_pyc = True
@@ -166,7 +166,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
 finally:
 self._writing_pyc = False
 else:
-state.trace("found cached rewritten pyc for {}".format(fn))
+state.trace(f"found cached rewritten pyc for {fn}")
 exec(co, module.__dict__)
 
 def _early_rewrite_bailout(self, name: str, state: "AssertionState") -> bool:
@@ -205,20 +205,18 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
 if self._is_marked_for_rewrite(name, state):
 return False
 
-state.trace("early skip of rewriting module: {}".format(name))
+state.trace(f"early skip of rewriting module: {name}")
 return True
 
 def _should_rewrite(self, name: str, fn: str, state: "AssertionState") -> bool:
 # always rewrite conftest files
 if os.path.basename(fn) == "conftest.py":
-state.trace("rewriting conftest file: {!r}".format(fn))
+state.trace(f"rewriting conftest file: {fn!r}")
 return True
 
 if self.session is not None:
 if self.session.isinitpath(py.path.local(fn)):
-state.trace(
-"matched test file (was specified on cmdline): {!r}".format(fn)
-)
+state.trace(f"matched test file (was specified on cmdline): {fn!r}")
 return True
 
 # modules not passed explicitly on the command line are only
@@ -226,7 +224,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
 fn_path = PurePath(fn)
 for pat in self.fnpats:
 if fnmatch_ex(pat, fn_path):
-state.trace("matched test file {!r}".format(fn))
+state.trace(f"matched test file {fn!r}")
 return True
 
 return self._is_marked_for_rewrite(name, state)
@@ -237,9 +235,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
 except KeyError:
 for marked in self._must_rewrite:
 if name == marked or name.startswith(marked + "."):
-state.trace(
-"matched marked file {!r} (from {!r})".format(name, marked)
-)
+state.trace(f"matched marked file {name!r} (from {marked!r})")
 self._marked_for_rewrite_cache[name] = True
 return True
 
@@ -308,7 +304,7 @@ if sys.platform == "win32":
 with atomic_write(os.fspath(pyc), mode="wb", overwrite=True) as fp:
 _write_pyc_fp(fp, source_stat, co)
 except OSError as e:
-state.trace("error writing pyc file at {}: {}".format(pyc, e))
+state.trace(f"error writing pyc file at {pyc}: {e}")
 # we ignore any failure to write the cache file
 # there are many reasons, permission-denied, pycache dir being a
 # file etc.
@@ -324,20 +320,18 @@ else:
 source_stat: os.stat_result,
 pyc: Path,
 ) -> bool:
-proc_pyc = "{}.{}".format(pyc, os.getpid())
+proc_pyc = f"{pyc}.{os.getpid()}"
 try:
 fp = open(proc_pyc, "wb")
 except OSError as e:
-state.trace(
-"error writing pyc file at {}: errno={}".format(proc_pyc, e.errno)
-)
+state.trace(f"error writing pyc file at {proc_pyc}: errno={e.errno}")
 return False
 
 try:
 _write_pyc_fp(fp, source_stat, co)
 os.rename(proc_pyc, os.fspath(pyc))
 except OSError as e:
-state.trace("error writing pyc file at {}: {}".format(pyc, e))
+state.trace(f"error writing pyc file at {pyc}: {e}")
 # we ignore any failure to write the cache file
 # there are many reasons, permission-denied, pycache dir being a
 # file etc.
@@ -377,7 +371,7 @@ def _read_pyc(
 size = stat_result.st_size
 data = fp.read(12)
 except OSError as e:
-trace("_read_pyc({}): OSError {}".format(source, e))
+trace(f"_read_pyc({source}): OSError {e}")
 return None
 # Check for invalid or out of date pyc file.
 if (
@@ -390,7 +384,7 @@ def _read_pyc(
 try:
 co = marshal.load(fp)
 except Exception as e:
-trace("_read_pyc({}): marshal.load error {}".format(source, e))
+trace(f"_read_pyc({source}): marshal.load error {e}")
 return None
 if not isinstance(co, types.CodeType):
 trace("_read_pyc(%s): not a code object" % source)
@@ -982,7 +976,7 @@ class AssertionRewriter(ast.NodeVisitor):
 symbol = BINOP_MAP[binop.op.__class__]
 left_expr, left_expl = self.visit(binop.left)
 right_expr, right_expl = self.visit(binop.right)
-explanation = "({} {} {})".format(left_expl, symbol, right_expl)
+explanation = f"({left_expl} {symbol} {right_expl})"
 res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
 return res, explanation
 
@@ -1007,7 +1001,7 @@ class AssertionRewriter(ast.NodeVisitor):
 new_call = ast.Call(new_func, new_args, new_kwargs)
 res = self.assign(new_call)
 res_expl = self.explanation_param(self.display(res))
-outer_expl = "{}\n{{{} = {}\n}}".format(res_expl, res_expl, expl)
+outer_expl = f"{res_expl}\n{{{res_expl} = {expl}\n}}"
 return res, outer_expl
 
 def visit_Starred(self, starred: ast.Starred) -> Tuple[ast.Starred, str]:
@@ -1030,7 +1024,7 @@ class AssertionRewriter(ast.NodeVisitor):
 self.push_format_context()
 left_res, left_expl = self.visit(comp.left)
 if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
-left_expl = "({})".format(left_expl)
+left_expl = f"({left_expl})"
 res_variables = [self.variable() for i in range(len(comp.ops))]
 load_names = [ast.Name(v, ast.Load()) for v in res_variables]
 store_names = [ast.Name(v, ast.Store()) for v in res_variables]
@@ -1041,11 +1035,11 @@ class AssertionRewriter(ast.NodeVisitor):
 for i, op, next_operand in it:
 next_res, next_expl = self.visit(next_operand)
 if isinstance(next_operand, (ast.Compare, ast.BoolOp)):
-next_expl = "({})".format(next_expl)
+next_expl = f"({next_expl})"
 results.append(next_res)
 sym = BINOP_MAP[op.__class__]
 syms.append(ast.Str(sym))
-expl = "{} {} {}".format(left_expl, sym, next_expl)
+expl = f"{left_expl} {sym} {next_expl}"
 expls.append(ast.Str(expl))
 res_expr = ast.Compare(left_res, [op], [next_res])
 self.statements.append(ast.Assign([store_names[i]], res_expr))
@ -70,10 +70,10 @@ def _truncate_explanation(
|
|||
truncated_line_count += 1 # Account for the part-truncated final line
|
||||
msg = "...Full output truncated"
|
||||
if truncated_line_count == 1:
|
||||
msg += " ({} line hidden)".format(truncated_line_count)
|
||||
msg += f" ({truncated_line_count} line hidden)"
|
||||
else:
|
||||
msg += " ({} lines hidden)".format(truncated_line_count)
|
||||
msg += ", {}".format(USAGE_MSG)
|
||||
msg += f" ({truncated_line_count} lines hidden)"
|
||||
msg += f", {USAGE_MSG}"
|
||||
truncated_explanation.extend(["", str(msg)])
|
||||
return truncated_explanation
|
||||
|
||||
|
|
|
@ -142,7 +142,7 @@ def assertrepr_compare(config, op: str, left: Any, right: Any) -> Optional[List[
|
|||
left_repr = saferepr(left, maxsize=maxsize)
|
||||
right_repr = saferepr(right, maxsize=maxsize)
|
||||
|
||||
summary = "{} {} {}".format(left_repr, op, right_repr)
|
||||
summary = f"{left_repr} {op} {right_repr}"
|
||||
|
||||
explanation = None
|
||||
try:
|
||||
|
@ -316,9 +316,7 @@ def _compare_eq_sequence(
|
|||
left_value = left[i]
|
||||
right_value = right[i]
|
||||
|
||||
explanation += [
|
||||
"At index {} diff: {!r} != {!r}".format(i, left_value, right_value)
|
||||
]
|
||||
explanation += [f"At index {i} diff: {left_value!r} != {right_value!r}"]
|
||||
break
|
||||
|
||||
if comparing_bytes:
|
||||
|
@ -338,9 +336,7 @@ def _compare_eq_sequence(
|
|||
extra = saferepr(right[len_left])
|
||||
|
||||
if len_diff == 1:
|
||||
explanation += [
|
||||
"{} contains one more item: {}".format(dir_with_more, extra)
|
||||
]
|
||||
explanation += [f"{dir_with_more} contains one more item: {extra}"]
|
||||
else:
|
||||
explanation += [
|
||||
"%s contains %d more items, first extra item: %s"
|
||||
|
|
|
@ -500,7 +500,7 @@ def pytest_report_header(config: Config) -> Optional[str]:
|
|||
displaypath = cachedir.relative_to(config.rootpath)
|
||||
except ValueError:
|
||||
displaypath = cachedir
|
||||
return "cachedir: {}".format(displaypath)
|
||||
return f"cachedir: {displaypath}"
|
||||
return None
|
||||
|
||||
|
||||
|
@ -542,5 +542,5 @@ def cacheshow(config: Config, session: Session) -> int:
|
|||
# print("%s/" % p.relto(basedir))
|
||||
if p.is_file():
|
||||
key = str(p.relative_to(basedir))
|
||||
tw.line("{} is a file of length {:d}".format(key, p.stat().st_size))
|
||||
tw.line(f"{key} is a file of length {p.stat().st_size:d}")
|
||||
return 0
|
||||
|
|
|
@ -544,7 +544,7 @@ class CaptureResult(Generic[AnyStr]):
|
|||
return tuple(self) < tuple(other)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "CaptureResult(out={!r}, err={!r})".format(self.out, self.err)
|
||||
return f"CaptureResult(out={self.out!r}, err={self.err!r})"
|
||||
|
||||
|
||||
class MultiCapture(Generic[AnyStr]):
|
||||
|
@ -638,7 +638,7 @@ def _get_multicapture(method: "_CaptureMethod") -> MultiCapture[str]:
|
|||
return MultiCapture(
|
||||
in_=None, out=SysCapture(1, tee=True), err=SysCapture(2, tee=True)
|
||||
)
|
||||
raise ValueError("unknown capturing method: {!r}".format(method))
|
||||
raise ValueError(f"unknown capturing method: {method!r}")
|
||||
|
||||
|
||||
# CaptureManager and CaptureFixture
|
||||
|
|
|
@ -143,8 +143,7 @@ def getfuncargnames(
|
|||
parameters = signature(function).parameters
|
||||
except (ValueError, TypeError) as e:
|
||||
fail(
|
||||
"Could not determine arguments of {!r}: {}".format(function, e),
|
||||
pytrace=False,
|
||||
f"Could not determine arguments of {function!r}: {e}", pytrace=False,
|
||||
)
|
||||
|
||||
arg_names = tuple(
|
||||
|
@ -197,7 +196,7 @@ def get_default_arg_names(function: Callable[..., Any]) -> Tuple[str, ...]:
|
|||
|
||||
|
||||
_non_printable_ascii_translate_table = {
|
||||
i: "\\x{:02x}".format(i) for i in range(128) if i not in range(32, 127)
|
||||
i: f"\\x{i:02x}" for i in range(128) if i not in range(32, 127)
|
||||
}
|
||||
_non_printable_ascii_translate_table.update(
|
||||
{ord("\t"): "\\t", ord("\r"): "\\r", ord("\n"): "\\n"}
|
||||
|
|
|
@ -144,9 +144,7 @@ def main(
|
|||
except ConftestImportFailure as e:
|
||||
exc_info = ExceptionInfo(e.excinfo)
|
||||
tw = TerminalWriter(sys.stderr)
|
||||
tw.line(
|
||||
"ImportError while loading conftest '{e.path}'.".format(e=e), red=True
|
||||
)
|
||||
tw.line(f"ImportError while loading conftest '{e.path}'.", red=True)
|
||||
exc_info.traceback = exc_info.traceback.filter(
|
||||
filter_traceback_for_conftest_import_failure
|
||||
)
|
||||
|
@ -173,7 +171,7 @@ def main(
|
|||
except UsageError as e:
|
||||
tw = TerminalWriter(sys.stderr)
|
||||
for msg in e.args:
|
||||
tw.line("ERROR: {}\n".format(msg), red=True)
|
||||
tw.line(f"ERROR: {msg}\n", red=True)
|
||||
return ExitCode.USAGE_ERROR
|
||||
|
||||
|
||||
|
@ -206,7 +204,7 @@ def filename_arg(path: str, optname: str) -> str:
|
|||
:optname: Name of the option.
|
||||
"""
|
||||
if os.path.isdir(path):
|
||||
raise UsageError("{} must be a filename, given: {}".format(optname, path))
|
||||
raise UsageError(f"{optname} must be a filename, given: {path}")
|
||||
return path
|
||||
|
||||
|
||||
|
@ -217,7 +215,7 @@ def directory_arg(path: str, optname: str) -> str:
|
|||
:optname: Name of the option.
|
||||
"""
|
||||
if not os.path.isdir(path):
|
||||
raise UsageError("{} must be a directory, given: {}".format(optname, path))
|
||||
raise UsageError(f"{optname} must be a directory, given: {path}")
|
||||
return path
|
||||
|
||||
|
||||
|
@ -583,7 +581,7 @@ class PytestPluginManager(PluginManager):
|
|||
if path and path.relto(dirpath) or path == dirpath:
|
||||
assert mod not in mods
|
||||
mods.append(mod)
|
||||
self.trace("loading conftestmodule {!r}".format(mod))
|
||||
self.trace(f"loading conftestmodule {mod!r}")
|
||||
self.consider_conftest(mod)
|
||||
return mod
|
||||
|
||||
|
@ -889,7 +887,7 @@ class Config:
|
|||
|
||||
_a = FILE_OR_DIR
|
||||
self._parser = Parser(
|
||||
usage="%(prog)s [options] [{}] [{}] [...]".format(_a, _a),
|
||||
usage=f"%(prog)s [options] [{_a}] [{_a}] [...]",
|
||||
processopt=self._processopt,
|
||||
)
|
||||
self.pluginmanager = pluginmanager
|
||||
|
@ -1191,9 +1189,7 @@ class Config:
|
|||
# we don't want to prevent --help/--version to work
|
||||
# so just let is pass and print a warning at the end
|
||||
self.issue_config_time_warning(
|
||||
PytestConfigWarning(
|
||||
"could not load initial conftests: {}".format(e.path)
|
||||
),
|
||||
PytestConfigWarning(f"could not load initial conftests: {e.path}"),
|
||||
stacklevel=2,
|
||||
)
|
||||
else:
|
||||
|
@ -1227,7 +1223,7 @@ class Config:
|
|||
|
||||
def _validate_config_options(self) -> None:
|
||||
for key in sorted(self._get_unknown_ini_keys()):
|
||||
self._warn_or_fail_if_strict("Unknown config option: {}\n".format(key))
|
||||
self._warn_or_fail_if_strict(f"Unknown config option: {key}\n")
|
||||
|
||||
def _validate_plugins(self) -> None:
|
||||
required_plugins = sorted(self.getini("required_plugins"))
|
||||
|
@ -1362,7 +1358,7 @@ class Config:
|
|||
try:
|
||||
description, type, default = self._parser._inidict[name]
|
||||
except KeyError as e:
|
||||
raise ValueError("unknown configuration value: {!r}".format(name)) from e
|
||||
raise ValueError(f"unknown configuration value: {name!r}") from e
|
||||
override_value = self._get_override_ini_value(name)
|
||||
if override_value is None:
|
||||
try:
|
||||
|
@ -1467,8 +1463,8 @@ class Config:
|
|||
if skip:
|
||||
import pytest
|
||||
|
||||
pytest.skip("no {!r} option found".format(name))
|
||||
raise ValueError("no option named {!r}".format(name)) from e
|
||||
pytest.skip(f"no {name!r} option found")
|
||||
raise ValueError(f"no option named {name!r}") from e
|
||||
|
||||
def getvalue(self, name: str, path=None):
|
||||
"""Deprecated, use getoption() instead."""
|
||||
|
@ -1501,7 +1497,7 @@ class Config:
|
|||
def _warn_about_skipped_plugins(self) -> None:
|
||||
for module_name, msg in self.pluginmanager.skipped_plugins:
|
||||
self.issue_config_time_warning(
|
||||
PytestConfigWarning("skipped plugin {!r}: {}".format(module_name, msg)),
|
||||
PytestConfigWarning(f"skipped plugin {module_name!r}: {msg}"),
|
||||
stacklevel=2,
|
||||
)
|
||||
|
||||
|
@ -1554,7 +1550,7 @@ def _strtobool(val: str) -> bool:
|
|||
elif val in ("n", "no", "f", "false", "off", "0"):
|
||||
return False
|
||||
else:
|
||||
raise ValueError("invalid truth value {!r}".format(val))
|
||||
raise ValueError(f"invalid truth value {val!r}")
|
||||
|
||||
|
||||
@lru_cache(maxsize=50)
|
||||
|
@ -1568,7 +1564,7 @@ def parse_warning_filter(
|
|||
"""
|
||||
parts = arg.split(":")
|
||||
if len(parts) > 5:
|
||||
raise warnings._OptionError("too many fields (max 5): {!r}".format(arg))
|
||||
raise warnings._OptionError(f"too many fields (max 5): {arg!r}")
|
||||
while len(parts) < 5:
|
||||
parts.append("")
|
||||
action_, message, category_, module, lineno_ = [s.strip() for s in parts]
|
||||
|
@ -1584,7 +1580,7 @@ def parse_warning_filter(
|
|||
if lineno < 0:
|
||||
raise ValueError
|
||||
except (ValueError, OverflowError) as e:
|
||||
raise warnings._OptionError("invalid lineno {!r}".format(lineno_)) from e
|
||||
raise warnings._OptionError(f"invalid lineno {lineno_!r}") from e
|
||||
else:
|
||||
lineno = 0
|
||||
return action, message, category, module, lineno
|
||||
|
|
|
@ -188,7 +188,7 @@ class ArgumentError(Exception):
|
|||
|
||||
def __str__(self) -> str:
|
||||
if self.option_id:
|
||||
return "option {}: {}".format(self.option_id, self.msg)
|
||||
return f"option {self.option_id}: {self.msg}"
|
||||
else:
|
||||
return self.msg
|
||||
|
||||
|
@ -389,11 +389,11 @@ class MyOptionParser(argparse.ArgumentParser):
|
|||
|
||||
def error(self, message: str) -> "NoReturn":
|
||||
"""Transform argparse error message into UsageError."""
|
||||
msg = "{}: error: {}".format(self.prog, message)
|
||||
msg = f"{self.prog}: error: {message}"
|
||||
|
||||
if hasattr(self._parser, "_config_source_hint"):
|
||||
# Type ignored because the attribute is set dynamically.
|
||||
msg = "{} ({})".format(msg, self._parser._config_source_hint) # type: ignore
|
||||
msg = f"{msg} ({self._parser._config_source_hint})" # type: ignore
|
||||
|
||||
raise UsageError(self.format_usage() + msg)
|
||||
|
||||
|
@ -410,7 +410,7 @@ class MyOptionParser(argparse.ArgumentParser):
|
|||
if arg and arg[0] == "-":
|
||||
lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))]
|
||||
for k, v in sorted(self.extra_info.items()):
|
||||
lines.append(" {}: {}".format(k, v))
|
||||
lines.append(f" {k}: {v}")
|
||||
self.error("\n".join(lines))
|
||||
getattr(parsed, FILE_OR_DIR).extend(unrecognized)
|
||||
return parsed
|
||||
|
|
|
@ -35,7 +35,7 @@ def _validate_usepdb_cls(value: str) -> Tuple[str, str]:
|
|||
modname, classname = value.split(":")
|
||||
except ValueError as e:
|
||||
raise argparse.ArgumentTypeError(
|
||||
"{!r} is not in the format 'modname:classname'".format(value)
|
||||
f"{value!r} is not in the format 'modname:classname'"
|
||||
) from e
|
||||
return (modname, classname)
|
||||
|
||||
|
@ -136,7 +136,7 @@ class pytestPDB:
|
|||
except Exception as exc:
|
||||
value = ":".join((modname, classname))
|
||||
raise UsageError(
|
||||
"--pdbcls: could not import {!r}: {}".format(value, exc)
|
||||
f"--pdbcls: could not import {value!r}: {exc}"
|
||||
) from exc
|
||||
else:
|
||||
import pdb
|
||||
|
@ -257,7 +257,7 @@ class pytestPDB:
|
|||
else:
|
||||
capturing = cls._is_capturing(capman)
|
||||
if capturing == "global":
|
||||
tw.sep(">", "PDB {} (IO-capturing turned off)".format(method))
|
||||
tw.sep(">", f"PDB {method} (IO-capturing turned off)")
|
||||
elif capturing:
|
||||
tw.sep(
|
||||
">",
|
||||
|
@ -265,7 +265,7 @@ class pytestPDB:
|
|||
% (method, capturing),
|
||||
)
|
||||
else:
|
||||
tw.sep(">", "PDB {}".format(method))
|
||||
tw.sep(">", f"PDB {method}")
|
||||
|
||||
_pdb = cls._import_pdb_cls(capman)(**kwargs)
|
||||
|
||||
|
|
|
@ -349,7 +349,7 @@ class DoctestItem(pytest.Item):
|
|||
]
|
||||
indent = ">>>"
|
||||
for line in example.source.splitlines():
|
||||
lines.append("??? {} {}".format(indent, line))
|
||||
lines.append(f"??? {indent} {line}")
|
||||
indent = "..."
|
||||
if isinstance(failure, doctest.DocTestFailure):
|
||||
lines += checker.output_difference(
|
||||
|
|
|
@ -469,7 +469,7 @@ class FixtureRequest:
|
|||
"""Test function object if the request has a per-function scope."""
|
||||
if self.scope != "function":
|
||||
raise AttributeError(
|
||||
"function not available in {}-scoped context".format(self.scope)
|
||||
f"function not available in {self.scope}-scoped context"
|
||||
)
|
||||
return self._pyfuncitem.obj
|
||||
|
||||
|
@ -477,9 +477,7 @@ class FixtureRequest:
|
|||
def cls(self):
|
||||
"""Class (can be None) where the test function was collected."""
|
||||
if self.scope not in ("class", "function"):
|
||||
raise AttributeError(
|
||||
"cls not available in {}-scoped context".format(self.scope)
|
||||
)
|
||||
raise AttributeError(f"cls not available in {self.scope}-scoped context")
|
||||
clscol = self._pyfuncitem.getparent(_pytest.python.Class)
|
||||
if clscol:
|
||||
return clscol.obj
|
||||
|
@ -498,18 +496,14 @@ class FixtureRequest:
|
|||
def module(self):
|
||||
"""Python module object where the test function was collected."""
|
||||
if self.scope not in ("function", "class", "module"):
|
||||
raise AttributeError(
|
||||
"module not available in {}-scoped context".format(self.scope)
|
||||
)
|
||||
raise AttributeError(f"module not available in {self.scope}-scoped context")
|
||||
return self._pyfuncitem.getparent(_pytest.python.Module).obj
|
||||
|
||||
@property
|
||||
def fspath(self) -> py.path.local:
|
||||
"""The file system path of the test module which collected this test."""
|
||||
if self.scope not in ("function", "class", "module", "package"):
|
||||
raise AttributeError(
|
||||
"module not available in {}-scoped context".format(self.scope)
|
||||
)
|
||||
raise AttributeError(f"module not available in {self.scope}-scoped context")
|
||||
# TODO: Remove ignore once _pyfuncitem is properly typed.
|
||||
return self._pyfuncitem.fspath # type: ignore
|
||||
|
||||
|
@ -757,7 +751,7 @@ class SubRequest(FixtureRequest):
|
|||
self._fixturemanager = request._fixturemanager
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "<SubRequest {!r} for {!r}>".format(self.fixturename, self._pyfuncitem)
|
||||
return f"<SubRequest {self.fixturename!r} for {self._pyfuncitem!r}>"
|
||||
|
||||
def addfinalizer(self, finalizer: Callable[[], object]) -> None:
|
||||
self._fixturedef.addfinalizer(finalizer)
|
||||
|
@ -792,7 +786,7 @@ def scope2index(scope: str, descr: str, where: Optional[str] = None) -> int:
|
|||
except ValueError:
|
||||
fail(
|
||||
"{} {}got an unexpected scope value '{}'".format(
|
||||
descr, "from {} ".format(where) if where else "", scope
|
||||
descr, f"from {where} " if where else "", scope
|
||||
),
|
||||
pytrace=False,
|
||||
)
|
||||
|
@ -848,7 +842,7 @@ class FixtureLookupError(LookupError):
|
|||
self.argname
|
||||
)
|
||||
else:
|
||||
msg = "fixture '{}' not found".format(self.argname)
|
||||
msg = f"fixture '{self.argname}' not found"
|
||||
msg += "\n available fixtures: {}".format(", ".join(sorted(available)))
|
||||
msg += "\n use 'pytest --fixtures [testpath]' for help on them."
|
||||
|
||||
|
@ -882,8 +876,7 @@ class FixtureLookupErrorRepr(TerminalRepr):
|
|||
)
|
||||
for line in lines[1:]:
|
||||
tw.line(
|
||||
"{} {}".format(FormattedExcinfo.flow_marker, line.strip()),
|
||||
red=True,
|
||||
f"{FormattedExcinfo.flow_marker} {line.strip()}", red=True,
|
||||
)
|
||||
tw.line()
|
||||
tw.line("%s:%d" % (self.filename, self.firstlineno + 1))
|
||||
|
@ -907,9 +900,7 @@ def call_fixture_func(
|
|||
try:
|
||||
fixture_result = next(generator)
|
||||
except StopIteration:
|
||||
raise ValueError(
|
||||
"{} did not yield a value".format(request.fixturename)
|
||||
) from None
|
||||
raise ValueError(f"{request.fixturename} did not yield a value") from None
|
||||
finalizer = functools.partial(_teardown_yield_fixture, fixturefunc, generator)
|
||||
request.addfinalizer(finalizer)
|
||||
else:
|
||||
|
@ -987,7 +978,7 @@ class FixtureDef(Generic[_FixtureValue]):
|
|||
self.scopenum = scope2index(
|
||||
# TODO: Check if the `or` here is really necessary.
|
||||
scope_ or "function", # type: ignore[unreachable]
|
||||
descr="Fixture '{}'".format(func.__name__),
|
||||
descr=f"Fixture '{func.__name__}'",
|
||||
where=baseid,
|
||||
)
|
||||
self.scope = scope_
|
||||
|
|
|
@ -137,7 +137,7 @@ def showversion(config: Config) -> None:
|
|||
for line in plugininfo:
|
||||
sys.stderr.write(line + "\n")
|
||||
else:
|
||||
sys.stderr.write("pytest {}\n".format(pytest.__version__))
|
||||
sys.stderr.write(f"pytest {pytest.__version__}\n")
|
||||
|
||||
|
||||
def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
|
||||
|
@ -172,8 +172,8 @@ def showhelp(config: Config) -> None:
|
|||
if type is None:
|
||||
type = "string"
|
||||
if help is None:
|
||||
raise TypeError("help argument cannot be None for {}".format(name))
|
||||
spec = "{} ({}):".format(name, type)
|
||||
raise TypeError(f"help argument cannot be None for {name}")
|
||||
spec = f"{name} ({type}):"
|
||||
tw.write(" %s" % spec)
|
||||
spec_len = len(spec)
|
||||
if spec_len > (indent_len - 3):
|
||||
|
@ -208,7 +208,7 @@ def showhelp(config: Config) -> None:
|
|||
("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals"),
|
||||
]
|
||||
for name, help in vars:
|
||||
tw.line(" {:<24} {}".format(name, help))
|
||||
tw.line(f" {name:<24} {help}")
|
||||
tw.line()
|
||||
tw.line()
|
||||
|
||||
|
@ -235,7 +235,7 @@ def getpluginversioninfo(config: Config) -> List[str]:
|
|||
lines.append("setuptools registered plugins:")
|
||||
for plugin, dist in plugininfo:
|
||||
loc = getattr(plugin, "__file__", repr(plugin))
|
||||
content = "{}-{} at {}".format(dist.project_name, dist.version, loc)
|
||||
content = f"{dist.project_name}-{dist.version} at {loc}"
|
||||
lines.append(" " + content)
|
||||
return lines
|
||||
|
||||
|
@ -243,9 +243,7 @@ def getpluginversioninfo(config: Config) -> List[str]:
|
|||
def pytest_report_header(config: Config) -> List[str]:
|
||||
lines = []
|
||||
if config.option.debug or config.option.traceconfig:
|
||||
lines.append(
|
||||
"using: pytest-{} pylib-{}".format(pytest.__version__, py.__version__)
|
||||
)
|
||||
lines.append(f"using: pytest-{pytest.__version__} pylib-{py.__version__}")
|
||||
|
||||
verinfo = getpluginversioninfo(config)
|
||||
if verinfo:
|
||||
|
@ -259,5 +257,5 @@ def pytest_report_header(config: Config) -> List[str]:
|
|||
r = plugin.__file__
|
||||
else:
|
||||
r = repr(plugin)
|
||||
lines.append(" {:<20}: {}".format(name, r))
|
||||
lines.append(f" {name:<20}: {r}")
|
||||
return lines
|
||||
|
|
|
@ -228,9 +228,9 @@ class _NodeReporter:
|
|||
reason = str(report.longrepr)
|
||||
|
||||
if report.when == "teardown":
|
||||
msg = 'failed on teardown with "{}"'.format(reason)
|
||||
msg = f'failed on teardown with "{reason}"'
|
||||
else:
|
||||
msg = 'failed on setup with "{}"'.format(reason)
|
||||
msg = f'failed on setup with "{reason}"'
|
||||
self._add_simple("error", msg, str(report.longrepr))
|
||||
|
||||
def append_skipped(self, report: TestReport) -> None:
|
||||
|
@ -246,7 +246,7 @@ class _NodeReporter:
|
|||
filename, lineno, skipreason = report.longrepr
|
||||
if skipreason.startswith("Skipped: "):
|
||||
skipreason = skipreason[9:]
|
||||
details = "{}:{}: {}".format(filename, lineno, skipreason)
|
||||
details = f"{filename}:{lineno}: {skipreason}"
|
||||
|
||||
skipped = ET.Element("skipped", type="pytest.skip", message=skipreason)
|
||||
skipped.text = bin_xml_escape(details)
|
||||
|
@ -683,7 +683,7 @@ class LogXML:
|
|||
logfile.close()
|
||||
|
||||
def pytest_terminal_summary(self, terminalreporter: TerminalReporter) -> None:
|
||||
terminalreporter.write_sep("-", "generated xml file: {}".format(self.logfile))
|
||||
terminalreporter.write_sep("-", f"generated xml file: {self.logfile}")
|
||||
|
||||
def add_global_property(self, name: str, value: object) -> None:
|
||||
__tracebackhide__ = True
|
||||
|
|
|
@ -267,9 +267,7 @@ def wrap_session(
|
|||
if excinfo.value.returncode is not None:
|
||||
exitstatus = excinfo.value.returncode
|
||||
if initstate < 2:
|
||||
sys.stderr.write(
|
||||
"{}: {}\n".format(excinfo.typename, excinfo.value.msg)
|
||||
)
|
||||
sys.stderr.write(f"{excinfo.typename}: {excinfo.value.msg}\n")
|
||||
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
|
||||
session.exitstatus = exitstatus
|
||||
except BaseException:
|
||||
|
@ -615,8 +613,8 @@ class Session(nodes.FSCollector):
|
|||
if self._notfound:
|
||||
errors = []
|
||||
for arg, cols in self._notfound:
|
||||
line = "(no name {!r} in any of {!r})".format(arg, cols)
|
||||
errors.append("not found: {}\n{}".format(arg, line))
|
||||
line = f"(no name {arg!r} in any of {cols!r})"
|
||||
errors.append(f"not found: {arg}\n{line}")
|
||||
raise UsageError(*errors)
|
||||
if not genitems:
|
||||
items = rep.result
|
||||
|
|
|
@ -201,7 +201,7 @@ def deselect_by_keyword(items: "List[Item]", config: Config) -> None:
|
|||
expression = Expression.compile(keywordexpr)
|
||||
except ParseError as e:
|
||||
raise UsageError(
|
||||
"Wrong expression passed to '-k': {}: {}".format(keywordexpr, e)
|
||||
f"Wrong expression passed to '-k': {keywordexpr}: {e}"
|
||||
) from None
|
||||
|
||||
remaining = []
|
||||
|
@ -245,9 +245,7 @@ def deselect_by_mark(items: "List[Item]", config: Config) -> None:
|
|||
try:
|
||||
expression = Expression.compile(matchexpr)
|
||||
except ParseError as e:
|
||||
raise UsageError(
|
||||
"Wrong expression passed to '-m': {}: {}".format(matchexpr, e)
|
||||
) from None
|
||||
raise UsageError(f"Wrong expression passed to '-m': {matchexpr}: {e}") from None
|
||||
|
||||
remaining = []
|
||||
deselected = []
|
||||
|
|
|
@ -66,7 +66,7 @@ class ParseError(Exception):
|
|||
self.message = message
|
||||
|
||||
def __str__(self) -> str:
|
||||
return "at column {}: {}".format(self.column, self.message)
|
||||
return f"at column {self.column}: {self.message}"
|
||||
|
||||
|
||||
class Scanner:
|
||||
|
|
|
@ -310,7 +310,7 @@ class MarkDecorator:
|
|||
return self.name # for backward-compat (2.4.1 had this attr)
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "<MarkDecorator {!r}>".format(self.mark)
|
||||
return f"<MarkDecorator {self.mark!r}>"
|
||||
|
||||
def with_args(self, *args: object, **kwargs: object) -> "MarkDecorator":
|
||||
"""Return a MarkDecorator with extra arguments added.
|
||||
|
@ -364,7 +364,7 @@ def normalize_mark_list(mark_list: Iterable[Union[Mark, MarkDecorator]]) -> List
|
|||
] # unpack MarkDecorator
|
||||
for mark in extracted:
|
||||
if not isinstance(mark, Mark):
|
||||
raise TypeError("got {!r} instead of Mark".format(mark))
|
||||
raise TypeError(f"got {mark!r} instead of Mark")
|
||||
return [x for x in extracted if isinstance(x, Mark)]
|
||||
|
||||
|
||||
|
@ -498,14 +498,14 @@ class MarkGenerator:
|
|||
if name not in self._markers:
|
||||
if self._config.option.strict_markers:
|
||||
fail(
|
||||
"{!r} not found in `markers` configuration option".format(name),
|
||||
f"{name!r} not found in `markers` configuration option",
|
||||
pytrace=False,
|
||||
)
|
||||
|
||||
# Raise a specific error for common misspellings of "parametrize".
|
||||
if name in ["parameterize", "parametrise", "parameterise"]:
|
||||
__tracebackhide__ = True
|
||||
fail("Unknown '{}' mark, did you mean 'parametrize'?".format(name))
|
||||
fail(f"Unknown '{name}' mark, did you mean 'parametrize'?")
|
||||
|
||||
warnings.warn(
|
||||
"Unknown pytest.mark.%s - is this a typo? You can register "
|
||||
|
@ -556,4 +556,4 @@ class NodeKeywords(MutableMapping[str, Any]):
|
|||
return len(self._seen())
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "<NodeKeywords for node {}>".format(self.node)
|
||||
return f"<NodeKeywords for node {self.node}>"
|
||||
|
|
|
@ -74,7 +74,7 @@ def resolve(name: str) -> object:
|
|||
if expected == used:
|
||||
raise
|
||||
else:
|
||||
raise ImportError("import error in {}: {}".format(used, ex)) from ex
|
||||
raise ImportError(f"import error in {used}: {ex}") from ex
|
||||
found = annotated_getattr(found, part, used)
|
||||
return found
|
||||
|
||||
|
@ -93,9 +93,7 @@ def annotated_getattr(obj: object, name: str, ann: str) -> object:
|
|||
|
||||
def derive_importpath(import_path: str, raising: bool) -> Tuple[str, object]:
|
||||
if not isinstance(import_path, str) or "." not in import_path: # type: ignore[unreachable]
|
||||
raise TypeError(
|
||||
"must be absolute import path string, not {!r}".format(import_path)
|
||||
)
|
||||
raise TypeError(f"must be absolute import path string, not {import_path!r}")
|
||||
module, attr = import_path.rsplit(".", 1)
|
||||
target = resolve(module)
|
||||
if raising:
|
||||
|
@ -202,7 +200,7 @@ class MonkeyPatch:
|
|||
|
||||
oldval = getattr(target, name, notset)
|
||||
if raising and oldval is notset:
|
||||
raise AttributeError("{!r} has no attribute {!r}".format(target, name))
|
||||
raise AttributeError(f"{target!r} has no attribute {name!r}")
|
||||
|
||||
# avoid class descriptors like staticmethod/classmethod
|
||||
if inspect.isclass(target):
|
||||
|
|
|
@ -40,7 +40,7 @@ class OutcomeException(BaseException):
|
|||
def __repr__(self) -> str:
|
||||
if self.msg:
|
||||
return self.msg
|
||||
return "<{} instance>".format(self.__class__.__name__)
|
||||
return f"<{self.__class__.__name__} instance>"
|
||||
|
||||
__str__ = __repr__
|
||||
|
||||
|
@ -208,7 +208,7 @@ def importorskip(
|
|||
__import__(modname)
|
||||
except ImportError as exc:
|
||||
if reason is None:
|
||||
reason = "could not import {!r}: {}".format(modname, exc)
|
||||
reason = f"could not import {modname!r}: {exc}"
|
||||
raise Skipped(reason, allow_module_level=True) from None
|
||||
mod = sys.modules[modname]
|
||||
if minversion is None:
|
||||
|
|
|
@ -107,4 +107,4 @@ def pytest_terminal_summary(terminalreporter: TerminalReporter) -> None:
|
|||
s = file.getvalue()
|
||||
assert len(s)
|
||||
pastebinurl = create_new_paste(s)
|
||||
terminalreporter.write_line("{} --> {}".format(msg, pastebinurl))
|
||||
terminalreporter.write_line(f"{msg} --> {pastebinurl}")
|
||||
|
|
|
@ -63,9 +63,7 @@ def on_rm_rf_error(func, path: str, exc, *, start_path: Path) -> bool:
|
|||
|
||||
if not isinstance(excvalue, PermissionError):
|
||||
warnings.warn(
|
||||
PytestWarning(
|
||||
"(rm_rf) error removing {}\n{}: {}".format(path, exctype, excvalue)
|
||||
)
|
||||
PytestWarning(f"(rm_rf) error removing {path}\n{exctype}: {excvalue}")
|
||||
)
|
||||
return False
|
||||
|
||||
|
@ -200,7 +198,7 @@ def make_numbered_dir(root: Path, prefix: str) -> Path:
|
|||
# try up to 10 times to create the folder
|
||||
max_existing = max(map(parse_num, find_suffixes(root, prefix)), default=-1)
|
||||
new_number = max_existing + 1
|
||||
new_path = root.joinpath("{}{}".format(prefix, new_number))
|
||||
new_path = root.joinpath(f"{prefix}{new_number}")
|
||||
try:
|
||||
new_path.mkdir()
|
||||
except Exception:
|
||||
|
@ -221,7 +219,7 @@ def create_cleanup_lock(p: Path) -> Path:
|
|||
try:
|
||||
fd = os.open(str(lock_path), os.O_WRONLY | os.O_CREAT | os.O_EXCL, 0o644)
|
||||
except FileExistsError as e:
|
||||
raise OSError("cannot create lockfile in {path}".format(path=p)) from e
|
||||
raise OSError(f"cannot create lockfile in {p}") from e
|
||||
else:
|
||||
pid = os.getpid()
|
||||
spid = str(pid).encode()
|
||||
|
@ -258,7 +256,7 @@ def maybe_delete_a_numbered_dir(path: Path) -> None:
|
|||
lock_path = create_cleanup_lock(path)
|
||||
parent = path.parent
|
||||
|
||||
garbage = parent.joinpath("garbage-{}".format(uuid.uuid4()))
|
||||
garbage = parent.joinpath(f"garbage-{uuid.uuid4()}")
|
||||
path.rename(garbage)
|
||||
rm_rf(garbage)
|
||||
except OSError:
|
||||
|
@ -401,7 +399,7 @@ def fnmatch_ex(pattern: str, path) -> bool:
|
|||
else:
|
||||
name = str(path)
|
||||
if path.is_absolute() and not os.path.isabs(pattern):
|
||||
pattern = "*{}{}".format(os.sep, pattern)
|
||||
pattern = f"*{os.sep}{pattern}"
|
||||
return fnmatch.fnmatch(name, pattern)
|
||||
|
||||
|
||||
|
@ -415,7 +413,7 @@ def symlink_or_skip(src, dst, **kwargs):
|
|||
try:
|
||||
os.symlink(str(src), str(dst), **kwargs)
|
||||
except OSError as e:
|
||||
skip("symlinks not supported: {}".format(e))
|
||||
skip(f"symlinks not supported: {e}")
|
||||
|
||||
|
||||
class ImportMode(Enum):
|
||||
|
|
|
@ -197,7 +197,7 @@ class ParsedCall:
|
|||
def __repr__(self) -> str:
|
||||
d = self.__dict__.copy()
|
||||
del d["_name"]
|
||||
return "<ParsedCall {!r}(**{!r})>".format(self._name, d)
|
||||
return f"<ParsedCall {self._name!r}(**{d!r})>"
|
||||
|
||||
if TYPE_CHECKING:
|
||||
# The class has undetermined attributes, this tells mypy about it.
|
||||
|
@ -252,7 +252,7 @@ class HookRecorder:
|
|||
break
|
||||
print("NONAMEMATCH", name, "with", call)
|
||||
else:
|
||||
pytest.fail("could not find {!r} check {!r}".format(name, check))
|
||||
pytest.fail(f"could not find {name!r} check {check!r}")
|
||||
|
||||
def popcall(self, name: str) -> ParsedCall:
|
||||
__tracebackhide__ = True
|
||||
|
@ -260,7 +260,7 @@ class HookRecorder:
|
|||
if call._name == name:
|
||||
del self.calls[i]
|
||||
return call
|
||||
lines = ["could not find call {!r}, in:".format(name)]
|
||||
lines = [f"could not find call {name!r}, in:"]
|
||||
lines.extend([" %s" % x for x in self.calls])
|
||||
pytest.fail("\n".join(lines))
|
||||
|
||||
|
@ -388,7 +388,7 @@ class HookRecorder:
|
|||
elif rep.skipped:
|
||||
skipped.append(rep)
|
||||
else:
|
||||
assert rep.failed, "Unexpected outcome: {!r}".format(rep)
|
||||
assert rep.failed, f"Unexpected outcome: {rep!r}"
|
||||
failed.append(rep)
|
||||
return passed, skipped, failed
|
||||
|
||||
|
@ -658,7 +658,7 @@ class Testdir:
|
|||
mp.setenv("PY_COLORS", "0")
|
||||
|
||||
def __repr__(self) -> str:
|
||||
return "<Testdir {!r}>".format(self.tmpdir)
|
||||
return f"<Testdir {self.tmpdir!r}>"
|
||||
|
||||
def __str__(self) -> str:
|
||||
return str(self.tmpdir)
|
||||
|
@ -874,7 +874,7 @@ class Testdir:
|
|||
return result
|
||||
else:
|
||||
raise LookupError(
|
||||
'example "{}" is not found as a file or directory'.format(example_path)
|
||||
f'example "{example_path}" is not found as a file or directory'
|
||||
)
|
||||
|
||||
Session = Session
|
||||
|
@ -1087,7 +1087,7 @@ class Testdir:
|
|||
return self.runpytest_inprocess(*args, **kwargs)
|
||||
elif self._method == "subprocess":
|
||||
return self.runpytest_subprocess(*args, **kwargs)
|
||||
raise RuntimeError("Unrecognized runpytest option: {}".format(self._method))
|
||||
raise RuntimeError(f"Unrecognized runpytest option: {self._method}")
|
||||
|
||||
def _ensure_basetemp(self, args):
|
||||
args = list(args)
|
||||
|
@ -1329,7 +1329,7 @@ class Testdir:
|
|||
for line in lines:
|
||||
print(line, file=fp)
|
||||
except UnicodeEncodeError:
|
||||
print("couldn't print to {} because of encoding".format(fp))
|
||||
print(f"couldn't print to {fp} because of encoding")
|
||||
|
||||
def _getpytestargs(self) -> Tuple[str, ...]:
|
||||
return sys.executable, "-mpytest"
|
||||
|
@ -1386,7 +1386,7 @@ class Testdir:
|
|||
"""
|
||||
basetemp = self.tmpdir.mkdir("temp-pexpect")
|
||||
invoke = " ".join(map(str, self._getpytestargs()))
|
||||
cmd = "{} --basetemp={} {}".format(invoke, basetemp, string)
|
||||
cmd = f"{invoke} --basetemp={basetemp} {string}"
|
||||
return self.spawn(cmd, expect_timeout=expect_timeout)
|
||||
|
||||
def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn":
|
||||
|
@ -1573,7 +1573,7 @@ class LineMatcher:
|
|||
break
|
||||
else:
|
||||
if consecutive and started:
|
||||
msg = "no consecutive match: {!r}".format(line)
|
||||
msg = f"no consecutive match: {line!r}"
|
||||
self._log(msg)
|
||||
self._log(
|
||||
"{:>{width}}".format("with:", width=wnick), repr(nextline)
|
||||
|
@ -1587,7 +1587,7 @@ class LineMatcher:
|
|||
self._log("{:>{width}}".format("and:", width=wnick), repr(nextline))
|
||||
extralines.append(nextline)
|
||||
else:
|
||||
msg = "remains unmatched: {!r}".format(line)
|
||||
msg = f"remains unmatched: {line!r}"
|
||||
self._log(msg)
|
||||
self._fail(msg)
|
||||
self._log_output = []
|
||||
|
@ -1622,7 +1622,7 @@ class LineMatcher:
|
|||
wnick = len(match_nickname) + 1
|
||||
for line in self.lines:
|
||||
if match_func(line, pat):
|
||||
msg = "{}: {!r}".format(match_nickname, pat)
|
||||
msg = f"{match_nickname}: {pat!r}"
|
||||
self._log(msg)
|
||||
self._log("{:>{width}}".format("with:", width=wnick), repr(line))
|
||||
self._fail(msg)
|
||||
|
|
|
@ -483,7 +483,7 @@ class PyCollector(PyobjMixin, nodes.Collector):
|
|||
fixtureinfo.prune_dependency_tree()
|
||||
|
||||
for callspec in metafunc._calls:
|
||||
subname = "{}[{}]".format(name, callspec.id)
|
||||
subname = f"{name}[{callspec.id}]"
|
||||
yield Function.from_parent(
|
||||
self,
|
||||
name=subname,
|
||||
|
@ -888,7 +888,7 @@ class CallSpec2:
|
|||
|
||||
def _checkargnotcontained(self, arg: str) -> None:
|
||||
if arg in self.params or arg in self.funcargs:
|
||||
raise ValueError("duplicate {!r}".format(arg))
|
||||
raise ValueError(f"duplicate {arg!r}")
|
||||
|
||||
def getparam(self, name: str) -> object:
|
||||
try:
|
||||
|
@ -918,7 +918,7 @@ class CallSpec2:
|
|||
elif valtype_for_arg == "funcargs":
|
||||
self.funcargs[arg] = val
|
||||
else: # pragma: no cover
|
||||
assert False, "Unhandled valtype for arg: {}".format(valtype_for_arg)
|
||||
assert False, f"Unhandled valtype for arg: {valtype_for_arg}"
|
||||
self.indices[arg] = param_index
|
||||
self._arg2scopenum[arg] = scopenum
|
||||
self._idlist.append(id)
|
||||
|
@ -1068,7 +1068,7 @@ class Metafunc:
|
|||
object.__setattr__(_param_mark._param_ids_from, "_param_ids_generated", ids)
|
||||
|
||||
scopenum = scope2index(
|
||||
scope, descr="parametrize() call in {}".format(self.function.__name__)
|
||||
scope, descr=f"parametrize() call in {self.function.__name__}"
|
||||
)
|
||||
|
||||
# Create the new calls: if we are parametrize() multiple times (by applying the decorator
|
||||
|
@ -1224,7 +1224,7 @@ class Metafunc:
|
|||
else:
|
||||
name = "fixture" if indirect else "argument"
|
||||
fail(
|
||||
"In {}: function uses no {} '{}'".format(func_name, name, arg),
|
||||
f"In {func_name}: function uses no {name} '{arg}'",
|
||||
pytrace=False,
|
||||
)
|
||||
|
||||
|
@ -1291,7 +1291,7 @@ def _idval(
|
|||
if generated_id is not None:
|
||||
val = generated_id
|
||||
except Exception as e:
|
||||
prefix = "{}: ".format(nodeid) if nodeid is not None else ""
|
||||
prefix = f"{nodeid}: " if nodeid is not None else ""
|
||||
msg = "error raised while trying to determine id of parameter '{}' at position {}"
|
||||
msg = prefix + msg.format(argname, idx)
|
||||
raise ValueError(msg) from e
|
||||
|
@ -1400,7 +1400,7 @@ def _show_fixtures_per_test(config: Config, session: Session) -> None:
|
|||
return
|
||||
if verbose > 0:
|
||||
bestrel = get_best_relpath(fixture_def.func)
|
||||
funcargspec = "{} -- {}".format(argname, bestrel)
|
||||
funcargspec = f"{argname} -- {bestrel}"
|
||||
else:
|
||||
funcargspec = argname
|
||||
tw.line(funcargspec, green=True)
|
||||
|
@ -1417,7 +1417,7 @@ def _show_fixtures_per_test(config: Config, session: Session) -> None:
|
|||
# This test item does not use any fixtures.
|
||||
return
|
||||
tw.line()
|
||||
tw.sep("-", "fixtures used by {}".format(item.name))
|
||||
tw.sep("-", f"fixtures used by {item.name}")
|
||||
# TODO: Fix this type ignore.
|
||||
tw.sep("-", "({})".format(get_best_relpath(item.function))) # type: ignore[attr-defined]
|
||||
# dict key not used in loop but needed for sorting.
|
||||
|
@ -1476,7 +1476,7 @@ def _showfixtures_main(config: Config, session: Session) -> None:
|
|||
if currentmodule != module:
|
||||
if not module.startswith("_pytest."):
|
||||
tw.line()
|
||||
tw.sep("-", "fixtures defined from {}".format(module))
|
||||
tw.sep("-", f"fixtures defined from {module}")
|
||||
currentmodule = module
|
||||
if verbose <= 0 and argname[0] == "_":
|
||||
continue
|
||||
|
@ -1491,7 +1491,7 @@ def _showfixtures_main(config: Config, session: Session) -> None:
|
|||
if doc:
|
||||
write_docstring(tw, doc)
|
||||
else:
|
||||
tw.line(" {}: no docstring available".format(loc), red=True)
|
||||
tw.line(f" {loc}: no docstring available", red=True)
|
||||
tw.line()
|
||||
|
||||
|
||||
|
|
|
@ -25,7 +25,7 @@ from _pytest.outcomes import fail
|
|||
|
||||
|
||||
def _non_numeric_type_error(value, at: Optional[str]) -> TypeError:
|
||||
at_str = " at {}".format(at) if at else ""
|
||||
at_str = f" at {at}" if at else ""
|
||||
return TypeError(
|
||||
"cannot make approximate comparisons to non-numeric values: {!r} {}".format(
|
||||
value, at_str
|
||||
|
@ -98,7 +98,7 @@ class ApproxNumpy(ApproxBase):
|
|||
|
||||
def __repr__(self) -> str:
|
||||
list_scalars = _recursive_list_map(self._approx_scalar, self.expected.tolist())
|
||||
return "approx({!r})".format(list_scalars)
|
||||
return f"approx({list_scalars!r})"
|
||||
|
||||
def __eq__(self, actual) -> bool:
|
||||
import numpy as np
|
||||
|
@ -109,9 +109,7 @@ class ApproxNumpy(ApproxBase):
|
|||
try:
|
||||
actual = np.asarray(actual)
|
||||
except Exception as e:
|
||||
raise TypeError(
|
||||
"cannot compare '{}' to numpy.ndarray".format(actual)
|
||||
) from e
|
||||
raise TypeError(f"cannot compare '{actual}' to numpy.ndarray") from e
|
||||
|
||||
if not np.isscalar(actual) and actual.shape != self.expected.shape:
|
||||
return False
|
||||
|
@ -219,7 +217,7 @@ class ApproxScalar(ApproxBase):
|
|||
# If a sensible tolerance can't be calculated, self.tolerance will
|
||||
# raise a ValueError. In this case, display '???'.
|
||||
try:
|
||||
vetted_tolerance = "{:.1e}".format(self.tolerance)
|
||||
vetted_tolerance = f"{self.tolerance:.1e}"
|
||||
if (
|
||||
isinstance(self.expected, Complex)
|
||||
and self.expected.imag
|
||||
|
@ -229,7 +227,7 @@ class ApproxScalar(ApproxBase):
|
|||
except ValueError:
|
||||
vetted_tolerance = "???"
|
||||
|
||||
return "{} ± {}".format(self.expected, vetted_tolerance)
|
||||
return f"{self.expected} ± {vetted_tolerance}"
|
||||
|
||||
def __eq__(self, actual) -> bool:
|
||||
"""Return whether the given value is equal to the expected value
|
||||
|
@ -291,7 +289,7 @@ class ApproxScalar(ApproxBase):
|
|||
|
||||
if absolute_tolerance < 0:
|
||||
raise ValueError(
|
||||
"absolute tolerance can't be negative: {}".format(absolute_tolerance)
|
||||
f"absolute tolerance can't be negative: {absolute_tolerance}"
|
||||
)
|
||||
if math.isnan(absolute_tolerance):
|
||||
raise ValueError("absolute tolerance can't be NaN.")
|
||||
|
@ -313,7 +311,7 @@ class ApproxScalar(ApproxBase):
|
|||
|
||||
if relative_tolerance < 0:
|
||||
raise ValueError(
|
||||
"relative tolerance can't be negative: {}".format(absolute_tolerance)
|
||||
f"relative tolerance can't be negative: {absolute_tolerance}"
|
||||
)
|
||||
if math.isnan(relative_tolerance):
|
||||
raise ValueError("relative tolerance can't be NaN.")
|
||||
|
@ -698,7 +696,7 @@ def raises(
|
|||
not_a = exc.__name__ if isinstance(exc, type) else type(exc).__name__
|
||||
raise TypeError(msg.format(not_a))
|
||||
|
||||
message = "DID NOT RAISE {}".format(expected_exception)
|
||||
message = f"DID NOT RAISE {expected_exception}"
|
||||
|
||||
if not args:
|
||||
match = kwargs.pop("match", None) # type: Optional[Union[str, Pattern[str]]]
|
||||
|
|
|
@ -321,7 +321,7 @@ class TestReport(BaseReport):
|
|||
excinfo, style=item.config.getoption("tbstyle", "auto")
|
||||
)
|
||||
for rwhen, key, content in item._report_sections:
|
||||
sections.append(("Captured {} {}".format(key, rwhen), content))
|
||||
sections.append((f"Captured {key} {rwhen}", content))
|
||||
return cls(
|
||||
item.nodeid,
|
||||
item.location,
|
||||
|
|
|
@@ -93,7 +93,7 @@ def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None:
% (len(dlist) - i, durations_min)
)
break
tr.write_line("{:02.2f}s {:<8} {}".format(rep.duration, rep.when, rep.nodeid))
tr.write_line(f"{rep.duration:02.2f}s {rep.when:<8} {rep.nodeid}")


def pytest_sessionstart(session: "Session") -> None:

@@ -186,7 +186,7 @@ def _update_current_test_var(
"""
var_name = "PYTEST_CURRENT_TEST"
if when:
value = "{} ({})".format(item.nodeid, when)
value = f"{item.nodeid} ({when})"
# don't allow null bytes on environment variables (see #2644, #2957)
value = value.replace("\x00", "(null)")
os.environ[var_name] = value

@@ -248,7 +248,7 @@ def call_runtest_hook(
elif when == "teardown":
ihook = item.ihook.pytest_runtest_teardown
else:
assert False, "Unhandled runtest hook case: {}".format(when)
assert False, f"Unhandled runtest hook case: {when}"
reraise = (Exit,) # type: Tuple[Type[BaseException], ...]
if not item.config.getoption("usepdb", False):
reraise += (KeyboardInterrupt,)

@@ -290,7 +290,7 @@ class CallInfo(Generic[TResult]):
@property
def result(self) -> TResult:
if self.excinfo is not None:
raise AttributeError("{!r} has no valid result".format(self))
raise AttributeError(f"{self!r} has no valid result")
# The cast is safe because an exception wasn't raised, hence
# _result has the expected function return type (which may be
# None, that's why a cast and not an assert).

@@ -330,8 +330,8 @@ class CallInfo(Generic[TResult]):

def __repr__(self) -> str:
if self.excinfo is None:
return "<CallInfo when={!r} result: {!r}>".format(self.when, self._result)
return "<CallInfo when={!r} excinfo={!r}>".format(self.when, self.excinfo)
return f"<CallInfo when={self.when!r} result: {self._result!r}>"
return f"<CallInfo when={self.when!r} excinfo={self.excinfo!r}>"


def pytest_runtest_makereport(item: Item, call: CallInfo[None]) -> TestReport:
@@ -101,7 +101,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
if hasattr(item, "obj"):
globals_.update(item.obj.__globals__) # type: ignore[attr-defined]
try:
filename = "<{} condition>".format(mark.name)
filename = f"<{mark.name} condition>"
condition_code = compile(condition, filename, "eval")
result = eval(condition_code, globals_)
except SyntaxError as exc:

@@ -264,7 +264,7 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
if unexpectedsuccess_key in item._store and rep.when == "call":
reason = item._store[unexpectedsuccess_key]
if reason:
rep.longrepr = "Unexpected success: {}".format(reason)
rep.longrepr = f"Unexpected success: {reason}"
else:
rep.longrepr = "Unexpected success"
rep.outcome = "failed"
@@ -304,7 +304,7 @@ class WarningReport:
relpath = bestrelpath(
config.invocation_params.dir, absolutepath(filename)
)
return "{}:{}".format(relpath, linenum)
return f"{relpath}:{linenum}"
else:
return str(self.fslocation)
return None

@@ -487,7 +487,7 @@ class TerminalReporter:

def pytest_plugin_registered(self, plugin: _PluggyPlugin) -> None:
if self.config.option.traceconfig:
msg = "PLUGIN registered: {}".format(plugin)
msg = f"PLUGIN registered: {plugin}"
# XXX This event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line.

@@ -593,9 +593,9 @@ class TerminalReporter:
if collected:
progress = self._progress_nodeids_reported
counter_format = "{{:{}d}}".format(len(str(collected)))
format_string = " [{}/{{}}]".format(counter_format)
format_string = f" [{counter_format}/{{}}]"
return format_string.format(len(progress), collected)
return " [ {} / {} ]".format(collected, collected)
return f" [ {collected} / {collected} ]"
else:
if collected:
return " [{:3d}%]".format(

@@ -682,7 +682,7 @@ class TerminalReporter:
self.write_sep("=", "test session starts", bold=True)
verinfo = platform.python_version()
if not self.no_header:
msg = "platform {} -- Python {}".format(sys.platform, verinfo)
msg = f"platform {sys.platform} -- Python {verinfo}"
pypy_version_info = getattr(sys, "pypy_version_info", None)
if pypy_version_info:
verinfo = ".".join(map(str, pypy_version_info[:3]))

@@ -778,7 +778,7 @@ class TerminalReporter:
if col.name == "()": # Skip Instances.
continue
indent = (len(stack) - 1) * " "
self._tw.line("{}{}".format(indent, col))
self._tw.line(f"{indent}{col}")
if self.config.option.verbose >= 1:
obj = getattr(col, "obj", None)
doc = inspect.getdoc(obj) if obj else None

@@ -1018,7 +1018,7 @@ class TerminalReporter:
if rep.when == "collect":
msg = "ERROR collecting " + msg
else:
msg = "ERROR at {} of {}".format(rep.when, msg)
msg = f"ERROR at {rep.when} of {msg}"
self.write_sep("_", msg, red=True, bold=True)
self._outrep_summary(rep)

@@ -1091,7 +1091,7 @@ class TerminalReporter:
for rep in xfailed:
verbose_word = rep._get_verbose_word(self.config)
pos = _get_pos(self.config, rep)
lines.append("{} {}".format(verbose_word, pos))
lines.append(f"{verbose_word} {pos}")
reason = rep.wasxfail
if reason:
lines.append(" " + str(reason))

@@ -1102,7 +1102,7 @@ class TerminalReporter:
verbose_word = rep._get_verbose_word(self.config)
pos = _get_pos(self.config, rep)
reason = rep.wasxfail
lines.append("{} {} {}".format(verbose_word, pos, reason))
lines.append(f"{verbose_word} {pos} {reason}")

def show_skipped(lines: List[str]) -> None:
skipped = self.stats.get("skipped", []) # type: List[CollectReport]

@@ -1201,7 +1201,7 @@ def _get_line_with_reprcrash_message(
verbose_word = rep._get_verbose_word(config)
pos = _get_pos(config, rep)

line = "{} {}".format(verbose_word, pos)
line = f"{verbose_word} {pos}"
len_line = wcswidth(line)
ellipsis, len_ellipsis = "...", 3
if len_line > termwidth - len_ellipsis:

@@ -1302,7 +1302,7 @@ def _plugin_nameversions(plugininfo) -> List[str]:
def format_session_duration(seconds: float) -> str:
"""Format the given seconds in a human readable manner to show in the final summary."""
if seconds < 60:
return "{:.2f}s".format(seconds)
return f"{seconds:.2f}s"
else:
dt = datetime.timedelta(seconds=int(seconds))
return "{:.2f}s ({})".format(seconds, dt)
return f"{seconds:.2f}s ({dt})"
@@ -50,9 +50,7 @@ class TempPathFactory:
def _ensure_relative_to_basetemp(self, basename: str) -> str:
basename = os.path.normpath(basename)
if (self.getbasetemp() / basename).resolve().parent != self.getbasetemp():
raise ValueError(
"{} is not a normalized and relative path".format(basename)
)
raise ValueError(f"{basename} is not a normalized and relative path")
return basename

def mktemp(self, basename: str, numbered: bool = True) -> Path:

@@ -94,7 +92,7 @@ class TempPathFactory:
user = get_user() or "unknown"
# use a sub-directory in the temproot to speed-up
# make_numbered_dir() call
rootdir = temproot.joinpath("pytest-of-{}".format(user))
rootdir = temproot.joinpath(f"pytest-of-{user}")
rootdir.mkdir(exist_ok=True)
basetemp = make_numbered_dir_with_cleanup(
prefix="pytest-", root=rootdir, keep=3, lock_timeout=LOCK_TIMEOUT
@@ -156,7 +156,7 @@ class TestGeneralUsage:
assert x
"""
)
result = testdir.runpytest(p, "--import-mode={}".format(import_mode))
result = testdir.runpytest(p, f"--import-mode={import_mode}")
result.stdout.fnmatch_lines(["> assert x", "E assert 0"])
assert result.ret == 1

@@ -185,7 +185,7 @@ class TestGeneralUsage:
assert result.ret == ExitCode.USAGE_ERROR
result.stderr.fnmatch_lines(
[
"ERROR: not found: {}".format(p2),
f"ERROR: not found: {p2}",
"(no name {!r} in any of [[][]])".format(str(p2)),
"",
]

@@ -212,7 +212,7 @@ class TestGeneralUsage:
result = testdir.runpytest()
assert result.stdout.lines == []
assert result.stderr.lines == [
"ImportError while loading conftest '{}'.".format(conftest),
f"ImportError while loading conftest '{conftest}'.",
"conftest.py:3: in <module>",
" foo()",
"conftest.py:2: in foo",

@@ -503,7 +503,7 @@ class TestInvocationVariants:

def test_pydoc(self, testdir):
for name in ("py.test", "pytest"):
result = testdir.runpython_c("import {};help({})".format(name, name))
result = testdir.runpython_c(f"import {name};help({name})")
assert result.ret == 0
s = result.stdout.str()
assert "MarkGenerator" in s

@@ -671,8 +671,8 @@ class TestInvocationVariants:
)
lib = ns.mkdir(dirname)
lib.ensure("__init__.py")
lib.join("test_{}.py".format(dirname)).write(
"def test_{}(): pass\ndef test_other():pass".format(dirname)
lib.join(f"test_{dirname}.py").write(
f"def test_{dirname}(): pass\ndef test_other():pass"
)

# The structure of the test directory is now:

@@ -891,7 +891,7 @@ class TestDurations:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
raise AssertionError(f"not found {x} {y}")

def test_calls_showall_verbose(self, testdir, mock_timing):
testdir.makepyfile(self.source)

@@ -904,7 +904,7 @@ class TestDurations:
if ("test_%s" % x) in line and y in line:
break
else:
raise AssertionError("not found {} {}".format(x, y))
raise AssertionError(f"not found {x} {y}")

def test_with_deselected(self, testdir, mock_timing):
testdir.makepyfile(self.source)
@@ -206,8 +206,8 @@ class TestTraceback_f_g_h:
excinfo = pytest.raises(ValueError, h)
traceback = excinfo.traceback
ntraceback = traceback.filter()
print("old: {!r}".format(traceback))
print("new: {!r}".format(ntraceback))
print(f"old: {traceback!r}")
print(f"new: {ntraceback!r}")

if matching:
assert len(ntraceback) == len(traceback) - 2

@@ -265,7 +265,7 @@ class TestTraceback_f_g_h:
decorator = pytest.importorskip("decorator").decorator

def log(f, *k, **kw):
print("{} {}".format(k, kw))
print(f"{k} {kw}")
f(*k, **kw)

log = decorator(log)

@@ -426,13 +426,13 @@ def test_match_raises_error(testdir):
assert result.ret != 0

exc_msg = "Regex pattern '[[]123[]]+' does not match 'division by zero'."
result.stdout.fnmatch_lines(["E * AssertionError: {}".format(exc_msg)])
result.stdout.fnmatch_lines([f"E * AssertionError: {exc_msg}"])
result.stdout.no_fnmatch_line("*__tracebackhide__ = True*")

result = testdir.runpytest("--fulltrace")
assert result.ret != 0
result.stdout.fnmatch_lines(
["*__tracebackhide__ = True*", "E * AssertionError: {}".format(exc_msg)]
["*__tracebackhide__ = True*", f"E * AssertionError: {exc_msg}"]
)

@@ -834,14 +834,14 @@ raise ValueError()
"def entry():",
"> f(0)",
"",
"{}:5: ".format(mod.__file__),
f"{mod.__file__}:5: ",
"_ _ *",
"",
" def f(x):",
"> raise ValueError(x)",
"E ValueError: 0",
"",
"{}:3: ValueError".format(mod.__file__),
f"{mod.__file__}:3: ValueError",
]
)
assert raised == 3
@@ -262,10 +262,10 @@ def test_log_cli_default_level_multiple_tests(testdir, request):
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"{}::test_log_1 ".format(filename),
f"{filename}::test_log_1 ",
"*WARNING*log message from test_log_1*",
"PASSED *50%*",
"{}::test_log_2 ".format(filename),
f"{filename}::test_log_2 ",
"*WARNING*log message from test_log_2*",
"PASSED *100%*",
"=* 2 passed in *=",

@@ -318,7 +318,7 @@ def test_log_cli_default_level_sections(testdir, request):
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"{}::test_log_1 ".format(filename),
f"{filename}::test_log_1 ",
"*-- live log start --*",
"*WARNING* >>>>> START >>>>>*",
"*-- live log setup --*",

@@ -330,7 +330,7 @@ def test_log_cli_default_level_sections(testdir, request):
"*WARNING*log message from teardown of test_log_1*",
"*-- live log finish --*",
"*WARNING* <<<<< END <<<<<<<*",
"{}::test_log_2 ".format(filename),
f"{filename}::test_log_2 ",
"*-- live log start --*",
"*WARNING* >>>>> START >>>>>*",
"*-- live log setup --*",

@@ -394,7 +394,7 @@ def test_live_logs_unknown_sections(testdir, request):
result.stdout.fnmatch_lines(
[
"*WARNING*Unknown Section*",
"{}::test_log_1 ".format(filename),
f"{filename}::test_log_1 ",
"*WARNING* >>>>> START >>>>>*",
"*-- live log setup --*",
"*WARNING*log message from setup of test_log_1*",

@@ -453,7 +453,7 @@ def test_sections_single_new_line_after_test_outcome(testdir, request):
result = testdir.runpytest()
result.stdout.fnmatch_lines(
[
"{}::test_log_1 ".format(filename),
f"{filename}::test_log_1 ",
"*-- live log start --*",
"*WARNING* >>>>> START >>>>>*",
"*-- live log setup --*",

@@ -638,7 +638,7 @@ def test_log_file_cli(testdir):
log_file = testdir.tmpdir.join("pytest.log").strpath

result = testdir.runpytest(
"-s", "--log-file={}".format(log_file), "--log-file-level=WARNING"
"-s", f"--log-file={log_file}", "--log-file-level=WARNING"
)

# fnmatch_lines does an assertion internally

@@ -670,9 +670,7 @@ def test_log_file_cli_level(testdir):

log_file = testdir.tmpdir.join("pytest.log").strpath

result = testdir.runpytest(
"-s", "--log-file={}".format(log_file), "--log-file-level=INFO"
)
result = testdir.runpytest("-s", f"--log-file={log_file}", "--log-file-level=INFO")

# fnmatch_lines does an assertion internally
result.stdout.fnmatch_lines(["test_log_file_cli_level.py PASSED"])
@@ -471,7 +471,7 @@ class TestApprox:
expected = "4.0e-06"
result = testdir.runpytest()
result.stdout.fnmatch_lines(
["*At index 0 diff: 3 != 4 ± {}".format(expected), "=* 1 failed in *="]
[f"*At index 0 diff: 3 != 4 ± {expected}", "=* 1 failed in *="]
)

@pytest.mark.parametrize(

@@ -483,8 +483,7 @@ class TestApprox:
)
def test_expected_value_type_error(self, x, name):
with pytest.raises(
TypeError,
match=r"pytest.approx\(\) does not support nested {}:".format(name),
TypeError, match=fr"pytest.approx\(\) does not support nested {name}:",
):
approx(x)
@@ -1993,7 +1993,7 @@ class TestAutouseManagement:
pass
"""
)
confcut = "--confcutdir={}".format(testdir.tmpdir)
confcut = f"--confcutdir={testdir.tmpdir}"
reprec = testdir.inline_run("-v", "-s", confcut)
reprec.assertoutcome(passed=8)
config = reprec.getcalls("pytest_unconfigure")[0].config

@@ -3796,7 +3796,7 @@ class TestParameterizedSubRequest:
" test_foos.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
"{}:4".format(fixfile),
f"{fixfile}:4",
"Requested here:",
"test_foos.py:4",
"*1 failed*",

@@ -3813,9 +3813,9 @@ class TestParameterizedSubRequest:
" test_foos.py::test_foo",
"",
"Requested fixture 'fix_with_param' defined in:",
"{}:4".format(fixfile),
f"{fixfile}:4",
"Requested here:",
"{}:4".format(testfile),
f"{testfile}:4",
"*1 failed*",
]
)
@@ -211,7 +211,7 @@ class TestRaises:
pytest.raises(TypeError, int, match="invalid")

def tfunc(match):
raise ValueError("match={}".format(match))
raise ValueError(f"match={match}")

pytest.raises(ValueError, tfunc, match="asdf").match("match=asdf")
pytest.raises(ValueError, tfunc, match="").match("match=")
@@ -11,7 +11,7 @@ def equal_with_bash(prefix, ffc, fc, out=None):
res_bash = set(fc(prefix))
retval = set(res) == res_bash
if out:
out.write("equal_with_bash({}) {} {}\n".format(prefix, retval, res))
out.write(f"equal_with_bash({prefix}) {retval} {res}\n")
if not retval:
out.write(" python - bash: %s\n" % (set(res) - res_bash))
out.write(" bash - python: %s\n" % (res_bash - set(res)))

@@ -45,26 +45,16 @@ class FilesCompleter:
completion = []
if self.allowednames:
if self.directories:
files = _wrapcall(
["bash", "-c", "compgen -A directory -- '{p}'".format(p=prefix)]
)
files = _wrapcall(["bash", "-c", f"compgen -A directory -- '{prefix}'"])
completion += [f + "/" for f in files]
for x in self.allowednames:
completion += _wrapcall(
[
"bash",
"-c",
"compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix),
]
["bash", "-c", f"compgen -A file -X '!*.{x}' -- '{prefix}'"]
)
else:
completion += _wrapcall(
["bash", "-c", "compgen -A file -- '{p}'".format(p=prefix)]
)
completion += _wrapcall(["bash", "-c", f"compgen -A file -- '{prefix}'"])

anticomp = _wrapcall(
["bash", "-c", "compgen -A directory -- '{p}'".format(p=prefix)]
)
anticomp = _wrapcall(["bash", "-c", f"compgen -A directory -- '{prefix}'"])

completion = list(set(completion) - set(anticomp))
@@ -1494,7 +1494,7 @@ def test_assert_tuple_warning(testdir):
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*test_assert_tuple_warning.py:2:*{}*".format(msg)])
result.stdout.fnmatch_lines([f"*test_assert_tuple_warning.py:2:*{msg}*"])

# tuples with size != 2 should not trigger the warning
testdir.makepyfile(

@@ -198,14 +198,14 @@ class TestAssertionRewrite:
lines = msg.splitlines()
if verbose > 1:
assert lines == [
"assert {!r} == 42".format(X),
" +{!r}".format(X),
f"assert {X!r} == 42",
f" +{X!r}",
" -42",
]
elif verbose > 0:
assert lines == [
"assert <class 'test_...e.<locals>.X'> == 42",
" +{!r}".format(X),
f" +{X!r}",
" -42",
]
else:

@@ -652,7 +652,7 @@ class TestAssertionRewrite:
assert getmsg(f1) == "assert 42"

def my_reprcompare2(op, left, right) -> str:
return "{} {} {}".format(left, op, right)
return f"{left} {op} {right}"

monkeypatch.setattr(util, "_reprcompare", my_reprcompare2)

@@ -834,9 +834,7 @@ def test_rewritten():
)
result = testdir.runpytest_subprocess()
assert result.ret == 0
found_names = glob.glob(
"__pycache__/*-pytest-{}.pyc".format(pytest.__version__)
)
found_names = glob.glob(f"__pycache__/*-pytest-{pytest.__version__}.pyc")
assert found_names, "pyc with expected tag not found in names: {}".format(
glob.glob("__pycache__/*.pyc")
)
@@ -189,9 +189,7 @@ def test_cache_reportheader_external_abspath(testdir, tmpdir_factory):
)
)
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines(
["cachedir: {abscache}".format(abscache=external_cache)]
)
result.stdout.fnmatch_lines([f"cachedir: {external_cache}"])


def test_cache_show(testdir):

@@ -937,7 +937,7 @@ def lsof_check():
out = subprocess.check_output(("lsof", "-p", str(pid))).decode()
except (OSError, subprocess.CalledProcessError, UnicodeDecodeError) as exc:
# about UnicodeDecodeError, see note on pytester
pytest.skip("could not run 'lsof' ({!r})".format(exc))
pytest.skip(f"could not run 'lsof' ({exc!r})")
yield
out2 = subprocess.check_output(("lsof", "-p", str(pid))).decode()
len1 = len([x for x in out.split("\n") if "REG" in x])
@@ -35,7 +35,7 @@ def test_real_func_loop_limit():
self.left = 1000

def __repr__(self):
return "<Evil left={left}>".format(left=self.left)
return f"<Evil left={self.left}>"

def __getattr__(self, attr):
if not self.left:
@@ -1652,7 +1652,7 @@ def test_help_and_version_after_argument_error(testdir):
assert result.ret == ExitCode.USAGE_ERROR

result = testdir.runpytest("--version")
result.stderr.fnmatch_lines(["pytest {}".format(pytest.__version__)])
result.stderr.fnmatch_lines([f"pytest {pytest.__version__}"])
assert result.ret == ExitCode.USAGE_ERROR

@@ -1797,12 +1797,7 @@ class TestPytestPluginsVariable:
res = testdir.runpytest()
assert res.ret == 2
msg = "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
res.stdout.fnmatch_lines(
[
"*{msg}*".format(msg=msg),
"*subdirectory{sep}conftest.py*".format(sep=os.sep),
]
)
res.stdout.fnmatch_lines([f"*{msg}*", f"*subdirectory{os.sep}conftest.py*"])

@pytest.mark.parametrize("use_pyargs", [True, False])
def test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs(

@@ -1830,7 +1825,7 @@ class TestPytestPluginsVariable:
if use_pyargs:
assert msg not in res.stdout.str()
else:
res.stdout.fnmatch_lines(["*{msg}*".format(msg=msg)])
res.stdout.fnmatch_lines([f"*{msg}*"])

def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest(
self, testdir

@@ -1854,12 +1849,7 @@ class TestPytestPluginsVariable:
res = testdir.runpytest_subprocess()
assert res.ret == 2
msg = "Defining 'pytest_plugins' in a non-top-level conftest is no longer supported"
res.stdout.fnmatch_lines(
[
"*{msg}*".format(msg=msg),
"*subdirectory{sep}conftest.py*".format(sep=os.sep),
]
)
res.stdout.fnmatch_lines([f"*{msg}*", f"*subdirectory{os.sep}conftest.py*"])

def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_false_positives(
self, testdir

@@ -1887,7 +1877,7 @@ def test_conftest_import_error_repr(tmpdir):
path = tmpdir.join("foo/conftest.py")
with pytest.raises(
ConftestImportFailure,
match=re.escape("RuntimeError: some error (from {})".format(path)),
match=re.escape(f"RuntimeError: some error (from {path})"),
):
try:
raise RuntimeError("some error")
@@ -251,9 +251,7 @@ class TestPDB:
assert False
"""
)
child = testdir.spawn_pytest(
"--show-capture={} --pdb {}".format(showcapture, p1)
)
child = testdir.spawn_pytest(f"--show-capture={showcapture} --pdb {p1}")
if showcapture in ("all", "log"):
child.expect("captured log")
child.expect("get rekt")

@@ -706,7 +704,7 @@ class TestPDB:
set_trace()
"""
)
child = testdir.spawn_pytest("--tb=short {} {}".format(p1, capture_arg))
child = testdir.spawn_pytest(f"--tb=short {p1} {capture_arg}")
child.expect("=== SET_TRACE ===")
before = child.before.decode("utf8")
if not capture_arg:

@@ -744,7 +742,7 @@ class TestPDB:
x = 5
"""
)
child = testdir.spawn("{} {}".format(sys.executable, p1))
child = testdir.spawn(f"{sys.executable} {p1}")
child.expect("x = 5")
child.expect("Pdb")
child.sendeof()

@@ -1085,12 +1083,12 @@ class TestTraceOption:
child.expect_exact(func)
child.expect_exact("Pdb")
child.sendline("args")
child.expect_exact("{} = 1\r\n".format(argname))
child.expect_exact(f"{argname} = 1\r\n")
child.expect_exact("Pdb")
child.sendline("c")
child.expect_exact("Pdb")
child.sendline("args")
child.expect_exact("{} = 2\r\n".format(argname))
child.expect_exact(f"{argname} = 2\r\n")
child.expect_exact("Pdb")
child.sendline("c")
child.expect_exact("> PDB continue (IO-capturing resumed) >")
@@ -1494,7 +1494,7 @@ def test_is_setup_py_not_named_setup_py(tmpdir):
@pytest.mark.parametrize("mod", ("setuptools", "distutils.core"))
def test_is_setup_py_is_a_setup_py(tmpdir, mod):
setup_py = tmpdir.join("setup.py")
setup_py.write('from {} import setup; setup(name="foo")'.format(mod))
setup_py.write(f'from {mod} import setup; setup(name="foo")')
assert _is_setup_py(setup_py)

@@ -124,7 +124,7 @@ def test_already_initialized(faulthandler_timeout, testdir):
"-mpytest",
testdir.tmpdir,
"-o",
"faulthandler_timeout={}".format(faulthandler_timeout),
f"faulthandler_timeout={faulthandler_timeout}",
)
# ensure warning is emitted if faulthandler_timeout is configured
warning_line = "*faulthandler.py*faulthandler module enabled before*"
@@ -6,9 +6,7 @@ def test_version_verbose(testdir, pytestconfig):
testdir.monkeypatch.delenv("PYTEST_DISABLE_PLUGIN_AUTOLOAD")
result = testdir.runpytest("--version", "--version")
assert result.ret == 0
result.stderr.fnmatch_lines(
["*pytest*{}*imported from*".format(pytest.__version__)]
)
result.stderr.fnmatch_lines([f"*pytest*{pytest.__version__}*imported from*"])
if pytestconfig.pluginmanager.list_plugin_distinfo():
result.stderr.fnmatch_lines(["*setuptools registered plugins:", "*at*"])

@@ -18,7 +16,7 @@ def test_version_less_verbose(testdir, pytestconfig):
result = testdir.runpytest("--version")
assert result.ret == 0
# p = py.path.local(py.__file__).dirpath()
result.stderr.fnmatch_lines(["pytest {}".format(pytest.__version__)])
result.stderr.fnmatch_lines([f"pytest {pytest.__version__}"])


def test_help(testdir):
@@ -245,9 +245,7 @@ class TestPython:
pass
"""
)
result, dom = run_and_parse(
"-o", "junit_duration_report={}".format(duration_report)
)
result, dom = run_and_parse("-o", f"junit_duration_report={duration_report}")
node = dom.find_first_by_tag("testsuite")
tnode = node.find_first_by_tag("testcase")
val = float(tnode["time"])

@@ -77,7 +77,5 @@ def test_link_resolve(testdir: pytester.Testdir) -> None:

# i.e.: Expect drive on windows because we just have drive:filename, whereas
# we expect a relative path on Linux.
expect = (
"*{}*".format(subst_p) if sys.platform == "win32" else "*sub2/test_foo.py*"
)
expect = f"*{subst_p}*" if sys.platform == "win32" else "*sub2/test_foo.py*"
result.stdout.fnmatch_lines([expect])
@@ -48,20 +48,20 @@ def test_wrap_session_notify_exception(ret_exc, testdir):

if exc == SystemExit:
assert result.stdout.lines[-3:] == [
'INTERNALERROR> File "{}", line 4, in pytest_sessionstart'.format(c1),
f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
'INTERNALERROR> raise SystemExit("boom")',
"INTERNALERROR> SystemExit: boom",
]
else:
assert result.stdout.lines[-3:] == [
'INTERNALERROR> File "{}", line 4, in pytest_sessionstart'.format(c1),
f'INTERNALERROR> File "{c1}", line 4, in pytest_sessionstart',
'INTERNALERROR> raise ValueError("boom")',
"INTERNALERROR> ValueError: boom",
]
if returncode is False:
assert result.stderr.lines == ["mainloop: caught unexpected SystemExit!"]
else:
assert result.stderr.lines == ["Exit: exiting after {}...".format(exc.__name__)]
assert result.stderr.lines == [f"Exit: exiting after {exc.__name__}..."]


@pytest.mark.parametrize("returncode", (None, 42))

@@ -1094,7 +1094,7 @@ def test_marker_expr_eval_failure_handling(testdir, expr):
pass
"""
)
expected = "ERROR: Wrong expression passed to '-m': {}: *".format(expr)
expected = f"ERROR: Wrong expression passed to '-m': {expr}: *"
result = testdir.runpytest(foo, "-m", expr)
result.stderr.fnmatch_lines([expected])
assert result.ret == ExitCode.USAGE_ERROR
@@ -27,6 +27,6 @@ def test_no_warnings(module: str) -> None:
subprocess.check_call((
sys.executable,
"-W", "error",
"-c", "__import__({!r})".format(module),
"-c", f"__import__({module!r})",
))
# fmt: on

@@ -580,20 +580,20 @@ def test_linematcher_no_matching(function) -> None:
obtained = str(e.value).splitlines()
if function == "no_fnmatch_line":
assert obtained == [
"nomatch: '{}'".format(good_pattern),
f"nomatch: '{good_pattern}'",
" and: 'cachedir: .pytest_cache'",
" and: 'collecting ... collected 1 item'",
" and: ''",
"fnmatch: '{}'".format(good_pattern),
f"fnmatch: '{good_pattern}'",
" with: 'show_fixtures_per_test.py OK'",
]
else:
assert obtained == [
" nomatch: '{}'".format(good_pattern),
f" nomatch: '{good_pattern}'",
" and: 'cachedir: .pytest_cache'",
" and: 'collecting ... collected 1 item'",
" and: ''",
"re.match: '{}'".format(good_pattern),
f"re.match: '{good_pattern}'",
" with: 'show_fixtures_per_test.py OK'",
]
@@ -472,7 +472,7 @@ def test_callinfo() -> None:
ci2 = runner.CallInfo.from_call(lambda: 0 / 0, "collect")
assert ci2.when == "collect"
assert not hasattr(ci2, "result")
assert repr(ci2) == "<CallInfo when='collect' excinfo={!r}>".format(ci2.excinfo)
assert repr(ci2) == f"<CallInfo when='collect' excinfo={ci2.excinfo!r}>"
assert str(ci2) == repr(ci2)
assert ci2.excinfo

@@ -481,7 +481,7 @@ def test_callinfo() -> None:
assert 0, "assert_msg"

ci3 = runner.CallInfo.from_call(raise_assertion, "call")
assert repr(ci3) == "<CallInfo when='call' excinfo={!r}>".format(ci3.excinfo)
assert repr(ci3) == f"<CallInfo when='call' excinfo={ci3.excinfo!r}>"
assert "\n" not in repr(ci3)

@@ -348,10 +348,10 @@ def test_rootdir_option_arg(testdir, monkeypatch, path):
"""
)

result = testdir.runpytest("--rootdir={}".format(path))
result = testdir.runpytest(f"--rootdir={path}")
result.stdout.fnmatch_lines(
[
"*rootdir: {}/root".format(testdir.tmpdir),
f"*rootdir: {testdir.tmpdir}/root",
"root/test_rootdir_option_arg.py *",
"*1 passed*",
]
@@ -758,10 +758,7 @@ class TestSkipif:
)
result = testdir.runpytest("-s", "-rsxX")
result.stdout.fnmatch_lines(
[
"*{msg1}*test_foo.py*second_condition*".format(msg1=msg1),
"*1 {msg2}*".format(msg2=msg2),
]
[f"*{msg1}*test_foo.py*second_condition*", f"*1 {msg2}*"]
)
assert result.ret == 0
@@ -1725,9 +1725,9 @@ def test_summary_stats(
tr._main_color = None

print("Based on stats: %s" % stats_arg)
print('Expect summary: "{}"; with color "{}"'.format(exp_line, exp_color))
print(f'Expect summary: "{exp_line}"; with color "{exp_color}"')
(line, color) = tr.build_summary_stats_line()
print('Actually got: "{}"; with color "{}"'.format(line, color))
print(f'Actually got: "{line}"; with color "{color}"')
assert line == exp_line
assert color == exp_color

@@ -1773,7 +1773,7 @@ class TestClassicOutputStyle:
[
"test_one.py .",
"test_two.py F",
"sub{}test_three.py .F.".format(os.sep),
f"sub{os.sep}test_three.py .F.",
"*2 failed, 3 passed in*",
]
)

@@ -1784,9 +1784,9 @@ class TestClassicOutputStyle:
[
"test_one.py::test_one PASSED",
"test_two.py::test_two FAILED",
"sub{}test_three.py::test_three_1 PASSED".format(os.sep),
"sub{}test_three.py::test_three_2 FAILED".format(os.sep),
"sub{}test_three.py::test_three_3 PASSED".format(os.sep),
f"sub{os.sep}test_three.py::test_three_1 PASSED",
f"sub{os.sep}test_three.py::test_three_2 FAILED",
f"sub{os.sep}test_three.py::test_three_3 PASSED",
"*2 failed, 3 passed in*",
]
)

@@ -2146,7 +2146,7 @@ def test_line_with_reprcrash(monkeypatch):
actual = _get_line_with_reprcrash_message(config, rep(), width) # type: ignore

assert actual == expected
if actual != "{} {}".format(mocked_verbose_word, mocked_pos):
if actual != f"{mocked_verbose_word} {mocked_pos}":
assert len(actual) <= width
assert wcswidth(actual) <= width
@@ -1086,9 +1086,9 @@ def test_error_message_with_parametrized_fixtures(testdir):
)
def test_setup_inheritance_skipping(testdir, test_name, expected_outcome):
"""Issue #4700"""
testdir.copy_example("unittest/{}".format(test_name))
testdir.copy_example(f"unittest/{test_name}")
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* {} in *".format(expected_outcome)])
result.stdout.fnmatch_lines([f"* {expected_outcome} in *"])


def test_BdbQuit(testdir):
@@ -712,7 +712,7 @@ class TestStackLevel:
file, _, func = location

assert "could not load initial conftests" in str(warning.message)
assert "config{sep}__init__.py".format(sep=os.sep) in file
assert f"config{os.sep}__init__.py" in file
assert func == "_preparse"

@pytest.mark.filterwarnings("default")

@@ -748,7 +748,7 @@ class TestStackLevel:
file, _, func = location

assert "skipped plugin 'some_plugin': thing" in str(warning.message)
assert "config{sep}__init__.py".format(sep=os.sep) in file
assert f"config{os.sep}__init__.py" in file
assert func == "_warn_about_skipped_plugins"

def test_issue4445_issue5928_mark_generator(self, testdir):