Merge pull request #12274 from pytest-dev/pre-commit-ci-update-config
[pre-commit.ci] pre-commit autoupdate
commit 1385ec117d
@@ -31,3 +31,5 @@ c9df77cbd6a365dcb73c39618e4842711817e871
 4546d5445aaefe6a03957db028c263521dfb5c4b
 # Migration to ruff / ruff format
 4588653b2497ed25976b7aaff225b889fb476756
+# Use format specifiers instead of percent format
+4788165e69d08e10fc6b9c0124083fb358e2e9b0

@@ -1,6 +1,6 @@
 repos:
 -   repo: https://github.com/astral-sh/ruff-pre-commit
-    rev: "v0.4.1"
+    rev: "v0.4.2"
     hooks:
     -   id: ruff
         args: ["--fix"]

@@ -21,7 +21,7 @@ repos:
     hooks:
     -   id: python-use-type-annotations
 -   repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v1.9.0
+    rev: v1.10.0
     hooks:
     -   id: mypy
         files: ^(src/|testing/|scripts/)
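The rest of the diff is the mechanical rewrite named in the new .git-blame-ignore-revs entry above: percent formatting replaced by f-strings. A minimal sketch of the equivalences applied throughout (variable names here are illustrative, not taken from the diff):

    name, ratio, code = "example", 3.14159, 255
    assert "hello %s" % name == f"hello {name}"    # %s   -> {}
    assert "got %r" % name == f"got {name!r}"      # %r   -> {...!r}
    assert "%.3f" % ratio == f"{ratio:.3f}"        # %.3f -> {...:.3f}
    assert "#x%02X" % code == f"#x{code:02X}"      # %02X -> {...:02X}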
@@ -8,7 +8,7 @@ if __name__ == "__main__":
     import pytest  # noqa: F401

     script = sys.argv[1:] if len(sys.argv) > 1 else ["empty.py"]
-    cProfile.run("pytest.cmdline.main(%r)" % script, "prof")
+    cProfile.run(f"pytest.cmdline.main({script!r})", "prof")
     p = pstats.Stats("prof")
     p.strip_dirs()
     p.sort_stats("cumulative")

@@ -236,7 +236,7 @@ html_theme = "flask"
 html_title = "pytest documentation"

 # A shorter title for the navigation bar. Default is the same as html_title.
-html_short_title = "pytest-%s" % release
+html_short_title = f"pytest-{release}"

 # The name of an image file (relative to this directory) to place at the top
 # of the sidebar.

@@ -60,7 +60,7 @@ def report(issues):
         kind = _get_kind(issue)
         status = issue["state"]
         number = issue["number"]
-        link = "https://github.com/pytest-dev/pytest/issues/%s/" % number
+        link = f"https://github.com/pytest-dev/pytest/issues/{number}/"
         print("----")
         print(status, kind, link)
         print(title)

@@ -69,7 +69,7 @@ def report(issues):
         # print("\n".join(lines[:3]))
         # if len(lines) > 3 or len(body) > 240:
         # print("...")
-    print("\n\nFound %s open issues" % len(issues))
+    print(f"\n\nFound {len(issues)} open issues")


 if __name__ == "__main__":

@@ -940,7 +940,7 @@ class FormattedExcinfo:
             s = self.get_source(source, line_index, excinfo, short=short)
             lines.extend(s)
             if short:
-                message = "in %s" % (entry.name)
+                message = f"in {entry.name}"
             else:
                 message = excinfo and excinfo.typename or ""
             entry_path = entry.path

@@ -616,7 +616,7 @@ class PrettyPrinter:
                 vrepr = self._safe_repr(v, context, maxlevels, level)
                 append(f"{krepr}: {vrepr}")
             context.remove(objid)
-            return "{%s}" % ", ".join(components)
+            return "{{{}}}".format(", ".join(components))

        if (issubclass(typ, list) and r is list.__repr__) or (
            issubclass(typ, tuple) and r is tuple.__repr__
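The one non-obvious rewrite in the hunk above: % formatting treats braces literally, while str.format needs them doubled to survive as literals, hence the "{{{}}}" template. A quick check (values are illustrative):

    components = ["'a': 1", "'b': 2"]
    assert "{%s}" % ", ".join(components) == "{'a': 1, 'b': 2}"
    assert "{{{}}}".format(", ".join(components)) == "{'a': 1, 'b': 2}"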
@@ -104,7 +104,7 @@ class TerminalWriter:
         if self.hasmarkup:
             esc = [self._esctable[name] for name, on in markup.items() if on]
             if esc:
-                text = "".join("\x1b[%sm" % cod for cod in esc) + text + "\x1b[0m"
+                text = "".join(f"\x1b[{cod}m" for cod in esc) + text + "\x1b[0m"
         return text

     def sep(

@@ -659,7 +659,7 @@ class LocalPath:
             )
         if "basename" in kw:
             if "purebasename" in kw or "ext" in kw:
-                raise ValueError("invalid specification %r" % kw)
+                raise ValueError(f"invalid specification {kw!r}")
         else:
             pb = kw.setdefault("purebasename", purebasename)
             try:

@@ -705,7 +705,7 @@ class LocalPath:
             elif name == "ext":
                 res.append(ext)
             else:
-                raise ValueError("invalid part specification %r" % name)
+                raise ValueError(f"invalid part specification {name!r}")
         return res

     def dirpath(self, *args, **kwargs):

@@ -1026,7 +1026,7 @@ class LocalPath:
         return self.stat().atime

     def __repr__(self):
-        return "local(%r)" % self.strpath
+        return f"local({self.strpath!r})"

     def __str__(self):
         """Return string representation of the Path."""

@@ -101,7 +101,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)
         state = self.config.stash[assertstate_key]
         if self._early_rewrite_bailout(name, state):
             return None
-        state.trace("find_module called for: %s" % name)
+        state.trace(f"find_module called for: {name}")

         # Type ignored because mypy is confused about the `self` binding here.
         spec = self._find_spec(name, path)  # type: ignore

@@ -273,7 +273,7 @@ class AssertionRewritingHook(importlib.abc.MetaPathFinder, importlib.abc.Loader)

             self.config.issue_config_time_warning(
                 PytestAssertRewriteWarning(
-                    "Module already imported so cannot be rewritten: %s" % name
+                    f"Module already imported so cannot be rewritten: {name}"
                 ),
                 stacklevel=5,
             )

@@ -374,21 +374,21 @@ def _read_pyc(
             return None
         # Check for invalid or out of date pyc file.
         if len(data) != (16):
-            trace("_read_pyc(%s): invalid pyc (too short)" % source)
+            trace(f"_read_pyc({source}): invalid pyc (too short)")
             return None
         if data[:4] != importlib.util.MAGIC_NUMBER:
-            trace("_read_pyc(%s): invalid pyc (bad magic number)" % source)
+            trace(f"_read_pyc({source}): invalid pyc (bad magic number)")
             return None
         if data[4:8] != b"\x00\x00\x00\x00":
-            trace("_read_pyc(%s): invalid pyc (unsupported flags)" % source)
+            trace(f"_read_pyc({source}): invalid pyc (unsupported flags)")
             return None
         mtime_data = data[8:12]
         if int.from_bytes(mtime_data, "little") != mtime & 0xFFFFFFFF:
-            trace("_read_pyc(%s): out of date" % source)
+            trace(f"_read_pyc({source}): out of date")
             return None
         size_data = data[12:16]
         if int.from_bytes(size_data, "little") != size & 0xFFFFFFFF:
-            trace("_read_pyc(%s): invalid pyc (incorrect size)" % source)
+            trace(f"_read_pyc({source}): invalid pyc (incorrect size)")
             return None
         try:
             co = marshal.load(fp)
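For context on the checks above: since PEP 552 a pyc file starts with a 16-byte header, which is exactly what the four comparisons validate. A sketch of reading it directly (the path is hypothetical and assumes the pyc already exists):

    import importlib.util

    with open("__pycache__/example.cpython-312.pyc", "rb") as fp:  # hypothetical path
        data = fp.read(16)

    magic = data[:4]                              # must equal importlib.util.MAGIC_NUMBER
    flags = int.from_bytes(data[4:8], "little")   # 0 means timestamp-based invalidation
    mtime = int.from_bytes(data[8:12], "little")  # source mtime, truncated to 32 bits
    size = int.from_bytes(data[12:16], "little")  # source size, truncated to 32 bits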
@@ -396,7 +396,7 @@ def _read_pyc(
             trace(f"_read_pyc({source}): marshal.load error {e}")
             return None
         if not isinstance(co, types.CodeType):
-            trace("_read_pyc(%s): not a code object" % source)
+            trace(f"_read_pyc({source}): not a code object")
             return None
         return co


@@ -292,7 +292,7 @@ def _diff_text(left: str, right: str, verbose: int = 0) -> List[str]:
         if i > 42:
             i -= 10  # Provide some context
             explanation = [
-                "Skipping %s identical leading characters in diff, use -v to show" % i
+                f"Skipping {i} identical leading characters in diff, use -v to show"
             ]
             left = left[i:]
             right = right[i:]

@@ -493,7 +493,7 @@ def _compare_eq_dict(
     common = set_left.intersection(set_right)
     same = {k: left[k] for k in common if left[k] == right[k]}
     if same and verbose < 2:
-        explanation += ["Omitting %s identical items, use -vv to show" % len(same)]
+        explanation += [f"Omitting {len(same)} identical items, use -vv to show"]
     elif same:
         explanation += ["Common items:"]
         explanation += highlighter(pprint.pformat(same)).splitlines()

@@ -560,7 +560,7 @@ def _compare_eq_cls(
     if same or diff:
         explanation += [""]
     if same and verbose < 2:
-        explanation.append("Omitting %s identical items, use -vv to show" % len(same))
+        explanation.append(f"Omitting {len(same)} identical items, use -vv to show")
     elif same:
         explanation += ["Matching attributes:"]
         explanation += highlighter(pprint.pformat(same)).splitlines()

@@ -590,7 +590,7 @@ def _notin_text(term: str, text: str, verbose: int = 0) -> List[str]:
     tail = text[index + len(term) :]
     correct_text = head + tail
     diff = _diff_text(text, correct_text, verbose)
-    newdiff = ["%s is contained here:" % saferepr(term, maxsize=42)]
+    newdiff = [f"{saferepr(term, maxsize=42)} is contained here:"]
     for line in diff:
         if line.startswith("Skipping"):
             continue

@@ -332,7 +332,7 @@ class LFPlugin:

     def pytest_report_collectionfinish(self) -> Optional[str]:
         if self.active and self.config.getoption("verbose") >= 0:
-            return "run-last-failure: %s" % self._report_status
+            return f"run-last-failure: {self._report_status}"
         return None

     def pytest_runtest_logreport(self, report: TestReport) -> None:

@@ -588,21 +588,21 @@ def cacheshow(config: Config, session: Session) -> int:
     dummy = object()
     basedir = config.cache._cachedir
     vdir = basedir / Cache._CACHE_PREFIX_VALUES
-    tw.sep("-", "cache values for %r" % glob)
+    tw.sep("-", f"cache values for {glob!r}")
     for valpath in sorted(x for x in vdir.rglob(glob) if x.is_file()):
         key = str(valpath.relative_to(vdir))
         val = config.cache.get(key, dummy)
         if val is dummy:
-            tw.line("%s contains unreadable content, will be ignored" % key)
+            tw.line(f"{key} contains unreadable content, will be ignored")
         else:
-            tw.line("%s contains:" % key)
+            tw.line(f"{key} contains:")
             for line in pformat(val).splitlines():
                 tw.line(" " + line)

     ddir = basedir / Cache._CACHE_PREFIX_DIRS
     if ddir.is_dir():
         contents = sorted(ddir.rglob(glob))
-        tw.sep("-", "cache directories for %r" % glob)
+        tw.sep("-", f"cache directories for {glob!r}")
         for p in contents:
             # if p.is_dir():
             # print("%s/" % p.relative_to(basedir))
@@ -738,7 +738,7 @@ class CaptureManager:
         if self.is_globally_capturing():
             return "global"
         if self._capture_fixture:
-            return "fixture %s" % self._capture_fixture.request.fixturename
+            return f"fixture {self._capture_fixture.request.fixturename}"
         return False

     # Global capturing control

@@ -798,7 +798,7 @@ class PytestPluginManager(PluginManager):
             if arg.startswith("no:"):
                 name = arg[3:]
                 if name in essential_plugins:
-                    raise UsageError("plugin %s cannot be disabled" % name)
+                    raise UsageError(f"plugin {name} cannot be disabled")

                 # PR #4304: remove stepwise if cacheprovider is blocked.
                 if name == "cacheprovider":

@@ -847,9 +847,9 @@ class PytestPluginManager(PluginManager):
         # "terminal" or "capture". Those plugins are registered under their
         # basename for historic purposes but must be imported with the
         # _pytest prefix.
-        assert isinstance(modname, str), (
-            "module name as text required, got %r" % modname
-        )
+        assert isinstance(
+            modname, str
+        ), f"module name as text required, got {modname!r}"
         if self.is_blocked(modname) or self.get_plugin(modname) is not None:
             return


@@ -892,8 +892,7 @@ def _get_plugin_specs_as_list(
     if isinstance(specs, collections.abc.Sequence):
         return list(specs)
     raise UsageError(
-        "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %r"
-        % specs
+        f"Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: {specs!r}"
     )


@@ -1185,7 +1184,7 @@ class Config:
         res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo)
         if not any(res):
             for line in str(excrepr).split("\n"):
-                sys.stderr.write("INTERNALERROR> %s\n" % line)
+                sys.stderr.write(f"INTERNALERROR> {line}\n")
             sys.stderr.flush()

     def cwd_relative_nodeid(self, nodeid: str) -> str:

@@ -1435,7 +1434,7 @@ class Config:

         if not isinstance(minver, str):
             raise pytest.UsageError(
-                "%s: 'minversion' must be a single value" % self.inipath
+                f"{self.inipath}: 'minversion' must be a single value"
             )

         if Version(minver) > Version(pytest.__version__):

@@ -313,23 +313,23 @@ class Argument:
         for opt in opts:
             if len(opt) < 2:
                 raise ArgumentError(
-                    "invalid option string %r: "
-                    "must be at least two characters long" % opt,
+                    f"invalid option string {opt!r}: "
+                    "must be at least two characters long",
                     self,
                 )
             elif len(opt) == 2:
                 if not (opt[0] == "-" and opt[1] != "-"):
                     raise ArgumentError(
-                        "invalid short option string %r: "
-                        "must be of the form -x, (x any non-dash char)" % opt,
+                        f"invalid short option string {opt!r}: "
+                        "must be of the form -x, (x any non-dash char)",
                         self,
                     )
                 self._short_opts.append(opt)
             else:
                 if not (opt[0:2] == "--" and opt[2] != "-"):
                     raise ArgumentError(
-                        "invalid long option string %r: "
-                        "must start with --, followed by non-dash" % opt,
+                        f"invalid long option string {opt!r}: "
+                        "must start with --, followed by non-dash",
                         self,
                     )
                 self._long_opts.append(opt)
@@ -383,7 +383,7 @@ class OptionGroup:
             name for opt in self.options for name in opt.names()
         )
         if conflict:
-            raise ValueError("option names %s already added" % conflict)
+            raise ValueError(f"option names {conflict} already added")
         option = Argument(*opts, **attrs)
         self._addoption_instance(option, shortupper=False)


@@ -441,7 +441,9 @@ class MyOptionParser(argparse.ArgumentParser):
         if unrecognized:
             for arg in unrecognized:
                 if arg and arg[0] == "-":
-                    lines = ["unrecognized arguments: %s" % (" ".join(unrecognized))]
+                    lines = [
+                        "unrecognized arguments: {}".format(" ".join(unrecognized))
+                    ]
                     for k, v in sorted(self.extra_info.items()):
                         lines.append(f" {k}: {v}")
                     self.error("\n".join(lines))

@@ -520,7 +522,7 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter):
                 continue
             if not option.startswith("--"):
                 raise ArgumentError(
-                    'long optional argument without "--": [%s]' % (option), option
+                    f'long optional argument without "--": [{option}]', option
                 )
             xxoption = option[2:]
             shortened = xxoption.replace("-", "")

@@ -181,8 +181,7 @@ class pytestPDB:
                 else:
                     tw.sep(
                         ">",
-                        "PDB continue (IO-capturing resumed for %s)"
-                        % capturing,
+                        f"PDB continue (IO-capturing resumed for {capturing})",
                     )
                 assert capman is not None
                 capman.resume()

@@ -374,7 +374,7 @@ class DoctestItem(Item):
                 ).split("\n")
             else:
                 inner_excinfo = ExceptionInfo.from_exc_info(failure.exc_info)
-                lines += ["UNEXPECTED EXCEPTION: %s" % repr(inner_excinfo.value)]
+                lines += [f"UNEXPECTED EXCEPTION: {inner_excinfo.value!r}"]
                 lines += [
                     x.strip("\n") for x in traceback.format_exception(*failure.exc_info)
                 ]

@@ -382,7 +382,7 @@ class DoctestItem(Item):
         return ReprFailDoctest(reprlocation_lines)

     def reportinfo(self) -> Tuple[Union["os.PathLike[str]", str], Optional[int], str]:
-        return self.path, self.dtest.lineno, "[doctest] %s" % self.name
+        return self.path, self.dtest.lineno, f"[doctest] {self.name}"


 def _get_flag_lookup() -> Dict[str, int]:

@@ -563,7 +563,7 @@ class DoctestModule(Module):
             module = self.obj
         except Collector.CollectError:
             if self.config.getvalue("doctest_ignore_import_errors"):
-                skip("unable to import module %r" % self.path)
+                skip(f"unable to import module {self.path!r}")
             else:
                 raise


@@ -675,7 +675,7 @@ class TopRequest(FixtureRequest):
         return self._pyfuncitem

     def __repr__(self) -> str:
-        return "<FixtureRequest for %r>" % (self.node)
+        return f"<FixtureRequest for {self.node!r}>"

     def _fillfixtures(self) -> None:
         item = self._pyfuncitem

@@ -1897,7 +1897,7 @@ def _showfixtures_main(config: Config, session: "Session") -> None:
             continue
         tw.write(f"{argname}", green=True)
         if fixturedef.scope != "function":
-            tw.write(" [%s scope]" % fixturedef.scope, cyan=True)
+            tw.write(f" [{fixturedef.scope} scope]", cyan=True)
         tw.write(f" -- {prettypath}", yellow=True)
         tw.write("\n")
         doc = inspect.getdoc(fixturedef.func)

@@ -121,11 +121,11 @@ def pytest_cmdline_parse() -> Generator[None, Config, Config]:
         )
         config.trace.root.setwriter(debugfile.write)
         undo_tracing = config.pluginmanager.enable_tracing()
-        sys.stderr.write("writing pytest debug information to %s\n" % path)
+        sys.stderr.write(f"writing pytest debug information to {path}\n")

         def unset_tracing() -> None:
             debugfile.close()
-            sys.stderr.write("wrote pytest debug information to %s\n" % debugfile.name)
+            sys.stderr.write(f"wrote pytest debug information to {debugfile.name}\n")
             config.trace.root.setwriter(None)
             undo_tracing()


@@ -185,7 +185,7 @@ def showhelp(config: Config) -> None:
         if help is None:
             raise TypeError(f"help argument cannot be None for {name}")
         spec = f"{name} ({type}):"
-        tw.write(" %s" % spec)
+        tw.write(f" {spec}")
         spec_len = len(spec)
         if spec_len > (indent_len - 3):
             # Display help starting at a new line.

@@ -53,9 +53,9 @@ def bin_xml_escape(arg: object) -> str:
     def repl(matchobj: Match[str]) -> str:
         i = ord(matchobj.group())
         if i <= 0xFF:
-            return "#x%02X" % i
+            return f"#x{i:02X}"
         else:
-            return "#x%04X" % i
+            return f"#x{i:04X}"

     # The spec range of valid chars is:
     # Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD] | [#x10000-#x10FFFF]
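The repl helper in the hunk above is a replacement callable for re.sub, and the hex format specs carry over from %-formatting to f-strings unchanged. A small self-contained sketch of the same approach (the character class is simplified relative to the real precomputed range):

    import re

    def repl(m: "re.Match[str]") -> str:
        i = ord(m.group())
        return f"#x{i:02X}" if i <= 0xFF else f"#x{i:04X}"

    assert re.sub("[\x00-\x08]", repl, "a\x01b") == "a#x01b"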
@@ -149,7 +149,7 @@ class _NodeReporter:
         self.attrs = temp_attrs

     def to_xml(self) -> ET.Element:
-        testcase = ET.Element("testcase", self.attrs, time="%.3f" % self.duration)
+        testcase = ET.Element("testcase", self.attrs, time=f"{self.duration:.3f}")
         properties = self.make_properties_node()
         if properties is not None:
             testcase.append(properties)

@@ -670,7 +670,7 @@ class LogXML:
             failures=str(self.stats["failure"]),
             skipped=str(self.stats["skipped"]),
             tests=str(numtests),
-            time="%.3f" % suite_time_delta,
+            time=f"{suite_time_delta:.3f}",
             timestamp=datetime.fromtimestamp(self.suite_start_time).isoformat(),
             hostname=platform.node(),
         )

@@ -122,7 +122,7 @@ def pytest_cmdline_main(config: Config) -> Optional[Union[int, ExitCode]]:
             parts = line.split(":", 1)
             name = parts[0]
             rest = parts[1] if len(parts) == 2 else ""
-            tw.write("@pytest.mark.%s:" % name, bold=True)
+            tw.write(f"@pytest.mark.{name}:", bold=True)
             tw.line(rest)
         tw.line()
     config._ensure_unconfigure()

@@ -552,9 +552,9 @@ class MarkGenerator:
                 fail(f"Unknown '{name}' mark, did you mean 'parametrize'?")

             warnings.warn(
-                "Unknown pytest.mark.%s - is this a typo? You can register "
+                f"Unknown pytest.mark.{name} - is this a typo? You can register "
                 "custom marks to avoid this warning - for details, see "
-                "https://docs.pytest.org/en/stable/how-to/mark.html" % name,
+                "https://docs.pytest.org/en/stable/how-to/mark.html",
                 PytestUnknownMarkWarning,
                 2,
             )
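The rewritten warning above relies on Python concatenating adjacent string literals at compile time; an f-string can sit next to plain literals, so only the piece carrying a placeholder needs the prefix. A minimal check (the mark name is illustrative):

    name = "slow"
    msg = (
        f"Unknown pytest.mark.{name} - is this a typo? You can register "
        "custom marks to avoid this warning - for details, see "
        "https://docs.pytest.org/en/stable/how-to/mark.html"
    )
    assert msg.startswith("Unknown pytest.mark.slow - is this a typo?")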
@@ -65,7 +65,7 @@ def pytest_unconfigure(config: Config) -> None:
         # Write summary.
         tr.write_sep("=", "Sending information to Paste Service")
         pastebinurl = create_new_paste(sessionlog)
-        tr.write_line("pastebin session-log: %s\n" % pastebinurl)
+        tr.write_line(f"pastebin session-log: {pastebinurl}\n")


 def create_new_paste(contents: Union[str, bytes]) -> str:

@@ -85,7 +85,7 @@ def create_new_paste(contents: Union[str, bytes]) -> str:
             urlopen(url, data=urlencode(params).encode("ascii")).read().decode("utf-8")
         )
     except OSError as exc_info:  # urllib errors
-        return "bad response: %s" % exc_info
+        return f"bad response: {exc_info}"
     m = re.search(r'href="/raw/(\w+)"', response)
     if m:
         return f"{url}/show/{m.group(1)}"

@@ -182,13 +182,13 @@ class LsofFdLeakChecker:
         leaked_files = [t for t in lines2 if t[0] in new_fds]
         if leaked_files:
             error = [
-                "***** %s FD leakage detected" % len(leaked_files),
+                f"***** {len(leaked_files)} FD leakage detected",
                 *(str(f) for f in leaked_files),
                 "*** Before:",
                 *(str(f) for f in lines1),
                 "*** After:",
                 *(str(f) for f in lines2),
-                "***** %s FD leakage detected" % len(leaked_files),
+                f"***** {len(leaked_files)} FD leakage detected",
                 "*** function {}:{}: {} ".format(*item.location),
                 "See issue #2366",
             ]
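Unrelated to the formatting change, but useful for reading the hunk above: the error list splices generator expressions in place with * unpacking, flattening each sequence into the surrounding list literal. A reduced example (data is made up):

    leaked = ["fd 7", "fd 9"]
    error = [
        f"***** {len(leaked)} FD leakage detected",
        *(str(f) for f in leaked),
        "See issue #2366",
    ]
    assert error == ["***** 2 FD leakage detected", "fd 7", "fd 9", "See issue #2366"]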
@@ -313,7 +313,7 @@ class HookRecorder:
                 del self.calls[i]
                 return call
         lines = [f"could not find call {name!r}, in:"]
-        lines.extend([" %s" % x for x in self.calls])
+        lines.extend([f" {x}" for x in self.calls])
         fail("\n".join(lines))

     def getcall(self, name: str) -> RecordedHookCall:

@@ -1204,7 +1204,9 @@ class Pytester:
             if str(x).startswith("--basetemp"):
                 break
         else:
-            new_args.append("--basetemp=%s" % self.path.parent.joinpath("basetemp"))
+            new_args.append(
+                "--basetemp={}".format(self.path.parent.joinpath("basetemp"))
+            )
         return new_args

     def parseconfig(self, *args: Union[str, "os.PathLike[str]"]) -> Config:

@@ -1485,7 +1487,7 @@ class Pytester:
         """
         __tracebackhide__ = True
         p = make_numbered_dir(root=self.path, prefix="runpytest-", mode=0o700)
-        args = ("--basetemp=%s" % p, *args)
+        args = (f"--basetemp={p}", *args)
         plugins = [x for x in self.plugins if isinstance(x, str)]
         if plugins:
             args = ("-p", plugins[0], *args)

@@ -1593,7 +1595,7 @@ class LineMatcher:
                     self._log("matched: ", repr(line))
                     break
             else:
                 msg = "line %r not found in output" % line
-                msg = "line %r not found in output" % line
+                msg = f"line {line!r} not found in output"
                 self._log(msg)
                 self._fail(msg)


@@ -1605,7 +1607,7 @@ class LineMatcher:
         for i, line in enumerate(self.lines):
             if fnline == line or fnmatch(line, fnline):
                 return self.lines[i + 1 :]
-        raise ValueError("line %r not found in output" % fnline)
+        raise ValueError(f"line {fnline!r} not found in output")

     def _log(self, *args) -> None:
         self._log_output.append(" ".join(str(x) for x in args))

@@ -1690,7 +1692,7 @@ class LineMatcher:
                     started = True
                     break
                 elif match_func(nextline, line):
-                    self._log("%s:" % match_nickname, repr(line))
+                    self._log(f"{match_nickname}:", repr(line))
                     self._log(
                         "{:>{width}}".format("with:", width=wnick), repr(nextline)
                     )

@@ -224,7 +224,7 @@ def pytest_pycollect_makeitem(
             filename, lineno = getfslineno(obj)
             warnings.warn_explicit(
                 message=PytestCollectionWarning(
-                    "cannot collect %r because it is not a function." % name
+                    f"cannot collect {name!r} because it is not a function."
                 ),
                 category=None,
                 filename=str(filename),

@@ -454,7 +454,7 @@ class ApproxScalar(ApproxBase):
             return False

         # Return true if the two numbers are within the tolerance.
-        result: bool = abs(self.expected - actual) <= self.tolerance  # type: ignore[arg-type]
+        result: bool = abs(self.expected - actual) <= self.tolerance
         return result

     # Ignore type because of https://github.com/python/mypy/issues/4266.

@@ -232,10 +232,10 @@ def _report_unserialization_failure(
     url = "https://github.com/pytest-dev/pytest/issues"
     stream = StringIO()
     pprint("-" * 100, stream=stream)
-    pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream)
-    pprint("report_name: %s" % report_class, stream=stream)
+    pprint(f"INTERNALERROR: Unknown entry type returned: {type_name}", stream=stream)
+    pprint(f"report_name: {report_class}", stream=stream)
     pprint(reportdict, stream=stream)
-    pprint("Please report this bug at %s" % url, stream=stream)
+    pprint(f"Please report this bug at {url}", stream=stream)
     pprint("-" * 100, stream=stream)
     raise RuntimeError(stream.getvalue())


@@ -90,7 +90,7 @@ def pytest_terminal_summary(terminalreporter: "TerminalReporter") -> None:
     if not durations:
         tr.write_sep("=", "slowest durations")
     else:
-        tr.write_sep("=", "slowest %s durations" % durations)
+        tr.write_sep("=", f"slowest {durations} durations")
         dlist = dlist[:durations]

     for i, rep in enumerate(dlist):

@@ -167,7 +167,7 @@ def pytest_runtest_call(item: Item) -> None:
         del sys.last_value
         del sys.last_traceback
         if sys.version_info >= (3, 12, 0):
-            del sys.last_exc  # type: ignore[attr-defined]
+            del sys.last_exc
     except AttributeError:
         pass
     try:

@@ -177,7 +177,7 @@ def pytest_runtest_call(item: Item) -> None:
         sys.last_type = type(e)
         sys.last_value = e
         if sys.version_info >= (3, 12, 0):
-            sys.last_exc = e  # type: ignore[attr-defined]
+            sys.last_exc = e
         assert e.__traceback__ is not None
         # Skip *this* frame
         sys.last_traceback = e.__traceback__.tb_next
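Background for the two hunks above: Python 3.12 added sys.last_exc alongside the legacy sys.last_type/last_value/last_traceback trio, hence the version guard; the type: ignore comments could be dropped presumably because the typeshed bundled with mypy v1.10.0 knows the attribute. A sketch of the same pattern:

    import sys

    try:
        1 / 0
    except ZeroDivisionError as e:
        sys.last_type, sys.last_value = type(e), e
        sys.last_traceback = e.__traceback__
        if sys.version_info >= (3, 12):
            sys.last_exc = e  # new in Python 3.12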
@@ -117,7 +117,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
             result = eval(condition_code, globals_)
         except SyntaxError as exc:
             msglines = [
-                "Error evaluating %r condition" % mark.name,
+                f"Error evaluating {mark.name!r} condition",
                 " " + condition,
                 " " + " " * (exc.offset or 0) + "^",
                 "SyntaxError: invalid syntax",

@@ -125,7 +125,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
             fail("\n".join(msglines), pytrace=False)
         except Exception as exc:
             msglines = [
-                "Error evaluating %r condition" % mark.name,
+                f"Error evaluating {mark.name!r} condition",
                 " " + condition,
                 *traceback.format_exception_only(type(exc), exc),
             ]

@@ -137,7 +137,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
             result = bool(condition)
         except Exception as exc:
             msglines = [
-                "Error evaluating %r condition as a boolean" % mark.name,
+                f"Error evaluating {mark.name!r} condition as a boolean",
                 *traceback.format_exception_only(type(exc), exc),
             ]
             fail("\n".join(msglines), pytrace=False)

@@ -149,7 +149,7 @@ def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool,
         else:
             # XXX better be checked at collection time
             msg = (
-                "Error evaluating %r: " % mark.name
+                f"Error evaluating {mark.name!r}: "
                 + "you need to specify reason=STRING when using booleans as conditions."
             )
             fail(msg, pytrace=False)

@@ -640,7 +640,7 @@ class TerminalReporter:
                 self._write_progress_information_filling_space()
             else:
                 self.ensure_newline()
-                self._tw.write("[%s]" % rep.node.gateway.id)
+                self._tw.write(f"[{rep.node.gateway.id}]")
                 if self._show_progress_info:
                     self._tw.write(
                         self._get_progress_information_message() + " ", cyan=True

@@ -818,7 +818,9 @@ class TerminalReporter:

         plugininfo = config.pluginmanager.list_plugin_distinfo()
         if plugininfo:
-            result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
+            result.append(
+                "plugins: {}".format(", ".join(_plugin_nameversions(plugininfo)))
+            )
         return result

     def pytest_collection_finish(self, session: "Session") -> None:

@@ -551,7 +551,7 @@ def batch_make_numbered_dirs(rootdir, repeats):
     for i in range(repeats):
         dir_ = local.make_numbered_dir(prefix="repro-", rootdir=rootdir)
         file_ = dir_.join("foo")
-        file_.write_text("%s" % i, encoding="utf-8")
+        file_.write_text(f"{i}", encoding="utf-8")
         actual = int(file_.read_text(encoding="utf-8"))
         assert (
             actual == i

@@ -563,9 +563,9 @@ def batch_make_numbered_dirs(rootdir, repeats):
 class TestLocalPath(CommonFSTests):
     def test_join_normpath(self, tmpdir):
         assert tmpdir.join(".") == tmpdir
-        p = tmpdir.join("../%s" % tmpdir.basename)
+        p = tmpdir.join(f"../{tmpdir.basename}")
         assert p == tmpdir
-        p = tmpdir.join("..//%s/" % tmpdir.basename)
+        p = tmpdir.join(f"..//{tmpdir.basename}/")
         assert p == tmpdir

     @skiponwin32

@@ -722,7 +722,7 @@ class TestLocalPath(CommonFSTests):

     @pytest.mark.parametrize("bin", (False, True))
     def test_dump(self, tmpdir, bin):
-        path = tmpdir.join("dumpfile%s" % int(bin))
+        path = tmpdir.join(f"dumpfile{int(bin)}")
         try:
             d = {"answer": 42}
             path.dump(d, bin=bin)

@@ -400,7 +400,7 @@ class TestGeneralUsage:

         for name, value in vars(hookspec).items():
             if name.startswith("pytest_"):
-                assert value.__doc__, "no docstring for %s" % name
+                assert value.__doc__, f"no docstring for {name}"

     def test_initialization_error_issue49(self, pytester: Pytester) -> None:
         pytester.makeconftest(

@@ -973,7 +973,7 @@ class TestDurations:
        for x in tested:
            for y in ("call",):  # 'setup', 'call', 'teardown':
                for line in result.stdout.lines:
-                    if ("test_%s" % x) in line and y in line:
+                    if (f"test_{x}") in line and y in line:
                        break
                else:
                    raise AssertionError(f"not found {x} {y}")

@@ -986,7 +986,7 @@ class TestDurations:
        for x in "123":
            for y in ("call",):  # 'setup', 'call', 'teardown':
                for line in result.stdout.lines:
-                    if ("test_%s" % x) in line and y in line:
+                    if (f"test_{x}") in line and y in line:
                        break
                else:
                    raise AssertionError(f"not found {x} {y}")

@@ -1406,7 +1406,7 @@ raise ValueError()
         mod.f()

     # emulate the issue described in #1984
-    attr = "__%s__" % reason
+    attr = f"__{reason}__"
     getattr(excinfo.value, attr).__traceback__ = None

     r = excinfo.getrepr()

@@ -10,4 +10,4 @@ if __name__ == "__main__":
     executable = os.path.join(os.getcwd(), "dist", "runtests_script", "runtests_script")
     if sys.platform.startswith("win"):
         executable += ".exe"
-    sys.exit(os.system("%s tests" % executable))
+    sys.exit(os.system(f"{executable} tests"))

@@ -144,7 +144,7 @@ def test_big_repr():
 def test_repr_on_newstyle() -> None:
     class Function:
         def __repr__(self):
-            return "<%s>" % (self.name)  # type: ignore[attr-defined]
+            return f"<{self.name}>"  # type: ignore[attr-defined]

     assert saferepr(Function())


@@ -36,9 +36,9 @@ class TestModule:
             [
                 "*import*mismatch*",
                 "*imported*test_whatever*",
-                "*%s*" % p1,
+                f"*{p1}*",
                 "*not the same*",
-                "*%s*" % p2,
+                f"*{p2}*",
                 "*HINT*",
             ]
         )

@@ -2268,18 +2268,17 @@ class TestFixtureMarker:
         This was a regression introduced in the fix for #736.
         """
         pytester.makepyfile(
-            """
+            f"""
             import pytest

             @pytest.fixture(params=[1, 2])
             def fixt(request):
                 return request.param

-            @pytest.mark.parametrize(%s, [(3, 'x'), (4, 'x')])
+            @pytest.mark.parametrize({param_args}, [(3, 'x'), (4, 'x')])
             def test_foo(fixt, val):
                 pass
         """
-            % param_args
         )
         reprec = pytester.inline_run()
         reprec.assertoutcome(passed=2)
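Several test hunks below follow the same template change as this one: the source handed to makepyfile moves from %-substitution to an f-prefixed triple-quoted string, so the placeholder is interpolated directly and any literal braces in such a template would need doubling. A reduced sketch (the param_args value is illustrative):

    param_args = '"fixt, val"'
    source = f"""
    import pytest

    @pytest.mark.parametrize({param_args}, [(3, 'x'), (4, 'x')])
    def test_foo(fixt, val):
        pass
    """
    assert '@pytest.mark.parametrize("fixt, val"' in source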
@@ -101,7 +101,7 @@ class TestImportHookInstallation:
         """,
         }
         pytester.makepyfile(**contents)
-        result = pytester.runpytest_subprocess("--assert=%s" % mode)
+        result = pytester.runpytest_subprocess(f"--assert={mode}")
         if mode == "plain":
             expected = "E AssertionError"
         elif mode == "rewrite":

@@ -163,7 +163,7 @@ class TestImportHookInstallation:
         """,
         }
         pytester.makepyfile(**contents)
-        result = pytester.runpytest_subprocess("--assert=%s" % mode)
+        result = pytester.runpytest_subprocess(f"--assert={mode}")
         if mode == "plain":
             expected = "E AssertionError"
         elif mode == "rewrite":

@@ -280,7 +280,7 @@ class TestImportHookInstallation:
         }
         pytester.makepyfile(**contents)
         result = pytester.run(
-            sys.executable, "mainwrapper.py", "-s", "--assert=%s" % mode
+            sys.executable, "mainwrapper.py", "-s", f"--assert={mode}"
         )
         if mode == "plain":
             expected = "E AssertionError"

@@ -308,9 +308,7 @@ class TestAssertionRewrite:
         )
         result = pytester.runpytest()
         assert result.ret == 1
-        result.stdout.fnmatch_lines(
-            ["*AssertionError*%s*" % repr((1, 2)), "*assert 1 == 2*"]
-        )
+        result.stdout.fnmatch_lines([f"*AssertionError*{(1, 2)!r}*", "*assert 1 == 2*"])

     def test_assertion_message_expr(self, pytester: Pytester) -> None:
         pytester.makepyfile(

@@ -908,7 +906,7 @@ def test_rewritten():
         assert test_optimized.__doc__ is None"""
     )
     p = make_numbered_dir(root=Path(pytester.path), prefix="runpytest-")
-    tmp = "--basetemp=%s" % p
+    tmp = f"--basetemp={p}"
     with monkeypatch.context() as mp:
         mp.setenv("PYTHONOPTIMIZE", "2")
         mp.delenv("PYTHONDONTWRITEBYTECODE", raising=False)

@@ -191,7 +191,7 @@ def test_cache_reportheader(
         monkeypatch.delenv("TOX_ENV_DIR", raising=False)
         expected = ".pytest_cache"
     result = pytester.runpytest("-v")
-    result.stdout.fnmatch_lines(["cachedir: %s" % expected])
+    result.stdout.fnmatch_lines([f"cachedir: {expected}"])


 def test_cache_reportheader_external_abspath(

@@ -103,16 +103,15 @@ class TestCaptureManager:
 def test_capturing_unicode(pytester: Pytester, method: str) -> None:
     obj = "'b\u00f6y'"
     pytester.makepyfile(
-        """\
+        f"""\
         # taken from issue 227 from nosetests
         def test_unicode():
             import sys
             print(sys.stdout)
-            print(%s)
+            print({obj})
         """
-        % obj
     )
-    result = pytester.runpytest("--capture=%s" % method)
+    result = pytester.runpytest(f"--capture={method}")
     result.stdout.fnmatch_lines(["*1 passed*"])


@@ -124,7 +123,7 @@ def test_capturing_bytes_in_utf8_encoding(pytester: Pytester, method: str) -> No
             print('b\\u00f6y')
         """
     )
-    result = pytester.runpytest("--capture=%s" % method)
+    result = pytester.runpytest(f"--capture={method}")
     result.stdout.fnmatch_lines(["*1 passed*"])


@@ -275,14 +275,14 @@ class TestCollectFS:
         # collects the tests
         for dirname in ("a", "b", "c"):
             items, reprec = pytester.inline_genitems(tmp_path.joinpath(dirname))
-            assert [x.name for x in items] == ["test_%s" % dirname]
+            assert [x.name for x in items] == [f"test_{dirname}"]

         # changing cwd to each subdirectory and running pytest without
         # arguments collects the tests in that directory normally
         for dirname in ("a", "b", "c"):
             monkeypatch.chdir(pytester.path.joinpath(dirname))
             items, reprec = pytester.inline_genitems()
-            assert [x.name for x in items] == ["test_%s" % dirname]
+            assert [x.name for x in items] == [f"test_{dirname}"]


 class TestCollectPluginHookRelay:

@@ -572,7 +572,7 @@ class TestSession:
     def test_collect_custom_nodes_multi_id(self, pytester: Pytester) -> None:
         p = pytester.makepyfile("def test_func(): pass")
         pytester.makeconftest(
-            """
+            f"""
             import pytest
             class SpecialItem(pytest.Item):
                 def runtest(self):

@@ -581,10 +581,9 @@ class TestSession:
                 def collect(self):
                     return [SpecialItem.from_parent(name="check", parent=self)]
             def pytest_collect_file(file_path, parent):
-                if file_path.name == %r:
+                if file_path.name == {p.name!r}:
                     return SpecialFile.from_parent(path=file_path, parent=parent)
         """
-            % p.name
         )
         id = p.name


@@ -862,7 +861,7 @@ def test_matchnodes_two_collections_same_file(pytester: Pytester) -> None:
     result = pytester.runpytest()
     assert result.ret == 0
     result.stdout.fnmatch_lines(["*2 passed*"])
-    res = pytester.runpytest("%s::item2" % p.name)
+    res = pytester.runpytest(f"{p.name}::item2")
     res.stdout.fnmatch_lines(["*1 passed*"])


@@ -1444,7 +1443,7 @@ def test_collect_symlink_out_of_tree(pytester: Pytester) -> None:
     symlink_to_sub = out_of_tree.joinpath("symlink_to_sub")
     symlink_or_skip(sub, symlink_to_sub)
     os.chdir(sub)
-    result = pytester.runpytest("-vs", "--rootdir=%s" % sub, symlink_to_sub)
+    result = pytester.runpytest("-vs", f"--rootdir={sub}", symlink_to_sub)
     result.stdout.fnmatch_lines(
         [
             # Should not contain "sub/"!

@@ -67,13 +67,12 @@ class TestParseIni:
         p1 = pytester.makepyfile("def test(): pass")
         pytester.makefile(
             ".cfg",
-            setup="""
+            setup=f"""
                 [tool:pytest]
-                testpaths=%s
+                testpaths={p1.name}
                 [pytest]
                 testpaths=ignored
-        """
-            % p1.name,
+        """,
         )
         result = pytester.runpytest()
         result.stdout.fnmatch_lines(["configfile: setup.cfg", "* 1 passed in *"])
@@ -838,11 +837,10 @@ class TestConfigAPI:
         )
         if str_val != "no-ini":
             pytester.makeini(
-                """
+                f"""
                 [pytest]
-                strip=%s
+                strip={str_val}
                 """
-                % str_val
             )
         config = pytester.parseconfig()
         assert config.getini("strip") is bool_val

@@ -1290,8 +1288,8 @@ def test_invalid_options_show_extra_information(pytester: Pytester) -> None:
     result.stderr.fnmatch_lines(
         [
             "*error: unrecognized arguments: --invalid-option*",
-            "* inifile: %s*" % pytester.path.joinpath("tox.ini"),
-            "* rootdir: %s*" % pytester.path,
+            "* inifile: {}*".format(pytester.path.joinpath("tox.ini")),
+            f"* rootdir: {pytester.path}*",
         ]
     )


@@ -1423,8 +1421,8 @@ def test_load_initial_conftest_last_ordering(_config_for_test):
 def test_get_plugin_specs_as_list() -> None:
     def exp_match(val: object) -> str:
         return (
-            "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %s"
-            % re.escape(repr(val))
+            f"Plugins may be specified as a sequence or a ','-separated string "
+            f"of plugin names. Got: {re.escape(repr(val))}"
         )

     with pytest.raises(pytest.UsageError, match=exp_match({"foo"})):

@@ -1837,10 +1835,10 @@ class TestOverrideIniArgs:
         self, monkeypatch: MonkeyPatch, _config_for_test, _sys_snapshot
     ) -> None:
         cache_dir = ".custom_cache"
-        monkeypatch.setenv("PYTEST_ADDOPTS", "-o cache_dir=%s" % cache_dir)
+        monkeypatch.setenv("PYTEST_ADDOPTS", f"-o cache_dir={cache_dir}")
         config = _config_for_test
         config._preparse([], addopts=True)
-        assert config._override_ini == ["cache_dir=%s" % cache_dir]
+        assert config._override_ini == [f"cache_dir={cache_dir}"]

     def test_addopts_from_env_not_concatenated(
         self, monkeypatch: MonkeyPatch, _config_for_test

@@ -2048,7 +2046,7 @@ def test_invocation_args(pytester: Pytester) -> None:
 )
 def test_config_blocked_default_plugins(pytester: Pytester, plugin: str) -> None:
     p = pytester.makepyfile("def test(): pass")
-    result = pytester.runpytest(str(p), "-pno:%s" % plugin)
+    result = pytester.runpytest(str(p), f"-pno:{plugin}")

     if plugin == "python":
         assert result.ret == ExitCode.USAGE_ERROR

@@ -2065,7 +2063,7 @@ def test_config_blocked_default_plugins(pytester: Pytester, plugin: str) -> None
         result.stdout.fnmatch_lines(["* 1 passed in *"])

     p = pytester.makepyfile("def test(): assert 0")
-    result = pytester.runpytest(str(p), "-pno:%s" % plugin)
+    result = pytester.runpytest(str(p), f"-pno:{plugin}")
     assert result.ret == ExitCode.TESTS_FAILED
     if plugin != "terminal":
         result.stdout.fnmatch_lines(["* 1 failed in *"])

@@ -280,7 +280,7 @@ def test_conftest_confcutdir(pytester: Pytester) -> None:
         ),
         encoding="utf-8",
     )
-    result = pytester.runpytest("-h", "--confcutdir=%s" % x, x)
+    result = pytester.runpytest("-h", f"--confcutdir={x}", x)
     result.stdout.fnmatch_lines(["*--xyz*"])
     result.stdout.no_fnmatch_line("*warning: could not load initial*")


@@ -380,7 +380,7 @@ def test_conftest_symlink_files(pytester: Pytester) -> None:
         """
         ),
     }
-    pytester.makepyfile(**{"real/%s" % k: v for k, v in source.items()})
+    pytester.makepyfile(**{f"real/{k}": v for k, v in source.items()})

     # Create a build directory that contains symlinks to actual files
     # but doesn't symlink actual directories.

@@ -402,7 +402,7 @@ def test_conftest_badcase(pytester: Pytester) -> None:
     """Check conftest.py loading when directory casing is wrong (#5792)."""
     pytester.path.joinpath("JenkinsRoot/test").mkdir(parents=True)
     source = {"setup.py": "", "test/__init__.py": "", "test/conftest.py": ""}
-    pytester.makepyfile(**{"JenkinsRoot/%s" % k: v for k, v in source.items()})
+    pytester.makepyfile(**{f"JenkinsRoot/{k}": v for k, v in source.items()})

     os.chdir(pytester.path.joinpath("jenkinsroot/test"))
     result = pytester.runpytest()

@@ -638,9 +638,9 @@ class TestConftestVisibility:
     ) -> None:
         """#616"""
         dirs = self._setup_tree(pytester)
-        print("pytest run in cwd: %s" % (dirs[chdir].relative_to(pytester.path)))
-        print("pytestarg : %s" % testarg)
-        print("expected pass : %s" % expect_ntests_passed)
+        print(f"pytest run in cwd: {dirs[chdir].relative_to(pytester.path)}")
+        print(f"pytestarg : {testarg}")
+        print(f"expected pass : {expect_ntests_passed}")
         os.chdir(dirs[chdir])
         reprec = pytester.inline_run(
             testarg,

@@ -699,7 +699,7 @@ def test_search_conftest_up_to_inifile(

     args = [str(src)]
     if confcutdir:
-        args = ["--confcutdir=%s" % root.joinpath(confcutdir)]
+        args = [f"--confcutdir={root.joinpath(confcutdir)}"]
     result = pytester.runpytest(*args)
     match = ""
     if passed:
@@ -221,7 +221,7 @@ class TestPDB:
             pass
         """
         )
-        child = pytester.spawn_pytest("--pdb %s" % p1)
+        child = pytester.spawn_pytest(f"--pdb {p1}")
         child.expect("captured stdout")
         child.expect("get rekt")
         child.expect("captured stderr")

@@ -246,7 +246,7 @@ class TestPDB:
             assert False
         """
         )
-        child = pytester.spawn_pytest("--pdb %s" % p1)
+        child = pytester.spawn_pytest(f"--pdb {p1}")
         child.expect("Pdb")
         output = child.before.decode("utf8")
         child.sendeof()

@@ -283,7 +283,7 @@ class TestPDB:
             assert False
         """
         )
-        child = pytester.spawn_pytest("--show-capture=all --pdb -p no:logging %s" % p1)
+        child = pytester.spawn_pytest(f"--show-capture=all --pdb -p no:logging {p1}")
         child.expect("get rekt")
         output = child.before.decode("utf8")
         assert "captured log" not in output

@@ -303,7 +303,7 @@ class TestPDB:
             pytest.raises(ValueError, globalfunc)
         """
         )
-        child = pytester.spawn_pytest("--pdb %s" % p1)
+        child = pytester.spawn_pytest(f"--pdb {p1}")
         child.expect(".*def test_1")
         child.expect(".*pytest.raises.*globalfunc")
         child.expect("Pdb")

@@ -320,7 +320,7 @@ class TestPDB:
             xxx
         """
         )
-        child = pytester.spawn_pytest("--pdb %s" % p1)
+        child = pytester.spawn_pytest(f"--pdb {p1}")
         # child.expect(".*import pytest.*")
         child.expect("Pdb")
         child.sendline("c")

@@ -335,7 +335,7 @@ class TestPDB:
         """
         )
         p1 = pytester.makepyfile("def test_func(): pass")
-        child = pytester.spawn_pytest("--pdb %s" % p1)
+        child = pytester.spawn_pytest(f"--pdb {p1}")
         child.expect("Pdb")

         # INTERNALERROR is only displayed once via terminal reporter.

@@ -461,7 +461,7 @@ class TestPDB:
             assert 0
         """
         )
-        child = pytester.spawn_pytest("--pdb %s" % str(p1))
+        child = pytester.spawn_pytest(f"--pdb {p1!s}")
         child.send("caplog.record_tuples\n")
         child.expect_exact(
             "[('test_pdb_with_caplog_on_pdb_invocation', 30, 'some_warning')]"

@@ -501,7 +501,7 @@ class TestPDB:
             '''
         """
         )
-        child = pytester.spawn_pytest("--doctest-modules --pdb %s" % p1)
+        child = pytester.spawn_pytest(f"--doctest-modules --pdb {p1}")
         child.expect("Pdb")

         assert "UNEXPECTED EXCEPTION: AssertionError()" in child.before.decode("utf8")

@@ -528,7 +528,7 @@ class TestPDB:
         )
         # NOTE: does not use pytest.set_trace, but Python's patched pdb,
         # therefore "-s" is required.
-        child = pytester.spawn_pytest("--doctest-modules --pdb -s %s" % p1)
+        child = pytester.spawn_pytest(f"--doctest-modules --pdb -s {p1}")
         child.expect("Pdb")
         child.sendline("q")
         rest = child.read().decode("utf8")

@@ -621,7 +621,7 @@ class TestPDB:
             pytest.fail("expected_failure")
         """
         )
-        child = pytester.spawn_pytest("--pdbcls=mytest:CustomPdb %s" % str(p1))
+        child = pytester.spawn_pytest(f"--pdbcls=mytest:CustomPdb {p1!s}")
         child.expect(r"PDB set_trace \(IO-capturing turned off\)")
         child.expect(r"\n\(Pdb")
         child.sendline("debug foo()")

@@ -658,7 +658,7 @@ class TestPDB:
             pytest.set_trace()
         """
         )
-        child = pytester.spawn_pytest("-s %s" % p1)
+        child = pytester.spawn_pytest(f"-s {p1}")
         child.expect(r">>> PDB set_trace >>>")
         child.expect("Pdb")
         child.sendline("c")

@@ -914,7 +914,7 @@ class TestPDB:
         """
         )
         monkeypatch.setenv("PYTHONPATH", str(pytester.path))
-        child = pytester.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1))
+        child = pytester.spawn_pytest(f"--pdbcls=custom_pdb:CustomPdb {p1!s}")

         child.expect("__init__")
         child.expect("custom set_trace>")
@@ -1208,8 +1208,7 @@ def test_pdb_suspends_fixture_capturing(pytester: Pytester, fixture: str) -> Non
     child.expect("Pdb")
     before = child.before.decode("utf8")
     assert (
-        "> PDB set_trace (IO-capturing turned off for fixture %s) >" % (fixture)
-        in before
+        f"> PDB set_trace (IO-capturing turned off for fixture {fixture}) >" in before
     )

     # Test that capturing is really suspended.

@@ -1225,7 +1224,7 @@ def test_pdb_suspends_fixture_capturing(pytester: Pytester, fixture: str) -> Non
     TestPDB.flush(child)
     assert child.exitstatus == 0
     assert "= 1 passed in" in rest
-    assert "> PDB continue (IO-capturing resumed for fixture %s) >" % (fixture) in rest
+    assert f"> PDB continue (IO-capturing resumed for fixture {fixture}) >" in rest


 def test_pdbcls_via_local_module(pytester: Pytester) -> None:

@@ -1160,7 +1160,7 @@ class TestDoctestSkips:
             pytester.maketxtfile(doctest)
         else:
             assert mode == "module"
-            pytester.makepyfile('"""\n%s"""' % doctest)
+            pytester.makepyfile(f'"""\n{doctest}"""')

         return makeit


@@ -101,7 +101,7 @@ def test_timeout(pytester: Pytester, enabled: bool) -> None:
     result = pytester.runpytest_subprocess(*args)
     tb_output = "most recent call first"
     if enabled:
-        result.stderr.fnmatch_lines(["*%s*" % tb_output])
+        result.stderr.fnmatch_lines([f"*{tb_output}*"])
     else:
         assert tb_output not in result.stderr.str()
     result.stdout.fnmatch_lines(["*1 passed*"])

@@ -44,7 +44,7 @@ class RunAndParse:
         if family:
             args = ("-o", "junit_family=" + family, *args)
         xml_path = self.pytester.path.joinpath("junit.xml")
-        result = self.pytester.runpytest("--junitxml=%s" % xml_path, *args)
+        result = self.pytester.runpytest(f"--junitxml={xml_path}", *args)
         if family == "xunit2":
             with xml_path.open(encoding="utf-8") as f:
                 self.schema.validate(f)

@@ -520,7 +520,7 @@ class TestPython:
         )

         result, dom = run_and_parse(
-            "-o", "junit_logging=%s" % junit_logging, family=xunit_family
+            "-o", f"junit_logging={junit_logging}", family=xunit_family
         )
         assert result.ret, "Expected ret > 0"
         node = dom.find_first_by_tag("testsuite")

@@ -605,11 +605,11 @@ class TestPython:
         for index, char in enumerate("<&'"):
             tnode = node.find_nth_by_tag("testcase", index)
             tnode.assert_attr(
-                classname="test_failure_escape", name="test_func[%s]" % char
+                classname="test_failure_escape", name=f"test_func[{char}]"
             )
             sysout = tnode.find_first_by_tag("system-out")
             text = sysout.text
-            assert "%s\n" % char in text
+            assert f"{char}\n" in text

     @parametrize_families
     def test_junit_prefixing(

@@ -694,7 +694,7 @@ class TestPython:
             assert 0
         """
         )
-        result, dom = run_and_parse("-o", "junit_logging=%s" % junit_logging)
+        result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
         node = dom.find_first_by_tag("testsuite")
         tnode = node.find_first_by_tag("testcase")
         if junit_logging in ["system-err", "out-err", "all"]:

@@ -764,13 +764,12 @@ class TestPython:
     def test_unicode(self, pytester: Pytester, run_and_parse: RunAndParse) -> None:
         value = "hx\xc4\x85\xc4\x87\n"
         pytester.makepyfile(
-            """\
+            f"""\
             # coding: latin1
             def test_hello():
-                print(%r)
+                print({value!r})
                 assert 0
             """
-            % value
         )
         result, dom = run_and_parse()
         assert result.ret == 1

@@ -805,7 +804,7 @@ class TestPython:
             print('hello-stdout')
         """
         )
-        result, dom = run_and_parse("-o", "junit_logging=%s" % junit_logging)
+        result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         if junit_logging == "no":

@@ -829,7 +828,7 @@ class TestPython:
             sys.stderr.write('hello-stderr')
         """
         )
-        result, dom = run_and_parse("-o", "junit_logging=%s" % junit_logging)
+        result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         if junit_logging == "no":

@@ -858,7 +857,7 @@ class TestPython:
             pass
         """
         )
-        result, dom = run_and_parse("-o", "junit_logging=%s" % junit_logging)
+        result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         if junit_logging == "no":

@@ -888,7 +887,7 @@ class TestPython:
             pass
         """
         )
-        result, dom = run_and_parse("-o", "junit_logging=%s" % junit_logging)
+        result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
        node = dom.find_first_by_tag("testsuite")
        pnode = node.find_first_by_tag("testcase")
        if junit_logging == "no":

@@ -919,7 +918,7 @@ class TestPython:
             sys.stdout.write('hello-stdout call')
         """
         )
-        result, dom = run_and_parse("-o", "junit_logging=%s" % junit_logging)
+        result, dom = run_and_parse("-o", f"junit_logging={junit_logging}")
         node = dom.find_first_by_tag("testsuite")
         pnode = node.find_first_by_tag("testcase")
         if junit_logging == "no":

@@ -1013,7 +1012,7 @@ def test_nullbyte(pytester: Pytester, junit_logging: str) -> None:
         """
     )
     xmlf = pytester.path.joinpath("junit.xml")
-    pytester.runpytest("--junitxml=%s" % xmlf, "-o", "junit_logging=%s" % junit_logging)
+    pytester.runpytest(f"--junitxml={xmlf}", "-o", f"junit_logging={junit_logging}")
     text = xmlf.read_text(encoding="utf-8")
     assert "\x00" not in text
     if junit_logging == "system-out":

@@ -1035,7 +1034,7 @@ def test_nullbyte_replace(pytester: Pytester, junit_logging: str) -> None:
         """
     )
     xmlf = pytester.path.joinpath("junit.xml")
-    pytester.runpytest("--junitxml=%s" % xmlf, "-o", "junit_logging=%s" % junit_logging)
+    pytester.runpytest(f"--junitxml={xmlf}", "-o", f"junit_logging={junit_logging}")
     text = xmlf.read_text(encoding="utf-8")
     if junit_logging == "system-out":
         assert "#x0" in text

@@ -1071,9 +1070,9 @@ def test_invalid_xml_escape() -> None:
     for i in invalid:
         got = bin_xml_escape(chr(i))
         if i <= 0xFF:
-            expected = "#x%02X" % i
+            expected = f"#x{i:02X}"
         else:
-            expected = "#x%04X" % i
+            expected = f"#x{i:04X}"
         assert got == expected
     for i in valid:
         assert chr(i) == bin_xml_escape(chr(i))
@ -1748,7 +1747,7 @@ def test_logging_passing_tests_disabled_logs_output_for_failing_test_issue5430(
|
|||
"""
|
||||
)
|
||||
result, dom = run_and_parse(
|
||||
"-o", "junit_logging=%s" % junit_logging, family=xunit_family
|
||||
"-o", f"junit_logging={junit_logging}", family=xunit_family
|
||||
)
|
||||
assert result.ret == 1
|
||||
node = dom.find_first_by_tag("testcase")
|
||||
|
|
|
@ -79,7 +79,7 @@ def test_tmpdir_always_is_realpath(pytester: pytest.Pytester) -> None:
|
|||
assert os.path.realpath(str(tmpdir)) == str(tmpdir)
|
||||
"""
|
||||
)
|
||||
result = pytester.runpytest("-s", p, "--basetemp=%s/bt" % linktemp)
|
||||
result = pytester.runpytest("-s", p, f"--basetemp={linktemp}/bt")
|
||||
assert not result.ret
|
||||
|
||||
|
||||
|
|
|
@ -442,7 +442,7 @@ def test_syspath_prepend_with_namespace_packages(
|
|||
lib = ns.joinpath(dirname)
|
||||
lib.mkdir()
|
||||
lib.joinpath("__init__.py").write_text(
|
||||
"def check(): return %r" % dirname, encoding="utf-8"
|
||||
f"def check(): return {dirname!r}", encoding="utf-8"
|
||||
)
|
||||
|
||||
monkeypatch.syspath_prepend("hello")
|
||||
|
|
|
@ -171,7 +171,7 @@ class TestPaste:
|
|||
assert type(data) is bytes
|
||||
lexer = "text"
|
||||
assert url == "https://bpa.st"
|
||||
assert "lexer=%s" % lexer in data.decode()
|
||||
assert f"lexer={lexer}" in data.decode()
|
||||
assert "code=full-paste-contents" in data.decode()
|
||||
assert "expiry=1week" in data.decode()
|
||||
|
||||
|
|
|
@ -1032,7 +1032,7 @@ def test_store_except_info_on_error() -> None:
|
|||
assert sys.last_type is IndexError
|
||||
assert isinstance(sys.last_value, IndexError)
|
||||
if sys.version_info >= (3, 12, 0):
|
||||
assert isinstance(sys.last_exc, IndexError) # type: ignore[attr-defined]
|
||||
assert isinstance(sys.last_exc, IndexError)
|
||||
|
||||
assert sys.last_value.args[0] == "TEST"
|
||||
assert sys.last_traceback
|
||||
|
|
|
@@ -297,13 +297,12 @@ class TestXFail:
 @pytest.mark.parametrize("strict", [True, False])
 def test_xfail_simple(self, pytester: Pytester, strict: bool) -> None:
 item = pytester.getitem(
-"""
+f"""
 import pytest
-@pytest.mark.xfail(strict=%s)
+@pytest.mark.xfail(strict={strict})
 def test_func():
 assert 0
 """
-% strict
 )
 reports = runtestprotocol(item, log=False)
 assert len(reports) == 3
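The `TestXFail` hunks fold a trailing `% strict` into the template itself by turning the triple-quoted source into an f-string. One caveat worth knowing when applying this pattern: any literal `{` or `}` already present in the template must be doubled to `{{`/`}}` once the `f` prefix is added. A minimal sketch with a hypothetical parameter value:

    strict = True  # stands in for the parametrized value
    src = f"""
    import pytest
    @pytest.mark.xfail(strict={strict})
    def test_func():
        assert 0
    """
    assert "strict=True" in src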
@@ -630,15 +629,14 @@ class TestXFail:
 @pytest.mark.parametrize("strict", [True, False])
 def test_strict_xfail(self, pytester: Pytester, strict: bool) -> None:
 p = pytester.makepyfile(
-"""
+f"""
 import pytest

-@pytest.mark.xfail(reason='unsupported feature', strict=%s)
+@pytest.mark.xfail(reason='unsupported feature', strict={strict})
 def test_foo():
 with open('foo_executed', 'w', encoding='utf-8'):
 pass # make sure test executes
 """
-% strict
 )
 result = pytester.runpytest(p, "-rxX")
 if strict:

@@ -658,14 +656,13 @@ class TestXFail:
 @pytest.mark.parametrize("strict", [True, False])
 def test_strict_xfail_condition(self, pytester: Pytester, strict: bool) -> None:
 p = pytester.makepyfile(
-"""
+f"""
 import pytest

-@pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
+@pytest.mark.xfail(False, reason='unsupported feature', strict={strict})
 def test_foo():
 pass
 """
-% strict
 )
 result = pytester.runpytest(p, "-rxX")
 result.stdout.fnmatch_lines(["*1 passed*"])

@@ -674,14 +671,13 @@ class TestXFail:
 @pytest.mark.parametrize("strict", [True, False])
 def test_xfail_condition_keyword(self, pytester: Pytester, strict: bool) -> None:
 p = pytester.makepyfile(
-"""
+f"""
 import pytest

-@pytest.mark.xfail(condition=False, reason='unsupported feature', strict=%s)
+@pytest.mark.xfail(condition=False, reason='unsupported feature', strict={strict})
 def test_foo():
 pass
 """
-% strict
 )
 result = pytester.runpytest(p, "-rxX")
 result.stdout.fnmatch_lines(["*1 passed*"])

@@ -692,11 +688,10 @@ class TestXFail:
 self, pytester: Pytester, strict_val
 ) -> None:
 pytester.makeini(
-"""
+f"""
 [pytest]
-xfail_strict = %s
+xfail_strict = {strict_val}
 """
-% strict_val
 )
 p = pytester.makepyfile(
 """

@@ -1421,7 +1421,7 @@ def test_tbstyle_short(pytester: Pytester) -> None:
 s = result.stdout.str()
 assert "arg = 42" not in s
 assert "x = 0" not in s
-result.stdout.fnmatch_lines(["*%s:8*" % p.name, " assert x", "E assert*"])
+result.stdout.fnmatch_lines([f"*{p.name}:8*", " assert x", "E assert*"])
 result = pytester.runpytest()
 s = result.stdout.str()
 assert "x = 0" in s
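The `fnmatch_lines` rewrites only change how the glob pattern string is built; the matching itself is still `fnmatch`-style, with `*` as the wildcard. A standalone sketch of the same kind of match (the file name below is invented):

    import fnmatch

    p_name = "test_tbstyle_short.py"  # hypothetical test file name
    line = f"path/to/{p_name}:8: AssertionError"
    assert fnmatch.fnmatch(line, f"*{p_name}:8*")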
@@ -1497,8 +1497,8 @@ class TestGenericReporting:
 """
 )
 for tbopt in ["long", "short", "no"]:
-print("testing --tb=%s..." % tbopt)
-result = pytester.runpytest("-rN", "--tb=%s" % tbopt)
+print(f"testing --tb={tbopt}...")
+result = pytester.runpytest("-rN", f"--tb={tbopt}")
 s = result.stdout.str()
 if tbopt == "long":
 assert "print(6*7)" in s

@@ -1528,7 +1528,7 @@ class TestGenericReporting:
 result = pytester.runpytest("--tb=line")
 bn = p.name
 result.stdout.fnmatch_lines(
-["*%s:3: IndexError*" % bn, "*%s:8: AssertionError: hello*" % bn]
+[f"*{bn}:3: IndexError*", f"*{bn}:8: AssertionError: hello*"]
 )
 s = result.stdout.str()
 assert "def test_func2" not in s

@@ -1544,7 +1544,7 @@ class TestGenericReporting:
 result = pytester.runpytest("--tb=line")
 result.stdout.str()
 bn = p.name
-result.stdout.fnmatch_lines(["*%s:3: Failed: test_func1" % bn])
+result.stdout.fnmatch_lines([f"*{bn}:3: Failed: test_func1"])

 def test_pytest_report_header(self, pytester: Pytester, option) -> None:
 pytester.makeconftest(

@@ -1945,7 +1945,7 @@ def test_summary_stats(
 # Reset cache.
 tr._main_color = None

-print("Based on stats: %s" % stats_arg)
+print(f"Based on stats: {stats_arg}")
 print(f'Expect summary: "{exp_line}"; with color "{exp_color}"')
 (line, color) = tr.build_summary_stats_line()
 print(f'Actually got: "{line}"; with color "{color}"')

@@ -87,11 +87,11 @@ class TestConfigTmpPath:
 pass
 """
 )
-pytester.runpytest(p, "--basetemp=%s" % mytemp)
+pytester.runpytest(p, f"--basetemp={mytemp}")
 assert mytemp.exists()
 mytemp.joinpath("hello").touch()

-pytester.runpytest(p, "--basetemp=%s" % mytemp)
+pytester.runpytest(p, f"--basetemp={mytemp}")
 assert mytemp.exists()
 assert not mytemp.joinpath("hello").exists()

@@ -248,7 +248,7 @@ def test_mktemp(pytester: Pytester, basename: str, is_ok: bool) -> None:
 """
 )

-result = pytester.runpytest(p, "--basetemp=%s" % mytemp)
+result = pytester.runpytest(p, f"--basetemp={mytemp}")
 if is_ok:
 assert result.ret == 0
 assert mytemp.joinpath(basename).exists()

@@ -44,7 +44,7 @@ def test_normal_flow(pytester: Pytester, pyfile_with_warnings) -> None:
 result = pytester.runpytest(pyfile_with_warnings)
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 "test_normal_flow.py::test_func",
 "*normal_flow_module.py:3: UserWarning: user warning",
 '* warnings.warn(UserWarning("user warning"))',
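Every remaining hunk in this file repeats one rewrite of the warnings-summary banner assertion; old and new spellings are equal character for character. A sketch (the constant's real value lives in `_pytest.terminal`; the one below is assumed for illustration):

    WARNINGS_SUMMARY_HEADER = "warnings summary"  # assumed value
    old = "*== %s ==*" % WARNINGS_SUMMARY_HEADER
    new = f"*== {WARNINGS_SUMMARY_HEADER} ==*"
    assert old == new == "*== warnings summary ==*"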
@@ -75,7 +75,7 @@ def test_setup_teardown_warnings(pytester: Pytester) -> None:
 result = pytester.runpytest()
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 "*test_setup_teardown_warnings.py:6: UserWarning: warning during setup",
 '*warnings.warn(UserWarning("warning during setup"))',
 "*test_setup_teardown_warnings.py:8: UserWarning: warning during teardown",

@@ -143,7 +143,7 @@ def test_unicode(pytester: Pytester) -> None:
 result = pytester.runpytest()
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 "*test_unicode.py:7: UserWarning: \u6d4b\u8bd5*",
 "* 1 passed, 1 warning*",
 ]

@@ -315,7 +315,7 @@ def test_collection_warnings(pytester: Pytester) -> None:
 result = pytester.runpytest()
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 " *collection_warnings.py:3: UserWarning: collection warning",
 ' warnings.warn(UserWarning("collection warning"))',
 "* 1 passed, 1 warning*",

@@ -374,7 +374,7 @@ def test_hide_pytest_internal_warnings(
 else:
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 "*test_hide_pytest_internal_warnings.py:4: PytestWarning: some internal warning",
 "* 1 passed, 1 warning *",
 ]

@@ -461,7 +461,7 @@ class TestDeprecationWarningsByDefault:
 result = pytester.runpytest_subprocess()
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 "*test_shown_by_default.py:3: DeprecationWarning: collection",
 "*test_shown_by_default.py:7: PendingDeprecationWarning: test run",
 "* 1 passed, 2 warnings*",

@@ -492,7 +492,7 @@ class TestDeprecationWarningsByDefault:
 result = pytester.runpytest_subprocess()
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 "*test_hidden_by_mark.py:3: DeprecationWarning: collection",
 "* 1 passed, 1 warning*",
 ]

@@ -555,7 +555,7 @@ def test_removed_in_x_warning_as_error(pytester: Pytester, change_default) -> No
 class TestAssertionWarnings:
 @staticmethod
 def assert_result_warns(result, msg) -> None:
-result.stdout.fnmatch_lines(["*PytestAssertRewriteWarning: %s*" % msg])
+result.stdout.fnmatch_lines([f"*PytestAssertRewriteWarning: {msg}*"])

 def test_tuple_warning(self, pytester: Pytester) -> None:
 pytester.makepyfile(

@@ -585,7 +585,7 @@ def test_group_warnings_by_message(pytester: Pytester) -> None:
 result = pytester.runpytest()
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 "test_group_warnings_by_message.py::test_foo[[]0[]]",
 "test_group_warnings_by_message.py::test_foo[[]1[]]",
 "test_group_warnings_by_message.py::test_foo[[]2[]]",

@@ -617,7 +617,7 @@ def test_group_warnings_by_message_summary(pytester: Pytester) -> None:
 result = pytester.runpytest()
 result.stdout.fnmatch_lines(
 [
-"*== %s ==*" % WARNINGS_SUMMARY_HEADER,
+f"*== {WARNINGS_SUMMARY_HEADER} ==*",
 "test_1.py: 21 warnings",
 "test_2.py: 1 warning",
 " */test_1.py:8: UserWarning: foo",