Fix regressions with `--lf` plugin
Only filter with known failures, and explicitly keep nodes whose paths were passed as arguments. This also displays the "run-last-failure" status before the collected files, and does not update the cache with "--collect-only". Fixes https://github.com/pytest-dev/pytest/issues/6968.
parent f0f552d60c
commit d530d70128
@@ -0,0 +1 @@
+Fix regressions with `--lf` filtering too much since pytest 5.4.

@@ -0,0 +1 @@
+Collected files are displayed after any reports from hooks, e.g. the status from ``--lf``.
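To make the intent of the notes above concrete, here is a minimal, self-contained sketch of the keep-or-deselect rule that the cacheprovider hunk below implements. FakeNode and keep_node are hypothetical stand-ins for illustration only, not pytest API:

# Sketch of the --lf keep-or-deselect rule; hypothetical stand-ins for
# pytest's real Node/Collector objects.
class FakeNode:
    def __init__(self, nodeid, fspath, is_collector=False):
        self.nodeid = nodeid
        self.fspath = fspath
        self.is_collector = is_collector


def keep_node(node, lastfailed, initpaths):
    return (
        node.nodeid in lastfailed      # known failure -> rerun it
        or node.fspath in initpaths    # its path was passed as an argument
        or node.is_collector           # keep all sub-collectors
    )


lastfailed = {"test_a.py::test_fail": True}
collected = [
    FakeNode("test_a.py::test_fail", "test_a.py"),
    FakeNode("test_a.py::test_pass", "test_a.py"),
]
kept = [n for n in collected if keep_node(n, lastfailed, initpaths=set())]
assert [n.nodeid for n in kept] == ["test_a.py::test_fail"]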
@@ -183,27 +183,35 @@ class LFPluginCollWrapper:
             res.result = sorted(
                 res.result, key=lambda x: 0 if Path(str(x.fspath)) in lf_paths else 1,
             )
             out.force_result(res)
             return
 
         elif isinstance(collector, Module):
             if Path(str(collector.fspath)) in self.lfplugin._last_failed_paths:
                 out = yield
                 res = out.get_result()
+                result = res.result
+                lastfailed = self.lfplugin.lastfailed
 
-                filtered_result = [
-                    x for x in res.result if x.nodeid in self.lfplugin.lastfailed
+                # Only filter with known failures.
+                if not self._collected_at_least_one_failure:
+                    if not any(x.nodeid in lastfailed for x in result):
+                        return
+                    self.lfplugin.config.pluginmanager.register(
+                        LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip"
+                    )
+                    self._collected_at_least_one_failure = True
+
+                session = collector.session
+                result[:] = [
+                    x
+                    for x in result
+                    if x.nodeid in lastfailed
+                    # Include any passed arguments (not trivial to filter).
+                    or session.isinitpath(x.fspath)
+                    # Keep all sub-collectors.
+                    or isinstance(x, nodes.Collector)
                 ]
-                if filtered_result:
-                    res.result = filtered_result
-                out.force_result(res)
-
-                if not self._collected_at_least_one_failure:
-                    self.lfplugin.config.pluginmanager.register(
-                        LFPluginCollSkipfiles(self.lfplugin), "lfplugin-collskip"
-                    )
-                    self._collected_at_least_one_failure = True
-                return res
+                return
         yield
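For readers unfamiliar with the hookwrapper pattern used in the hunk above, the following is a rough standalone sketch (a hypothetical KeepOnlyPlugin, not pytest's own LFPluginCollWrapper) showing how a wrapper around pytest_make_collect_report can rewrite the report's result list after collection has run:

import pytest


class KeepOnlyPlugin:
    """Hypothetical plugin: drop collected children whose node id is not wanted."""

    def __init__(self, keep_nodeids):
        self.keep_nodeids = set(keep_nodeids)

    @pytest.hookimpl(hookwrapper=True)
    def pytest_make_collect_report(self, collector):
        out = yield                 # let the actual collection happen first
        res = out.get_result()      # the CollectReport produced for `collector`
        res.result = [
            x
            for x in res.result
            if x.nodeid in self.keep_nodeids
            # keep sub-collectors so their children can still be collected
            or isinstance(x, pytest.Collector)
        ]
        out.force_result(res)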
@@ -234,8 +242,8 @@ class LFPlugin:
         self.lastfailed = config.cache.get(
             "cache/lastfailed", {}
         )  # type: Dict[str, bool]
-        self._previously_failed_count = None
-        self._report_status = None
+        self._previously_failed_count = None  # type: Optional[int]
+        self._report_status = None  # type: Optional[str]
         self._skipped_files = 0  # count skipped files during collection due to --lf
 
         if config.getoption("lf"):
@@ -269,7 +277,12 @@ class LFPlugin:
         else:
             self.lastfailed[report.nodeid] = True
 
-    def pytest_collection_modifyitems(self, session, config, items):
+    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+    def pytest_collection_modifyitems(
+        self, config: Config, items: List[nodes.Item]
+    ) -> Generator[None, None, None]:
+        yield
+
         if not self.active:
             return
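Making this hook a tryfirst hookwrapper means the code after the yield runs once every other pytest_collection_modifyitems implementation has finished, so the reported status reflects the final selection. A minimal illustration of that pattern, as a hypothetical plugin rather than the LFPlugin code above:

import pytest


class SelectionReporter:
    """Hypothetical plugin: inspect the final selection after all other plugins ran."""

    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
    def pytest_collection_modifyitems(self, config, items):
        yield  # other pytest_collection_modifyitems implementations run here
        print("final selection:", len(items), "items")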
@@ -334,9 +347,12 @@ class NFPlugin:
         self.active = config.option.newfirst
         self.cached_nodeids = set(config.cache.get("cache/nodeids", []))
 
+    @pytest.hookimpl(hookwrapper=True, tryfirst=True)
     def pytest_collection_modifyitems(
-        self, session: Session, config: Config, items: List[nodes.Item]
-    ) -> None:
+        self, items: List[nodes.Item]
+    ) -> Generator[None, None, None]:
+        yield
+
         if self.active:
             new_items = order_preserving_dict()  # type: Dict[str, nodes.Item]
             other_items = order_preserving_dict()  # type: Dict[str, nodes.Item]
@@ -356,11 +372,13 @@
     def _get_increasing_order(self, items):
         return sorted(items, key=lambda item: item.fspath.mtime(), reverse=True)
 
-    def pytest_sessionfinish(self, session):
+    def pytest_sessionfinish(self) -> None:
         config = self.config
         if config.getoption("cacheshow") or hasattr(config, "slaveinput"):
             return
 
+        if config.getoption("collectonly"):
+            return
         config.cache.set("cache/nodeids", sorted(self.cached_nodeids))
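The added guard mirrors a pattern any plugin can use to avoid persisting state during a pure collection run; a sketch for a conftest.py, with a hypothetical cache key:

def pytest_sessionfinish(session):
    config = session.config
    # Skip the cache write when only collecting, as in the hunk above.
    if config.getoption("collectonly"):
        return
    config.cache.set("example/last-run-marker", True)  # hypothetical key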
@@ -664,15 +664,17 @@ class TerminalReporter:
     def pytest_collection_finish(self, session):
         self.report_collect(True)
 
-        if self.config.getoption("collectonly"):
-            self._printcollecteditems(session.items)
-
         lines = self.config.hook.pytest_report_collectionfinish(
             config=self.config, startdir=self.startdir, items=session.items
         )
         self._write_report_lines_from_hooks(lines)
 
         if self.config.getoption("collectonly"):
+            if session.items:
+                if self.config.option.verbose > -1:
+                    self._tw.line("")
+                self._printcollecteditems(session.items)
+
             failed = self.stats.get("failed")
             if failed:
                 self._tw.sep("!", "collection failures")
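With this reordering, strings returned by pytest_report_collectionfinish (such as the run-last-failure status from --lf) are written before the collected items. A minimal conftest.py illustration of that hook; the returned text is an arbitrary example:

# conftest.py
def pytest_report_collectionfinish(config, startdir, items):
    # This line now appears before the collected items are printed.
    return "my-status: selected {} item(s)".format(len(items))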
@@ -7,6 +7,7 @@ import py
 
 import pytest
 from _pytest.config import ExitCode
+from _pytest.pytester import Testdir
 
 pytest_plugins = ("pytester",)
 
@@ -267,9 +268,9 @@ class TestLastFailed:
         result = testdir.runpytest(str(p), "--lf")
         result.stdout.fnmatch_lines(
             [
-                "collected 2 items",
+                "collected 3 items / 1 deselected / 2 selected",
                 "run-last-failure: rerun previous 2 failures",
-                "*= 2 passed in *",
+                "*= 2 passed, 1 deselected in *",
             ]
         )
         result = testdir.runpytest(str(p), "--lf")
@@ -345,7 +346,13 @@ class TestLastFailed:
         result = testdir.runpytest("--lf", p2)
         result.stdout.fnmatch_lines(["*1 passed*"])
         result = testdir.runpytest("--lf", p)
-        result.stdout.fnmatch_lines(["collected 1 item", "*= 1 failed in *"])
+        result.stdout.fnmatch_lines(
+            [
+                "collected 2 items / 1 deselected / 1 selected",
+                "run-last-failure: rerun previous 1 failure",
+                "*= 1 failed, 1 deselected in *",
+            ]
+        )
 
     def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
         monkeypatch.setattr("sys.dont_write_bytecode", True)
@@ -690,9 +697,9 @@ class TestLastFailed:
         result = testdir.runpytest(test_foo, "--last-failed")
         result.stdout.fnmatch_lines(
             [
-                "collected 1 item",
+                "collected 2 items / 1 deselected / 1 selected",
                 "run-last-failure: rerun previous 1 failure",
-                "*= 1 passed in *",
+                "*= 1 passed, 1 deselected in *",
             ]
         )
         assert self.get_cached_last_failed(testdir) == []
@@ -838,7 +845,7 @@ class TestLastFailed:
             ]
         )
 
-        # Remove/rename test.
+        # Remove/rename test: collects the file again.
         testdir.makepyfile(**{"pkg1/test_1.py": """def test_renamed(): assert 0"""})
         result = testdir.runpytest("--lf", "-rf")
         result.stdout.fnmatch_lines(
@@ -852,6 +859,133 @@ class TestLastFailed:
             ]
         )
 
+        result = testdir.runpytest("--lf", "--co")
+        result.stdout.fnmatch_lines(
+            [
+                "collected 1 item",
+                "run-last-failure: rerun previous 1 failure (skipped 1 file)",
+                "",
+                "<Module pkg1/test_1.py>",
+                "  <Function test_renamed>",
+            ]
+        )
+
+    def test_lastfailed_args_with_deselected(self, testdir: Testdir) -> None:
+        """Test regression with --lf running into NoMatch error.
+
+        This was caused by it not collecting (non-failed) nodes given as
+        arguments.
+        """
+        testdir.makepyfile(
+            **{
+                "pkg1/test_1.py": """
+                    def test_pass(): pass
+                    def test_fail(): assert 0
+                """,
+            }
+        )
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines(["collected 2 items", "* 1 failed, 1 passed in *"])
+        assert result.ret == 1
+
+        result = testdir.runpytest("pkg1/test_1.py::test_pass", "--lf", "--co")
+        assert result.ret == 0
+        result.stdout.fnmatch_lines(
+            [
+                "*collected 1 item",
+                "run-last-failure: 1 known failures not in selected tests",
+                "",
+                "<Module pkg1/test_1.py>",
+                "  <Function test_pass>",
+            ],
+            consecutive=True,
+        )
+
+        result = testdir.runpytest(
+            "pkg1/test_1.py::test_pass", "pkg1/test_1.py::test_fail", "--lf", "--co"
+        )
+        assert result.ret == 0
+        result.stdout.fnmatch_lines(
+            [
+                "collected 2 items / 1 deselected / 1 selected",
+                "run-last-failure: rerun previous 1 failure",
+                "",
+                "<Module pkg1/test_1.py>",
+                "  <Function test_fail>",
+                "*= 1 deselected in *",
+            ],
+        )
+
+    def test_lastfailed_with_class_items(self, testdir: Testdir) -> None:
+        """Test regression with --lf deselecting whole classes."""
+        testdir.makepyfile(
+            **{
+                "pkg1/test_1.py": """
+                    class TestFoo:
+                        def test_pass(self): pass
+                        def test_fail(self): assert 0
+
+                    def test_other(): assert 0
+                """,
+            }
+        )
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines(["collected 3 items", "* 2 failed, 1 passed in *"])
+        assert result.ret == 1
+
+        result = testdir.runpytest("--lf", "--co")
+        assert result.ret == 0
+        result.stdout.fnmatch_lines(
+            [
+                "collected 3 items / 1 deselected / 2 selected",
+                "run-last-failure: rerun previous 2 failures",
+                "",
+                "<Module pkg1/test_1.py>",
+                "  <Class TestFoo>",
+                "    <Function test_fail>",
+                "  <Function test_other>",
+                "",
+                "*= 1 deselected in *",
+            ],
+            consecutive=True,
+        )
+
+    def test_lastfailed_with_all_filtered(self, testdir: Testdir) -> None:
+        testdir.makepyfile(
+            **{
+                "pkg1/test_1.py": """
+                    def test_fail(): assert 0
+                    def test_pass(): pass
+                """,
+            }
+        )
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines(["collected 2 items", "* 1 failed, 1 passed in *"])
+        assert result.ret == 1
+
+        # Remove known failure.
+        testdir.makepyfile(
+            **{
+                "pkg1/test_1.py": """
+                    def test_pass(): pass
+                """,
+            }
+        )
+        result = testdir.runpytest("--lf", "--co")
+        result.stdout.fnmatch_lines(
+            [
+                "collected 1 item",
+                "run-last-failure: 1 known failures not in selected tests",
+                "",
+                "<Module pkg1/test_1.py>",
+                "  <Function test_pass>",
+                "",
+                "*= no tests ran in*",
+            ],
+            consecutive=True,
+        )
+        assert result.ret == 0
+
 
 class TestNewFirst:
     def test_newfirst_usecase(self, testdir):

@@ -859,63 +993,54 @@ class TestNewFirst:
             **{
                 "test_1/test_1.py": """
                 def test_1(): assert 1
-                def test_2(): assert 1
-                def test_3(): assert 1
             """,
                 "test_2/test_2.py": """
                 def test_1(): assert 1
-                def test_2(): assert 1
-                def test_3(): assert 1
             """,
             }
         )
 
         testdir.tmpdir.join("test_1/test_1.py").setmtime(1)
 
         result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines(
-            [
-                "*test_1/test_1.py::test_1 PASSED*",
-                "*test_1/test_1.py::test_2 PASSED*",
-                "*test_1/test_1.py::test_3 PASSED*",
-                "*test_2/test_2.py::test_1 PASSED*",
-                "*test_2/test_2.py::test_2 PASSED*",
-                "*test_2/test_2.py::test_3 PASSED*",
-            ]
+            ["*test_1/test_1.py::test_1 PASSED*", "*test_2/test_2.py::test_1 PASSED*"]
         )
 
         result = testdir.runpytest("-v", "--nf")
-
         result.stdout.fnmatch_lines(
-            [
-                "*test_2/test_2.py::test_1 PASSED*",
-                "*test_2/test_2.py::test_2 PASSED*",
-                "*test_2/test_2.py::test_3 PASSED*",
-                "*test_1/test_1.py::test_1 PASSED*",
-                "*test_1/test_1.py::test_2 PASSED*",
-                "*test_1/test_1.py::test_3 PASSED*",
-            ]
+            ["*test_2/test_2.py::test_1 PASSED*", "*test_1/test_1.py::test_1 PASSED*"]
        )
 
         testdir.tmpdir.join("test_1/test_1.py").write(
-            "def test_1(): assert 1\n"
-            "def test_2(): assert 1\n"
-            "def test_3(): assert 1\n"
-            "def test_4(): assert 1\n"
+            "def test_1(): assert 1\n" "def test_2(): assert 1\n"
         )
         testdir.tmpdir.join("test_1/test_1.py").setmtime(1)
 
-        result = testdir.runpytest("-v", "--nf")
-
+        result = testdir.runpytest("--nf", "--collect-only", "-q")
         result.stdout.fnmatch_lines(
             [
-                "*test_1/test_1.py::test_4 PASSED*",
-                "*test_2/test_2.py::test_1 PASSED*",
-                "*test_2/test_2.py::test_2 PASSED*",
-                "*test_2/test_2.py::test_3 PASSED*",
-                "*test_1/test_1.py::test_1 PASSED*",
-                "*test_1/test_1.py::test_2 PASSED*",
-                "*test_1/test_1.py::test_3 PASSED*",
+                "test_1/test_1.py::test_2",
+                "test_2/test_2.py::test_1",
+                "test_1/test_1.py::test_1",
             ]
         )
 
+        # Newest first with (plugin) pytest_collection_modifyitems hook.
+        testdir.makepyfile(
+            myplugin="""
+            def pytest_collection_modifyitems(items):
+                items[:] = sorted(items, key=lambda item: item.nodeid)
+                print("new_items:", [x.nodeid for x in items])
+            """
+        )
+        testdir.syspathinsert()
+        result = testdir.runpytest("--nf", "-p", "myplugin", "--collect-only", "-q")
+        result.stdout.fnmatch_lines(
+            [
+                "new_items: *test_1.py*test_1.py*test_2.py*",
+                "test_1/test_1.py::test_2",
+                "test_2/test_2.py::test_1",
+                "test_1/test_1.py::test_1",
+            ]
+        )
@@ -948,7 +1073,6 @@ class TestNewFirst:
         )
 
         result = testdir.runpytest("-v", "--nf")
-
         result.stdout.fnmatch_lines(
             [
                 "*test_2/test_2.py::test_1[1*",