Use new hook to report accurate tests skipped in --lf and --ff
parent 17121960b4
commit 75e6f7717c
@@ -106,14 +106,18 @@ class LFPlugin:
         active_keys = 'lf', 'failedfirst'
         self.active = any(config.getvalue(key) for key in active_keys)
         self.lastfailed = config.cache.get("cache/lastfailed", {})
+        self._previously_failed_count = None

-    def pytest_report_header(self):
+    def pytest_report_collectionfinish(self):
         if self.active:
-            if not self.lastfailed:
+            if not self._previously_failed_count:
                 mode = "run all (no recorded failures)"
             else:
-                mode = "rerun previous failures%s" % (
-                    " first" if self.config.getvalue("failedfirst") else "")
+                noun = 'failure' if self._previously_failed_count == 1 else 'failures'
+                suffix = " first" if self.config.getvalue("failedfirst") else ""
+                mode = "rerun previous {count} {noun}{suffix}".format(
+                    count=self._previously_failed_count, suffix=suffix, noun=noun
+                )
             return "run-last-failure: %s" % mode

     def pytest_runtest_logreport(self, report):
@@ -142,6 +146,7 @@ class LFPlugin:
                     previously_failed.append(item)
                 else:
                     previously_passed.append(item)
+            self._previously_failed_count = len(previously_failed)
             if not previously_failed:
                 # running a subset of all tests with recorded failures outside
                 # of the set of tests currently executing
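For context, the change above switches from pytest_report_header to the pytest_report_collectionfinish hook, which runs after collection and deselection, so the report line can reflect the items actually selected for this run. A minimal, hypothetical conftest.py sketch of that hook (the argument names follow the hook's signature; the plugin name and report text here are made up for illustration, not part of this commit):

# conftest.py -- illustrative only, not part of this commit.
def pytest_report_collectionfinish(config, startdir, items):
    # Runs after collection/deselection, so len(items) reflects what will
    # actually execute; the returned string is printed after the
    # "collected N items" line, much like the run-last-failure report.
    return "my-plugin: %d item(s) selected for this run" % len(items)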
@@ -340,6 +340,73 @@ class TestLastFailed(object):
         result = testdir.runpytest()
         result.stdout.fnmatch_lines('*1 failed in*')

+    def test_terminal_report_lastfailed(self, testdir):
+        test_a = testdir.makepyfile(test_a="""
+            def test_a1():
+                pass
+            def test_a2():
+                pass
+        """)
+        test_b = testdir.makepyfile(test_b="""
+            def test_b1():
+                assert 0
+            def test_b2():
+                assert 0
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            'collected 4 items',
+            '*2 failed, 2 passed in*',
+        ])
+
+        result = testdir.runpytest('--lf')
+        result.stdout.fnmatch_lines([
+            'collected 4 items',
+            'run-last-failure: rerun previous 2 failures',
+            '*2 failed, 2 deselected in*',
+        ])
+
+        result = testdir.runpytest(test_a, '--lf')
+        result.stdout.fnmatch_lines([
+            'collected 2 items',
+            'run-last-failure: run all (no recorded failures)',
+            '*2 passed in*',
+        ])
+
+        result = testdir.runpytest(test_b, '--lf')
+        result.stdout.fnmatch_lines([
+            'collected 2 items',
+            'run-last-failure: rerun previous 2 failures',
+            '*2 failed in*',
+        ])
+
+        result = testdir.runpytest('test_b.py::test_b1', '--lf')
+        result.stdout.fnmatch_lines([
+            'collected 1 item',
+            'run-last-failure: rerun previous 1 failure',
+            '*1 failed in*',
+        ])
+
+    def test_terminal_report_failedfirst(self, testdir):
+        testdir.makepyfile(test_a="""
+            def test_a1():
+                assert 0
+            def test_a2():
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            'collected 2 items',
+            '*1 failed, 1 passed in*',
+        ])
+
+        result = testdir.runpytest('--ff')
+        result.stdout.fnmatch_lines([
+            'collected 2 items',
+            'run-last-failure: rerun previous 1 failure first',
+            '*1 failed, 1 passed in*',
+        ])
+
     def test_lastfailed_collectfailure(self, testdir, monkeypatch):

         testdir.makepyfile(test_maybe="""