Merge pull request #3295 from brianmaissy/feature/last-failed-no-failures-behavior

implemented --last-failed-no-failures
Bruno Oliveira 2018-03-20 19:17:25 -03:00 committed by GitHub
commit d03e38941b
4 changed files with 70 additions and 18 deletions

@@ -112,11 +112,12 @@ class LFPlugin(object):
         self.active = any(config.getoption(key) for key in active_keys)
         self.lastfailed = config.cache.get("cache/lastfailed", {})
         self._previously_failed_count = None
+        self._no_failures_behavior = self.config.getoption('last_failed_no_failures')

     def pytest_report_collectionfinish(self):
         if self.active:
             if not self._previously_failed_count:
-                mode = "run all (no recorded failures)"
+                mode = "run {} (no recorded failures)".format(self._no_failures_behavior)
             else:
                 noun = 'failure' if self._previously_failed_count == 1 else 'failures'
                 suffix = " first" if self.config.getoption(
@@ -144,24 +145,28 @@ class LFPlugin(object):
             self.lastfailed[report.nodeid] = True

     def pytest_collection_modifyitems(self, session, config, items):
-        if self.active and self.lastfailed:
-            previously_failed = []
-            previously_passed = []
-            for item in items:
-                if item.nodeid in self.lastfailed:
-                    previously_failed.append(item)
-                else:
-                    previously_passed.append(item)
-            self._previously_failed_count = len(previously_failed)
-            if not previously_failed:
-                # running a subset of all tests with recorded failures outside
-                # of the set of tests currently executing
-                return
-            if self.config.getoption("lf"):
-                items[:] = previously_failed
-                config.hook.pytest_deselected(items=previously_passed)
-            else:
-                items[:] = previously_failed + previously_passed
+        if self.active:
+            if self.lastfailed:
+                previously_failed = []
+                previously_passed = []
+                for item in items:
+                    if item.nodeid in self.lastfailed:
+                        previously_failed.append(item)
+                    else:
+                        previously_passed.append(item)
+                self._previously_failed_count = len(previously_failed)
+                if not previously_failed:
+                    # running a subset of all tests with recorded failures outside
+                    # of the set of tests currently executing
+                    return
+                if self.config.getoption("lf"):
+                    items[:] = previously_failed
+                    config.hook.pytest_deselected(items=previously_passed)
+                else:
+                    items[:] = previously_failed + previously_passed
+            elif self._no_failures_behavior == 'none':
+                config.hook.pytest_deselected(items=items)
+                items[:] = []

     def pytest_sessionfinish(self, session):
         config = self.config
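The reshaped hook above is easier to reason about as a pure function. The sketch below is illustrative only (the name ``select_items`` and the ``(selected, deselected)`` return value are not part of the patch); it mirrors the branching of ``pytest_collection_modifyitems`` without the plugin and hook machinery:

    # Illustrative sketch only: mirrors the patched pytest_collection_modifyitems
    # logic with plain data instead of collected items and a config object.
    def select_items(items, lastfailed, lf_enabled, no_failures_behavior):
        """Return (selected, deselected) following the --lf/--lfnf rules."""
        if lastfailed:
            previously_failed = [i for i in items if i in lastfailed]
            previously_passed = [i for i in items if i not in lastfailed]
            if not previously_failed:
                # recorded failures exist, but none are in the current selection
                return items, []
            if lf_enabled:
                # --last-failed: keep only the failures, deselect the rest
                return previously_failed, previously_passed
            # otherwise (e.g. --failed-first): failures first, nothing deselected
            return previously_failed + previously_passed, []
        if no_failures_behavior == 'none':
            # nothing failed last time and --lfnf=none: deselect everything
            return [], items
        return items, []  # default --lfnf=all: selection left untouched

    # With no recorded failures and --lfnf none, everything is deselected,
    # matching the "*2 desel*" expectation in the new tests further down.
    print(select_items(['t1', 't2'], {}, lf_enabled=True, no_failures_behavior='none'))
    # ([], ['t1', 't2'])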
@@ -230,6 +235,12 @@ def pytest_addoption(parser):
     parser.addini(
         "cache_dir", default='.pytest_cache',
         help="cache directory path.")
+    group.addoption(
+        '--lfnf', '--last-failed-no-failures', action='store',
+        dest='last_failed_no_failures', choices=('all', 'none'), default='all',
+        help='change the behavior when no test failed in the last run or no '
+             'information about the last failures was found in the cache'
+    )


 def pytest_cmdline_main(config):
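Since ``--lfnf``/``--last-failed-no-failures`` is registered as an ordinary command-line option above, it can also be passed programmatically. A small hedged example (the ``test_example.py`` path is made up for illustration):

    # Hypothetical invocation: rerun only the last failures, but deselect
    # everything (instead of running all tests) when nothing failed previously.
    import pytest

    exit_code = pytest.main(["--lf", "--last-failed-no-failures", "none", "test_example.py"])
    print("pytest exit code:", exit_code)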

changelog/3139.feature

@@ -0,0 +1 @@
+New ``--last-failed-no-failures`` command-line option that allows specifying the behavior of the cache plugin's ``--last-failed`` feature when no tests failed in the last run (or no cache was found): ``none`` or ``all`` (the default).

@@ -156,6 +156,16 @@ New ``--nf``, ``--new-first`` options: run new tests first followed by the rest
 of the tests, in both cases tests are also sorted by the file modified time,
 with more recent files coming first.

+Behavior when no tests failed in the last run
+---------------------------------------------
+
+When no tests failed in the last run, or when no cached ``lastfailed`` data was
+found, ``pytest`` can be configured either to run all of the tests or no tests,
+using the ``--last-failed-no-failures`` option, which takes one of the following values::
+
+    pytest --last-failed-no-failures all   # run all tests (default behavior)
+    pytest --last-failed-no-failures none  # run no tests and exit
+
 The new config.cache object
 --------------------------------

@@ -604,6 +604,36 @@ class TestLastFailed(object):
         result.stdout.fnmatch_lines('*4 passed*')
         assert self.get_cached_last_failed(testdir) == []

+    def test_lastfailed_no_failures_behavior_all_passed(self, testdir):
+        testdir.makepyfile("""
+            def test_1():
+                assert True
+            def test_2():
+                assert True
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines(["*2 passed*"])
+        result = testdir.runpytest("--lf")
+        result.stdout.fnmatch_lines(["*2 passed*"])
+        result = testdir.runpytest("--lf", "--lfnf", "all")
+        result.stdout.fnmatch_lines(["*2 passed*"])
+        result = testdir.runpytest("--lf", "--lfnf", "none")
+        result.stdout.fnmatch_lines(["*2 desel*"])
+
+    def test_lastfailed_no_failures_behavior_empty_cache(self, testdir):
+        testdir.makepyfile("""
+            def test_1():
+                assert True
+            def test_2():
+                assert False
+        """)
+        result = testdir.runpytest("--lf", "--cache-clear")
+        result.stdout.fnmatch_lines(["*1 failed*1 passed*"])
+        result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "all")
+        result.stdout.fnmatch_lines(["*1 failed*1 passed*"])
+        result = testdir.runpytest("--lf", "--cache-clear", "--lfnf", "none")
+        result.stdout.fnmatch_lines(["*2 desel*"])


 class TestNewFirst(object):
     def test_newfirst_usecase(self, testdir):