Restructured project.
This commit is contained in:
parent
58b6e8616c
commit
661055105c
|
@ -0,0 +1 @@
|
||||||
|
# Package version, exposed for setuptools and runtime introspection.
__version__ = '0.4'
|
|
@ -0,0 +1,4 @@
|
||||||
|
# Compatibility shim: newer pytest versions ship the cache plugin in core
# as _pytest.cacheprovider; older installations rely on the external
# pytest-cache package.  Import whichever is available and re-export Cache.
try:
    from _pytest.cacheprovider import Cache
except ImportError:
    from pytest_cache import Cache
|
|
@ -0,0 +1,90 @@
|
||||||
|
import pytest
|
||||||
|
from .compat import Cache
|
||||||
|
|
||||||
|
|
||||||
|
def pytest_addoption(parser):
    """Register the stepwise command line options.

    ``--sw`` and ``--stepwise`` share the same destination, so either flag
    activates the plugin; ``--skip`` tolerates one failure before stopping.
    """
    group = parser.getgroup('general')
    option_specs = (
        ('--sw', 'stepwise',
         'alias for --stepwise'),
        ('--stepwise', 'stepwise',
         'exit on test fail and continue from last failing test next time'),
        ('--skip', 'skip',
         'ignore the first failing test but stop on the next failing test'),
    )
    for flag, dest, help_text in option_specs:
        group.addoption(flag, action='store_true', dest=dest, help=help_text)
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
    """Attach a cache to the config and register the stepwise plugin.

    Runs tryfirst so the cache exists before other plugins configure.
    """
    config.cache = Cache(config)
    plugin = StepwisePlugin(config)
    config.pluginmanager.register(plugin, 'stepwiseplugin')
|
||||||
|
|
||||||
|
|
||||||
|
class StepwisePlugin:
    """Stop the test session at the first failing test and, on the next run,
    resume from that test, deselecting everything that already passed.

    Failing node ids are persisted between runs through the pytest cache
    under the key ``cache/stepwise``.  The cache stores JSON, so the ids are
    written as a list and loaded back into a set for O(1) membership tests —
    handing a raw ``set`` to ``cache.set`` would raise ``TypeError`` from the
    JSON serializer.
    """

    def __init__(self, config):
        self.config = config
        # --stepwise / --sw turns the plugin on.
        self.active = config.getvalue('stepwise')
        self.session = None

        if self.active:
            # Accept either a cached JSON list (normal case) or any other
            # iterable; keep a set internally.
            self.lastfailed = set(config.cache.get('cache/stepwise', []))
            # --skip: ignore the first failure but stop on the next one.
            self.skip = config.getvalue('skip')

    def pytest_sessionstart(self, session):
        # Remember the session so later hooks can set session.shouldstop.
        self.session = session

    def pytest_collection_modifyitems(self, session, config, items):
        """Deselect every collected test that ran before the last failure."""
        if not self.active or not self.lastfailed:
            return

        already_passed = []
        found = False

        # Make a list of all tests that ran before the last failing one.
        for item in items:
            if item.nodeid in self.lastfailed:
                found = True
                break
            else:
                already_passed.append(item)

        # If the previously failed test was not found among the test items,
        # do not skip any tests.
        if not found:
            already_passed = []

        for item in already_passed:
            items.remove(item)

        config.hook.pytest_deselected(items=already_passed)

    def pytest_collectreport(self, report):
        # A collection error would silently drop tests; stop the run instead.
        if self.active and report.failed:
            self.session.shouldstop = 'Error when collecting test, stopping test execution.'

    def pytest_runtest_logreport(self, report):
        """Record failures and interrupt the session on the first (or, with
        --skip, the second) failing test."""
        # Skip this hook if plugin is not active or the test is xfailed.
        if not self.active or 'xfail' in report.keywords:
            return

        if report.failed:
            if self.skip:
                # Remove test from the failed ones (if it exists) and unset
                # the skip option to make sure the following tests will not
                # be skipped.
                self.lastfailed.discard(report.nodeid)
                self.skip = False
            else:
                # Mark test as the last failing and interrupt the test session.
                self.lastfailed.add(report.nodeid)
                self.session.shouldstop = 'Test failed, continuing from this test next run.'

        else:
            # If the test was actually run and did pass.
            if report.when == 'call':
                # Remove test from the failed ones, if exists.
                self.lastfailed.discard(report.nodeid)

    def pytest_sessionfinish(self, session):
        if self.active:
            # Persist as a sorted list: sets are not JSON-serializable and
            # would crash the cache writer.
            self.config.cache.set('cache/stepwise', sorted(self.lastfailed))
        else:
            # Clear the list of failing tests if the plugin is not active.
            self.config.cache.set('cache/stepwise', [])
|
|
@ -0,0 +1 @@
|
||||||
|
# Enable pytest's plugin-testing fixtures (testdir) for this test suite.
pytest_plugins = 'pytester'
|
|
@ -0,0 +1,136 @@
|
||||||
|
import pytest
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def stepwise_testdir(testdir):
    """Return a testdir holding a small suite whose failures are toggled by
    the --fail and --fail-last command line flags."""
    # Rather than having to modify our testfile between tests, we introduce
    # a flag for whether or not the second test should fail.
    testdir.makeconftest('''
def pytest_addoption(parser):
    group = parser.getgroup('general')
    group.addoption('--fail', action='store_true', dest='fail')
    group.addoption('--fail-last', action='store_true', dest='fail_last')
''')

    # Create a simple test suite.
    testdir.makepyfile(test_stepwise='''
def test_success_before_fail():
    assert 1

def test_fail_on_flag(request):
    assert not request.config.getvalue('fail')

def test_success_after_fail():
    assert 1

def test_fail_last_on_flag(request):
    assert not request.config.getvalue('fail_last')

def test_success_after_last_fail():
    assert 1
''')

    # A second file, used to verify behaviour when the remembered failing
    # test is not among the collected items.
    testdir.makepyfile(testfile_b='''
def test_success():
    assert 1
''')

    return testdir
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def error_testdir(testdir):
    """Return a testdir whose first test errors (missing fixture) rather
    than fails, so error handling can be exercised."""
    testdir.makepyfile(test_stepwise='''
def test_error(nonexisting_fixture):
    assert 1

def test_success_after_fail():
    assert 1
''')

    return testdir
|
||||||
|
|
||||||
|
|
||||||
|
@pytest.fixture
def broken_testdir(testdir):
    """Return a testdir with one importable test file and one file that is
    not valid at collection time ('foobar' raises a NameError on import)."""
    testdir.makepyfile(working_testfile='def test_proper(): assert 1', broken_testfile='foobar')
    return testdir
|
||||||
|
|
||||||
|
|
||||||
|
def test_run_without_stepwise(stepwise_testdir):
    """Without --stepwise the whole suite runs, failures included."""
    res = stepwise_testdir.runpytest('-v', '--strict', '--fail')

    assert not res.errlines
    # Every test around the failure still executed.
    for pattern in ('*test_success_before_fail PASSED*',
                    '*test_fail_on_flag FAILED*',
                    '*test_success_after_fail PASSED*'):
        res.stdout.fnmatch_lines([pattern])
|
||||||
|
|
||||||
|
|
||||||
|
def test_fail_and_continue_with_stepwise(stepwise_testdir):
    """A failing test halts the run; the next run resumes from it."""
    # First run: the second test fails and execution stops right there.
    res = stepwise_testdir.runpytest('-v', '--strict', '--stepwise', '--fail')
    assert not res.errlines

    out = res.stdout.str()
    assert 'test_success_before_fail PASSED' in out
    assert 'test_fail_on_flag FAILED' in out
    assert 'test_success_after_fail' not in out

    # "Fix" the failure by dropping --fail and run again: the session
    # resumes at the previously failing test and continues past it.
    res = stepwise_testdir.runpytest('-v', '--strict', '--stepwise')
    assert not res.errlines

    out = res.stdout.str()
    assert 'test_success_before_fail' not in out
    assert 'test_fail_on_flag PASSED' in out
    assert 'test_success_after_fail PASSED' in out
|
||||||
|
|
||||||
|
|
||||||
|
def test_run_with_skip_option(stepwise_testdir):
    """--skip tolerates the first failure but stops on the second."""
    res = stepwise_testdir.runpytest('-v', '--strict', '--stepwise', '--skip',
                                     '--fail', '--fail-last')
    assert not res.errlines

    out = res.stdout.str()
    # The first failure is ignored and the run continues; the second
    # failure halts the session.
    assert 'test_fail_on_flag FAILED' in out
    assert 'test_success_after_fail PASSED' in out
    assert 'test_fail_last_on_flag FAILED' in out
    assert 'test_success_after_last_fail' not in out
|
||||||
|
|
||||||
|
|
||||||
|
def test_fail_on_errors(error_testdir):
    """A test that errors (missing fixture) also halts the session."""
    res = error_testdir.runpytest('-v', '--strict', '--stepwise')
    assert not res.errlines

    out = res.stdout.str()
    assert 'test_error ERROR' in out
    assert 'test_success_after_fail' not in out
|
||||||
|
|
||||||
|
|
||||||
|
def test_change_testfile(stepwise_testdir):
    """When the remembered failure is absent from the selected file, the
    run starts from the beginning instead of deselecting everything."""
    res = stepwise_testdir.runpytest('-v', '--strict', '--stepwise', '--fail',
                                     'test_stepwise.py')
    assert not res.errlines
    assert 'test_fail_on_flag FAILED' in res.stdout.str()

    # Select a different file: the cached nodeid is not collected there,
    # so nothing is deselected and its suite runs from the start.
    res = stepwise_testdir.runpytest('-v', '--strict', '--stepwise',
                                     'testfile_b.py')
    assert not res.errlines
    assert 'test_success PASSED' in res.stdout.str()
|
||||||
|
|
||||||
|
|
||||||
|
def test_stop_on_collection_errors(broken_testdir):
    """A collection error aborts the run with an explanatory message."""
    res = broken_testdir.runpytest('-v', '--strict', '--stepwise',
                                   'working_testfile.py', 'broken_testfile.py')

    assert 'Error when collecting test' in res.stdout.str()
|
Loading…
Reference in New Issue