linting
parent 4f652c9045
commit e773c8ceda
@@ -3,28 +3,37 @@ import pytest


 def pytest_addoption(parser):
-    group = parser.getgroup('general')
-    group.addoption('--sw', '--stepwise', action='store_true', dest='stepwise',
-                    help='exit on test fail and continue from last failing test next time')
-    group.addoption('--stepwise-skip', action='store_true', dest='stepwise_skip',
-                    help='ignore the first failing test but stop on the next failing test')
+    group = parser.getgroup("general")
+    group.addoption(
+        "--sw",
+        "--stepwise",
+        action="store_true",
+        dest="stepwise",
+        help="exit on test fail and continue from last failing test next time",
+    )
+    group.addoption(
+        "--stepwise-skip",
+        action="store_true",
+        dest="stepwise_skip",
+        help="ignore the first failing test but stop on the next failing test",
+    )


 @pytest.hookimpl(tryfirst=True)
 def pytest_configure(config):
     config.cache = Cache.for_config(config)
-    config.pluginmanager.register(StepwisePlugin(config), 'stepwiseplugin')
+    config.pluginmanager.register(StepwisePlugin(config), "stepwiseplugin")


 class StepwisePlugin:
     def __init__(self, config):
         self.config = config
-        self.active = config.getvalue('stepwise')
+        self.active = config.getvalue("stepwise")
         self.session = None

         if self.active:
-            self.lastfailed = config.cache.get('cache/stepwise', None)
-            self.skip = config.getvalue('stepwise_skip')
+            self.lastfailed = config.cache.get("cache/stepwise", None)
+            self.skip = config.getvalue("stepwise_skip")

     def pytest_sessionstart(self, session):
         self.session = session
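Aside (not part of the commit): the two options registered above are plain boolean flags stored under dest='stepwise' and dest='stepwise_skip'. As a rough sketch, a conftest.py could read them back through the standard Config.getoption API; the hook below and its print calls are purely illustrative.

# Illustrative only: reading the flags that pytest_addoption registers above.
def pytest_configure(config):
    if config.getoption("stepwise"):
        print("stepwise mode enabled via --sw/--stepwise")
    if config.getoption("stepwise_skip"):
        print("first failing test will be ignored via --stepwise-skip")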
@@ -56,11 +65,13 @@ class StepwisePlugin:

     def pytest_collectreport(self, report):
         if self.active and report.failed:
-            self.session.shouldstop = 'Error when collecting test, stopping test execution.'
+            self.session.shouldstop = (
+                "Error when collecting test, stopping test execution."
+            )

     def pytest_runtest_logreport(self, report):
         # Skip this hook if plugin is not active or the test is xfailed.
-        if not self.active or 'xfail' in report.keywords:
+        if not self.active or "xfail" in report.keywords:
             return

         if report.failed:
@@ -74,18 +85,20 @@ class StepwisePlugin:
             else:
                 # Mark test as the last failing and interrupt the test session.
                 self.lastfailed = report.nodeid
-                self.session.shouldstop = 'Test failed, continuing from this test next run.'
+                self.session.shouldstop = (
+                    "Test failed, continuing from this test next run."
+                )

         else:
             # If the test was actually run and did pass.
-            if report.when == 'call':
+            if report.when == "call":
                 # Remove test from the failed ones, if exists.
                 if report.nodeid == self.lastfailed:
                     self.lastfailed = None

     def pytest_sessionfinish(self, session):
         if self.active:
-            self.config.cache.set('cache/stepwise', self.lastfailed)
+            self.config.cache.set("cache/stepwise", self.lastfailed)
         else:
             # Clear the list of failing tests if the plugin is not active.
-            self.config.cache.set('cache/stepwise', [])
+            self.config.cache.set("cache/stepwise", [])
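Aside (not part of the commit): persistence between runs is just a round-trip through pytest's config.cache, which stores JSON-serialisable values across invocations. A minimal sketch of that get/set pattern follows; the key "cache/stepwise_demo" is made up so it does not collide with the plugin's own "cache/stepwise" entry.

# Illustrative only: the config.cache round-trip the plugin relies on.
def pytest_configure(config):
    runs = config.cache.get("cache/stepwise_demo", 0)  # default on a fresh cache
    config.cache.set("cache/stepwise_demo", runs + 1)  # persisted for the next run
    print("pytest run number", runs + 1, "with this cache")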
@@ -5,15 +5,18 @@ import pytest
 def stepwise_testdir(testdir):
     # Rather than having to modify our testfile between tests, we introduce
     # a flag for wether or not the second test should fail.
-    testdir.makeconftest('''
+    testdir.makeconftest(
+        """
 def pytest_addoption(parser):
     group = parser.getgroup('general')
     group.addoption('--fail', action='store_true', dest='fail')
     group.addoption('--fail-last', action='store_true', dest='fail_last')
-''')
+"""
+    )

     # Create a simple test suite.
-    testdir.makepyfile(test_a='''
+    testdir.makepyfile(
+        test_a="""
 def test_success_before_fail():
     assert 1

@@ -28,111 +31,121 @@ def test_fail_last_on_flag(request):

 def test_success_after_last_fail():
     assert 1
-''')
+"""
+    )

-    testdir.makepyfile(test_b='''
+    testdir.makepyfile(
+        test_b="""
 def test_success():
     assert 1
-''')
+"""
+    )

     return testdir


 @pytest.fixture
 def error_testdir(testdir):
-    testdir.makepyfile(test_a='''
+    testdir.makepyfile(
+        test_a="""
 def test_error(nonexisting_fixture):
     assert 1

 def test_success_after_fail():
     assert 1
-''')
+"""
+    )

     return testdir


 @pytest.fixture
 def broken_testdir(testdir):
-    testdir.makepyfile(working_testfile='def test_proper(): assert 1', broken_testfile='foobar')
+    testdir.makepyfile(
+        working_testfile="def test_proper(): assert 1", broken_testfile="foobar"
+    )
     return testdir

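Aside (not part of the commit): the fixtures above build on pytest's testdir fixture, which writes throwaway test files and runs pytest against them in an isolated directory. A minimal sketch of that pattern, assuming the pytester plugin is enabled (pytest_plugins = "pytester"); the test name below is made up.

# Illustrative only: the testdir pattern the fixtures above are built on.
def test_testdir_pattern(testdir):
    testdir.makepyfile(
        """
        def test_example():
            assert 1
        """
    )
    result = testdir.runpytest("-v")
    result.assert_outcomes(passed=1)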
 def test_run_without_stepwise(stepwise_testdir):
-    result = stepwise_testdir.runpytest('-v', '--strict', '--fail')
+    result = stepwise_testdir.runpytest("-v", "--strict", "--fail")

-    result.stdout.fnmatch_lines(['*test_success_before_fail PASSED*'])
-    result.stdout.fnmatch_lines(['*test_fail_on_flag FAILED*'])
-    result.stdout.fnmatch_lines(['*test_success_after_fail PASSED*'])
+    result.stdout.fnmatch_lines(["*test_success_before_fail PASSED*"])
+    result.stdout.fnmatch_lines(["*test_fail_on_flag FAILED*"])
+    result.stdout.fnmatch_lines(["*test_success_after_fail PASSED*"])


 def test_fail_and_continue_with_stepwise(stepwise_testdir):
     # Run the tests with a failing second test.
-    result = stepwise_testdir.runpytest('-v', '--strict', '--stepwise', '--fail')
+    result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise", "--fail")
     assert not result.stderr.str()

     stdout = result.stdout.str()
     # Make sure we stop after first failing test.
-    assert 'test_success_before_fail PASSED' in stdout
-    assert 'test_fail_on_flag FAILED' in stdout
-    assert 'test_success_after_fail' not in stdout
+    assert "test_success_before_fail PASSED" in stdout
+    assert "test_fail_on_flag FAILED" in stdout
+    assert "test_success_after_fail" not in stdout

     # "Fix" the test that failed in the last run and run it again.
-    result = stepwise_testdir.runpytest('-v', '--strict', '--stepwise')
+    result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise")
     assert not result.stderr.str()

     stdout = result.stdout.str()
     # Make sure the latest failing test runs and then continues.
-    assert 'test_success_before_fail' not in stdout
-    assert 'test_fail_on_flag PASSED' in stdout
-    assert 'test_success_after_fail PASSED' in stdout
+    assert "test_success_before_fail" not in stdout
+    assert "test_fail_on_flag PASSED" in stdout
+    assert "test_success_after_fail PASSED" in stdout


 def test_run_with_skip_option(stepwise_testdir):
-    result = stepwise_testdir.runpytest('-v', '--strict', '--stepwise', '--stepwise-skip',
-                                        '--fail', '--fail-last')
+    result = stepwise_testdir.runpytest(
+        "-v", "--strict", "--stepwise", "--stepwise-skip", "--fail", "--fail-last"
+    )
     assert not result.stderr.str()

     stdout = result.stdout.str()
     # Make sure first fail is ignore and second fail stops the test run.
-    assert 'test_fail_on_flag FAILED' in stdout
-    assert 'test_success_after_fail PASSED' in stdout
-    assert 'test_fail_last_on_flag FAILED' in stdout
-    assert 'test_success_after_last_fail' not in stdout
+    assert "test_fail_on_flag FAILED" in stdout
+    assert "test_success_after_fail PASSED" in stdout
+    assert "test_fail_last_on_flag FAILED" in stdout
+    assert "test_success_after_last_fail" not in stdout


 def test_fail_on_errors(error_testdir):
-    result = error_testdir.runpytest('-v', '--strict', '--stepwise')
+    result = error_testdir.runpytest("-v", "--strict", "--stepwise")

     assert not result.stderr.str()
     stdout = result.stdout.str()

-    assert 'test_error ERROR' in stdout
-    assert 'test_success_after_fail' not in stdout
+    assert "test_error ERROR" in stdout
+    assert "test_success_after_fail" not in stdout


 def test_change_testfile(stepwise_testdir):
-    result = stepwise_testdir.runpytest('-v', '--strict', '--stepwise', '--fail',
-                                        'test_a.py')
+    result = stepwise_testdir.runpytest(
+        "-v", "--strict", "--stepwise", "--fail", "test_a.py"
+    )
     assert not result.stderr.str()

     stdout = result.stdout.str()
-    assert 'test_fail_on_flag FAILED' in stdout
+    assert "test_fail_on_flag FAILED" in stdout

     # Make sure the second test run starts from the beginning, since the
     # test to continue from does not exist in testfile_b.
-    result = stepwise_testdir.runpytest('-v', '--strict', '--stepwise',
-                                        'test_b.py')
+    result = stepwise_testdir.runpytest("-v", "--strict", "--stepwise", "test_b.py")
     assert not result.stderr.str()

     stdout = result.stdout.str()
-    assert 'test_success PASSED' in stdout
+    assert "test_success PASSED" in stdout


 def test_stop_on_collection_errors(broken_testdir):
-    result = broken_testdir.runpytest('-v', '--strict', '--stepwise', 'working_testfile.py', 'broken_testfile.py')
+    result = broken_testdir.runpytest(
+        "-v", "--strict", "--stepwise", "working_testfile.py", "broken_testfile.py"
+    )

     stdout = result.stdout.str()
-    if pytest.__version__ < '3.0.0':
-        assert 'Error when collecting test' in stdout
+    if pytest.__version__ < "3.0.0":
+        assert "Error when collecting test" in stdout
     else:
-        assert 'errors during collection' in stdout
+        assert "errors during collection" in stdout
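Aside (not part of the commit): taken together, the tests above pin down one behaviour: with --stepwise the run stops at the first failure, and the next run resumes from that same test. A condensed sketch of that flow, again assuming the testdir/pytester fixture and the stepwise plugin installed; test names are made up.

# Illustrative only: stop at the first failure, resume from it on the next run.
def test_stepwise_resume_flow(testdir):
    testdir.makepyfile(
        """
        def test_one():
            assert True

        def test_two():
            assert False

        def test_three():
            assert True
        """
    )
    first = testdir.runpytest("--stepwise")
    first.assert_outcomes(passed=1, failed=1)  # stops before test_three runs

    second = testdir.runpytest("--stepwise")
    second.assert_outcomes(failed=1)  # resumes at the still-failing test_two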