2015-09-26 20:30:16 +08:00
|
|
|
import pytest
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def stepwise_testdir(testdir):
    """A testdir with a small suite whose failures are toggled by CLI flags.

    Rather than rewriting the test files between runs, a conftest adds
    ``--fail`` and ``--fail-last`` options that control whether the failing
    tests in ``test_a.py`` actually fail.
    """
    testdir.makeconftest(
        """
def pytest_addoption(parser):
    group = parser.getgroup('general')
    group.addoption('--fail', action='store_true', dest='fail')
    group.addoption('--fail-last', action='store_true', dest='fail_last')
"""
    )

    # Write both test modules in one call; test_b exists so a later run can
    # point at a file where the previously recorded failure does not exist.
    testdir.makepyfile(
        test_a="""
def test_success_before_fail():
    assert 1

def test_fail_on_flag(request):
    assert not request.config.getvalue('fail')

def test_success_after_fail():
    assert 1

def test_fail_last_on_flag(request):
    assert not request.config.getvalue('fail_last')

def test_success_after_last_fail():
    assert 1
""",
        test_b="""
def test_success():
    assert 1
""",
    )

    # Customize the cache directory so we don't use tox's cache directory,
    # which made the tests in this module flaky.
    testdir.makeini(
        """
[pytest]
cache_dir = .cache
"""
    )

    return testdir
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def error_testdir(testdir):
    """A testdir whose first test errors out via a nonexistent fixture."""
    source = """
def test_error(nonexisting_fixture):
    assert 1

def test_success_after_fail():
    assert 1
"""
    testdir.makepyfile(test_a=source)
    return testdir
|
|
|
|
|
|
|
|
|
|
|
|
@pytest.fixture
def broken_testdir(testdir):
    """A testdir with one collectable test file and one with a syntax error."""
    files = {
        "working_testfile": "def test_proper(): assert 1",
        "broken_testfile": "foobar",
    }
    testdir.makepyfile(**files)
    return testdir
|
|
|
|
|
|
|
|
|
2020-06-26 20:50:19 +08:00
|
|
|
def _strip_resource_warnings(lines):
|
|
|
|
# Strip unreliable ResourceWarnings, so no-output assertions on stderr can work.
|
|
|
|
# (https://github.com/pytest-dev/pytest/issues/5088)
|
|
|
|
return [
|
|
|
|
x
|
|
|
|
for x in lines
|
|
|
|
if not x.startswith(("Exception ignored in:", "ResourceWarning"))
|
|
|
|
]
|
|
|
|
|
|
|
|
|
2015-09-26 20:30:16 +08:00
|
|
|
def test_run_without_stepwise(stepwise_testdir):
    """Without --stepwise, the run continues past the failing test."""
    result = stepwise_testdir.runpytest("-v", "--strict-markers", "--fail")

    # All three tests must appear, in order, despite the middle one failing.
    result.stdout.fnmatch_lines(
        [
            "*test_success_before_fail PASSED*",
            "*test_fail_on_flag FAILED*",
            "*test_success_after_fail PASSED*",
        ]
    )
|
2015-09-26 20:30:16 +08:00
|
|
|
|
|
|
|
|
|
|
|
def test_fail_and_continue_with_stepwise(stepwise_testdir):
    """--stepwise stops at the first failure and resumes from it next run."""
    # First run: the second test fails, so the session must stop right there.
    result = stepwise_testdir.runpytest(
        "-v", "--strict-markers", "--stepwise", "--fail"
    )
    assert _strip_resource_warnings(result.stderr.lines) == []

    first_output = result.stdout.str()
    assert "test_success_before_fail PASSED" in first_output
    assert "test_fail_on_flag FAILED" in first_output
    assert "test_success_after_fail" not in first_output

    # "Fix" the failing test (drop --fail) and run again: the session should
    # start at the previously failed test and then continue past it.
    result = stepwise_testdir.runpytest("-v", "--strict-markers", "--stepwise")
    assert _strip_resource_warnings(result.stderr.lines) == []

    second_output = result.stdout.str()
    assert "test_success_before_fail" not in second_output
    assert "test_fail_on_flag PASSED" in second_output
    assert "test_success_after_fail PASSED" in second_output
|
2015-09-26 20:30:16 +08:00
|
|
|
|
|
|
|
|
|
|
|
def test_run_with_skip_option(stepwise_testdir):
    """--stepwise-skip ignores the first failure but stops at the second one."""
    args = [
        "-v",
        "--strict-markers",
        "--stepwise",
        "--stepwise-skip",
        "--fail",
        "--fail-last",
    ]
    result = stepwise_testdir.runpytest(*args)
    assert _strip_resource_warnings(result.stderr.lines) == []

    stdout = result.stdout.str()
    # The first failure is ignored; the second failure stops the test run.
    assert "test_fail_on_flag FAILED" in stdout
    assert "test_success_after_fail PASSED" in stdout
    assert "test_fail_last_on_flag FAILED" in stdout
    assert "test_success_after_last_fail" not in stdout
|
2015-09-26 20:30:16 +08:00
|
|
|
|
|
|
|
|
|
|
|
def test_fail_on_errors(error_testdir):
    """A test *error* (missing fixture) also stops a --stepwise session."""
    result = error_testdir.runpytest("-v", "--strict-markers", "--stepwise")
    assert _strip_resource_warnings(result.stderr.lines) == []

    output = result.stdout.str()
    assert "test_error ERROR" in output
    assert "test_success_after_fail" not in output
|
2015-09-26 20:30:16 +08:00
|
|
|
|
|
|
|
|
|
|
|
def test_change_testfile(stepwise_testdir):
    """Switching test files restarts the run from the new file's beginning.

    The failure recorded for test_a.py does not exist in test_b.py, so the
    second invocation must not try to resume from it.
    """
    result = stepwise_testdir.runpytest(
        "-v", "--strict-markers", "--stepwise", "--fail", "test_a.py"
    )
    assert _strip_resource_warnings(result.stderr.lines) == []
    assert "test_fail_on_flag FAILED" in result.stdout.str()

    # Run against the other file; its only test must execute (and pass).
    result = stepwise_testdir.runpytest(
        "-v", "--strict-markers", "--stepwise", "test_b.py"
    )
    assert _strip_resource_warnings(result.stderr.lines) == []
    assert "test_success PASSED" in result.stdout.str()
|
2015-09-26 20:30:16 +08:00
|
|
|
|
|
|
|
|
2019-06-14 03:45:27 +08:00
|
|
|
@pytest.mark.parametrize("broken_first", [True, False])
def test_stop_on_collection_errors(broken_testdir, broken_first):
    """Stop during collection errors. Broken test first or broken test last
    actually surfaced a bug (#5444), so we test both situations."""
    ordered = ["working_testfile.py", "broken_testfile.py"]
    if broken_first:
        ordered = ordered[::-1]

    result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *ordered)
    result.stdout.fnmatch_lines("*error during collection*")
|
2019-07-05 07:06:50 +08:00
|
|
|
|
|
|
|
|
2020-01-12 06:04:43 +08:00
|
|
|
def test_xfail_handling(testdir, monkeypatch):
    """Plain xfail is ignored by --stepwise; strict xfail interrupts the session.

    (#5547)
    """
    # The test module is rewritten between runs, so disable bytecode caching
    # to avoid picking up a stale .pyc.
    monkeypatch.setattr("sys.dont_write_bytecode", True)

    contents = """
        import pytest
        def test_a(): pass

        @pytest.mark.xfail(strict={strict})
        def test_b(): assert {assert_value}

        def test_c(): pass
        def test_d(): pass
    """

    # Each scenario: (assert_value, strict, expected output lines).
    scenarios = [
        # Non-strict xfail that fails: treated as xfailed, run continues.
        (
            "0",
            "False",
            [
                "*::test_a PASSED *",
                "*::test_b XFAIL *",
                "*::test_c PASSED *",
                "*::test_d PASSED *",
                "* 3 passed, 1 xfailed in *",
            ],
        ),
        # Strict xfail that unexpectedly passes: failure, session interrupted.
        (
            "1",
            "True",
            [
                "*::test_a PASSED *",
                "*::test_b FAILED *",
                "* Interrupted*",
                "* 1 failed, 1 passed in *",
            ],
        ),
        # Strict xfail that fails: xfailed, resumed run continues to the end.
        (
            "0",
            "True",
            [
                "*::test_b XFAIL *",
                "*::test_c PASSED *",
                "*::test_d PASSED *",
                "* 2 passed, 1 deselected, 1 xfailed in *",
            ],
        ),
    ]
    for assert_value, strict, expected_lines in scenarios:
        testdir.makepyfile(contents.format(assert_value=assert_value, strict=strict))
        result = testdir.runpytest("--sw", "-v")
        result.stdout.fnmatch_lines(expected_lines)
|