#7942 refactor stepwise tests to utilize pytester
parent 6cddeb8cb3
commit c58abf7ad1
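The diff below applies one pattern throughout: the legacy testdir fixture is replaced by the typed pytester fixture (returning _pytest.pytester.Pytester), which exposes the same helpers (makepyfile, makeconftest, makeini, runpytest, ...) with type annotations. For orientation, here is a minimal sketch of the target style, assuming the pytester plugin is enabled; the test and file names in it are illustrative only and are not part of this commit.

# Illustrative sketch only (not part of this commit): the pytester-based
# style the stepwise tests are converted to. Requires the pytester plugin,
# e.g. pytest_plugins = ["pytester"] in conftest.py.
from _pytest.pytester import Pytester


def test_example(pytester: Pytester) -> None:
    # Same helpers as the old testdir fixture, but fully type-annotated.
    pytester.makepyfile(
        """
        def test_ok():
            assert True
        """
    )
    result = pytester.runpytest("-v")
    result.assert_outcomes(passed=1)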
@@ -1,11 +1,13 @@
 import pytest
+from _pytest.monkeypatch import MonkeyPatch
+from _pytest.pytester import Pytester
 
 
 @pytest.fixture
-def stepwise_testdir(testdir):
+def stepwise_pytester(pytester: Pytester) -> Pytester:
     # Rather than having to modify our testfile between tests, we introduce
     # a flag for whether or not the second test should fail.
-    testdir.makeconftest(
+    pytester.makeconftest(
         """
 def pytest_addoption(parser):
     group = parser.getgroup('general')
@@ -15,7 +17,7 @@ def pytest_addoption(parser):
     )
 
     # Create a simple test suite.
-    testdir.makepyfile(
+    pytester.makepyfile(
         test_a="""
 def test_success_before_fail():
     assert 1
@@ -34,7 +36,7 @@ def test_success_after_last_fail():
 """
     )
 
-    testdir.makepyfile(
+    pytester.makepyfile(
         test_b="""
 def test_success():
     assert 1
@@ -42,19 +44,19 @@ def test_success():
     )
 
     # customize cache directory so we don't use the tox's cache directory, which makes tests in this module flaky
-    testdir.makeini(
+    pytester.makeini(
         """
         [pytest]
         cache_dir = .cache
     """
     )
 
-    return testdir
+    return pytester
 
 
 @pytest.fixture
-def error_testdir(testdir):
-    testdir.makepyfile(
+def error_pytester(pytester: Pytester) -> Pytester:
+    pytester.makepyfile(
         test_a="""
 def test_error(nonexisting_fixture):
     assert 1
@@ -64,15 +66,15 @@ def test_success_after_fail():
 """
     )
 
-    return testdir
+    return pytester
 
 
 @pytest.fixture
-def broken_testdir(testdir):
-    testdir.makepyfile(
+def broken_pytester(pytester: Pytester) -> Pytester:
+    pytester.makepyfile(
         working_testfile="def test_proper(): assert 1", broken_testfile="foobar"
     )
-    return testdir
+    return pytester
 
 
 def _strip_resource_warnings(lines):
@@ -85,16 +87,15 @@ def _strip_resource_warnings(lines):
     ]
 
 
-def test_run_without_stepwise(stepwise_testdir):
-    result = stepwise_testdir.runpytest("-v", "--strict-markers", "--fail")
-
+def test_run_without_stepwise(stepwise_pytester: Pytester) -> None:
+    result = stepwise_pytester.runpytest("-v", "--strict-markers", "--fail")
     result.stdout.fnmatch_lines(["*test_success_before_fail PASSED*"])
     result.stdout.fnmatch_lines(["*test_fail_on_flag FAILED*"])
     result.stdout.fnmatch_lines(["*test_success_after_fail PASSED*"])
 
 
-def test_stepwise_output_summary(testdir):
-    testdir.makepyfile(
+def test_stepwise_output_summary(pytester: Pytester) -> None:
+    pytester.makepyfile(
         """
         import pytest
         @pytest.mark.parametrize("expected", [True, True, True, True, False])
@@ -102,17 +103,17 @@ def test_stepwise_output_summary(testdir):
             assert expected
         """
     )
-    result = testdir.runpytest("-v", "--stepwise")
+    result = pytester.runpytest("-v", "--stepwise")
     result.stdout.fnmatch_lines(["stepwise: no previously failed tests, not skipping."])
-    result = testdir.runpytest("-v", "--stepwise")
+    result = pytester.runpytest("-v", "--stepwise")
     result.stdout.fnmatch_lines(
         ["stepwise: skipping 4 already passed items.", "*1 failed, 4 deselected*"]
     )
 
 
-def test_fail_and_continue_with_stepwise(stepwise_testdir):
+def test_fail_and_continue_with_stepwise(stepwise_pytester: Pytester) -> None:
     # Run the tests with a failing second test.
-    result = stepwise_testdir.runpytest(
+    result = stepwise_pytester.runpytest(
         "-v", "--strict-markers", "--stepwise", "--fail"
     )
     assert _strip_resource_warnings(result.stderr.lines) == []
@@ -124,7 +125,7 @@ def test_fail_and_continue_with_stepwise(stepwise_testdir):
     assert "test_success_after_fail" not in stdout
 
     # "Fix" the test that failed in the last run and run it again.
-    result = stepwise_testdir.runpytest("-v", "--strict-markers", "--stepwise")
+    result = stepwise_pytester.runpytest("-v", "--strict-markers", "--stepwise")
     assert _strip_resource_warnings(result.stderr.lines) == []
 
     stdout = result.stdout.str()
@@ -135,8 +136,8 @@ def test_fail_and_continue_with_stepwise(stepwise_testdir):
 
 
 @pytest.mark.parametrize("stepwise_skip", ["--stepwise-skip", "--sw-skip"])
-def test_run_with_skip_option(stepwise_testdir, stepwise_skip):
-    result = stepwise_testdir.runpytest(
+def test_run_with_skip_option(stepwise_pytester: Pytester, stepwise_skip: str) -> None:
+    result = stepwise_pytester.runpytest(
         "-v", "--strict-markers", "--stepwise", stepwise_skip, "--fail", "--fail-last",
     )
     assert _strip_resource_warnings(result.stderr.lines) == []
@@ -149,8 +150,8 @@ def test_run_with_skip_option(stepwise_testdir, stepwise_skip):
     assert "test_success_after_last_fail" not in stdout
 
 
-def test_fail_on_errors(error_testdir):
-    result = error_testdir.runpytest("-v", "--strict-markers", "--stepwise")
+def test_fail_on_errors(error_pytester: Pytester) -> None:
+    result = error_pytester.runpytest("-v", "--strict-markers", "--stepwise")
 
     assert _strip_resource_warnings(result.stderr.lines) == []
     stdout = result.stdout.str()
@@ -159,8 +160,8 @@ def test_fail_on_errors(error_testdir):
     assert "test_success_after_fail" not in stdout
 
 
-def test_change_testfile(stepwise_testdir):
-    result = stepwise_testdir.runpytest(
+def test_change_testfile(stepwise_pytester: Pytester) -> None:
+    result = stepwise_pytester.runpytest(
         "-v", "--strict-markers", "--stepwise", "--fail", "test_a.py"
     )
     assert _strip_resource_warnings(result.stderr.lines) == []
@@ -170,7 +171,7 @@ def test_change_testfile(stepwise_testdir):
 
     # Make sure the second test run starts from the beginning, since the
     # test to continue from does not exist in testfile_b.
-    result = stepwise_testdir.runpytest(
+    result = stepwise_pytester.runpytest(
         "-v", "--strict-markers", "--stepwise", "test_b.py"
     )
     assert _strip_resource_warnings(result.stderr.lines) == []
@@ -180,17 +181,19 @@ def test_change_testfile(stepwise_testdir):
 
 
 @pytest.mark.parametrize("broken_first", [True, False])
-def test_stop_on_collection_errors(broken_testdir, broken_first):
+def test_stop_on_collection_errors(
+    broken_pytester: Pytester, broken_first: bool
+) -> None:
     """Stop during collection errors. Broken test first or broken test last
     actually surfaced a bug (#5444), so we test both situations."""
     files = ["working_testfile.py", "broken_testfile.py"]
     if broken_first:
         files.reverse()
-    result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
+    result = broken_pytester.runpytest("-v", "--strict-markers", "--stepwise", *files)
     result.stdout.fnmatch_lines("*error during collection*")
 
 
-def test_xfail_handling(testdir, monkeypatch):
+def test_xfail_handling(pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
     """Ensure normal xfail is ignored, and strict xfail interrupts the session in sw mode
 
     (#5547)
@@ -207,8 +210,8 @@ def test_xfail_handling(testdir, monkeypatch):
         def test_c(): pass
         def test_d(): pass
     """
-    testdir.makepyfile(contents.format(assert_value="0", strict="False"))
-    result = testdir.runpytest("--sw", "-v")
+    pytester.makepyfile(contents.format(assert_value="0", strict="False"))
+    result = pytester.runpytest("--sw", "-v")
     result.stdout.fnmatch_lines(
         [
             "*::test_a PASSED *",
@@ -219,8 +222,8 @@ def test_xfail_handling(testdir, monkeypatch):
         ]
     )
 
-    testdir.makepyfile(contents.format(assert_value="1", strict="True"))
-    result = testdir.runpytest("--sw", "-v")
+    pytester.makepyfile(contents.format(assert_value="1", strict="True"))
+    result = pytester.runpytest("--sw", "-v")
     result.stdout.fnmatch_lines(
         [
             "*::test_a PASSED *",
@@ -230,8 +233,8 @@ def test_xfail_handling(testdir, monkeypatch):
         ]
     )
 
-    testdir.makepyfile(contents.format(assert_value="0", strict="True"))
-    result = testdir.runpytest("--sw", "-v")
+    pytester.makepyfile(contents.format(assert_value="0", strict="True"))
+    result = pytester.runpytest("--sw", "-v")
     result.stdout.fnmatch_lines(
         [
             "*::test_b XFAIL *",