# mypy: allow-untyped-defs
from pathlib import Path

from _pytest.cacheprovider import Cache
from _pytest.monkeypatch import MonkeyPatch
from _pytest.pytester import Pytester
from _pytest.stepwise import STEPWISE_CACHE_DIR

import pytest


@pytest.fixture
def stepwise_pytester(pytester: Pytester) -> Pytester:
    # Rather than having to modify our testfile between tests, we introduce
    # a flag for whether or not the second test should fail.
    pytester.makeconftest(
        """
def pytest_addoption(parser):
    group = parser.getgroup('general')
    group.addoption('--fail', action='store_true', dest='fail')
    group.addoption('--fail-last', action='store_true', dest='fail_last')
"""
    )

    # Create a simple test suite.
    pytester.makepyfile(
        test_a="""
def test_success_before_fail():
    assert 1

def test_fail_on_flag(request):
    assert not request.config.getvalue('fail')

def test_success_after_fail():
    assert 1

def test_fail_last_on_flag(request):
    assert not request.config.getvalue('fail_last')

def test_success_after_last_fail():
    assert 1
"""
    )

    pytester.makepyfile(
        test_b="""
def test_success():
    assert 1
"""
    )

    # Customize the cache directory so we don't use tox's cache directory,
    # which makes tests in this module flaky.
    pytester.makeini(
        """
        [pytest]
        cache_dir = .cache
        """
    )

    return pytester


@pytest.fixture
def error_pytester(pytester: Pytester) -> Pytester:
    pytester.makepyfile(
        test_a="""
def test_error(nonexisting_fixture):
    assert 1

def test_success_after_fail():
    assert 1
"""
    )

    return pytester


@pytest.fixture
def broken_pytester(pytester: Pytester) -> Pytester:
    pytester.makepyfile(
        working_testfile="def test_proper(): assert 1", broken_testfile="foobar"
    )
    return pytester


def _strip_resource_warnings(lines):
    # Strip unreliable ResourceWarnings, so no-output assertions on stderr can work.
    # (https://github.com/pytest-dev/pytest/issues/5088)
    return [
        x
        for x in lines
        if not x.startswith(("Exception ignored in:", "ResourceWarning"))
    ]


def test_run_without_stepwise(stepwise_pytester: Pytester) -> None:
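    """Sanity check: without --stepwise, a failing test does not stop the run."""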
    result = stepwise_pytester.runpytest("-v", "--strict-markers", "--fail")
    result.stdout.fnmatch_lines(["*test_success_before_fail PASSED*"])
    result.stdout.fnmatch_lines(["*test_fail_on_flag FAILED*"])
    result.stdout.fnmatch_lines(["*test_success_after_fail PASSED*"])


def test_stepwise_output_summary(pytester: Pytester) -> None:
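    """The stepwise plugin reports in the output whether it skipped previously passed tests."""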
    pytester.makepyfile(
        """
        import pytest
        @pytest.mark.parametrize("expected", [True, True, True, True, False])
        def test_data(expected):
            assert expected
        """
    )
    result = pytester.runpytest("-v", "--stepwise")
    result.stdout.fnmatch_lines(["stepwise: no previously failed tests, not skipping."])
    result = pytester.runpytest("-v", "--stepwise")
    result.stdout.fnmatch_lines(
        ["stepwise: skipping 4 already passed items.", "*1 failed, 4 deselected*"]
    )


def test_fail_and_continue_with_stepwise(stepwise_pytester: Pytester) -> None:
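    """--stepwise stops at the first failure and resumes from that test on the next run."""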
    # Run the tests with a failing second test.
    result = stepwise_pytester.runpytest(
        "-v", "--strict-markers", "--stepwise", "--fail"
    )
    assert _strip_resource_warnings(result.stderr.lines) == []

    stdout = result.stdout.str()
    # Make sure we stop after the first failing test.
    assert "test_success_before_fail PASSED" in stdout
    assert "test_fail_on_flag FAILED" in stdout
    assert "test_success_after_fail" not in stdout

    # "Fix" the test that failed in the last run and run it again.
    result = stepwise_pytester.runpytest("-v", "--strict-markers", "--stepwise")
    assert _strip_resource_warnings(result.stderr.lines) == []

    stdout = result.stdout.str()
    # Make sure the latest failing test runs and then continues.
    assert "test_success_before_fail" not in stdout
    assert "test_fail_on_flag PASSED" in stdout
    assert "test_success_after_fail PASSED" in stdout


@pytest.mark.parametrize("stepwise_skip", ["--stepwise-skip", "--sw-skip"])
def test_run_with_skip_option(stepwise_pytester: Pytester, stepwise_skip: str) -> None:
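    """With --stepwise-skip, the first failure is ignored and only the second one stops the run."""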
    result = stepwise_pytester.runpytest(
        "-v",
        "--strict-markers",
        "--stepwise",
        stepwise_skip,
        "--fail",
        "--fail-last",
    )
    assert _strip_resource_warnings(result.stderr.lines) == []

    stdout = result.stdout.str()
    # Make sure the first failure is ignored and the second failure stops the test run.
    assert "test_fail_on_flag FAILED" in stdout
    assert "test_success_after_fail PASSED" in stdout
    assert "test_fail_last_on_flag FAILED" in stdout
    assert "test_success_after_last_fail" not in stdout


def test_fail_on_errors(error_pytester: Pytester) -> None:
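    """Errors (e.g. from a missing fixture) stop the session just like failures."""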
    result = error_pytester.runpytest("-v", "--strict-markers", "--stepwise")

    assert _strip_resource_warnings(result.stderr.lines) == []
    stdout = result.stdout.str()

    assert "test_error ERROR" in stdout
    assert "test_success_after_fail" not in stdout


def test_change_testfile(stepwise_pytester: Pytester) -> None:
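    """Running a testfile that does not contain the previously failed test starts from scratch."""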
    result = stepwise_pytester.runpytest(
        "-v", "--strict-markers", "--stepwise", "--fail", "test_a.py"
    )
    assert _strip_resource_warnings(result.stderr.lines) == []

    stdout = result.stdout.str()
    assert "test_fail_on_flag FAILED" in stdout

    # Make sure the second test run starts from the beginning, since the
    # test to continue from does not exist in test_b.py.
    result = stepwise_pytester.runpytest(
        "-v", "--strict-markers", "--stepwise", "test_b.py"
    )
    assert _strip_resource_warnings(result.stderr.lines) == []

    stdout = result.stdout.str()
    assert "test_success PASSED" in stdout


@pytest.mark.parametrize("broken_first", [True, False])
def test_stop_on_collection_errors(
    broken_pytester: Pytester, broken_first: bool
) -> None:
    """Stop during collection errors. Broken test first or broken test last
    actually surfaced a bug (#5444), so we test both situations."""
    files = ["working_testfile.py", "broken_testfile.py"]
    if broken_first:
        files.reverse()
    result = broken_pytester.runpytest("-v", "--strict-markers", "--stepwise", *files)
    result.stdout.fnmatch_lines("*error during collection*")


def test_xfail_handling(pytester: Pytester, monkeypatch: MonkeyPatch) -> None:
    """Ensure normal xfail is ignored, and strict xfail interrupts the session in sw mode

    (#5547)
    """
    monkeypatch.setattr("sys.dont_write_bytecode", True)

    contents = """
        import pytest
        def test_a(): pass

        @pytest.mark.xfail(strict={strict})
        def test_b(): assert {assert_value}

        def test_c(): pass
        def test_d(): pass
    """
    pytester.makepyfile(contents.format(assert_value="0", strict="False"))
    result = pytester.runpytest("--sw", "-v")
    result.stdout.fnmatch_lines(
        [
            "*::test_a PASSED *",
            "*::test_b XFAIL *",
            "*::test_c PASSED *",
            "*::test_d PASSED *",
            "* 3 passed, 1 xfailed in *",
        ]
    )

    pytester.makepyfile(contents.format(assert_value="1", strict="True"))
    result = pytester.runpytest("--sw", "-v")
    result.stdout.fnmatch_lines(
        [
            "*::test_a PASSED *",
            "*::test_b FAILED *",
            "* Interrupted*",
            "* 1 failed, 1 passed in *",
        ]
    )

    pytester.makepyfile(contents.format(assert_value="0", strict="True"))
    result = pytester.runpytest("--sw", "-v")
    result.stdout.fnmatch_lines(
        [
            "*::test_b XFAIL *",
            "*::test_c PASSED *",
            "*::test_d PASSED *",
            "* 2 passed, 1 deselected, 1 xfailed in *",
        ]
    )


def test_stepwise_skip_is_independent(pytester: Pytester) -> None:
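    """--stepwise-skip works on its own, without --stepwise (it implies it)."""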
    pytester.makepyfile(
        """
        def test_one():
            assert False

        def test_two():
            assert False

        def test_three():
            assert False
        """
    )
    result = pytester.runpytest("--tb", "no", "--stepwise-skip")
    result.assert_outcomes(failed=2)
    result.stdout.fnmatch_lines(
        [
            "FAILED test_stepwise_skip_is_independent.py::test_one - assert False",
            "FAILED test_stepwise_skip_is_independent.py::test_two - assert False",
            "*Interrupted: Test failed, continuing from this test next run.*",
        ]
    )


def test_sw_skip_help(pytester: Pytester) -> None:
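    """The --sw-skip help text mentions that it implicitly enables --stepwise."""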
    result = pytester.runpytest("-h")
    result.stdout.fnmatch_lines("*Implicitly enables --stepwise.")


def test_stepwise_xdist_dont_store_lastfailed(pytester: Pytester) -> None:
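    """A run that looks like an xdist worker (config.workerinput is set) must not write the stepwise cache."""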
    pytester.makefile(
        ext=".ini",
        pytest=f"[pytest]\ncache_dir = {pytester.path}\n",
    )

    pytester.makepyfile(
        conftest="""
        import pytest

        @pytest.hookimpl(tryfirst=True)
        def pytest_configure(config) -> None:
            # pytest detects xdist workers by the presence of the
            # `workerinput` attribute on the config object.
            config.workerinput = True
        """
    )
    pytester.makepyfile(
        test_one="""
        def test_one():
            assert False
        """
    )
    result = pytester.runpytest("--stepwise")
    assert result.ret == pytest.ExitCode.INTERRUPTED

    stepwise_cache_file = (
        pytester.path / Cache._CACHE_PREFIX_VALUES / STEPWISE_CACHE_DIR
    )
    assert not Path(stepwise_cache_file).exists()


def test_disabled_stepwise_xdist_dont_clear_cache(pytester: Pytester) -> None:
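    """Without --stepwise, a worker-like run must not clear an existing stepwise cache entry."""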
    pytester.makefile(
        ext=".ini",
        pytest=f"[pytest]\ncache_dir = {pytester.path}\n",
    )

    stepwise_cache_file = (
        pytester.path / Cache._CACHE_PREFIX_VALUES / STEPWISE_CACHE_DIR
    )
    stepwise_cache_dir = stepwise_cache_file.parent
    stepwise_cache_dir.mkdir(exist_ok=True, parents=True)

    stepwise_cache_file_relative = f"{Cache._CACHE_PREFIX_VALUES}/{STEPWISE_CACHE_DIR}"

    expected_value = '"test_one.py::test_one"'
    content = {f"{stepwise_cache_file_relative}": expected_value}

    pytester.makefile(ext="", **content)

    pytester.makepyfile(
        conftest="""
        import pytest

        @pytest.hookimpl(tryfirst=True)
        def pytest_configure(config) -> None:
            config.workerinput = True
        """
    )
    pytester.makepyfile(
        test_one="""
        def test_one():
            assert True
        """
    )
    result = pytester.runpytest()
    assert result.ret == 0

    assert Path(stepwise_cache_file).exists()
    with stepwise_cache_file.open(encoding="utf-8") as file_handle:
        observed_value = file_handle.readlines()
    assert [expected_value] == observed_value