diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py
index 24c89eb6d..e333e78df 100644
--- a/src/_pytest/skipping.py
+++ b/src/_pytest/skipping.py
@@ -236,10 +236,9 @@ def pytest_runtest_setup(item: Item) -> None:
     if skipped:
         skip(skipped.reason)
 
-    if not item.config.option.runxfail:
-        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
-        if xfailed and not xfailed.run:
-            xfail("[NOTRUN] " + xfailed.reason)
+    item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+    if xfailed and not item.config.option.runxfail and not xfailed.run:
+        xfail("[NOTRUN] " + xfailed.reason)
 
 
 @hookimpl(hookwrapper=True)
@@ -248,12 +247,16 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
     if xfailed is None:
         item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
 
-    if not item.config.option.runxfail:
-        if xfailed and not xfailed.run:
-            xfail("[NOTRUN] " + xfailed.reason)
+    if xfailed and not item.config.option.runxfail and not xfailed.run:
+        xfail("[NOTRUN] " + xfailed.reason)
 
     yield
 
+    # The test run may have added an xfail mark dynamically.
+    xfailed = item._store.get(xfailed_key, None)
+    if xfailed is None:
+        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+
 
 @hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index 8fceb37aa..61de0b3e1 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -1,6 +1,7 @@
 import sys
 
 import pytest
+from _pytest.pytester import Testdir
 from _pytest.runner import runtestprotocol
 from _pytest.skipping import evaluate_skip_marks
 from _pytest.skipping import evaluate_xfail_marks
@@ -425,6 +426,33 @@ class TestXFail:
         result = testdir.runpytest(p)
         result.stdout.fnmatch_lines(["*1 xfailed*"])
 
+    def test_dynamic_xfail_set_during_runtest_failed(self, testdir: Testdir) -> None:
+        # Issue #7486.
+        p = testdir.makepyfile(
+            """
+            import pytest
+            def test_this(request):
+                request.node.add_marker(pytest.mark.xfail(reason="xfail"))
+                assert 0
+            """
+        )
+        result = testdir.runpytest(p)
+        result.assert_outcomes(xfailed=1)
+
+    def test_dynamic_xfail_set_during_runtest_passed_strict(
+        self, testdir: Testdir
+    ) -> None:
+        # Issue #7486.
+        p = testdir.makepyfile(
+            """
+            import pytest
+            def test_this(request):
+                request.node.add_marker(pytest.mark.xfail(reason="xfail", strict=True))
+            """
+        )
+        result = testdir.runpytest(p)
+        result.assert_outcomes(failed=1)
+
     @pytest.mark.parametrize(
         "expected, actual, matchline",
         [
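
Illustration (not part of the patch): the sketch below mirrors the new test cases at the user level and assumes a pytest build with this fix applied; the module content and the reason string are invented for the example.

import pytest


def test_this(request):
    # Add the xfail marker while the test body is already running. With the
    # change to pytest_runtest_call above, the marks are re-evaluated after
    # the call phase, so the failure below is reported as xfailed, not failed.
    request.node.add_marker(pytest.mark.xfail(reason="known bug"))
    assert 0

Running pytest -rx on such a module should report 1 xfailed. With strict=True on the marker and a passing test body, the outcome is instead reported as a failure, which is what the second new test (test_dynamic_xfail_set_during_runtest_passed_strict) asserts.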