skipping: fix dynamic xfail mark added in runtest not respected

If a test's runtest phase (not setup) dynamically adds a pytest.mark.xfail
mark to the item, it should be respected, but it wasn't. This regressed
in 3e6fe92b7e (not released).

Fix it by always refreshing the mark when needed. This is mostly what
was done before, just in a more roundabout way.
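For illustration, the regressed scenario looks like the test added below: the
xfail mark is attached from inside the test body, i.e. during the call phase
rather than during setup or collection (the file name here is hypothetical):

    # test_dynamic_xfail_repro.py -- hypothetical reproducer, mirrors the new test below
    import pytest

    def test_this(request):
        # The mark is added while the test is already running (runtest phase).
        request.node.add_marker(pytest.mark.xfail(reason="xfail"))
        assert 0  # with the regression this was reported as a plain failure instead of xfailed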
parent 78f2dc08fa
commit ccad10a829
--- a/src/_pytest/skipping.py
+++ b/src/_pytest/skipping.py
@@ -236,10 +236,9 @@ def pytest_runtest_setup(item: Item) -> None:
     if skipped:
         skip(skipped.reason)
 
-    if not item.config.option.runxfail:
-        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
-        if xfailed and not xfailed.run:
-            xfail("[NOTRUN] " + xfailed.reason)
+    item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+    if xfailed and not item.config.option.runxfail and not xfailed.run:
+        xfail("[NOTRUN] " + xfailed.reason)
 
 
 @hookimpl(hookwrapper=True)
@@ -248,12 +247,16 @@ def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
     if xfailed is None:
         item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
 
-    if not item.config.option.runxfail:
-        if xfailed and not xfailed.run:
-            xfail("[NOTRUN] " + xfailed.reason)
+    if xfailed and not item.config.option.runxfail and not xfailed.run:
+        xfail("[NOTRUN] " + xfailed.reason)
 
     yield
+
+    # The test run may have added an xfail mark dynamically.
+    xfailed = item._store.get(xfailed_key, None)
+    if xfailed is None:
+        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
 
 
 @hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
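The re-evaluation after the yield matters because the report hook shown as
context above is what turns the failure into an xfail, and it only looks at
the value cached under xfailed_key. A much simplified sketch of that consumer
(not the verbatim pytest source, just the shape of the logic that reads the
refreshed value):

    @hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
        outcome = yield
        rep = outcome.get_result()
        # Reads whatever pytest_runtest_call left in the store; before this fix,
        # a mark added during the test body was never seen here.
        xfailed = item._store.get(xfailed_key, None)
        if xfailed and not item.config.option.runxfail and not rep.skipped:
            if call.excinfo is not None:
                # Failing test with an xfail mark: report it as xfailed.
                rep.outcome = "skipped"
                rep.wasxfail = xfailed.reason
            elif call.when == "call" and xfailed.strict:
                # Strict xfail that unexpectedly passed: report it as a failure.
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] " + xfailed.reason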
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -1,6 +1,7 @@
 import sys
 
 import pytest
+from _pytest.pytester import Testdir
 from _pytest.runner import runtestprotocol
 from _pytest.skipping import evaluate_skip_marks
 from _pytest.skipping import evaluate_xfail_marks
@@ -425,6 +426,33 @@ class TestXFail:
         result = testdir.runpytest(p)
         result.stdout.fnmatch_lines(["*1 xfailed*"])
 
+    def test_dynamic_xfail_set_during_runtest_failed(self, testdir: Testdir) -> None:
+        # Issue #7486.
+        p = testdir.makepyfile(
+            """
+            import pytest
+            def test_this(request):
+                request.node.add_marker(pytest.mark.xfail(reason="xfail"))
+                assert 0
+            """
+        )
+        result = testdir.runpytest(p)
+        result.assert_outcomes(xfailed=1)
+
+    def test_dynamic_xfail_set_during_runtest_passed_strict(
+        self, testdir: Testdir
+    ) -> None:
+        # Issue #7486.
+        p = testdir.makepyfile(
+            """
+            import pytest
+            def test_this(request):
+                request.node.add_marker(pytest.mark.xfail(reason="xfail", strict=True))
+            """
+        )
+        result = testdir.runpytest(p)
+        result.assert_outcomes(failed=1)
+
     @pytest.mark.parametrize(
         "expected, actual, matchline",
         [
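To run just the new tests from a pytest checkout, a selection along these
lines should work:

    pytest testing/test_skipping.py -k "dynamic_xfail_set_during_runtest"

The strict variant asserts failed=1 rather than xfailed=1 because a strict
xfail that unexpectedly passes is reported as a failure.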