skipping: use pytest_runtest_call instead of pytest_pyfunc_call
`@pytest.mark.xfail` is meant to work with arbitrary items, and there is a test `test_mark_xfail_item` which verifies this. However, the code for some reason uses `pytest_pyfunc_call` for the call phase check, which only works for Function items. The test mentioned above only passed "accidentally" because the `pytest_runtest_makereport` hook also runs `evalxfail.istrue()`, which triggers an evaluation, but conceptually it shouldn't do that. Change to `pytest_runtest_call` to make the xfail checking properly generic.
This commit is contained in:
parent
4cc4ebf3c9
commit
a1f841d5d2
|
@ -10,7 +10,6 @@ from _pytest.nodes import Item
|
||||||
from _pytest.outcomes import fail
|
from _pytest.outcomes import fail
|
||||||
from _pytest.outcomes import skip
|
from _pytest.outcomes import skip
|
||||||
from _pytest.outcomes import xfail
|
from _pytest.outcomes import xfail
|
||||||
from _pytest.python import Function
|
|
||||||
from _pytest.reports import BaseReport
|
from _pytest.reports import BaseReport
|
||||||
from _pytest.runner import CallInfo
|
from _pytest.runner import CallInfo
|
||||||
from _pytest.store import StoreKey
|
from _pytest.store import StoreKey
|
||||||
|
@ -103,12 +102,12 @@ def pytest_runtest_setup(item: Item) -> None:
|
||||||
|
|
||||||
|
|
||||||
@hookimpl(hookwrapper=True)
|
@hookimpl(hookwrapper=True)
|
||||||
def pytest_pyfunc_call(pyfuncitem: Function):
|
def pytest_runtest_call(item: Item):
|
||||||
check_xfail_no_run(pyfuncitem)
|
check_xfail_no_run(item)
|
||||||
outcome = yield
|
outcome = yield
|
||||||
passed = outcome.excinfo is None
|
passed = outcome.excinfo is None
|
||||||
if passed:
|
if passed:
|
||||||
check_strict_xfail(pyfuncitem)
|
check_strict_xfail(item)
|
||||||
|
|
||||||
|
|
||||||
def check_xfail_no_run(item: Item) -> None:
|
def check_xfail_no_run(item: Item) -> None:
|
||||||
|
@ -120,14 +119,14 @@ def check_xfail_no_run(item: Item) -> None:
|
||||||
xfail("[NOTRUN] " + evalxfail.getexplanation())
|
xfail("[NOTRUN] " + evalxfail.getexplanation())
|
||||||
|
|
||||||
|
|
||||||
def check_strict_xfail(pyfuncitem: Function) -> None:
|
def check_strict_xfail(item: Item) -> None:
|
||||||
"""check xfail(strict=True) for the given PASSING test"""
|
"""check xfail(strict=True) for the given PASSING test"""
|
||||||
evalxfail = pyfuncitem._store[evalxfail_key]
|
evalxfail = item._store[evalxfail_key]
|
||||||
if evalxfail.istrue():
|
if evalxfail.istrue():
|
||||||
strict_default = pyfuncitem.config.getini("xfail_strict")
|
strict_default = item.config.getini("xfail_strict")
|
||||||
is_strict_xfail = evalxfail.get("strict", strict_default)
|
is_strict_xfail = evalxfail.get("strict", strict_default)
|
||||||
if is_strict_xfail:
|
if is_strict_xfail:
|
||||||
del pyfuncitem._store[evalxfail_key]
|
del item._store[evalxfail_key]
|
||||||
explanation = evalxfail.getexplanation()
|
explanation = evalxfail.getexplanation()
|
||||||
fail("[XPASS(strict)] " + explanation, pytrace=False)
|
fail("[XPASS(strict)] " + explanation, pytrace=False)
|
||||||
|
|
||||||
|
|
Loading…
Reference in New Issue