skipping: refactor skipif/xfail mark evaluation

Previously, skipif/xfail marks were evaluated using a `MarkEvaluator`
class. I found this class very difficult to understand.

Instead of `MarkEvaluator`, rewrite the evaluation as plain functions,
which are hopefully easier to follow.

I tried to keep the semantics exactly the same as before, except for
improving a few error messages.
Ran Benita 2020-06-19 13:33:55 +03:00
parent 6072c9950d
commit 3e6fe92b7e
2 changed files with 251 additions and 233 deletions
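
For context, a small hypothetical test module (not part of this commit) showing the mark semantics the refactor is meant to preserve: string conditions are eval()'d and their text doubles as the default reason, while non-string conditions are bool()'d and require an explicit reason.

import sys

import pytest


@pytest.mark.skipif("sys.platform == 'win32'")
def test_old_style_string_condition():
    # The string is evaluated with eval(); it also becomes the skip reason.
    pass


@pytest.mark.skipif(sys.platform == "win32", reason="requires a non-Windows platform")
def test_boolean_condition_needs_reason():
    # A non-string condition is bool()'d and must come with reason=...
    pass


@pytest.mark.xfail(reason="known bug, expected to fail")
def test_expected_failure():
    assert 1 == 2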

Changed file 1 of 2: the skip/skipif/xfail plugin module (_pytest.skipping).

@@ -3,12 +3,13 @@ import os
 import platform
 import sys
 import traceback
-from typing import Any
-from typing import Dict
-from typing import List
 from typing import Optional
 from typing import Tuple

+import attr
+
+import _pytest._code
+from _pytest.compat import TYPE_CHECKING
 from _pytest.config import Config
 from _pytest.config import hookimpl
 from _pytest.config.argparsing import Parser
@@ -16,12 +17,14 @@ from _pytest.mark.structures import Mark
 from _pytest.nodes import Item
 from _pytest.outcomes import fail
 from _pytest.outcomes import skip
-from _pytest.outcomes import TEST_OUTCOME
 from _pytest.outcomes import xfail
 from _pytest.reports import BaseReport
 from _pytest.runner import CallInfo
 from _pytest.store import StoreKey

+if TYPE_CHECKING:
+    from typing import Type
+

 def pytest_addoption(parser: Parser) -> None:
     group = parser.getgroup("general")
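
The TYPE_CHECKING guard added above is the standard trick for typing-only imports. A minimal, generic sketch of the pattern (using typing.TYPE_CHECKING here; the diff uses the _pytest.compat shim):

from typing import TYPE_CHECKING

if TYPE_CHECKING:
    # Imported only while type checking, so there is no runtime cost or import cycle.
    from typing import Type


def exception_name(exc_type: "Type[BaseException]") -> str:
    # The annotation is a string, so Type is not needed at runtime.
    return exc_type.__name__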
@@ -64,17 +67,16 @@ def pytest_configure(config: Config) -> None:
     )
     config.addinivalue_line(
         "markers",
-        "skipif(condition): skip the given test function if eval(condition) "
-        "results in a True value. Evaluation happens within the "
-        "module global context. Example: skipif('sys.platform == \"win32\"') "
-        "skips the test if we are on the win32 platform. see "
-        "https://docs.pytest.org/en/latest/skipping.html",
+        "skipif(condition, ..., *, reason=...): "
+        "skip the given test function if any of the conditions evaluate to True. "
+        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
+        "see https://docs.pytest.org/en/latest/skipping.html",
     )
     config.addinivalue_line(
         "markers",
-        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
-        "mark the test function as an expected failure if eval(condition) "
-        "has a True value. Optionally specify a reason for better reporting "
+        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
+        "mark the test function as an expected failure if any of the conditions "
+        "evaluate to True. Optionally specify a reason for better reporting "
         "and run=False if you don't even want to execute the test function. "
         "If only specific exception(s) are expected, you can list them in "
         "raises, and if the test fails in other ways, it will be reported as "
@@ -82,179 +84,191 @@ def pytest_configure(config: Config) -> None:
     )


-def compiled_eval(expr: str, d: Dict[str, object]) -> Any:
-    import _pytest._code
-
-    exprcode = _pytest._code.compile(expr, mode="eval")
-    return eval(exprcode, d)
-
-
-class MarkEvaluator:
-    def __init__(self, item: Item, name: str) -> None:
-        self.item = item
-        self._marks = None  # type: Optional[List[Mark]]
-        self._mark = None  # type: Optional[Mark]
-        self._mark_name = name
-
-    def __bool__(self) -> bool:
-        # don't cache here to prevent staleness
-        return bool(self._get_marks())
-
-    def wasvalid(self) -> bool:
-        return not hasattr(self, "exc")
-
-    def _get_marks(self) -> List[Mark]:
-        return list(self.item.iter_markers(name=self._mark_name))
-
-    def invalidraise(self, exc) -> Optional[bool]:
-        raises = self.get("raises")
-        if not raises:
-            return None
-        return not isinstance(exc, raises)
-
-    def istrue(self) -> bool:
-        try:
-            return self._istrue()
-        except TEST_OUTCOME:
-            self.exc = sys.exc_info()
-            if isinstance(self.exc[1], SyntaxError):
-                # TODO: Investigate why SyntaxError.offset is Optional, and if it can be None here.
-                assert self.exc[1].offset is not None
-                msg = [" " * (self.exc[1].offset + 4) + "^"]
-                msg.append("SyntaxError: invalid syntax")
-            else:
-                msg = traceback.format_exception_only(*self.exc[:2])
-            fail(
-                "Error evaluating %r expression\n"
-                "    %s\n"
-                "%s" % (self._mark_name, self.expr, "\n".join(msg)),
-                pytrace=False,
-            )
-
-    def _getglobals(self) -> Dict[str, object]:
-        d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config}
-        if hasattr(self.item, "obj"):
-            d.update(self.item.obj.__globals__)  # type: ignore[attr-defined] # noqa: F821
-        return d
-
-    def _istrue(self) -> bool:
-        if hasattr(self, "result"):
-            result = getattr(self, "result")  # type: bool
-            return result
-        self._marks = self._get_marks()
-        if self._marks:
-            self.result = False
-            for mark in self._marks:
-                self._mark = mark
-                if "condition" not in mark.kwargs:
-                    args = mark.args
-                else:
-                    args = (mark.kwargs["condition"],)
-
-                for expr in args:
-                    self.expr = expr
-                    if isinstance(expr, str):
-                        d = self._getglobals()
-                        result = compiled_eval(expr, d)
-                    else:
-                        if "reason" not in mark.kwargs:
-                            # XXX better be checked at collection time
-                            msg = (
-                                "you need to specify reason=STRING "
-                                "when using booleans as conditions."
-                            )
-                            fail(msg)
-                        result = bool(expr)
-                    if result:
-                        self.result = True
-                        self.reason = mark.kwargs.get("reason", None)
-                        self.expr = expr
-                        return self.result
-
-                if not args:
-                    self.result = True
-                    self.reason = mark.kwargs.get("reason", None)
-                    return self.result
-        return False
+def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
+    """Evaluate a single skipif/xfail condition.
+
+    If an old-style string condition is given, it is eval()'d, otherwise the
+    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
+    is raised.
+
+    Returns (result, reason). The reason is only relevant if the result is True.
+    """
+    # String condition.
+    if isinstance(condition, str):
+        globals_ = {
+            "os": os,
+            "sys": sys,
+            "platform": platform,
+            "config": item.config,
+        }
+        if hasattr(item, "obj"):
+            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
+        try:
+            condition_code = _pytest._code.compile(condition, mode="eval")
+            result = eval(condition_code, globals_)
+        except SyntaxError as exc:
+            msglines = [
+                "Error evaluating %r condition" % mark.name,
+                "    " + condition,
+                "    " + " " * (exc.offset or 0) + "^",
+                "SyntaxError: invalid syntax",
+            ]
+            fail("\n".join(msglines), pytrace=False)
+        except Exception as exc:
+            msglines = [
+                "Error evaluating %r condition" % mark.name,
+                "    " + condition,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+
+    # Boolean condition.
+    else:
+        try:
+            result = bool(condition)
+        except Exception as exc:
+            msglines = [
+                "Error evaluating %r condition as a boolean" % mark.name,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+
+    reason = mark.kwargs.get("reason", None)
+    if reason is None:
+        if isinstance(condition, str):
+            reason = "condition: " + condition
+        else:
+            # XXX better be checked at collection time
+            msg = (
+                "Error evaluating %r: " % mark.name
+                + "you need to specify reason=STRING when using booleans as conditions."
+            )
+            fail(msg, pytrace=False)
+
+    return result, reason
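
To make the string-condition path concrete, here is a toy, standalone sketch (not pytest code; the helper name and namespace handling are illustrative only) of evaluating a text condition against os/sys/platform/config plus the test module's globals, mirroring evaluate_condition() above:

import os
import platform
import sys
from typing import Mapping


def evaluate_string_condition(condition: str, module_globals: Mapping[str, object], config: object) -> bool:
    # Namespace mirrors the one built in evaluate_condition(): well-known modules,
    # the pytest config object, and the globals of the module under test.
    namespace = {"os": os, "sys": sys, "platform": platform, "config": config}
    namespace.update(module_globals)
    return bool(eval(compile(condition, "<skipif condition>", "eval"), namespace))


# Example: evaluate_string_condition("sys.platform != 'win32'", {}, config=None)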
-    def get(self, attr, default=None):
-        if self._mark is None:
-            return default
-        return self._mark.kwargs.get(attr, default)
-
-    def getexplanation(self):
-        expl = getattr(self, "reason", None) or self.get("reason", None)
-        if not expl:
-            if not hasattr(self, "expr"):
-                return ""
-            else:
-                return "condition: " + str(self.expr)
-        return expl
+
+
+@attr.s(slots=True, frozen=True)
+class Skip:
+    """The result of evaluate_skip_marks()."""
+
+    reason = attr.ib(type=str)
+
+
+def evaluate_skip_marks(item: Item) -> Optional[Skip]:
+    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
+    for mark in item.iter_markers(name="skipif"):
+        if "condition" not in mark.kwargs:
+            conditions = mark.args
+        else:
+            conditions = (mark.kwargs["condition"],)
+
+        # Unconditional.
+        if not conditions:
+            reason = mark.kwargs.get("reason", "")
+            return Skip(reason)
+
+        # If any of the conditions are true.
+        for condition in conditions:
+            result, reason = evaluate_condition(item, mark, condition)
+            if result:
+                return Skip(reason)
+
+    for mark in item.iter_markers(name="skip"):
+        if "reason" in mark.kwargs:
+            reason = mark.kwargs["reason"]
+        elif mark.args:
+            reason = mark.args[0]
+        else:
+            reason = "unconditional skip"
+        return Skip(reason)
+
+    return None
+
+
+@attr.s(slots=True, frozen=True)
+class Xfail:
+    """The result of evaluate_xfail_marks()."""
+
+    reason = attr.ib(type=str)
+    run = attr.ib(type=bool)
+    strict = attr.ib(type=bool)
+    raises = attr.ib(type=Optional[Tuple["Type[BaseException]", ...]])
+
+
+def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
+    """Evaluate xfail marks on item, returning Xfail if triggered."""
+    for mark in item.iter_markers(name="xfail"):
+        run = mark.kwargs.get("run", True)
+        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
+        raises = mark.kwargs.get("raises", None)
+        if "condition" not in mark.kwargs:
+            conditions = mark.args
+        else:
+            conditions = (mark.kwargs["condition"],)
+
+        # Unconditional.
+        if not conditions:
+            reason = mark.kwargs.get("reason", "")
+            return Xfail(reason, run, strict, raises)
+
+        # If any of the conditions are true.
+        for condition in conditions:
+            result, reason = evaluate_condition(item, mark, condition)
+            if result:
+                return Xfail(reason, run, strict, raises)
+
+    return None
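
A hedged usage illustration of the "any condition triggers" loops above and of the registered marker signatures (the test names are made up): several conditions may be passed positionally, and xfail additionally accepts run/raises/strict keywords. The evaluation result is stored in the frozen Skip/Xfail value objects defined above.

import sys

import pytest


@pytest.mark.skipif(sys.platform == "win32", sys.maxsize <= 2**32, reason="needs a 64-bit non-Windows platform")
def test_needs_64bit_posix():
    # Skipped if either boolean condition is true.
    pass


@pytest.mark.xfail(raises=ZeroDivisionError, strict=True)
def test_division_by_zero():
    # Expected to fail with exactly ZeroDivisionError.
    1 / 0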
+# Whether skipped due to skip or skipif marks.
 skipped_by_mark_key = StoreKey[bool]()
-evalxfail_key = StoreKey[MarkEvaluator]()
+# Saves the xfail mark evaluation. Can be refreshed during call if None.
+xfailed_key = StoreKey[Optional[Xfail]]()
 unexpectedsuccess_key = StoreKey[str]()
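
These keys follow pytest's typed item-store pattern; a small sketch of how a hook might use such a key (assuming the internal StoreKey/item._store API shown in this diff; names are illustrative):

from typing import Optional

from _pytest.store import StoreKey

# A key is just a typed, unique handle; the value lives in item._store.
demo_reason_key = StoreKey[Optional[str]]()


def remember_reason(item, reason: Optional[str]) -> None:
    item._store[demo_reason_key] = reason


def recall_reason(item) -> Optional[str]:
    return item._store.get(demo_reason_key, None)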

 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item: Item) -> None:
-    # Check if skip or skipif are specified as pytest marks
     item._store[skipped_by_mark_key] = False
-    eval_skipif = MarkEvaluator(item, "skipif")
-    if eval_skipif.istrue():
-        item._store[skipped_by_mark_key] = True
-        skip(eval_skipif.getexplanation())

-    for skip_info in item.iter_markers(name="skip"):
+    skipped = evaluate_skip_marks(item)
+    if skipped:
         item._store[skipped_by_mark_key] = True
-        if "reason" in skip_info.kwargs:
-            skip(skip_info.kwargs["reason"])
-        elif skip_info.args:
-            skip(skip_info.args[0])
-        else:
-            skip("unconditional skip")
+        skip(skipped.reason)

-    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
-    check_xfail_no_run(item)
+    if not item.config.option.runxfail:
+        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+        if xfailed and not xfailed.run:
+            xfail("[NOTRUN] " + xfailed.reason)
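
From the user's side, the setup hook above is what turns an xfail(run=False) into a report without executing the test; a hypothetical example:

import pytest


@pytest.mark.xfail(run=False, reason="crashes the interpreter")
def test_crashes_hard():
    # Never executed; reported as xfailed with a "[NOTRUN] ..." reason,
    # unless pytest is invoked with --runxfail.
    assert False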

 @hookimpl(hookwrapper=True)
 def pytest_runtest_call(item: Item):
-    check_xfail_no_run(item)
+    if not item.config.option.runxfail:
+        xfailed = item._store.get(xfailed_key, None)
+        if xfailed is None:
+            item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+
+        if xfailed and not xfailed.run:
+            xfail("[NOTRUN] " + xfailed.reason)
+
     outcome = yield
     passed = outcome.excinfo is None
     if passed:
-        check_strict_xfail(item)
-
-
-def check_xfail_no_run(item: Item) -> None:
-    """check xfail(run=False)"""
-    if not item.config.option.runxfail:
-        evalxfail = item._store[evalxfail_key]
-        if evalxfail.istrue():
-            if not evalxfail.get("run", True):
-                xfail("[NOTRUN] " + evalxfail.getexplanation())
-
-
-def check_strict_xfail(item: Item) -> None:
-    """check xfail(strict=True) for the given PASSING test"""
-    evalxfail = item._store[evalxfail_key]
-    if evalxfail.istrue():
-        strict_default = item.config.getini("xfail_strict")
-        is_strict_xfail = evalxfail.get("strict", strict_default)
-        if is_strict_xfail:
-            del item._store[evalxfail_key]
-            explanation = evalxfail.getexplanation()
-            fail("[XPASS(strict)] " + explanation, pytrace=False)
+        xfailed = item._store.get(xfailed_key, None)
+        if xfailed is None:
+            item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+
+        if xfailed and xfailed.strict:
+            del item._store[xfailed_key]
+            fail("[XPASS(strict)] " + xfailed.reason, pytrace=False)
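
The strict handling in pytest_runtest_call means an unexpectedly passing strict xfail is turned into a hard failure; a hypothetical example:

import pytest


@pytest.mark.xfail(strict=True, reason="expected to fail until the underlying bug is fixed")
def test_unexpectedly_passes():
    # Because the test passes, a strict xfail reports "[XPASS(strict)] ..."
    # and the test run fails.
    assert True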

 @hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
     outcome = yield
     rep = outcome.get_result()
-    evalxfail = item._store.get(evalxfail_key, None)
+    xfailed = item._store.get(xfailed_key, None)
     # unittest special case, see setting of unexpectedsuccess_key
     if unexpectedsuccess_key in item._store and rep.when == "call":
         reason = item._store[unexpectedsuccess_key]
@@ -263,30 +277,27 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
         else:
             rep.longrepr = "Unexpected success"
         rep.outcome = "failed"
     elif item.config.option.runxfail:
         pass  # don't interfere
     elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
         assert call.excinfo.value.msg is not None
         rep.wasxfail = "reason: " + call.excinfo.value.msg
         rep.outcome = "skipped"
-    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
+    elif not rep.skipped and xfailed:
         if call.excinfo:
-            if evalxfail.invalidraise(call.excinfo.value):
+            raises = xfailed.raises
+            if raises is not None and not isinstance(call.excinfo.value, raises):
                 rep.outcome = "failed"
             else:
                 rep.outcome = "skipped"
-                rep.wasxfail = evalxfail.getexplanation()
+                rep.wasxfail = xfailed.reason
         elif call.when == "call":
-            strict_default = item.config.getini("xfail_strict")
-            is_strict_xfail = evalxfail.get("strict", strict_default)
-            explanation = evalxfail.getexplanation()
-            if is_strict_xfail:
+            if xfailed.strict:
                 rep.outcome = "failed"
-                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
+                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
             else:
                 rep.outcome = "passed"
-                rep.wasxfail = explanation
+                rep.wasxfail = xfailed.reason
     elif (
         item._store.get(skipped_by_mark_key, True)
         and rep.skipped
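
The raises= filtering in pytest_runtest_makereport distinguishes the expected exception from everything else; a hypothetical example:

import pytest


@pytest.mark.xfail(raises=IndexError, reason="lookup is known to be wrong")
def test_wrong_exception_is_a_real_failure():
    # Raising a different exception than the one listed in raises= is
    # reported as a regular failure, not as xfailed.
    raise TypeError("unexpected kind of error")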
@@ -301,9 +312,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
         rep.longrepr = str(filename), line + 1, reason


-# called by terminalreporter progress reporting
 def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
     if hasattr(report, "wasxfail"):
         if report.skipped:

Changed file 2 of 2: the tests for _pytest.skipping.

@@ -2,68 +2,74 @@ import sys

 import pytest
 from _pytest.runner import runtestprotocol
-from _pytest.skipping import MarkEvaluator
+from _pytest.skipping import evaluate_skip_marks
+from _pytest.skipping import evaluate_xfail_marks
 from _pytest.skipping import pytest_runtest_setup


-class TestEvaluator:
+class TestEvaluation:
     def test_no_marker(self, testdir):
         item = testdir.getitem("def test_func(): pass")
-        evalskipif = MarkEvaluator(item, "skipif")
-        assert not evalskipif
-        assert not evalskipif.istrue()
+        skipped = evaluate_skip_marks(item)
+        assert not skipped

-    def test_marked_no_args(self, testdir):
+    def test_marked_xfail_no_args(self, testdir):
         item = testdir.getitem(
             """
             import pytest
-            @pytest.mark.xyz
+            @pytest.mark.xfail
             def test_func():
                 pass
         """
         )
-        ev = MarkEvaluator(item, "xyz")
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == ""
-        assert not ev.get("run", False)
+        xfailed = evaluate_xfail_marks(item)
+        assert xfailed
+        assert xfailed.reason == ""
+        assert xfailed.run
+
+    def test_marked_skipif_no_args(self, testdir):
+        item = testdir.getitem(
+            """
+            import pytest
+            @pytest.mark.skipif
+            def test_func():
+                pass
+        """
+        )
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == ""

     def test_marked_one_arg(self, testdir):
         item = testdir.getitem(
             """
             import pytest
-            @pytest.mark.xyz("hasattr(os, 'sep')")
+            @pytest.mark.skipif("hasattr(os, 'sep')")
             def test_func():
                 pass
         """
         )
-        ev = MarkEvaluator(item, "xyz")
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: hasattr(os, 'sep')"
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == "condition: hasattr(os, 'sep')"

     def test_marked_one_arg_with_reason(self, testdir):
         item = testdir.getitem(
             """
             import pytest
-            @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
+            @pytest.mark.skipif("hasattr(os, 'sep')", attr=2, reason="hello world")
             def test_func():
                 pass
         """
         )
-        ev = MarkEvaluator(item, "xyz")
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "hello world"
-        assert ev.get("attr") == 2
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == "hello world"

     def test_marked_one_arg_twice(self, testdir):
         lines = [
             """@pytest.mark.skipif("not hasattr(os, 'murks')")""",
-            """@pytest.mark.skipif("hasattr(os, 'murks')")""",
+            """@pytest.mark.skipif(condition="hasattr(os, 'murks')")""",
         ]
         for i in range(0, 2):
             item = testdir.getitem(
@@ -76,11 +82,9 @@ class TestEvaluator:
         """
                 % (lines[i], lines[(i + 1) % 2])
             )
-            ev = MarkEvaluator(item, "skipif")
-            assert ev
-            assert ev.istrue()
-            expl = ev.getexplanation()
-            assert expl == "condition: not hasattr(os, 'murks')"
+            skipped = evaluate_skip_marks(item)
+            assert skipped
+            assert skipped.reason == "condition: not hasattr(os, 'murks')"

     def test_marked_one_arg_twice2(self, testdir):
         item = testdir.getitem(
@@ -92,13 +96,11 @@ class TestEvaluator:
                 pass
         """
         )
-        ev = MarkEvaluator(item, "skipif")
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: not hasattr(os, 'murks')"
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == "condition: not hasattr(os, 'murks')"

-    def test_marked_skip_with_not_string(self, testdir) -> None:
+    def test_marked_skipif_with_boolean_without_reason(self, testdir) -> None:
         item = testdir.getitem(
             """
             import pytest
@@ -107,14 +109,34 @@ class TestEvaluator:
                 pass
         """
         )
-        ev = MarkEvaluator(item, "skipif")
-        exc = pytest.raises(pytest.fail.Exception, ev.istrue)
-        assert exc.value.msg is not None
+        with pytest.raises(pytest.fail.Exception) as excinfo:
+            evaluate_skip_marks(item)
+        assert excinfo.value.msg is not None
         assert (
-            """Failed: you need to specify reason=STRING when using booleans as conditions."""
-            in exc.value.msg
+            """Error evaluating 'skipif': you need to specify reason=STRING when using booleans as conditions."""
+            in excinfo.value.msg
         )

+    def test_marked_skipif_with_invalid_boolean(self, testdir) -> None:
+        item = testdir.getitem(
+            """
+            import pytest
+
+            class InvalidBool:
+                def __bool__(self):
+                    raise TypeError("INVALID")
+
+            @pytest.mark.skipif(InvalidBool(), reason="xxx")
+            def test_func():
+                pass
+        """
+        )
+        with pytest.raises(pytest.fail.Exception) as excinfo:
+            evaluate_skip_marks(item)
+        assert excinfo.value.msg is not None
+        assert "Error evaluating 'skipif' condition as a boolean" in excinfo.value.msg
+        assert "INVALID" in excinfo.value.msg
+
     def test_skipif_class(self, testdir):
         (item,) = testdir.getitems(
             """
@@ -126,10 +148,9 @@ class TestEvaluator:
         """
         )
         item.config._hackxyz = 3
-        ev = MarkEvaluator(item, "skipif")
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: config._hackxyz"
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == "condition: config._hackxyz"


 class TestXFail:
@@ -895,10 +916,10 @@ def test_errors_in_xfail_skip_expressions(testdir) -> None:
     result.stdout.fnmatch_lines(
         [
             "*ERROR*test_nameerror*",
-            "*evaluating*skipif*expression*",
+            "*evaluating*skipif*condition*",
             "*asd*",
             "*ERROR*test_syntax*",
-            "*evaluating*xfail*expression*",
+            "*evaluating*xfail*condition*",
             "  syntax error",
             markline,
             "SyntaxError: invalid syntax",
@@ -924,25 +945,12 @@ def test_xfail_skipif_with_globals(testdir):
     result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"])


-def test_direct_gives_error(testdir):
-    testdir.makepyfile(
-        """
-        import pytest
-        @pytest.mark.skipif(True)
-        def test_skip1():
-            pass
-    """
-    )
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines(["*1 error*"])
-
-
 def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines(
         [
-            "*skipif(*condition)*skip*",
-            "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
+            "*skipif(condition, ..., [*], reason=...)*skip*",
+            "*xfail(condition, ..., [*], reason=..., run=True, raises=None, strict=xfail_strict)*expected failure*",
         ]
     )
@@ -1137,7 +1145,9 @@ def test_mark_xfail_item(testdir):
         class MyItem(pytest.Item):
             nodeid = 'foo'
             def setup(self):
-                marker = pytest.mark.xfail(True, reason="Expected failure")
+                marker = pytest.mark.xfail("1 == 2", reason="Expected failure - false")
+                self.add_marker(marker)
+                marker = pytest.mark.xfail(True, reason="Expected failure - true")
                 self.add_marker(marker)
             def runtest(self):
                 assert False