Merge pull request #7388 from bluetech/mark-evaluate

skipping: refactor mark evaluation
Ran Benita, 2020-06-21 21:47:56 +03:00 (committed by GitHub)
commit 99d34ba029
3 changed files with 261 additions and 256 deletions

src/_pytest/mark/evaluate.py (deleted)

@@ -1,124 +0,0 @@
import os
import platform
import sys
import traceback
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from ..outcomes import fail
from ..outcomes import TEST_OUTCOME
from .structures import Mark
from _pytest.nodes import Item
def compiled_eval(expr: str, d: Dict[str, object]) -> Any:
import _pytest._code
exprcode = _pytest._code.compile(expr, mode="eval")
return eval(exprcode, d)
class MarkEvaluator:
def __init__(self, item: Item, name: str) -> None:
self.item = item
self._marks = None # type: Optional[List[Mark]]
self._mark = None # type: Optional[Mark]
self._mark_name = name
def __bool__(self) -> bool:
# don't cache here to prevent staleness
return bool(self._get_marks())
def wasvalid(self) -> bool:
return not hasattr(self, "exc")
def _get_marks(self) -> List[Mark]:
return list(self.item.iter_markers(name=self._mark_name))
def invalidraise(self, exc) -> Optional[bool]:
raises = self.get("raises")
if not raises:
return None
return not isinstance(exc, raises)
def istrue(self) -> bool:
try:
return self._istrue()
except TEST_OUTCOME:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
# TODO: Investigate why SyntaxError.offset is Optional, and if it can be None here.
assert self.exc[1].offset is not None
msg = [" " * (self.exc[1].offset + 4) + "^"]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
fail(
"Error evaluating %r expression\n"
" %s\n"
"%s" % (self._mark_name, self.expr, "\n".join(msg)),
pytrace=False,
)
def _getglobals(self) -> Dict[str, object]:
d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config}
if hasattr(self.item, "obj"):
d.update(self.item.obj.__globals__) # type: ignore[attr-defined] # noqa: F821
return d
def _istrue(self) -> bool:
if hasattr(self, "result"):
result = getattr(self, "result") # type: bool
return result
self._marks = self._get_marks()
if self._marks:
self.result = False
for mark in self._marks:
self._mark = mark
if "condition" not in mark.kwargs:
args = mark.args
else:
args = (mark.kwargs["condition"],)
for expr in args:
self.expr = expr
if isinstance(expr, str):
d = self._getglobals()
result = compiled_eval(expr, d)
else:
if "reason" not in mark.kwargs:
# XXX better be checked at collection time
msg = (
"you need to specify reason=STRING "
"when using booleans as conditions."
)
fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = mark.kwargs.get("reason", None)
self.expr = expr
return self.result
if not args:
self.result = True
self.reason = mark.kwargs.get("reason", None)
return self.result
return False
def get(self, attr, default=None):
if self._mark is None:
return default
return self._mark.kwargs.get(attr, default)
def getexplanation(self):
expl = getattr(self, "reason", None) or self.get("reason", None)
if not expl:
if not hasattr(self, "expr"):
return ""
else:
return "condition: " + str(self.expr)
return expl
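For orientation, this is how the deleted class was driven by skipping.py before this commit (usage reconstructed from the old code shown further down; `item` stands for any collected test item):

    from _pytest.mark.evaluate import MarkEvaluator
    from _pytest.outcomes import skip

    # Old-style evaluation: one evaluator per mark name, evaluated lazily.
    evalskipif = MarkEvaluator(item, "skipif")
    if evalskipif.istrue():  # eval()s string conditions in the module's globals
        skip(evalskipif.getexplanation())  # reason= kwarg or "condition: <expr>"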

src/_pytest/skipping.py

@@ -1,24 +1,30 @@
""" support for skip/xfail functions and markers. """
import os
import platform
import sys
import traceback
from typing import Generator
from typing import Optional
from typing import Tuple
import attr
import _pytest._code
from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config
from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser
from _pytest.mark.evaluate import MarkEvaluator
from _pytest.mark.structures import Mark
from _pytest.nodes import Item
from _pytest.outcomes import fail
from _pytest.outcomes import skip
from _pytest.outcomes import xfail
from _pytest.python import Function
from _pytest.reports import BaseReport
from _pytest.runner import CallInfo
from _pytest.store import StoreKey
skipped_by_mark_key = StoreKey[bool]()
evalxfail_key = StoreKey[MarkEvaluator]()
unexpectedsuccess_key = StoreKey[str]()
if TYPE_CHECKING:
from typing import Type
def pytest_addoption(parser: Parser) -> None:
@@ -62,81 +68,200 @@ def pytest_configure(config: Config) -> None:
)
config.addinivalue_line(
"markers",
"skipif(condition): skip the given test function if eval(condition) "
"results in a True value. Evaluation happens within the "
"module global context. Example: skipif('sys.platform == \"win32\"') "
"skips the test if we are on the win32 platform. see "
"https://docs.pytest.org/en/latest/skipping.html",
"skipif(condition, ..., *, reason=...): "
"skip the given test function if any of the conditions evaluate to True. "
"Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
"See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
)
config.addinivalue_line(
"markers",
"xfail(condition, reason=None, run=True, raises=None, strict=False): "
"mark the test function as an expected failure if eval(condition) "
"has a True value. Optionally specify a reason for better reporting "
"xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
"mark the test function as an expected failure if any of the conditions "
"evaluate to True. Optionally specify a reason for better reporting "
"and run=False if you don't even want to execute the test function. "
"If only specific exception(s) are expected, you can list them in "
"raises, and if the test fails in other ways, it will be reported as "
"a true failure. See https://docs.pytest.org/en/latest/skipping.html",
"a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
)
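As a usage illustration of the new help text (an editor's sketch, not part of the commit; the test names are made up):

    import sys
    import pytest

    # skipif: any true condition skips the test.
    @pytest.mark.skipif(sys.platform == "win32", reason="requires a POSIX platform")
    def test_posix_only():
        assert True

    # xfail: strict defaults to the xfail_strict ini option unless given explicitly.
    @pytest.mark.xfail(sys.maxsize <= 2 ** 32, reason="needs 64-bit", strict=True)
    def test_needs_64bit():
        assert sys.maxsize > 2 ** 32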
def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
"""Evaluate a single skipif/xfail condition.
If an old-style string condition is given, it is eval()'d, otherwise the
condition is bool()'d. If this fails, an appropriately formatted pytest.fail
is raised.
Returns (result, reason). The reason is only relevant if the result is True.
"""
# String condition.
if isinstance(condition, str):
globals_ = {
"os": os,
"sys": sys,
"platform": platform,
"config": item.config,
}
if hasattr(item, "obj"):
globals_.update(item.obj.__globals__) # type: ignore[attr-defined]
try:
condition_code = _pytest._code.compile(condition, mode="eval")
result = eval(condition_code, globals_)
except SyntaxError as exc:
msglines = [
"Error evaluating %r condition" % mark.name,
" " + condition,
" " + " " * (exc.offset or 0) + "^",
"SyntaxError: invalid syntax",
]
fail("\n".join(msglines), pytrace=False)
except Exception as exc:
msglines = [
"Error evaluating %r condition" % mark.name,
" " + condition,
*traceback.format_exception_only(type(exc), exc),
]
fail("\n".join(msglines), pytrace=False)
# Boolean condition.
else:
try:
result = bool(condition)
except Exception as exc:
msglines = [
"Error evaluating %r condition as a boolean" % mark.name,
*traceback.format_exception_only(type(exc), exc),
]
fail("\n".join(msglines), pytrace=False)
reason = mark.kwargs.get("reason", None)
if reason is None:
if isinstance(condition, str):
reason = "condition: " + condition
else:
# XXX better be checked at collection time
msg = (
"Error evaluating %r: " % mark.name
+ "you need to specify reason=STRING when using booleans as conditions."
)
fail(msg, pytrace=False)
return result, reason
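Concretely, the two condition styles this function accepts look like this (hedged example, not from the diff):

    import sys
    import pytest

    # A string is compiled and eval()'d with os/sys/platform/config plus the
    # test module's globals in scope:
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_string_condition():
        pass

    # Anything else is bool()'d, and reason= is then required:
    @pytest.mark.skipif(sys.platform == "win32", reason="requires POSIX")
    def test_bool_condition():
        pass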
@attr.s(slots=True, frozen=True)
class Skip:
"""The result of evaluate_skip_marks()."""
reason = attr.ib(type=str)
def evaluate_skip_marks(item: Item) -> Optional[Skip]:
"""Evaluate skip and skipif marks on item, returning Skip if triggered."""
for mark in item.iter_markers(name="skipif"):
if "condition" not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
return Skip(reason)
# If any of the conditions are true.
for condition in conditions:
result, reason = evaluate_condition(item, mark, condition)
if result:
return Skip(reason)
for mark in item.iter_markers(name="skip"):
if "reason" in mark.kwargs:
reason = mark.kwargs["reason"]
elif mark.args:
reason = mark.args[0]
else:
reason = "unconditional skip"
return Skip(reason)
return None
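A small sketch of the resulting API (the helper and `item` are hypothetical; skipif marks are checked before plain skip marks, and the first match supplies the reason):

    from _pytest.skipping import evaluate_skip_marks

    def describe_skip(item):  # hypothetical helper; `item` is a collected Item
        skipped = evaluate_skip_marks(item)
        if skipped is not None:
            print(skipped.reason)  # e.g. "condition: sys.platform == 'win32'"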
@attr.s(slots=True, frozen=True)
class Xfail:
"""The result of evaluate_xfail_marks()."""
reason = attr.ib(type=str)
run = attr.ib(type=bool)
strict = attr.ib(type=bool)
raises = attr.ib(type=Optional[Tuple["Type[BaseException]", ...]])
def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
"""Evaluate xfail marks on item, returning Xfail if triggered."""
for mark in item.iter_markers(name="xfail"):
run = mark.kwargs.get("run", True)
strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
raises = mark.kwargs.get("raises", None)
if "condition" not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
return Xfail(reason, run, strict, raises)
# If any of the conditions are true.
for condition in conditions:
result, reason = evaluate_condition(item, mark, condition)
if result:
return Xfail(reason, run, strict, raises)
return None
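And the xfail counterpart, which bundles everything the report hook needs later (same assumptions as above):

    from _pytest.skipping import evaluate_xfail_marks

    def describe_xfail(item):  # hypothetical helper; `item` is a collected Item
        xfailed = evaluate_xfail_marks(item)
        if xfailed is not None:
            print(xfailed.reason, xfailed.run, xfailed.strict, xfailed.raises)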
# Whether skipped due to skip or skipif marks.
skipped_by_mark_key = StoreKey[bool]()
# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StoreKey[Optional[Xfail]]()
unexpectedsuccess_key = StoreKey[str]()
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item: Item) -> None:
    # Check if skip or skipif are specified as pytest marks
    item._store[skipped_by_mark_key] = False
    eval_skipif = MarkEvaluator(item, "skipif")
    if eval_skipif.istrue():
        item._store[skipped_by_mark_key] = True
        skip(eval_skipif.getexplanation())
    for skip_info in item.iter_markers(name="skip"):
        item._store[skipped_by_mark_key] = True
        if "reason" in skip_info.kwargs:
            skip(skip_info.kwargs["reason"])
        elif skip_info.args:
            skip(skip_info.args[0])
        else:
            skip("unconditional skip")
    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
    check_xfail_no_run(item)
    item._store[skipped_by_mark_key] = False
    skipped = evaluate_skip_marks(item)
    if skipped:
        item._store[skipped_by_mark_key] = True
        skip(skipped.reason)
    if not item.config.option.runxfail:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
        if xfailed and not xfailed.run:
            xfail("[NOTRUN] " + xfailed.reason)
@hookimpl(hookwrapper=True)
def pytest_pyfunc_call(pyfuncitem: Function):
    check_xfail_no_run(pyfuncitem)
    outcome = yield
    passed = outcome.excinfo is None
    if passed:
        check_strict_xfail(pyfuncitem)
def check_xfail_no_run(item: Item) -> None:
    """check xfail(run=False)"""
    if not item.config.option.runxfail:
        evalxfail = item._store[evalxfail_key]
        if evalxfail.istrue():
            if not evalxfail.get("run", True):
                xfail("[NOTRUN] " + evalxfail.getexplanation())
def check_strict_xfail(pyfuncitem: Function) -> None:
    """check xfail(strict=True) for the given PASSING test"""
    evalxfail = pyfuncitem._store[evalxfail_key]
    if evalxfail.istrue():
        strict_default = pyfuncitem.config.getini("xfail_strict")
        is_strict_xfail = evalxfail.get("strict", strict_default)
        if is_strict_xfail:
            del pyfuncitem._store[evalxfail_key]
            explanation = evalxfail.getexplanation()
            fail("[XPASS(strict)] " + explanation, pytrace=False)
def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
    xfailed = item._store.get(xfailed_key, None)
    if xfailed is None:
        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
    if not item.config.option.runxfail:
        if xfailed and not xfailed.run:
            xfail("[NOTRUN] " + xfailed.reason)
    yield
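The run=False path above is what produces the [NOTRUN] report; for example (editor's sketch):

    import pytest

    @pytest.mark.xfail(run=False, reason="crashes the interpreter")
    def test_not_run():
        assert False  # never executed; reported as "xfailed [NOTRUN] ..."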
@hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
outcome = yield
rep = outcome.get_result()
evalxfail = item._store.get(evalxfail_key, None)
xfailed = item._store.get(xfailed_key, None)
# unittest special case, see setting of unexpectedsuccess_key
if unexpectedsuccess_key in item._store and rep.when == "call":
reason = item._store[unexpectedsuccess_key]
@@ -145,30 +270,27 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
else:
rep.longrepr = "Unexpected success"
rep.outcome = "failed"
elif item.config.option.runxfail:
pass # don't interfere
elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
assert call.excinfo.value.msg is not None
rep.wasxfail = "reason: " + call.excinfo.value.msg
rep.outcome = "skipped"
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
elif not rep.skipped and xfailed:
if call.excinfo:
if evalxfail.invalidraise(call.excinfo.value):
raises = xfailed.raises
if raises is not None and not isinstance(call.excinfo.value, raises):
rep.outcome = "failed"
else:
rep.outcome = "skipped"
rep.wasxfail = evalxfail.getexplanation()
rep.wasxfail = xfailed.reason
elif call.when == "call":
strict_default = item.config.getini("xfail_strict")
is_strict_xfail = evalxfail.get("strict", strict_default)
explanation = evalxfail.getexplanation()
if is_strict_xfail:
if xfailed.strict:
rep.outcome = "failed"
rep.longrepr = "[XPASS(strict)] {}".format(explanation)
rep.longrepr = "[XPASS(strict)] " + xfailed.reason
else:
rep.outcome = "passed"
rep.wasxfail = explanation
rep.wasxfail = xfailed.reason
elif (
item._store.get(skipped_by_mark_key, True)
and rep.skipped
@@ -183,9 +305,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
rep.longrepr = str(filename), line + 1, reason
# called by terminalreporter progress reporting
def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
if hasattr(report, "wasxfail"):
if report.skipped:
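The strict xpass handling in the makereport hunk above is what turns an unexpected pass into a failure; for example (editor's sketch):

    import pytest

    @pytest.mark.xfail(strict=True, reason="expected to break")
    def test_unexpected_pass():
        assert True  # passes, so it is reported FAILED with "[XPASS(strict)] ..."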

testing/test_skipping.py

@@ -2,68 +2,74 @@ import sys
import pytest
from _pytest.runner import runtestprotocol
from _pytest.skipping import MarkEvaluator
from _pytest.skipping import evaluate_skip_marks
from _pytest.skipping import evaluate_xfail_marks
from _pytest.skipping import pytest_runtest_setup
class TestEvaluator:
class TestEvaluation:
def test_no_marker(self, testdir):
item = testdir.getitem("def test_func(): pass")
evalskipif = MarkEvaluator(item, "skipif")
assert not evalskipif
assert not evalskipif.istrue()
skipped = evaluate_skip_marks(item)
assert not skipped
def test_marked_no_args(self, testdir):
def test_marked_xfail_no_args(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz
@pytest.mark.xfail
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == ""
assert not ev.get("run", False)
xfailed = evaluate_xfail_marks(item)
assert xfailed
assert xfailed.reason == ""
assert xfailed.run
def test_marked_skipif_no_args(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.skipif
def test_func():
pass
"""
)
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == ""
def test_marked_one_arg(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')")
@pytest.mark.skipif("hasattr(os, 'sep')")
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: hasattr(os, 'sep')"
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: hasattr(os, 'sep')"
def test_marked_one_arg_with_reason(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
@pytest.mark.skipif("hasattr(os, 'sep')", attr=2, reason="hello world")
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "hello world"
assert ev.get("attr") == 2
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "hello world"
def test_marked_one_arg_twice(self, testdir):
lines = [
"""@pytest.mark.skipif("not hasattr(os, 'murks')")""",
"""@pytest.mark.skipif("hasattr(os, 'murks')")""",
"""@pytest.mark.skipif(condition="hasattr(os, 'murks')")""",
]
for i in range(0, 2):
item = testdir.getitem(
@@ -76,11 +82,9 @@ class TestEvaluator:
"""
% (lines[i], lines[(i + 1) % 2])
)
ev = MarkEvaluator(item, "skipif")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: not hasattr(os, 'murks')"
def test_marked_one_arg_twice2(self, testdir):
item = testdir.getitem(
@@ -92,13 +96,11 @@ class TestEvaluator:
pass
"""
)
ev = MarkEvaluator(item, "skipif")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: not hasattr(os, 'murks')"
def test_marked_skip_with_not_string(self, testdir) -> None:
def test_marked_skipif_with_boolean_without_reason(self, testdir) -> None:
item = testdir.getitem(
"""
import pytest
@@ -107,14 +109,34 @@ class TestEvaluator:
pass
"""
)
ev = MarkEvaluator(item, "skipif")
exc = pytest.raises(pytest.fail.Exception, ev.istrue)
assert exc.value.msg is not None
with pytest.raises(pytest.fail.Exception) as excinfo:
evaluate_skip_marks(item)
assert excinfo.value.msg is not None
assert (
"""Failed: you need to specify reason=STRING when using booleans as conditions."""
in exc.value.msg
"""Error evaluating 'skipif': you need to specify reason=STRING when using booleans as conditions."""
in excinfo.value.msg
)
def test_marked_skipif_with_invalid_boolean(self, testdir) -> None:
item = testdir.getitem(
"""
import pytest
class InvalidBool:
def __bool__(self):
raise TypeError("INVALID")
@pytest.mark.skipif(InvalidBool(), reason="xxx")
def test_func():
pass
"""
)
with pytest.raises(pytest.fail.Exception) as excinfo:
evaluate_skip_marks(item)
assert excinfo.value.msg is not None
assert "Error evaluating 'skipif' condition as a boolean" in excinfo.value.msg
assert "INVALID" in excinfo.value.msg
def test_skipif_class(self, testdir):
(item,) = testdir.getitems(
"""
@@ -126,10 +148,9 @@ class TestEvaluator:
"""
)
item.config._hackxyz = 3
ev = MarkEvaluator(item, "skipif")
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: config._hackxyz"
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: config._hackxyz"
class TestXFail:
@@ -895,10 +916,10 @@ def test_errors_in_xfail_skip_expressions(testdir) -> None:
result.stdout.fnmatch_lines(
[
"*ERROR*test_nameerror*",
"*evaluating*skipif*expression*",
"*evaluating*skipif*condition*",
"*asd*",
"*ERROR*test_syntax*",
"*evaluating*xfail*expression*",
"*evaluating*xfail*condition*",
" syntax error",
markline,
"SyntaxError: invalid syntax",
@@ -924,25 +945,12 @@ def test_xfail_skipif_with_globals(testdir):
result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"])
def test_direct_gives_error(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skipif(True)
def test_skip1():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 error*"])
def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(
[
"*skipif(*condition)*skip*",
"*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
"*skipif(condition, ..., [*], reason=...)*skip*",
"*xfail(condition, ..., [*], reason=..., run=True, raises=None, strict=xfail_strict)*expected failure*",
]
)
@@ -1137,7 +1145,9 @@ def test_mark_xfail_item(testdir):
class MyItem(pytest.Item):
nodeid = 'foo'
def setup(self):
marker = pytest.mark.xfail(True, reason="Expected failure")
marker = pytest.mark.xfail("1 == 2", reason="Expected failure - false")
self.add_marker(marker)
marker = pytest.mark.xfail(True, reason="Expected failure - true")
self.add_marker(marker)
def runtest(self):
assert False
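Finally, the raises= handling exercised above, as a user would see it (editor's sketch):

    import pytest

    @pytest.mark.xfail(raises=ZeroDivisionError, reason="known division bug")
    def test_division_bug():
        1 / 0  # ZeroDivisionError -> xfailed; any other error -> a real failure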