Merge pull request #2773 from RonnyPfannschmidt/fix-markeval-2767

refactor mark evaluators
Bruno Oliveira 2017-10-09 12:17:15 -03:00 committed by GitHub
commit 059455b45d
4 changed files with 60 additions and 47 deletions
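For context, MarkEvaluator is pytest's internal helper for deciding whether skip/skipif/xfail conditions hold for a test item. A minimal, hypothetical test module showing the kind of marks it evaluates (not part of this commit):

import sys

import pytest


# String conditions are eval'd by MarkEvaluator against the item's globals.
@pytest.mark.skipif("sys.platform == 'win32'", reason="POSIX-only behaviour")
def test_posix_paths():
    assert not sys.platform.startswith("win")


# Boolean conditions must carry an explicit reason (enforced in _istrue below).
@pytest.mark.skipif(sys.version_info < (3, 0), reason="requires Python 3")
def test_py3_feature():
    pass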

_pytest/skipping.py
View File

@@ -60,22 +60,31 @@ def pytest_configure(config):
     )
 
 
-class MarkEvaluator:
+class MarkEvaluator(object):
     def __init__(self, item, name):
         self.item = item
-        self.name = name
-
-    @property
-    def holder(self):
-        return self.item.keywords.get(self.name)
+        self._marks = None
+        self._mark = None
+        self._mark_name = name
 
     def __bool__(self):
-        return bool(self.holder)
+        self._marks = self._get_marks()
+        return bool(self._marks)
     __nonzero__ = __bool__
 
     def wasvalid(self):
         return not hasattr(self, 'exc')
 
+    def _get_marks(self):
+        keyword = self.item.keywords.get(self._mark_name)
+        if isinstance(keyword, MarkDecorator):
+            return [keyword.mark]
+        elif isinstance(keyword, MarkInfo):
+            return [x.combined for x in keyword]
+        else:
+            return []
+
     def invalidraise(self, exc):
         raises = self.get('raises')
         if not raises:
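The new _get_marks helper is the core of the refactor: whatever the keyword lookup returns, a single MarkDecorator or a MarkInfo aggregating several applications of the same mark, it is normalized into a plain list of Mark objects. A rough usage sketch (internal pytest 3.x names as shown above; illustration only, not public API):

from _pytest.skipping import MarkEvaluator

def describe_skipif_marks(item):
    # 'item' is assumed to be a collected pytest item (hypothetical helper).
    evaluator = MarkEvaluator(item, 'skipif')
    marks = evaluator._get_marks()  # [] when no skipif mark is present
    for mark in marks:              # one entry per applied @pytest.mark.skipif
        print(mark.args, mark.kwargs)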
@@ -95,7 +104,7 @@ class MarkEvaluator:
             fail("Error evaluating %r expression\n"
                  "    %s\n"
                  "%s"
-                 % (self.name, self.expr, "\n".join(msg)),
+                 % (self._mark_name, self.expr, "\n".join(msg)),
                  pytrace=False)
 
     def _getglobals(self):
@@ -107,40 +116,45 @@ class MarkEvaluator:
     def _istrue(self):
         if hasattr(self, 'result'):
             return self.result
-        if self.holder:
-            if self.holder.args or 'condition' in self.holder.kwargs:
-                self.result = False
-                # "holder" might be a MarkInfo or a MarkDecorator; only
-                # MarkInfo keeps track of all parameters it received in an
-                # _arglist attribute
-                marks = getattr(self.holder, '_marks', None) \
-                    or [self.holder.mark]
-                for _, args, kwargs in marks:
-                    if 'condition' in kwargs:
-                        args = (kwargs['condition'],)
-                    for expr in args:
-                        self.expr = expr
-                        if isinstance(expr, six.string_types):
-                            d = self._getglobals()
-                            result = cached_eval(self.item.config, expr, d)
-                        else:
-                            if "reason" not in kwargs:
-                                # XXX better be checked at collection time
-                                msg = "you need to specify reason=STRING " \
-                                      "when using booleans as conditions."
-                                fail(msg)
-                            result = bool(expr)
-                        if result:
-                            self.result = True
-                            self.reason = kwargs.get('reason', None)
-                            self.expr = expr
-                            return self.result
-            else:
-                self.result = True
-        return getattr(self, 'result', False)
+        self._marks = self._get_marks()
+
+        if self._marks:
+            self.result = False
+            for mark in self._marks:
+                self._mark = mark
+                if 'condition' in mark.kwargs:
+                    args = (mark.kwargs['condition'],)
+                else:
+                    args = mark.args
+
+                for expr in args:
+                    self.expr = expr
+                    if isinstance(expr, six.string_types):
+                        d = self._getglobals()
+                        result = cached_eval(self.item.config, expr, d)
+                    else:
+                        if "reason" not in mark.kwargs:
+                            # XXX better be checked at collection time
+                            msg = "you need to specify reason=STRING " \
+                                  "when using booleans as conditions."
+                            fail(msg)
+                        result = bool(expr)
+                    if result:
+                        self.result = True
+                        self.reason = mark.kwargs.get('reason', None)
+                        self.expr = expr
+                        return self.result
+
+                if not args:
+                    self.result = True
+                    self.reason = mark.kwargs.get('reason', None)
+                    return self.result
+        return False
 
     def get(self, attr, default=None):
-        return self.holder.kwargs.get(attr, default)
+        if self._mark is None:
+            return default
+        return self._mark.kwargs.get(attr, default)
 
     def getexplanation(self):
         expl = getattr(self, 'reason', None) or self.get('reason', None)
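The rewritten _istrue now walks every normalized mark and every condition, where the old code consulted a single "holder"; presumably this multi-mark case is what issue #2767 is about. A hypothetical test exercising the new loop:

import pytest

# Two independent skipif marks on one function: each condition is now
# evaluated on its own, and the mark whose condition holds supplies the reason.
@pytest.mark.skipif('False', reason="never fires")
@pytest.mark.skipif('True', reason="always fires")
def test_stacked_skipifs():
    pass  # reported as skipped with reason "always fires"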
@@ -155,17 +169,17 @@ class MarkEvaluator:
 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item):
     # Check if skip or skipif are specified as pytest marks
-
+    item._skipped_by_mark = False
     skipif_info = item.keywords.get('skipif')
     if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
         eval_skipif = MarkEvaluator(item, 'skipif')
         if eval_skipif.istrue():
-            item._evalskip = eval_skipif
+            item._skipped_by_mark = True
             skip(eval_skipif.getexplanation())
 
     skip_info = item.keywords.get('skip')
     if isinstance(skip_info, (MarkInfo, MarkDecorator)):
-        item._evalskip = True
+        item._skipped_by_mark = True
         if 'reason' in skip_info.kwargs:
             skip(skip_info.kwargs['reason'])
         elif skip_info.args:
@@ -212,7 +226,6 @@ def pytest_runtest_makereport(item, call):
     outcome = yield
     rep = outcome.get_result()
     evalxfail = getattr(item, '_evalxfail', None)
-    evalskip = getattr(item, '_evalskip', None)
     # unittest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
         from _pytest.compat import _is_unittest_unexpected_success_a_failure
@@ -248,7 +261,7 @@ def pytest_runtest_makereport(item, call):
         else:
             rep.outcome = "passed"
             rep.wasxfail = explanation
-    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
+    elif item._skipped_by_mark and rep.skipped and type(rep.longrepr) is tuple:
         # skipped by mark.skipif; change the location of the failure
         # to point to the item definition, otherwise it will display
         # the location of where the skip exception was raised within pytest
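The body of this elif (below the hunk's context) rewrites the skip's reported location; roughly, as a sketch of the surrounding code rather than a line of this diff:

# sketch: longrepr for a mark-based skip is a (file, line, reason) tuple;
# repoint it from pytest's internals to where the test item is defined
_, _, reason = rep.longrepr
filename, line = item.location[:2]
rep.longrepr = filename, line, reason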

_pytest/unittest.py
View File

@@ -9,7 +9,6 @@ import _pytest._code
 from _pytest.config import hookimpl
 from _pytest.outcomes import fail, skip, xfail
 from _pytest.python import transfer_markers, Class, Module, Function
-from _pytest.skipping import MarkEvaluator
 
 
 def pytest_pycollect_makeitem(collector, name, obj):
@@ -134,8 +133,7 @@ class TestCaseFunction(Function):
         try:
             skip(reason)
         except skip.Exception:
-            self._evalskip = MarkEvaluator(self, 'SkipTest')
-            self._evalskip.result = True
+            self._skipped_by_mark = True
             self._addexcinfo(sys.exc_info())
 
     def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
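With the MarkEvaluator shim gone, the unittest plugin records only the boolean it actually needs. For reference, the kind of test that reaches this addSkip path (plain unittest, hypothetical example):

import unittest

class TestExample(unittest.TestCase):
    def test_irrelevant_here(self):
        # raises unittest.SkipTest; pytest's addSkip above converts it to a
        # pytest skip and now simply flags the item with _skipped_by_mark
        self.skipTest("not relevant on this platform")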

changelog/2767.removal Normal file
View File

@@ -0,0 +1 @@
+Remove the internal multi-typed attribute ``Node._evalskip`` and replace it with the boolean ``Node._skipped_by_mark``.
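For third-party code that inspected the old attribute, the migration is mechanical; a hedged sketch of plugin-side code (not part of this commit):

# before: _evalskip could be None, True, or a MarkEvaluator instance
was_skipped = getattr(item, '_evalskip', None) is not None

# after: a plain boolean; default to False for items that never ran setup
was_skipped = getattr(item, '_skipped_by_mark', False)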

changelog/2767.trivial Normal file
View File

@@ -0,0 +1 @@
+* remove unnecessary mark evaluator in unittest plugin