2010-11-06 18:38:53 +08:00
|
|
|
""" support for skip/xfail functions and markers. """
|
2014-08-01 06:13:40 +08:00
|
|
|
import os
|
2011-03-03 19:19:17 +08:00
|
|
|
import sys
|
2014-08-01 06:13:40 +08:00
|
|
|
import traceback
|
|
|
|
|
|
|
|
import py
|
|
|
|
import pytest
|
2015-09-21 22:19:29 +08:00
|
|
|
from _pytest.mark import MarkInfo
|
2009-10-15 22:18:57 +08:00
|
|
|
|
2015-10-02 05:36:43 +08:00
|
|
|
|
2010-05-04 19:02:27 +08:00
|
|
|
def pytest_addoption(parser):
    """Register the ``--runxfail`` command line option in the 'general' group."""
    general = parser.getgroup("general")
    general.addoption(
        '--runxfail',
        action="store_true",
        dest="runxfail",
        default=False,
        help="run tests even if they are marked xfail",
    )
|
|
|
|
|
2015-10-02 05:36:43 +08:00
|
|
|
|
2011-11-12 06:56:11 +08:00
|
|
|
def pytest_configure(config):
    """Register skipif/xfail marker documentation.

    With ``--runxfail``, additionally replace ``pytest.xfail`` with a
    no-op for the duration of the session so that xfail-marked tests run
    and report like ordinary tests; the original is restored via
    ``config._cleanup`` at unconfigure time.
    """
    if config.option.runxfail:
        old = pytest.xfail
        # restore the real pytest.xfail when the session is torn down
        config._cleanup.append(lambda: setattr(pytest, "xfail", old))

        def nop(*args, **kwargs):
            pass

        # keep the .Exception attribute so code catching
        # pytest.xfail.Exception still works while the no-op is installed
        nop.Exception = XFailed
        setattr(pytest, "xfail", nop)

    config.addinivalue_line("markers",
        "skipif(condition): skip the given test function if eval(condition) "
        "results in a True value. Evaluation happens within the "
        "module global context. Example: skipif('sys.platform == \"win32\"') "
        "skips the test if we are on the win32 platform. see "
        "http://pytest.org/latest/skipping.html"
    )
    config.addinivalue_line("markers",
        # fixed doubled word ("mark the the test function") in help text
        "xfail(condition, reason=None, run=True, raises=None): mark the test function "
        "as an expected failure if eval(condition) has a True value. "
        "Optionally specify a reason for better reporting and run=False if "
        "you don't even want to execute the test function. If only specific "
        "exception(s) are expected, you can list them in raises, and if the test fails "
        "in other ways, it will be reported as a true failure. "
        "See http://pytest.org/latest/skipping.html"
    )
|
|
|
|
|
2015-10-02 05:36:43 +08:00
|
|
|
|
2010-11-06 06:37:31 +08:00
|
|
|
def pytest_namespace():
    """Expose :func:`xfail` on the ``pytest`` namespace."""
    return {'xfail': xfail}
|
|
|
|
|
2015-10-02 05:36:43 +08:00
|
|
|
|
2010-11-06 06:37:31 +08:00
|
|
|
# Subclassing pytest.fail.Exception means an explicit xfail terminates the
# test in the same way a failure would; pytest_runtest_makereport then
# recognizes it via pytest.xfail.Exception and records it as xfailed.
class XFailed(pytest.fail.Exception):
    """ raised from an explicit call to pytest.xfail() """
|
2010-11-06 06:37:31 +08:00
|
|
|
|
2015-10-02 05:36:43 +08:00
|
|
|
|
2010-11-06 06:37:31 +08:00
|
|
|
def xfail(reason=""):
    """ xfail an executing test or setup functions with the given reason."""
    __tracebackhide__ = True  # hide this frame from failure tracebacks
    raise XFailed(reason)

# Expose the exception type so callers can catch pytest.xfail.Exception;
# pytest_runtest_makereport relies on this attribute to detect explicit xfails.
xfail.Exception = XFailed
|
|
|
|
|
2015-10-02 05:36:43 +08:00
|
|
|
|
2015-10-03 23:55:04 +08:00
|
|
|
class MarkEvaluator:
    """Evaluate a skipif/xfail mark's condition(s) for one test item.

    Wraps the mark object stored under ``name`` in ``item.keywords``
    (a MarkInfo or MarkDecorator) and lazily evaluates its condition
    arguments.  The boolean outcome is cached on ``self.result``;
    evaluation errors are recorded on ``self.exc`` and surfaced to the
    user via ``pytest.fail``.
    """

    def __init__(self, item, name):
        # item: the test item whose keywords hold the mark
        # name: the mark name to look up ('skipif' or 'xfail')
        self.item = item
        self.name = name

    @property
    def holder(self):
        # The mark object registered for self.name, or None if absent.
        return self.item.keywords.get(self.name)

    def __bool__(self):
        # Truthiness mirrors the presence of the mark on the item.
        return bool(self.holder)
    __nonzero__ = __bool__  # Python 2 spelling of __bool__

    def wasvalid(self):
        """Return True if no exception occurred while evaluating the mark."""
        return not hasattr(self, 'exc')

    def invalidraise(self, exc):
        """Return True if *exc* does not match the mark's ``raises=`` spec.

        Returns None when no ``raises`` argument was given, meaning any
        exception counts as the expected failure.
        """
        raises = self.get('raises')
        if not raises:
            return
        return not isinstance(exc, raises)

    def istrue(self):
        """Evaluate the condition, turning evaluation errors into a fail.

        A broken condition expression (syntax error, NameError, ...) is
        reported with ``pytest.fail(..., pytrace=False)`` so the user sees
        the offending expression instead of an internal traceback.
        """
        try:
            return self._istrue()
        except Exception:
            self.exc = sys.exc_info()
            if isinstance(self.exc[1], SyntaxError):
                # point a caret at the offending column; +4 compensates for
                # the indentation of the expression in the message below
                msg = [" " * (self.exc[1].offset + 4) + "^",]
                msg.append("SyntaxError: invalid syntax")
            else:
                msg = traceback.format_exception_only(*self.exc[:2])
            pytest.fail("Error evaluating %r expression\n"
                        " %s\n"
                        "%s"
                        %(self.name, self.expr, "\n".join(msg)),
                        pytrace=False)

    def _getglobals(self):
        """Build the globals dict used to eval string conditions.

        Includes os/sys/config plus the test function's own module globals
        so conditions can reference names imported by the test module.
        """
        d = {'os': os, 'sys': sys, 'config': self.item.config}
        func = self.item.obj
        try:
            d.update(func.__globals__)
        except AttributeError:
            # Python 2 spelling of the function-globals attribute
            d.update(func.func_globals)
        return d

    def _istrue(self):
        """Evaluate all condition args; True as soon as one holds."""
        if hasattr(self, 'result'):
            return self.result
        if self.holder:
            d = self._getglobals()
            if self.holder.args:
                self.result = False
                # "holder" might be a MarkInfo or a MarkDecorator; only
                # MarkInfo keeps track of all parameters it received in an
                # _arglist attribute
                if hasattr(self.holder, '_arglist'):
                    arglist = self.holder._arglist
                else:
                    arglist = [(self.holder.args, self.holder.kwargs)]
                for args, kwargs in arglist:
                    for expr in args:
                        self.expr = expr
                        if isinstance(expr, py.builtin._basestring):
                            # string condition: eval in the module's globals
                            result = cached_eval(self.item.config, expr, d)
                        else:
                            # boolean condition: a reason is mandatory since
                            # the expression itself is not self-describing
                            if "reason" not in kwargs:
                                # XXX better be checked at collection time
                                msg = "you need to specify reason=STRING " \
                                      "when using booleans as conditions."
                                pytest.fail(msg)
                            result = bool(expr)
                        if result:
                            self.result = True
                            self.reason = kwargs.get('reason', None)
                            self.expr = expr
                            return self.result
            else:
                # a mark given without arguments applies unconditionally
                self.result = True
        return getattr(self, 'result', False)

    def get(self, attr, default=None):
        """Return keyword argument *attr* of the mark, or *default*."""
        return self.holder.kwargs.get(attr, default)

    def getexplanation(self):
        """Return a human-readable reason for the skip/xfail outcome."""
        expl = getattr(self, 'reason', None) or self.get('reason', None)
        if not expl:
            if not hasattr(self, 'expr'):
                return ""
            else:
                return "condition: " + str(self.expr)
        return expl
|
2010-07-27 03:15:15 +08:00
|
|
|
|
2009-12-29 23:29:48 +08:00
|
|
|
|
2015-05-06 16:08:08 +08:00
|
|
|
@pytest.hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    """Honour skipif / skip / xfail marks before the test runs."""
    # skipif: evaluate the condition and skip when it holds
    skipif_marker = item.keywords.get('skipif')
    if isinstance(skipif_marker, MarkInfo):
        skipif_eval = MarkEvaluator(item, 'skipif')
        if skipif_eval.istrue():
            item._evalskip = skipif_eval
            pytest.skip(skipif_eval.getexplanation())

    # skip: unconditional; prefer the reason kwarg, then the first
    # positional argument, then a generic message
    skip_marker = item.keywords.get('skip')
    if isinstance(skip_marker, MarkInfo):
        item._evalskip = True
        kwargs = skip_marker.kwargs
        if 'reason' in kwargs:
            pytest.skip(kwargs['reason'])
        elif skip_marker.args:
            pytest.skip(skip_marker.args[0])
        else:
            pytest.skip("unconditional skip")

    # xfail: remember the evaluator for makereport and abort here
    # when the mark says run=False
    item._evalxfail = MarkEvaluator(item, 'xfail')
    check_xfail_no_run(item)
|
|
|
|
|
|
|
|
def pytest_pyfunc_call(pyfuncitem):
    # Re-check xfail(run=False) immediately before the test function body
    # executes. NOTE(review): presumably this catches marks added after
    # pytest_runtest_setup already ran -- confirm against callers.
    check_xfail_no_run(pyfuncitem)
|
|
|
|
|
|
|
|
def check_xfail_no_run(item):
    """Trigger an immediate xfail for ``xfail(..., run=False)`` marks."""
    if item.config.option.runxfail:
        # --runxfail disables all xfail special-casing
        return
    evalxfail = item._evalxfail
    if evalxfail.istrue() and not evalxfail.get('run', True):
        pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
|
2009-10-15 22:18:57 +08:00
|
|
|
|
2015-05-06 16:08:08 +08:00
|
|
|
@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    """Rewrite the test report outcome for xfail/xpass/skip cases.

    Runs as a hook wrapper: lets the regular report be built first, then
    mutates its ``outcome``/``wasxfail``/``longrepr`` fields in place.
    """
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, '_evalxfail', None)
    evalskip = getattr(item, '_evalskip', None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
        # we need to translate into how pytest encodes xpass
        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
        rep.outcome = "failed"
    elif item.config.option.runxfail:
        pass   # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
        # explicit pytest.xfail() call inside the test or its setup
        rep.wasxfail = "reason: " + call.excinfo.value.msg
        rep.outcome = "skipped"
    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
        evalxfail.istrue():
        if call.excinfo:
            # failed as expected -- unless a raises= spec rules it out
            if evalxfail.invalidraise(call.excinfo.value):
                rep.outcome = "failed"
            else:
                rep.outcome = "skipped"
                rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            rep.outcome = "failed"  # xpass outcome
            rep.wasxfail = evalxfail.getexplanation()
    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest
        filename, line, reason = rep.longrepr
        filename, line = item.location[:2]
        rep.longrepr = filename, line, reason
|
2009-10-15 22:18:57 +08:00
|
|
|
|
2009-10-23 00:37:24 +08:00
|
|
|
# called by terminalreporter progress reporting
|
2009-10-15 22:18:57 +08:00
|
|
|
def pytest_report_teststatus(report):
    """Map xfail-annotated reports to xfailed/xpassed terminal statuses."""
    if not hasattr(report, "wasxfail"):
        # not an xfail-related report; let other plugins decide
        return
    if report.skipped:
        return "xfailed", "x", "xfail"
    if report.failed:
        return "xpassed", "X", ("XPASS", {'yellow': True})
|
2009-10-15 22:18:57 +08:00
|
|
|
|
|
|
|
# called by the terminalreporter instance/plugin
|
|
|
|
def pytest_terminal_summary(terminalreporter):
    """Write the '-r' short test summary section, one entry per report char."""
    tr = terminalreporter
    if not tr.reportchars:
        return

    lines = []
    for flag in tr.reportchars:
        if flag == "x":
            show_xfailed(tr, lines)
        elif flag == "X":
            show_xpassed(tr, lines)
        elif flag in "fF":
            show_simple(tr, lines, 'failed', "FAIL %s")
        elif flag in "sS":
            show_skipped(tr, lines)
        elif flag == "E":
            show_simple(tr, lines, 'error', "ERROR %s")
        elif flag == 'p':
            show_simple(tr, lines, 'passed', "PASSED %s")

    if not lines:
        return
    tr._tw.sep("=", "short test summary info")
    for line in lines:
        tr._tw.line(line)
|
|
|
|
|
2011-09-30 05:44:26 +08:00
|
|
|
def show_simple(terminalreporter, lines, stat, format):
    """Append ``format % nodeid`` for every report stored under *stat*."""
    reports = terminalreporter.stats.get(stat)
    if not reports:
        return
    for rep in reports:
        nodeid = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
        lines.append(format % (nodeid,))
|
2010-05-06 01:50:59 +08:00
|
|
|
|
|
|
|
def show_xfailed(terminalreporter, lines):
    """Append 'XFAIL <nodeid>' plus an indented reason line (if any)."""
    for rep in terminalreporter.stats.get("xfailed") or []:
        nodeid = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
        lines.append("XFAIL %s" % (nodeid,))
        reason = rep.wasxfail
        if reason:
            lines.append(" " + str(reason))
|
2009-10-15 22:18:57 +08:00
|
|
|
|
2010-05-06 01:50:59 +08:00
|
|
|
def show_xpassed(terminalreporter, lines):
    """Append 'XPASS <nodeid> <reason>' per unexpectedly-passing test."""
    for rep in terminalreporter.stats.get("xpassed") or []:
        nodeid = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
        lines.append("XPASS %s %s" % (nodeid, rep.wasxfail))
|
2009-10-15 22:18:57 +08:00
|
|
|
|
2010-04-21 18:50:03 +08:00
|
|
|
def cached_eval(config, expr, d):
    """Evaluate string condition *expr* with globals *d*, caching per config.

    NOTE(review): the cache key is the expression text only, so identical
    expressions are assumed to evaluate the same for every test module's
    globals -- confirm before reusing elsewhere.
    """
    if not hasattr(config, '_evalcache'):
        config._evalcache = {}
    cache = config._evalcache
    if expr in cache:
        return cache[expr]
    import _pytest._code
    exprcode = _pytest._code.compile(expr, mode="eval")
    value = eval(exprcode, d)
    cache[expr] = value
    return value
|
|
|
|
|
|
|
|
|
2009-10-17 23:42:40 +08:00
|
|
|
def folded_skips(skipped):
    """Group skip reports by their (fspath, lineno, reason) longrepr.

    Returns a list of ``(count, fspath, lineno, reason)`` tuples, one per
    distinct skip location/reason.
    """
    grouped = {}
    for event in skipped:
        key = event.longrepr
        assert len(key) == 3, (event, key)
        grouped.setdefault(key, []).append(event)
    return [(len(events),) + key for key, events in grouped.items()]
|
2009-10-17 23:42:40 +08:00
|
|
|
|
2010-05-06 01:50:59 +08:00
|
|
|
def show_skipped(terminalreporter, lines):
    """Append folded 'SKIP [n] file:line: reason' entries to *lines*."""
    skipped = terminalreporter.stats.get('skipped', [])
    if not skipped:
        return
    for num, fspath, lineno, reason in folded_skips(skipped):
        # drop the "Skipped: " prefix that pytest prepends to skip reasons
        if reason.startswith("Skipped: "):
            reason = reason[9:]
        lines.append("SKIP [%d] %s:%d: %s" %
                     (num, fspath, lineno, reason))
|