add unit-tests for xfail and refine xfail handling and reporting
--HG-- branch : trunk
parent dd7fd97810
commit 28150c7486
@@ -123,44 +123,83 @@ within test or setup code. Example::

    py.test.skip("unsuppored configuration")

"""
# XXX py.test.skip, .importorskip and the Skipped class
# should also be defined in this plugin, requires thought/changes

import py

class MarkEvaluator:
    def __init__(self, item, name):
        self.item = item
        self.name = name
        self.holder = getattr(item.obj, name, None)

    def __bool__(self):
        return bool(self.holder)
    __nonzero__ = __bool__

    def istrue(self):
        if self.holder:
            d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
            self.result = True
            for expr in self.holder.args:
                self.expr = expr
                if isinstance(expr, str):
                    result = cached_eval(self.item.config, expr, d)
                else:
                    result = expr
                if not result:
                    self.result = False
                    self.expr = expr
                    break
        return getattr(self, 'result', False)

    def get(self, attr, default=None):
        return self.holder.kwargs.get(attr, default)

    def getexplanation(self):
        expl = self.get('reason', None)
        if not expl:
            if not hasattr(self, 'expr'):
                return "condition: True"
            else:
                return "condition: " + self.expr
        return expl


def pytest_runtest_setup(item):
    if not isinstance(item, py.test.collect.Function):
        return
    expr, result = evalexpression(item, 'skipif')
    if result:
        py.test.skip(expr)
    holder = getattr(item.obj, 'xfail', None)
    if holder and not holder.kwargs.get('run', True):
        py.test.skip("<did not run>")
    evalskip = MarkEvaluator(item, 'skipif')
    if evalskip.istrue():
        py.test.skip(evalskip.getexplanation())
    item._evalxfail = MarkEvaluator(item, 'xfail')
    if item._evalxfail.istrue():
        if not item._evalxfail.get('run', True):
            py.test.skip("xfail")

def pytest_runtest_makereport(__multicall__, item, call):
    if not isinstance(item, py.test.collect.Function):
        return
    if call.when == "setup":
        holder = getattr(item.obj, 'xfail', None)
        if holder:
            rep = __multicall__.execute()
            reason = holder.kwargs.get("reason", "<no reason given>")
            rep.keywords['xfail'] = "[not run] " + reason
            return rep
    evalxfail = getattr(item, '_evalxfail', None)
    if not evalxfail:
        return
    elif call.when == "call":
        expr, result = evalexpression(item, 'xfail')
    if call.when == "setup":
        rep = __multicall__.execute()
        if result:
        if rep.skipped and evalxfail.istrue():
            expl = evalxfail.getexplanation()
            if not evalxfail.get("run", True):
                expl = "[NOTRUN] " + expl
            rep.keywords['xfail'] = expl
        return rep
    elif call.when == "call":
        rep = __multicall__.execute()
        if evalxfail.istrue():
            if call.excinfo:
                rep.skipped = True
                rep.failed = rep.passed = False
            else:
                rep.skipped = rep.passed = False
                rep.failed = True
            rep.keywords['xfail'] = expr
            rep.keywords['xfail'] = evalxfail.getexplanation()
        else:
            if 'xfail' in rep.keywords:
                del rep.keywords['xfail']
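The MarkEvaluator introduced above walks the positional arguments of a skipif/xfail mark, eval()-ing string conditions in a namespace that exposes os, sys and the config object, and stops at the first falsey result. The following standalone sketch (FakeMark, FakeConfig and evaluate are illustrative names, not part of this patch) mirrors that loop outside of py.test:

# Illustrative sketch only -- mimics the loop in MarkEvaluator.istrue() without py.test objects.
import os
import sys

class FakeConfig:
    _hackxyz = 0              # stands in for an attribute a conftest might set on config

class FakeMark:
    def __init__(self, *args, **kwargs):
        self.args = args      # positional args: conditions (strings or plain booleans)
        self.kwargs = kwargs  # keyword args: reason=..., run=...

def evaluate(mark, config):
    namespace = {'os': os, 'sys': sys, 'config': config}
    result, expr = True, None
    for expr in mark.args:
        # string conditions are eval()-ed, anything else is taken as a boolean
        result = eval(expr, namespace) if isinstance(expr, str) else expr
        if not result:
            break
    return bool(result), expr

mark = FakeMark("hasattr(os, 'sep')", "config._hackxyz", reason="demo")
print(evaluate(mark, FakeConfig()))   # -> (False, 'config._hackxyz')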
@@ -190,43 +229,17 @@ def show_xfailed(terminalreporter):
        return
    tr.write_sep("_", "expected failures")
    for rep in xfailed:
        entry = rep.longrepr.reprcrash
        modpath = rep.item.getmodpath(includemodule=True)
        pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
        if rep.keywords['xfail']:
            reason = rep.keywords['xfail'].strip()
        else:
            reason = rep.longrepr.reprcrash.message
        i = reason.find("\n")
        if i != -1:
            reason = reason[:i]
        pos = terminalreporter.gettestid(rep.item)
        reason = rep.keywords['xfail']
        tr._tw.line("%s %s" %(pos, reason))

    xpassed = terminalreporter.stats.get("xpassed")
    if xpassed:
        tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
        for rep in xpassed:
            fspath, lineno, modpath = rep.item.reportinfo()
            pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
            tr._tw.line(pos)


def evalexpression(item, keyword):
    if isinstance(item, py.test.collect.Function):
        markholder = getattr(item.obj, keyword, None)
        result = False
        if markholder:
            d = {'os': py.std.os, 'sys': py.std.sys, 'config': item.config}
            expr, result = None, True
            for expr in markholder.args:
                if isinstance(expr, str):
                    result = cached_eval(item.config, expr, d)
                else:
                    result = expr
                if not result:
                    break
            return expr, result
    return None, False
        pos = terminalreporter.gettestid(rep.item)
        reason = rep.keywords['xfail']
        tr._tw.line("%s %s" %(pos, reason))

def cached_eval(config, expr, d):
    if not hasattr(config, '_evalcache'):
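The hunk is cut off by the diff context right after the cached_eval signature. Assuming it simply memoizes evaluated condition strings on the config object, which is what the name and the '_evalcache' attribute check suggest, a plausible continuation would look like this:

# Hedged sketch of how cached_eval could continue -- not taken verbatim from the patch.
def cached_eval(config, expr, d):
    if not hasattr(config, '_evalcache'):
        config._evalcache = {}              # lazily create a per-config cache
    try:
        return config._evalcache[expr]
    except KeyError:
        result = config._evalcache[expr] = eval(expr, d)
        return result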
@@ -1,40 +1,185 @@
import py

def test_xfail_not_report_default(testdir):
    p = testdir.makepyfile(test_one="""
        import py
        @py.test.mark.xfail
        def test_this():
            assert 0
    """)
    result = testdir.runpytest(p, '-v')
    result.stdout.fnmatch_lines([
        "*1 expected failures*--report=xfailed*",
    ])
from py._plugin.pytest_skipping import MarkEvaluator
from py._plugin.pytest_skipping import pytest_runtest_setup
from py._plugin.pytest_runner import runtestprotocol

def test_xfail_not_run(testdir):
    p = testdir.makepyfile(test_one="""
        import py
        @py.test.mark.xfail(run=False, reason="noway")
        def test_this():
            assert 0
        @py.test.mark.xfail("True", run=False, reason="noway")
        def test_this_true():
            assert 0
        @py.test.mark.xfail("False", run=True, reason="huh")
        def test_this_false():
            assert 1
    """)
    result = testdir.runpytest(p, '-v')
    result.stdout.fnmatch_lines([
        "*2 expected failures*--report=xfailed*",
        "*1 passed*",
    ])
    result = testdir.runpytest(p, '--report=xfailed', )
    result.stdout.fnmatch_lines([
        "*test_one*test_this*not run*noway",
        "*test_one*test_this_true*not run*noway",
    ])
class TestEvaluator:
    def test_no_marker(self, testdir):
        item = testdir.getitem("def test_func(): pass")
        evalskipif = MarkEvaluator(item, 'skipif')
        assert not evalskipif
        assert not evalskipif.istrue()

    def test_marked_no_args(self, testdir):
        item = testdir.getitem("""
            import py
            @py.test.mark.xyz
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'xyz')
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "condition: True"
        assert not ev.get("run", False)

    def test_marked_one_arg(self, testdir):
        item = testdir.getitem("""
            import py
            @py.test.mark.xyz("hasattr(os, 'sep')")
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'xyz')
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "condition: hasattr(os, 'sep')"

    def test_marked_one_arg_with_reason(self, testdir):
        item = testdir.getitem("""
            import py
            @py.test.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
            def test_func():
                pass
        """)
        ev = MarkEvaluator(item, 'xyz')
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "hello world"
        assert ev.get("attr") == 2

    def test_skipif_class(self, testdir):
        item, = testdir.getitems("""
            import py
            class TestClass:
                pytestmark = py.test.mark.skipif("config._hackxyz")
                def test_func(self):
                    pass
        """)
        item.config._hackxyz = 3
        ev = MarkEvaluator(item, 'skipif')
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "condition: config._hackxyz"


class TestXFail:
    def test_xfail_simple(self, testdir):
        item = testdir.getitem("""
            import py
            @py.test.mark.xfail
            def test_func():
                assert 0
        """)
        reports = runtestprotocol(item, log=False)
        assert len(reports) == 3
        callreport = reports[1]
        assert callreport.skipped
        expl = callreport.keywords['xfail']
        assert expl == "condition: True"

    def test_xfail_xpassed(self, testdir):
        item = testdir.getitem("""
            import py
            @py.test.mark.xfail
            def test_func():
                assert 1
        """)
        reports = runtestprotocol(item, log=False)
        assert len(reports) == 3
        callreport = reports[1]
        assert callreport.failed
        expl = callreport.keywords['xfail']
        assert expl == "condition: True"

    def test_xfail_evalfalse_but_fails(self, testdir):
        item = testdir.getitem("""
            import py
            @py.test.mark.xfail('False')
            def test_func():
                assert 0
        """)
        reports = runtestprotocol(item, log=False)
        callreport = reports[1]
        assert callreport.failed
        assert 'xfail' not in callreport.keywords

    def test_xfail_not_report_default(self, testdir):
        p = testdir.makepyfile(test_one="""
            import py
            @py.test.mark.xfail
            def test_this():
                assert 0
        """)
        result = testdir.runpytest(p, '-v')
        result.stdout.fnmatch_lines([
            "*1 expected failures*--report=xfailed*",
        ])

    def test_xfail_not_run_xfail_reporting(self, testdir):
        p = testdir.makepyfile(test_one="""
            import py
            @py.test.mark.xfail(run=False, reason="noway")
            def test_this():
                assert 0
            @py.test.mark.xfail("True", run=False)
            def test_this_true():
                assert 0
            @py.test.mark.xfail("False", run=False, reason="huh")
            def test_this_false():
                assert 1
        """)
        result = testdir.runpytest(p, '--report=xfailed', )
        result.stdout.fnmatch_lines([
            "*test_one*test_this*NOTRUN*noway",
            "*test_one*test_this_true*NOTRUN*condition:*True*",
            "*1 passed*",
        ])

    def test_xfail_xpass(self, testdir):
        p = testdir.makepyfile(test_one="""
            import py
            @py.test.mark.xfail
            def test_that():
                assert 1
        """)
        result = testdir.runpytest(p, '--report=xfailed')
        result.stdout.fnmatch_lines([
            "*UNEXPECTEDLY PASSING*",
            "*test_that*",
            "*1 xpassed*"
        ])
        assert result.ret == 1

class TestSkipif:
    def test_skipif_conditional(self, testdir):
        item = testdir.getitem("""
            import py
            @py.test.mark.skipif("hasattr(os, 'sep')")
            def test_func():
                pass
        """)
        x = py.test.raises(py.test.skip.Exception, "pytest_runtest_setup(item)")
        assert x.value.msg == "condition: hasattr(os, 'sep')"


    def test_skipif_reporting(self, testdir):
        p = testdir.makepyfile("""
            import py
            @py.test.mark.skipif("hasattr(sys, 'platform')")
            def test_that():
                assert 0
        """)
        result = testdir.runpytest(p, '-s', '--report=skipped')
        result.stdout.fnmatch_lines([
            "*Skipped*platform*",
            "*1 skipped*"
        ])
        assert result.ret == 0

def test_skip_not_report_default(testdir):
    p = testdir.makepyfile(test_one="""
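The TestXFail tests above inspect the reports returned by runtestprotocol directly rather than going through a full test run. The pattern they rely on can be condensed as follows (inspect_xfail is an illustrative helper, not part of the test file; it assumes the setup/call/teardown report order that the reports[1] indexing in the tests depends on):

# Sketch of the report-inspection pattern used by TestXFail.
from py._plugin.pytest_runner import runtestprotocol

def inspect_xfail(item):
    setup_rep, call_rep, teardown_rep = runtestprotocol(item, log=False)
    if 'xfail' in call_rep.keywords:
        # explanation attached by pytest_runtest_makereport;
        # skipped == expected failure, failed == unexpectedly passing
        return call_rep.skipped, call_rep.keywords['xfail']
    return call_rep.passed, None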
@@ -47,69 +192,6 @@ def test_skip_not_report_default(testdir):
        "*1 skipped*--report=skipped*",
    ])

def test_xfail_decorator(testdir):
    p = testdir.makepyfile(test_one="""
        import py
        @py.test.mark.xfail
        def test_this():
            assert 0

        @py.test.mark.xfail
        def test_that():
            assert 1
    """)
    result = testdir.runpytest(p, '--report=xfailed')
    result.stdout.fnmatch_lines([
        "*expected failures*",
        "*test_one.test_this*test_one.py:4*",
        "*UNEXPECTEDLY PASSING*",
        "*test_that*",
        "*1 xfailed*"
    ])
    assert result.ret == 1

def test_xfail_at_module(testdir):
    p = testdir.makepyfile("""
        import py
        pytestmark = py.test.mark.xfail('True')
        def test_intentional_xfail():
            assert 0
    """)
    result = testdir.runpytest(p, '--report=xfailed')
    result.stdout.fnmatch_lines([
        "*expected failures*",
        "*test_intentional_xfail*:4*",
        "*1 xfailed*"
    ])
    assert result.ret == 0

def test_xfail_evalfalse_but_fails(testdir):
    p = testdir.makepyfile("""
        import py
        @py.test.mark.xfail('False')
        def test_fail():
            assert 0
    """)
    result = testdir.runpytest(p, '--report=xfailed')
    result.stdout.fnmatch_lines([
        "*test_xfail_evalfalse_but_fails*:4*",
        "*1 failed*"
    ])
    assert result.ret == 1

def test_skipif_decorator(testdir):
    p = testdir.makepyfile("""
        import py
        @py.test.mark.skipif("hasattr(sys, 'platform')")
        def test_that():
            assert 0
    """)
    result = testdir.runpytest(p, '--report=skipped')
    result.stdout.fnmatch_lines([
        "*Skipped*platform*",
        "*1 skipped*"
    ])
    assert result.ret == 0

def test_skipif_class(testdir):
    p = testdir.makepyfile("""
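The removed function-style tests above (their behaviour now lives in the TestXFail and TestSkipif classes) also documented the module-level form of the mark. For reference, the usage pattern exercised by test_xfail_at_module looks like this in a user's test module (file and test names here are illustrative, not part of the patch):

# example_module_xfail.py -- illustrative user-level usage
import py

# applies the xfail mark to every test in this module; the string condition is eval()-ed
pytestmark = py.test.mark.xfail('True')

def test_intentional_xfail():
    assert 0    # reported as an expected failure, not as a plain failure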
@@ -127,19 +209,6 @@ def test_skipif_class(testdir):
        "*2 skipped*"
    ])

def test_evalexpression_cls_config_example(testdir):
    from py._plugin.pytest_skipping import evalexpression
    item, = testdir.getitems("""
        import py
        class TestClass:
            pytestmark = py.test.mark.skipif("config._hackxyz")
            def test_func(self):
                pass
    """)
    item.config._hackxyz = 3
    x, y = evalexpression(item, 'skipif')
    assert x == 'config._hackxyz'
    assert y == 3

def test_skip_reasons_folding():
    from py._plugin import pytest_runner as runner
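Both the removed test_evalexpression_cls_config_example and its replacement, TestEvaluator.test_skipif_class, rely on the config object being available inside condition strings. A minimal sketch of how a project could use that (the conftest hook body and the _mycondition name are illustrative assumptions, not part of this commit):

# conftest.py -- illustrative: expose a project-specific flag on the config object
def pytest_configure(config):
    config._mycondition = True    # e.g. derived from options or the environment

# test_something.py
import py

class TestClass:
    # the condition string is evaluated with 'config' bound to the py.test config
    pytestmark = py.test.mark.skipif("config._mycondition")

    def test_func(self):
        pass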