new --runxfail option to ignore xfail markers on functions
--HG--
branch : trunk
parent 28150c7486
commit c933ada7fb
@@ -14,9 +14,10 @@ Changes between 1.2.1 and 1.3.0 (release pending)
 - new pytest_pycollect_makemodule(path, parent) hook for
   allowing customization of the Module collection object for a
   matching test module.
-- extend py.test.mark.xfail to accept two more keyword arg parameters:
-  ``xfail(run=False)`` will not run the decorated test
-  ``xfail(reason="...")`` will print the reason string when reporting
+- extend and refine xfail mechanism:
+  ``@py.test.mark.xfail(run=False)`` does not run the decorated test
+  ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
+  specifying ``--runxfail`` on command line virtually ignores xfail markers
 - expose (previously internal) commonly useful methods:
   py.io.get_terminal_width() -> return terminal width
   py.io.ansi_print(...) -> print colored/bold text on linux/win32
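
For readers of the changelog above, a minimal sketch of the extended
marker in use (test names and reason string invented for illustration;
assumes the py.test API this release documents)::

    import py

    @py.test.mark.xfail(run=False)
    def test_not_run():
        assert 0  # never executed; reported as an expected failure

    @py.test.mark.xfail(reason="flaky on win32")
    def test_with_reason():
        assert 0  # runs; the reason string shows up in the xfail summary

    # invoking ``py.test --runxfail`` makes both tests run and fail normally
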
@@ -126,6 +126,12 @@ within test or setup code. Example::
 
 import py
 
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group.addoption('--runxfail',
+           action="store_true", dest="runxfail", default=False,
+           help="run tests even if they are marked xfail")
+
 class MarkEvaluator:
     def __init__(self, item, name):
         self.item = item
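
The ``--runxfail`` option above follows the standard addoption pattern.
A sketch of the same pattern in a user ``conftest.py``; the option name
and marker here are hypothetical, not part of this commit::

    import py

    def pytest_addoption(parser):
        group = parser.getgroup("general")
        group.addoption('--runslow',  # hypothetical example option
            action="store_true", dest="runslow", default=False,
            help="also run tests marked as slow")

    def pytest_runtest_setup(item):
        # read the option back through the item's config object,
        # just as the plugin change below does for "runxfail"
        if 'slow' in item.keywords and not item.config.getvalue("runslow"):
            py.test.skip("need --runslow option to run")
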
@@ -172,9 +178,10 @@ def pytest_runtest_setup(item):
     if evalskip.istrue():
         py.test.skip(evalskip.getexplanation())
     item._evalxfail = MarkEvaluator(item, 'xfail')
-    if item._evalxfail.istrue():
-        if not item._evalxfail.get('run', True):
-            py.test.skip("xfail")
+    if not item.config.getvalue("runxfail"):
+        if item._evalxfail.istrue():
+            if not item._evalxfail.get('run', True):
+                py.test.skip("xfail")
 
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, py.test.collect.Function):
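
In words: the setup hook now consults the new option first, so the
``run=False`` skip only triggers when ``--runxfail`` is absent. A small
truth-table sketch of the guarded logic (plain Python, not from the diff)::

    def skips_at_setup(runxfail_given, marker_true, run_kwarg=True):
        # mirrors the guarded logic in pytest_runtest_setup above
        return (not runxfail_given) and marker_true and (run_kwarg is False)

    assert skips_at_setup(False, True, run_kwarg=False)       # skipped as "xfail"
    assert not skips_at_setup(True, True, run_kwarg=False)    # --runxfail: test runs
    assert not skips_at_setup(False, False, run_kwarg=False)  # marker inactive
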
@@ -192,7 +199,7 @@ def pytest_runtest_makereport(__multicall__, item, call):
         return rep
     elif call.when == "call":
         rep = __multicall__.execute()
-        if evalxfail.istrue():
+        if not item.config.getvalue("runxfail") and evalxfail.istrue():
             if call.excinfo:
                 rep.skipped = True
                 rep.failed = rep.passed = False
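
The same guard applies at reporting time: without ``--runxfail``, a failing
xfail-marked call is recategorized as skipped (shown as xfail); with it, the
failure stands. A behavior sketch under the same assumptions::

    def report_outcome(runxfail_given, marker_true, call_failed):
        # mirrors the amended pytest_runtest_makereport branch above
        if call_failed and marker_true and not runxfail_given:
            return "xfail"  # rep.skipped = True; failed/passed cleared
        return "failed" if call_failed else "passed"

    assert report_outcome(False, True, True) == "xfail"
    assert report_outcome(True, True, True) == "failed"  # --runxfail: real failure
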
@@ -96,6 +96,21 @@ class TestXFail:
         expl = callreport.keywords['xfail']
         assert expl == "condition: True"
 
+    def test_xfail_run_anyway(self, testdir):
+        testdir.makepyfile("""
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 0
+        """)
+        result = testdir.runpytest("--runxfail")
+        assert result.ret == 1
+        result.stdout.fnmatch_lines([
+            "*def test_func():*",
+            "*assert 0*",
+            "*1 failed*",
+        ])
+
     def test_xfail_evalfalse_but_fails(self, testdir):
         item = testdir.getitem("""
             import py