new --runxfail option to ignore xfail markers on functions
--HG--
branch : trunk
commit c933ada7fb
parent 28150c7486
@@ -14,9 +14,10 @@ Changes between 1.2.1 and 1.3.0 (release pending)
 - new pytest_pycollect_makemodule(path, parent) hook for
   allowing customization of the Module collection object for a
   matching test module.
-- extend py.test.mark.xfail to accept two more keyword arg parameters:
-  ``xfail(run=False)`` will not run the decorated test
-  ``xfail(reason="...")`` will print the reason string when reporting
+- extend and refine xfail mechanism:
+  ``@py.test.mark.xfail(run=False)`` does not run the decorated test
+  ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
+  specifying ``--runxfail`` on command line virtually ignores xfail markers
 - expose (previously internal) commonly useful methods:
   py.io.get_terminal_width() -> return terminal width
   py.io.ansi_print(...) -> print colored/bold text on linux/win32
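As an aside, the pytest_pycollect_makemodule hook mentioned in the changelog context above is typically implemented in a conftest.py. A minimal sketch, assuming a hypothetical MyModule subclass (not part of this commit):

    # conftest.py -- illustrative sketch only
    import py

    class MyModule(py.test.collect.Module):
        # hypothetical custom Module collection class
        pass

    def pytest_pycollect_makemodule(path, parent):
        # return the Module collection object used for a matching test module
        return MyModule(path, parent=parent)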
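A usage sketch of the extended xfail marker and the new option, based only on the changelog entries above (test names are illustrative):

    # test_example.py -- illustrative sketch only
    import py

    @py.test.mark.xfail(run=False, reason="crashes the interpreter")
    def test_not_run():
        assert 0  # with run=False the body is never executed

    @py.test.mark.xfail
    def test_expected_failure():
        assert 0  # reported as an expected failure (xfail)

Invoking ``py.test --runxfail`` then runs both tests and reports ordinary failures; the test_xfail_run_anyway test added below exercises exactly this.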
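And a small sketch of the two newly exposed helpers; the esc argument shown for ansi_print is an assumed ANSI color code:

    import py

    width = py.io.get_terminal_width()   # terminal width as an int
    py.io.ansi_print("hello", esc="31")  # colored text on linux/win32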
@@ -126,6 +126,12 @@ within test or setup code. Example::
 
 import py
 
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group.addoption('--runxfail',
+           action="store_true", dest="runxfail", default=False,
+           help="run tests even if they are marked xfail")
+
 class MarkEvaluator:
     def __init__(self, item, name):
         self.item = item
@@ -172,6 +178,7 @@ def pytest_runtest_setup(item):
     if evalskip.istrue():
         py.test.skip(evalskip.getexplanation())
     item._evalxfail = MarkEvaluator(item, 'xfail')
-    if item._evalxfail.istrue():
-        if not item._evalxfail.get('run', True):
-            py.test.skip("xfail")
+    if not item.config.getvalue("runxfail"):
+        if item._evalxfail.istrue():
+            if not item._evalxfail.get('run', True):
+                py.test.skip("xfail")
@@ -192,7 +199,7 @@ def pytest_runtest_makereport(__multicall__, item, call):
         return rep
     elif call.when == "call":
         rep = __multicall__.execute()
-        if evalxfail.istrue():
+        if not item.config.getvalue("runxfail") and evalxfail.istrue():
             if call.excinfo:
                 rep.skipped = True
                 rep.failed = rep.passed = False
@@ -96,6 +96,21 @@ class TestXFail:
         expl = callreport.keywords['xfail']
         assert expl == "condition: True"
 
+    def test_xfail_run_anyway(self, testdir):
+        testdir.makepyfile("""
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 0
+        """)
+        result = testdir.runpytest("--runxfail")
+        assert result.ret == 1
+        result.stdout.fnmatch_lines([
+            "*def test_func():*",
+            "*assert 0*",
+            "*1 failed*",
+        ])
+
     def test_xfail_evalfalse_but_fails(self, testdir):
         item = testdir.getitem("""
             import py