add new parameters:

xfail(run=False) will not run expected-to-fail tests
xfail(reason="...") will report the specified reason

--HG--
branch : trunk
holger krekel 2010-05-02 22:13:16 +02:00
parent 82d4aae571
commit 1a8b2838fa
4 changed files with 75 additions and 20 deletions


@@ -14,6 +14,9 @@ Changes between 1.2.1 and 1.3.0 (release pending)
 - new pytest_pycollect_makemodule(path, parent) hook for
   allowing customization of the Module collection object for a
   matching test module.
+- extend py.test.mark.xfail to accept two more keyword arguments:
+  ``xfail(run=False)`` will not run the decorated test
+  ``xfail(reason="...")`` will print the reason string when reporting
 - expose (previously internal) commonly useful methods:
   py.io.get_terminal_width() -> return terminal width
   py.io.ansi_print(...) -> print colored/bold text on linux/win32

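As a quick orientation before the per-file diffs: a minimal usage sketch of the two new keyword arguments, based on the behavior documented in this commit (the test names and reason strings here are illustrative, not taken from the commit):

    import py

    @py.test.mark.xfail(run=False, reason="would hang the process")
    def test_never_executed():
        # not run at all; reported as "[not run] would hang the process"
        assert 0

    @py.test.mark.xfail("sys.version_info >= (3,0)", reason="py3-only failure")
    def test_conditional():
        # runs normally; a failure counts as expected when the condition holds
        assert 0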

@@ -83,10 +83,17 @@ Same as with skipif_ you can also selectively expect a failure
 depending on platform::

     @py.test.mark.xfail("sys.version_info >= (3,0)")
     def test_function():
         ...

+To not run a test and still regard it as "xfailed"::
+
+    @py.test.mark.xfail(..., run=False)
+
+To specify an explicit reason to be shown with xfailure detail::
+
+    @py.test.mark.xfail(..., reason="my reason")
+
 skipping on a missing import dependency
 --------------------------------------------------
@@ -123,27 +130,41 @@ import py
 def pytest_runtest_setup(item):
     if not isinstance(item, py.test.collect.Function):
         return
     expr, result = evalexpression(item, 'skipif')
     if result:
         py.test.skip(expr)
+    holder = getattr(item.obj, 'xfail', None)
+    if holder and not holder.kwargs.get('run', True):
+        py.test.skip("<did not run>")

 def pytest_runtest_makereport(__multicall__, item, call):
-    if call.when != "call":
+    if not isinstance(item, py.test.collect.Function):
         return
-    expr, result = evalexpression(item, 'xfail')
-    rep = __multicall__.execute()
-    if result:
-        if call.excinfo:
-            rep.skipped = True
-            rep.failed = rep.passed = False
+    if call.when == "setup":
+        holder = getattr(item.obj, 'xfail', None)
+        if holder:
+            rep = __multicall__.execute()
+            reason = holder.kwargs.get("reason", "<no reason given>")
+            rep.keywords['xfail'] = "[not run] " + reason
+            return rep
+        return
+    elif call.when == "call":
+        expr, result = evalexpression(item, 'xfail')
+        rep = __multicall__.execute()
+        if result:
+            if call.excinfo:
+                rep.skipped = True
+                rep.failed = rep.passed = False
+            else:
+                rep.skipped = rep.passed = False
+                rep.failed = True
+            rep.keywords['xfail'] = expr
         else:
-            rep.skipped = rep.passed = False
-            rep.failed = True
-        rep.keywords['xfail'] = expr
-    else:
-        if 'xfail' in rep.keywords:
-            del rep.keywords['xfail']
-    return rep
+            if 'xfail' in rep.keywords:
+                del rep.keywords['xfail']
+        return rep

 # called by terminalreporter progress reporting
 def pytest_report_teststatus(report):
@@ -151,7 +172,7 @@ def pytest_report_teststatus(report):
         if report.skipped:
             return "xfailed", "x", "xfail"
         elif report.failed:
-            return "xpassed", "P", "xpass"
+            return "xpassed", "P", "XPASS"

 # called by the terminalreporter instance/plugin
 def pytest_terminal_summary(terminalreporter):
@@ -172,10 +193,13 @@ def show_xfailed(terminalreporter):
         entry = rep.longrepr.reprcrash
         modpath = rep.item.getmodpath(includemodule=True)
         pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
-        reason = rep.longrepr.reprcrash.message
-        i = reason.find("\n")
-        if i != -1:
-            reason = reason[:i]
+        if rep.keywords['xfail']:
+            reason = rep.keywords['xfail'].strip()
+        else:
+            reason = rep.longrepr.reprcrash.message
+            i = reason.find("\n")
+            if i != -1:
+                reason = reason[:i]
         tr._tw.line("%s %s" %(pos, reason))
     xpassed = terminalreporter.stats.get("xpassed")

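For readers skimming the hook changes above: the run/reason handling relies on the object that py.test.mark.xfail attaches to the test function as an ``xfail`` attribute, whose kwargs dict the hooks query with a default of run=True. A simplified, self-contained model of that lookup (``MarkHolder`` is a stand-in name, not the real class):

    class MarkHolder:
        # stand-in for the holder py.test.mark.xfail attaches to a function
        def __init__(self, *args, **kwargs):
            self.args = args
            self.kwargs = kwargs

    def should_run(test_function):
        holder = getattr(test_function, 'xfail', None)
        # 'run' defaults to True, so a plain @xfail mark still executes the test
        return holder is None or holder.kwargs.get('run', True)

    def test_example():
        assert 0

    test_example.xfail = MarkHolder(run=False, reason="noway")
    assert should_run(test_example) is False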

@@ -159,6 +159,9 @@ def test_generic(testdir, LineMatcher):
         @py.test.mark.xfail
         def test_xfail():
             assert 0
+        @py.test.mark.xfail(run=False)
+        def test_xfail_norun():
+            assert 0
     """)
     testdir.runpytest("--resultlog=result.log")
     lines = testdir.tmpdir.join("result.log").readlines(cr=0)
@@ -167,5 +170,6 @@ def test_generic(testdir, LineMatcher):
         "F *:test_fail",
         "s *:test_skip",
         "x *:test_xfail",
+        "x *:test_xfail_norun",
     ])

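The new resultlog assertion above checks that a run=False test still lands in the log with the 'x' (expected failure) status character. Assuming each entry line has the "<status-char> <testid>" shape implied by those patterns, a hypothetical helper (not part of the commit) to collect the xfailed entries could look like:

    def xfailed_ids(path):
        # collect test ids whose resultlog status character is 'x';
        # continuation lines (longreprs) are indented and thus skipped
        ids = []
        for line in open(path):
            if line.startswith("x "):
                ids.append(line.split(None, 1)[1].strip())
        return ids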

@@ -12,6 +12,30 @@ def test_xfail_not_report_default(testdir):
         "*1 expected failures*--report=xfailed*",
     ])

+def test_xfail_not_run(testdir):
+    p = testdir.makepyfile(test_one="""
+        import py
+        @py.test.mark.xfail(run=False, reason="noway")
+        def test_this():
+            assert 0
+        @py.test.mark.xfail("True", run=False, reason="noway")
+        def test_this_true():
+            assert 0
+        @py.test.mark.xfail("False", run=True, reason="huh")
+        def test_this_false():
+            assert 1
+    """)
+    result = testdir.runpytest(p, '-v')
+    result.stdout.fnmatch_lines([
+        "*2 expected failures*--report=xfailed*",
+        "*1 passed*",
+    ])
+    result = testdir.runpytest(p, '--report=xfailed')
+    result.stdout.fnmatch_lines([
+        "*test_one*test_this*not run*noway",
+        "*test_one*test_this_true*not run*noway",
+    ])
+
 def test_skip_not_report_default(testdir):
     p = testdir.makepyfile(test_one="""
         import py