Merged in squisher/pytest/xfail-cause (pull request #183)

Add mark.xfail argument raises so that unexpected exceptions show up as test failures.
holger krekel 2014-07-28 09:55:02 +02:00
commit 38104dfc92
5 changed files with 63 additions and 6 deletions
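For context before the diff itself: the new raises argument narrows which exceptions count as the expected failure. A minimal sketch of the behaviour this commit adds (file and test names are made up for illustration, assuming a pytest with this change applied):

    # illustration_raises.py -- hypothetical demo, not part of the commit
    import pytest

    @pytest.mark.xfail(raises=IndexError)
    def test_known_bug():
        [][1]          # IndexError matches raises= -> reported as xfailed

    @pytest.mark.xfail(raises=IndexError)
    def test_unexpected_error():
        raise RuntimeError("something else broke")   # not an IndexError -> reported as failed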

@@ -26,11 +26,13 @@ def pytest_configure(config):
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True): mark the the test function "
+        "xfail(condition, reason=None, run=True, raises=None): mark the the test function "
         "as an expected failure if eval(condition) has a True value. "
         "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. See "
-        "http://pytest.org/latest/skipping.html"
+        "you don't even want to execute the test function. If only specific "
+        "exception(s) are expected, you can list them in raises, and if the test fails "
+        "in other ways, it will be reported as a true failure. "
+        "See http://pytest.org/latest/skipping.html"
     )
 
 def pytest_namespace():
@@ -60,6 +62,12 @@ class MarkEvaluator:
     def wasvalid(self):
         return not hasattr(self, 'exc')
 
+    def invalidraise(self, exc):
+        raises = self.get('raises')
+        if not raises:
+            return
+        return not isinstance(exc, raises)
+
     def istrue(self):
         try:
             return self._istrue()
@@ -171,7 +179,11 @@ def pytest_runtest_makereport(__multicall__, item, call):
     if not item.config.option.runxfail:
         if evalxfail.wasvalid() and evalxfail.istrue():
             if call.excinfo:
-                rep.outcome = "skipped"
+                if evalxfail.invalidraise(call.excinfo.value):
+                    rep.outcome = "failed"
+                    return rep
+                else:
+                    rep.outcome = "skipped"
             elif call.when == "call":
                 rep.outcome = "failed"
             else:
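Since the new check is a plain isinstance() call, the raises value may be a single exception class or a tuple of classes. A standalone mirror of the invalidraise() logic above, for illustration only (not the plugin code itself):

    # Mirror of invalidraise(); the real method returns None rather than
    # False when no raises spec was given, which is equally falsy.
    def is_unexpected(exc, raises):
        if not raises:
            return False      # no raises= given: any exception is "expected"
        return not isinstance(exc, raises)

    assert is_unexpected(IndexError(), IndexError) is False          # -> xfailed
    assert is_unexpected(TypeError(), (AttributeError, IndexError))  # -> failed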

@@ -95,6 +95,22 @@ asserts that the given ``ExpectedException`` is raised. The reporter will
 provide you with helpful output in case of failures such as *no
 exception* or *wrong exception*.
 
+Note that it is also possible to specify a "raises" argument to
+``pytest.mark.xfail``, which checks that the test is failing in a more
+specific way than just having any exception raised::
+
+    @pytest.mark.xfail(raises=IndexError)
+    def test_f():
+        f()
+
+Using ``pytest.raises`` is likely to be better for cases where you are testing
+exceptions your own code is deliberately raising, whereas using
+``@pytest.mark.xfail`` with a check function is probably better for something
+like documenting unfixed bugs (where the test describes what "should" happen)
+or bugs in dependencies.
+
 .. _newreport:
 
 Making use of context-sensitive comparisons
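To make the contrast drawn in the documentation above concrete, here is an illustrative sketch (the helper functions are invented for the example): pytest.raises asserts an exception your own code raises on purpose, while xfail(raises=...) documents a known bug and pins down its failure mode:

    import pytest

    def deliberate():
        raise ValueError("bad input")        # exception raised on purpose

    def buggy():
        return [][0]                         # known, not-yet-fixed bug

    def test_deliberate_error():
        # the exception *is* the behaviour under test
        with pytest.raises(ValueError):
            deliberate()

    @pytest.mark.xfail(raises=IndexError)
    def test_buggy():
        # describes what "should" happen; tolerated only while it fails
        # with the documented IndexError
        assert buggy() == 0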

@@ -23,3 +23,8 @@ def test_hello5():
 
 def test_hello6():
     pytest.xfail("reason")
+
+@xfail(raises=IndexError)
+def test_hello7():
+    x = []
+    x[1] = 1
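(Item assignment past the end of an empty list raises IndexError, so the new demo test fails in exactly the declared way; a quick check of that assumption:)

    x = []
    try:
        x[1] = 1
    except IndexError as exc:
        print(type(exc).__name__)    # prints: IndexError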

@@ -149,6 +149,11 @@ on a particular platform::
     def test_function():
         ...
 
+If you want to be more specific as to why the test is failing, you can specify
+a single exception, or a list of exceptions, in the ``raises`` argument. Then
+the test will be reported as a regular failure if it fails with an
+exception not mentioned in ``raises``.
+
 You can furthermore prevent the running of an "xfail" test or
 specify a reason such as a bug ID or similar. Here is
 a simple test file with the several usages:
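The prose above says "a single exception, or a list of exceptions"; since the value ends up in an isinstance() check, multiple exceptions are given as a tuple, as the new tests below also do. An illustrative sketch:

    import pytest

    @pytest.mark.xfail(raises=IndexError)
    def test_one_exception():
        [][1]

    @pytest.mark.xfail(raises=(AttributeError, TypeError))
    def test_several_exceptions():
        # AttributeError is in the declared tuple -> xfailed; any other
        # exception type would be reported as a regular failure
        object().missing_attribute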

@@ -330,6 +330,26 @@ class TestXFail:
             "*1 xfailed*",
         ])
 
+    @pytest.mark.parametrize('expected, actual, matchline',
+        [('TypeError', 'TypeError', "*1 xfailed*"),
+         ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"),
+         ('TypeError', 'IndexError', "*1 failed*"),
+         ('(AttributeError, TypeError)', 'IndexError', "*1 failed*"),
+        ])
+    def test_xfail_raises(self, expected, actual, matchline, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=%s)
+            def test_raises():
+                raise %s()
+        """ % (expected, actual))
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            matchline,
+        ])
+
 class TestXFailwithSetupTeardown:
     def test_failing_setup_issue9(self, testdir):
         testdir.makepyfile("""
@@ -575,7 +595,7 @@ def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
     ])
 
 def test_xfail_test_setup_exception(testdir):
@@ -617,7 +637,6 @@ def test_imperativeskip_on_xfail_test(testdir):
         *2 skipped*
     """)
 
-
 class TestBooleanCondition:
     def test_skipif(self, testdir):
         testdir.makepyfile("""