Add a raises argument to mark.xfail so that unexpected exceptions show up as test failures.

--HG--
branch : xfail-cause
david@mcbf.net 2014-07-26 15:11:05 +02:00
parent d98521b0d9
commit 7b273b8577
2 changed files with 67 additions and 6 deletions
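
As a usage sketch of the new argument (the file and test names below are illustrative, not part of this commit): a test marked with xfail(raises=...) is reported as xfailed only when it raises one of the declared exception types; any other exception is reported as an ordinary failure.

    # test_raises_sketch.py -- illustrative only
    import pytest

    @pytest.mark.xfail(raises=IndexError)
    def test_known_bug():
        # Raises the declared exception type, so pytest reports it as xfailed.
        raise IndexError("known, tracked bug")

    @pytest.mark.xfail(raises=IndexError)
    def test_unexpected_breakage():
        # Raises a different exception type, so pytest reports it as a real failure.
        raise TypeError("not the failure we expected")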

Changed file 1 of 2:

@@ -26,11 +26,13 @@ def pytest_configure(config):
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True): mark the the test function "
+        "xfail(condition, reason=None, run=True, raises=None): mark the the test function "
         "as an expected failure if eval(condition) has a True value. "
-        "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. See "
-        "http://pytest.org/latest/skipping.html"
+        "Optionally specify a reason for better reporting and run=False if "
+        "you don't even want to execute the test function. If only specific "
+        "exception(s) are expected, you can list them in raises, and if the test fails "
+        "in other ways, it will be reported as a true failure. "
+        "See http://pytest.org/latest/skipping.html"
     )

 def pytest_namespace():
@@ -60,6 +62,15 @@ class MarkEvaluator:
     def wasvalid(self):
         return not hasattr(self, 'exc')

+    def invalidraise(self, exctype):
+        raises = self.get('raises')
+        if not raises:
+            return
+        if isinstance(raises, tuple):
+            return exctype not in raises
+        else:
+            return raises != exctype
+
     def istrue(self):
         try:
             return self._istrue()
@@ -171,7 +182,11 @@ def pytest_runtest_makereport(__multicall__, item, call):
     if not item.config.option.runxfail:
         if evalxfail.wasvalid() and evalxfail.istrue():
             if call.excinfo:
-                rep.outcome = "skipped"
+                if evalxfail.invalidraise(call.excinfo.type):
+                    rep.outcome = "failed"
+                    return rep
+                else:
+                    rep.outcome = "skipped"
             elif call.when == "call":
                 rep.outcome = "failed"
             else:
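
A standalone sketch of the matching rule that invalidraise() introduces above (the helper name and the trailing examples are illustrative; in the plugin the check lives on MarkEvaluator and is consulted from pytest_runtest_makereport):

    def is_unexpected(exctype, raises):
        # Mirrors invalidraise(): truthy when the raised type is not among the declared ones.
        if not raises:
            return None                   # no raises= given: plain xfail behaviour
        if isinstance(raises, tuple):
            return exctype not in raises  # tuple of types: membership check
        return raises != exctype          # single type: direct comparison

    # is_unexpected(TypeError, (AttributeError, TypeError)) -> False  (test stays xfailed)
    # is_unexpected(TypeError, IndexError)                  -> True   (reported as failed)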

Changed file 2 of 2:

@@ -330,6 +330,53 @@ class TestXFail:
             "*1 xfailed*",
         ])
+    def test_xfail_raises_match(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=TypeError)
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+
+    def test_xfail_raises_mismatch(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=IndexError)
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 failed*",
+        ])
+
+    def test_xfail_raises_tuple_match(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=(AttributeError, TypeError))
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+
+    def test_xfail_raises_tuple_mismatch(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=(AttributeError, IndexError))
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 failed*",
+        ])

 class TestXFailwithSetupTeardown:
     def test_failing_setup_issue9(self, testdir):
         testdir.makepyfile("""
@@ -575,7 +622,7 @@ def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
     ])

 def test_xfail_test_setup_exception(testdir):
@@ -617,7 +664,6 @@ def test_imperativeskip_on_xfail_test(testdir):
         *2 skipped*
     """)

 class TestBooleanCondition:
     def test_skipif(self, testdir):
         testdir.makepyfile("""