diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index 6ddbb6553..e65ac577f 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -26,11 +26,13 @@ def pytest_configure(config):
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True): mark the the test function "
+        "xfail(condition, reason=None, run=True, raises=None): mark the test function "
         "as an expected failure if eval(condition) has a True value. "
         "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. See "
-        "http://pytest.org/latest/skipping.html"
+        "you don't even want to execute the test function. If only specific "
+        "exception(s) are expected, you can list them in raises, and if the test fails "
+        "in other ways, it will be reported as a true failure. "
+        "See http://pytest.org/latest/skipping.html"
     )
 
 def pytest_namespace():
@@ -60,6 +62,15 @@ class MarkEvaluator:
     def wasvalid(self):
         return not hasattr(self, 'exc')
 
+    def invalidraise(self, exctype):
+        raises = self.get('raises')
+        if not raises:
+            return
+        if isinstance(raises, tuple):
+            return exctype not in raises
+        else:
+            return raises != exctype
+
     def istrue(self):
         try:
             return self._istrue()
@@ -171,7 +182,11 @@ def pytest_runtest_makereport(__multicall__, item, call):
     if not item.config.option.runxfail:
         if evalxfail.wasvalid() and evalxfail.istrue():
             if call.excinfo:
-                rep.outcome = "skipped"
+                if evalxfail.invalidraise(call.excinfo.type):
+                    rep.outcome = "failed"
+                    return rep
+                else:
+                    rep.outcome = "skipped"
             elif call.when == "call":
                 rep.outcome = "failed"
             else:
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index d85a5d635..fdf73efde 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -330,6 +330,53 @@ class TestXFail:
             "*1 xfailed*",
         ])
 
+    def test_xfail_raises_match(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=TypeError)
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+
+    def test_xfail_raises_mismatch(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=IndexError)
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 failed*",
+        ])
+    def test_xfail_raises_tuple_match(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=(AttributeError, TypeError))
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+
+    def test_xfail_raises_tuple_mismatch(self, testdir):
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(raises=(AttributeError, IndexError))
+            def test_raises():
+                raise TypeError()
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 failed*",
+        ])
+
 class TestXFailwithSetupTeardown:
     def test_failing_setup_issue9(self, testdir):
         testdir.makepyfile("""
@@ -575,7 +622,7 @@ def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
     ])
 
 def test_xfail_test_setup_exception(testdir):
@@ -617,7 +664,6 @@ def test_imperativeskip_on_xfail_test(testdir):
         *2 skipped*
     """)
 
-
 class TestBooleanCondition:
     def test_skipif(self, testdir):
         testdir.makepyfile("""