From 925f75088dececcc291c0870dbfcd5960bbb647e Mon Sep 17 00:00:00 2001
From: holger krekel
Date: Thu, 20 May 2010 13:29:51 +0200
Subject: [PATCH] fix issue91: introduce new py.test.xfail(reason) helper to
 imperatively mark a test as expected to fail. Can be used from within setup
 and test functions. This is especially useful for parametrized tests when
 certain configurations are expected to fail. In this case the declarative
 approach with @py.test.mark.xfail cannot be used, as it would mark all
 configurations as xfail.

--HG--
branch : trunk
---
 CHANGELOG                              |  8 ++++++
 py/_plugin/pytest_runner.py            | 13 +++++++++
 py/_plugin/pytest_skipping.py          | 14 +++++++---
 testing/plugin/test_pytest_skipping.py | 37 ++++++++++++++++++++++++++
 4 files changed, 69 insertions(+), 3 deletions(-)

diff --git a/CHANGELOG b/CHANGELOG
index e3c12aaca..b1c7e32f4 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -9,6 +9,14 @@ Changes between 1.3.0 and 1.3.1
 - fix issue95: late-import zlib so that it's not required
   for general py.test startup.
 
+- fix issue91: introduce new py.test.xfail(reason) helper
+  to imperatively mark a test as expected to fail. Can
+  be used from within setup and test functions. This is
+  especially useful for parametrized tests when certain
+  configurations are expected to fail. In this case the
+  declarative approach with @py.test.mark.xfail cannot
+  be used, as it would mark all configurations as xfail.
+
 - make py.test.cmdline.main() return the exitstatus instead of raising
   (which is still done by py.cmdline.pytest()) and make it so that
   py.test.cmdline.main() can be called

diff --git a/py/_plugin/pytest_runner.py b/py/_plugin/pytest_runner.py
index 005fa5c0b..b791daf94 100644
--- a/py/_plugin/pytest_runner.py
+++ b/py/_plugin/pytest_runner.py
@@ -10,6 +10,7 @@ def pytest_namespace():
         'skip'         : skip,
         'importorskip' : importorskip,
         'fail'         : fail,
+        'xfail'        : xfail,
         'exit'         : exit,
     }
 
@@ -295,6 +296,10 @@ class Failed(OutcomeException):
     """ raised from an explicit call to py.test.fail() """
     __module__ = 'builtins'
 
+class XFailed(OutcomeException):
+    """ raised from an explicit call to py.test.xfail() """
+    __module__ = 'builtins'
+
 class ExceptionFailure(Failed):
     """ raised by py.test.raises on an exception-assertion mismatch. """
     def __init__(self, expr, expected, msg=None, excinfo=None):
@@ -335,6 +340,14 @@
 
 fail.Exception = Failed
 
+def xfail(reason=""):
+    """ imperatively xfail an executing test or setup function,
+    taking an optional reason string.
+    """
+    __tracebackhide__ = True
+    raise XFailed(reason)
+xfail.Exception = XFailed
+
 def raises(ExpectedException, *args, **kwargs):
     """ if args[0] is callable: raise AssertionError if calling it with
         the remaining arguments does not raise the expected exception.
diff --git a/py/_plugin/pytest_skipping.py b/py/_plugin/pytest_skipping.py
index 1651addd7..dedff27ca 100644
--- a/py/_plugin/pytest_skipping.py
+++ b/py/_plugin/pytest_skipping.py
@@ -185,9 +185,17 @@ def pytest_runtest_setup(item):
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, py.test.collect.Function):
         return
-    evalxfail = getattr(item, '_evalxfail', None)
-    if not evalxfail:
-        return
+    if not (call.excinfo and
+        call.excinfo.errisinstance(py.test.xfail.Exception)):
+        evalxfail = getattr(item, '_evalxfail', None)
+        if not evalxfail:
+            return
+    if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception):
+        rep = __multicall__.execute()
+        rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
+        rep.skipped = True
+        rep.failed = False
+        return rep
     if call.when == "setup":
         rep = __multicall__.execute()
         if rep.skipped and evalxfail.istrue():

diff --git a/testing/plugin/test_pytest_skipping.py b/testing/plugin/test_pytest_skipping.py
index b5bcd3cd1..f38239f45 100644
--- a/testing/plugin/test_pytest_skipping.py
+++ b/testing/plugin/test_pytest_skipping.py
@@ -169,6 +169,43 @@ class TestXFail:
         ])
         assert result.ret == 1
 
+    def test_xfail_imperative(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def test_this():
+                py.test.xfail("hello")
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+        result = testdir.runpytest(p, "-rx")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*reason:*hello*",
+        ])
+
+    def test_xfail_imperative_in_setup_function(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def setup_function(function):
+                py.test.xfail("hello")
+
+            def test_this():
+                assert 0
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+        result = testdir.runpytest(p, "-rx")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*reason:*hello*",
+        ])
+
+
+
+
+
 class TestSkipif:
     def test_skipif_conditional(self, testdir):
         item = testdir.getitem("""
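
A usage sketch, not part of the patch above: the scenario the commit message
describes, where a declarative @py.test.mark.xfail would mark every
parametrized configuration, is resolved with the imperative helper. This
assumes py.test 1.3.x-era parametrization via pytest_generate_tests and
metafunc.addcall; the "backend" funcarg and the backend names are
hypothetical illustrations.

    import py

    def pytest_generate_tests(metafunc):
        # run each test once per backend configuration
        # ("backend" and its values are made-up examples)
        for backend in ["sqlite", "mysql"]:
            metafunc.addcall(funcargs=dict(backend=backend))

    def test_query(backend):
        if backend == "mysql":
            # imperatively mark only this configuration as expected to
            # fail; a declarative @py.test.mark.xfail on test_query would
            # xfail the sqlite configuration as well
            py.test.xfail("mysql backend not supported yet")
        assert backend == "sqlite"

Running this should report one passed and one xfailed test; with -rx the
reason string appears in the XFAIL summary line, prefixed with "reason:" as
set in pytest_runtest_makereport above.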