From c933ada7fbb8677eef9d5b8bf42d010a1f0b9073 Mon Sep 17 00:00:00 2001
From: holger krekel
Date: Tue, 4 May 2010 13:02:27 +0200
Subject: [PATCH] new --runxfail option to ignore xfail markers on functions

--HG--
branch : trunk
---
 CHANGELOG                              |  7 ++++---
 py/_plugin/pytest_skipping.py          | 15 +++++++++++----
 testing/plugin/test_pytest_skipping.py | 15 +++++++++++++++
 3 files changed, 30 insertions(+), 7 deletions(-)

diff --git a/CHANGELOG b/CHANGELOG
index 0e8fb0593..5cd9b88e9 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -14,9 +14,10 @@ Changes between 1.2.1 and 1.3.0 (release pending)
 - new pytest_pycollect_makemodule(path, parent) hook for allowing
   customization of the Module collection object for a matching test module.
 
-- extend py.test.mark.xfail to accept two more keyword arg parameters:
-  ``xfail(run=False)`` will not run the decorated test
-  ``xfail(reason="...")`` will print the reason string when reporting
+- extend and refine xfail mechanism:
+  ``@py.test.mark.xfail(run=False)`` do not run the decorated test
+  ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
+  specifying ``--runxfail`` on command line virtually ignores xfail markers
 - expose (previously internal) commonly useful methods:
   py.io.get_terminal_with() -> return terminal width
   py.io.ansi_print(...) -> print colored/bold text on linux/win32
diff --git a/py/_plugin/pytest_skipping.py b/py/_plugin/pytest_skipping.py
index 7360c86d8..09be14825 100644
--- a/py/_plugin/pytest_skipping.py
+++ b/py/_plugin/pytest_skipping.py
@@ -126,6 +126,12 @@ within test or setup code. Example::
 
 import py
 
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group.addoption('--runxfail',
+           action="store_true", dest="runxfail", default=False,
+           help="run tests even if they are marked xfail")
+
 class MarkEvaluator:
     def __init__(self, item, name):
         self.item = item
@@ -172,9 +178,10 @@ def pytest_runtest_setup(item):
     if evalskip.istrue():
         py.test.skip(evalskip.getexplanation())
     item._evalxfail = MarkEvaluator(item, 'xfail')
-    if item._evalxfail.istrue():
-        if not item._evalxfail.get('run', True):
-            py.test.skip("xfail")
+    if not item.config.getvalue("runxfail"):
+        if item._evalxfail.istrue():
+            if not item._evalxfail.get('run', True):
+                py.test.skip("xfail")
 
 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, py.test.collect.Function):
@@ -192,7 +199,7 @@ def pytest_runtest_makereport(__multicall__, item, call):
         return rep
     elif call.when == "call":
         rep = __multicall__.execute()
-        if evalxfail.istrue():
+        if not item.config.getvalue("runxfail") and evalxfail.istrue():
             if call.excinfo:
                 rep.skipped = True
                 rep.failed = rep.passed = False
diff --git a/testing/plugin/test_pytest_skipping.py b/testing/plugin/test_pytest_skipping.py
index c6b654376..50e832f8c 100644
--- a/testing/plugin/test_pytest_skipping.py
+++ b/testing/plugin/test_pytest_skipping.py
@@ -96,6 +96,21 @@ class TestXFail:
         expl = callreport.keywords['xfail']
         assert expl == "condition: True"
 
+    def test_xfail_run_anyway(self, testdir):
+        testdir.makepyfile("""
+            import py
+            @py.test.mark.xfail
+            def test_func():
+                assert 0
+        """)
+        result = testdir.runpytest("--runxfail")
+        assert result.ret == 1
+        result.stdout.fnmatch_lines([
+            "*def test_func():*",
+            "*assert 0*",
+            "*1 failed*",
+        ])
+
     def test_xfail_evalfalse_but_fails(self, testdir):
         item = testdir.getitem("""
             import py
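
Note (not part of the patch): a minimal usage sketch of the new option, derived from the
test_xfail_run_anyway test added above; the module and test names below are made up for
illustration only.

    # test_xfail_demo.py -- hypothetical example module, not shipped with the patch
    import py

    @py.test.mark.xfail
    def test_marked_broken():
        # Without --runxfail this failing test is reported as an expected
        # failure (xfail) rather than counting as a regular failure.
        assert 0

Running ``py.test --runxfail test_xfail_demo.py`` makes the marker effectively inert:
the test body executes and the assertion error is reported as an ordinary failure
(exit status 1 and a "1 failed" summary line), which is what the new test in
testing/plugin/test_pytest_skipping.py asserts.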