Add strict option to xfail, making tests which XPASS actually fail the suite

Fix #1355
Bruno Oliveira 2016-02-14 20:45:55 -02:00
parent a965386b9e
commit 7823838e69
3 changed files with 171 additions and 24 deletions
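
In short, a test marked xfail with strict=True that unexpectedly passes is reported as a failure ([XPASS(strict)]) instead of a plain XPASS, so the run exits with a non-zero status; with the default strict=False (and xfail_strict unset) an unexpected pass is still only reported as XPASS. A minimal sketch of the behaviour this commit introduces (test name and reason are illustrative):

    import pytest

    @pytest.mark.xfail(reason="not implemented yet", strict=True)
    def test_new_feature():
        # The assertion holds, so the test unexpectedly passes; with
        # strict=True this is reported as "[XPASS(strict)] not implemented yet"
        # and fails the run.
        assert (1 + 1) == 2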

_pytest/skipping.py

@@ -14,6 +14,12 @@ def pytest_addoption(parser):
           action="store_true", dest="runxfail", default=False,
           help="run tests even if they are marked xfail")
    parser.addini("xfail_strict", "default for the strict parameter of xfail "
                                  "markers when not given explicitly (default: "
                                  "False)",
                  default=False,
                  type="bool")

def pytest_configure(config):
    if config.option.runxfail:
@@ -178,6 +184,18 @@ def pytest_runtest_setup(item):

def pytest_pyfunc_call(pyfuncitem):
    check_xfail_no_run(pyfuncitem)
    evalxfail = pyfuncitem._evalxfail
    if evalxfail.istrue() and _is_strict_xfail(evalxfail, pyfuncitem.config):
        del pyfuncitem._evalxfail
        explanation = evalxfail.getexplanation()
        pytest.fail('[XPASS(strict)] ' + explanation,
                    pytrace=False)

def _is_strict_xfail(evalxfail, config):
    default = config.getini('xfail_strict')
    return evalxfail.get('strict', default)

def check_xfail_no_run(item):
    if not item.config.option.runxfail:

doc/en/skipping.rst

@@ -32,12 +32,17 @@ Marking a test function to be skipped

.. versionadded:: 2.9

The simplest way to skip a test function is to mark it with the `skip` decorator
which may be passed an optional ``reason``:

.. code-block:: python

    @pytest.mark.skip(reason="no way of currently testing this")
    def test_the_unknown():
        ...

``skipif``
~~~~~~~~~~

.. versionadded:: 2.0, 2.4

If you wish to skip something conditionally then you can use `skipif` instead.
@@ -120,7 +125,7 @@ Mark a test function as expected to fail
-------------------------------------------------------

You can use the ``xfail`` marker to indicate that you
expect a test to fail::

    @pytest.mark.xfail
    def test_function():
@@ -128,14 +133,36 @@ expect the test to fail::

This test will be run but no traceback will be reported
when it fails. Instead terminal reporting will list it in the
"expected to fail" (``XFAIL``) or "unexpectedly passing" (``XPASS``) sections.

``strict`` parameter
~~~~~~~~~~~~~~~~~~~~

.. versionadded:: 2.9

Both ``XFAIL`` and ``XPASS`` don't fail the test suite, unless the ``strict`` keyword-only
parameter is passed as ``True``:

.. code-block:: python

    @pytest.mark.xfail(strict=True)
    def test_function():
        ...

This will make ``XPASS`` ("unexpectedly passing") results from this test fail the test suite.

You can change the default value of the ``strict`` parameter using the
``xfail_strict`` ini option:

.. code-block:: ini

    [pytest]
    xfail_strict=true
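
Because an explicit ``strict`` argument on the marker takes precedence over the ini default (see ``evalxfail.get('strict', default)`` in the implementation above), an individual test can still opt out when ``xfail_strict`` is enabled; a small illustrative sketch:

.. code-block:: python

    # assumes xfail_strict = true in the ini file; this one test opts out
    @pytest.mark.xfail(reason="flaky, sometimes passes", strict=False)
    def test_flaky_behaviour():
        ...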
``reason`` parameter
~~~~~~~~~~~~~~~~~~~~

As with skipif_ you can also mark your expectation of a failure
on a particular platform::
@@ -145,14 +172,51 @@ on a particular platform::
    def test_function():
        ...
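
For instance, such a conditional ``xfail`` might look like the following (the condition and reason used here are only placeholders):

.. code-block:: python

    import sys

    @pytest.mark.xfail(sys.platform == "win32",
                       reason="known failure on this platform")
    def test_function():
        ...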
``raises`` parameter
~~~~~~~~~~~~~~~~~~~~

If you want to be more specific as to why the test is failing, you can specify
a single exception, or a list of exceptions, in the ``raises`` argument.

.. code-block:: python

    @pytest.mark.xfail(raises=RuntimeError)
    def test_function():
        ...

Then the test will be reported as a regular failure if it fails with an
exception not mentioned in ``raises``.

``run`` parameter
~~~~~~~~~~~~~~~~~

If a test should be marked as xfail and reported as such but should not even be
executed, use the ``run`` parameter as ``False``:

.. code-block:: python

    @pytest.mark.xfail(run=False)
    def test_function():
        ...

This is especially useful for marking crashing tests for later inspection.

Ignoring xfail marks
~~~~~~~~~~~~~~~~~~~~

By specifying on the commandline::

    pytest --runxfail

you can force the running and reporting of an ``xfail`` marked test
as if it weren't marked at all.

Examples
~~~~~~~~

Here is a simple test file with several usages:

.. literalinclude:: example/xfail_demo.py
@@ -181,6 +245,19 @@ Running it with the report-on-xfail option gives this output::

   ======= 7 xfailed in 0.12 seconds ========

xfail signature summary
~~~~~~~~~~~~~~~~~~~~~~~

Here's the signature of the ``xfail`` marker, using Python 3 keyword-only
arguments syntax:

.. code-block:: python

    def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False):
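
For illustration, a marker exercising all of these parameters at once could look like this (the condition, reason and exception type are placeholders):

.. code-block:: python

    import sys

    @pytest.mark.xfail(sys.platform == "win32",
                       reason="fails on this platform",
                       raises=OSError,
                       run=True,
                       strict=True)
    def test_function():
        ...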
.. _`skip/xfail with parametrize`:

Skip/xfail with parametrize
@@ -189,19 +266,19 @@ Skip/xfail with parametrize

It is possible to apply markers like skip and xfail to individual
test instances when using parametrize::

    import pytest

    @pytest.mark.parametrize(("n", "expected"), [
        (1, 2),
        pytest.mark.xfail((1, 0)),
        pytest.mark.xfail(reason="some bug")((1, 3)),
        (2, 3),
        (3, 4),
        (4, 5),
        pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
    ])
    def test_increment(n, expected):
        assert n + 1 == expected

Imperative xfail from within a test or setup function

testing/test_skipping.py

@@ -350,6 +350,58 @@ class TestXFail:
            matchline,
        ])

    @pytest.mark.parametrize('strict', [True, False])
    def test_strict_xfail(self, testdir, strict):
        p = testdir.makepyfile("""
            import pytest

            @pytest.mark.xfail(reason='unsupported feature', strict=%s)
            def test_foo():
                pass
        """ % strict)
        result = testdir.runpytest(p, '-rxX')
        if strict:
            result.stdout.fnmatch_lines([
                '*test_foo*',
                '*XPASS(strict)*unsupported feature*',
            ])
        else:
            result.stdout.fnmatch_lines([
                '*test_strict_xfail*',
                'XPASS test_strict_xfail.py::test_foo unsupported feature',
            ])
        assert result.ret == (1 if strict else 0)

    @pytest.mark.parametrize('strict', [True, False])
    def test_strict_xfail_condition(self, testdir, strict):
        p = testdir.makepyfile("""
            import pytest

            @pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
            def test_foo():
                pass
        """ % strict)
        result = testdir.runpytest(p, '-rxX')
        result.stdout.fnmatch_lines('*1 passed*')
        assert result.ret == 0

    @pytest.mark.parametrize('strict_val', ['true', 'false'])
    def test_strict_xfail_default_from_file(self, testdir, strict_val):
        testdir.makeini('''
            [pytest]
            xfail_strict = %s
        ''' % strict_val)
        p = testdir.makepyfile("""
            import pytest

            @pytest.mark.xfail(reason='unsupported feature')
            def test_foo():
                pass
        """)
        result = testdir.runpytest(p, '-rxX')
        strict = strict_val == 'true'
        result.stdout.fnmatch_lines('*1 failed*' if strict else '*1 xpassed*')
        assert result.ret == (1 if strict else 0)


class TestXFailwithSetupTeardown:
    def test_failing_setup_issue9(self, testdir):