Add strict option to xfail, making tests which XPASS actually fail the suite
Fix #1355
parent a965386b9e
commit 7823838e69
@@ -14,6 +14,12 @@ def pytest_addoption(parser):
            action="store_true", dest="runxfail", default=False,
            help="run tests even if they are marked xfail")

+    parser.addini("xfail_strict", "default for the strict parameter of xfail "
+                                  "markers when not given explicitly (default: "
+                                  "False)",
+                                  default=False,
+                                  type="bool")
+

 def pytest_configure(config):
     if config.option.runxfail:
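To show how the newly registered ``xfail_strict`` value can be consumed, here is a minimal ``conftest.py`` sketch; it is not part of this commit, and the header text is purely illustrative::

    # conftest.py -- illustrative only, not part of this commit
    def pytest_report_header(config):
        # getini() works here because the plugin registers "xfail_strict"
        # above with type="bool" and default=False.
        if config.getini("xfail_strict"):
            return "xfail_strict enabled: unexpected passes (XPASS) will fail the run"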
@@ -178,6 +184,18 @@ def pytest_runtest_setup(item):

 def pytest_pyfunc_call(pyfuncitem):
     check_xfail_no_run(pyfuncitem)
+    evalxfail = pyfuncitem._evalxfail
+    if evalxfail.istrue() and _is_strict_xfail(evalxfail, pyfuncitem.config):
+        del pyfuncitem._evalxfail
+        explanation = evalxfail.getexplanation()
+        pytest.fail('[XPASS(strict)] ' + explanation,
+                    pytrace=False)
+
+
+def _is_strict_xfail(evalxfail, config):
+    default = config.getini('xfail_strict')
+    return evalxfail.get('strict', default)
+

 def check_xfail_no_run(item):
     if not item.config.option.runxfail:
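As a usage sketch of the behavior added in ``pytest_pyfunc_call`` (the file name, test names and reason are assumptions, not taken from this commit)::

    # test_strict_demo.py
    import pytest

    @pytest.mark.xfail(reason="known issue", strict=True)
    def test_unexpectedly_passes():
        # An unexpected pass here triggers pytest.fail() above with
        # "[XPASS(strict)] known issue" and the session exits non-zero.
        assert 1 + 1 == 2

    @pytest.mark.xfail(reason="known issue")  # strict falls back to xfail_strict
    def test_unexpectedly_passes_too():
        # Without strict, the unexpected pass is only reported as XPASS.
        assert 1 + 1 == 2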
@@ -32,12 +32,17 @@ Marking a test function to be skipped
 .. versionadded:: 2.9

 The simplest way to skip a test function is to mark it with the `skip` decorator
-which may be passed an optional `reason`:
+which may be passed an optional ``reason``:

 .. code-block:: python

     @pytest.mark.skip(reason="no way of currently testing this")
     def test_the_unknown():
         ...

+``skipif``
+~~~~~~~~~~
+
+.. versionadded:: 2.0, 2.4
+
 If you wish to skip something conditionally then you can use `skipif` instead.
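The hunk above introduces the ``skipif`` heading without an example; a conventional sketch (condition and reason are placeholders, not from this commit) looks like::

    import sys
    import pytest

    # Skip when the condition is true; "reason" is shown in the report.
    @pytest.mark.skipif(sys.platform == "win32",
                        reason="does not run on Windows")
    def test_posix_only():
        ...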
@@ -120,7 +125,7 @@ Mark a test function as expected to fail
 -------------------------------------------------------

 You can use the ``xfail`` marker to indicate that you
-expect the test to fail::
+expect a test to fail::

     @pytest.mark.xfail
     def test_function():
@@ -128,14 +133,36 @@ expect the test to fail::

 This test will be run but no traceback will be reported
 when it fails. Instead terminal reporting will list it in the
-"expected to fail" or "unexpectedly passing" sections.
+"expected to fail" (``XFAIL``) or "unexpectedly passing" (``XPASS``) sections.

-By specifying on the commandline::
-
-    pytest --runxfail
-
-you can force the running and reporting of an ``xfail`` marked test
-as if it weren't marked at all.
+``strict`` parameter
+~~~~~~~~~~~~~~~~~~~~
+
+.. versionadded:: 2.9
+
+Both ``XFAIL`` and ``XPASS`` don't fail the test suite, unless the ``strict`` keyword-only
+parameter is passed as ``True``:
+
+.. code-block:: python
+
+    @pytest.mark.xfail(strict=True)
+    def test_function():
+        ...
+
+This will make ``XPASS`` ("unexpectedly passing") results from this test fail the test suite.
+
+You can change the default value of the ``strict`` parameter using the
+``xfail_strict`` ini option:
+
+.. code-block:: ini
+
+    [pytest]
+    xfail_strict=true
+
+
+``reason`` parameter
+~~~~~~~~~~~~~~~~~~~~

 As with skipif_ you can also mark your expectation of a failure
 on a particular platform::
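Per ``_is_strict_xfail`` above, a ``strict`` value given on the marker takes precedence over the ``xfail_strict`` ini default. A sketch of that precedence (file names, reasons and layout are assumptions)::

    # pytest.ini (assumed):
    #     [pytest]
    #     xfail_strict = true

    # test_precedence.py (assumed)
    import pytest

    @pytest.mark.xfail(reason="known bug")                 # uses ini default -> strict
    def test_inherits_ini_default():
        ...

    @pytest.mark.xfail(reason="known bug", strict=False)   # marker overrides the ini
    def test_marker_overrides_ini():
        ...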
@@ -145,14 +172,51 @@ on a particular platform::
     def test_function():
         ...

+
+``raises`` parameter
+~~~~~~~~~~~~~~~~~~~~
+
 If you want to be more specific as to why the test is failing, you can specify
-a single exception, or a list of exceptions, in the ``raises`` argument. Then
-the test will be reported as a regular failure if it fails with an
+a single exception, or a list of exceptions, in the ``raises`` argument.
+
+.. code-block:: python
+
+    @pytest.mark.xfail(raises=RuntimeError)
+    def test_function():
+        ...
+
+Then the test will be reported as a regular failure if it fails with an
 exception not mentioned in ``raises``.

-You can furthermore prevent the running of an "xfail" test or
-specify a reason such as a bug ID or similar. Here is
-a simple test file with the several usages:
+``run`` parameter
+~~~~~~~~~~~~~~~~~
+
+If a test should be marked as xfail and reported as such but should not even be
+executed, use the ``run`` parameter as ``False``:
+
+.. code-block:: python
+
+    @pytest.mark.xfail(run=False)
+    def test_function():
+        ...
+
+This is especially useful for marking crashing tests for later inspection.
+
+
+Ignoring xfail marks
+~~~~~~~~~~~~~~~~~~~~
+
+By specifying on the commandline::
+
+    pytest --runxfail
+
+you can force the running and reporting of an ``xfail`` marked test
+as if it weren't marked at all.
+
+Examples
+~~~~~~~~
+
+Here is a simple test file with several usages:

 .. literalinclude:: example/xfail_demo.py
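The text above allows several exception types for ``raises``; passing a tuple is the usual way to spell that (the exception types and reason here are illustrative assumptions)::

    import pytest

    @pytest.mark.xfail(raises=(RuntimeError, OSError), reason="backend unstable")
    def test_backend():
        ...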
@@ -181,6 +245,19 @@ Running it with the report-on-xfail option gives this output::

    ======= 7 xfailed in 0.12 seconds ========

+
+xfail signature summary
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Here's the signature of the ``xfail`` marker, using Python 3 keyword-only
+arguments syntax:
+
+.. code-block:: python
+
+    def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False):
+
+
 .. _`skip/xfail with parametrize`:

 Skip/xfail with parametrize
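For completeness, a sketch using every parameter from the signature above in one marker (the condition and values are assumptions, not from this commit)::

    import sys
    import pytest

    @pytest.mark.xfail(sys.platform == "win32",
                       reason="fails on Windows",
                       raises=OSError,
                       run=True,
                       strict=False)
    def test_with_all_parameters():
        ...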
@@ -350,6 +350,58 @@ class TestXFail:
             matchline,
         ])

+    @pytest.mark.parametrize('strict', [True, False])
+    def test_strict_xfail(self, testdir, strict):
+        p = testdir.makepyfile("""
+            import pytest
+
+            @pytest.mark.xfail(reason='unsupported feature', strict=%s)
+            def test_foo():
+                pass
+        """ % strict)
+        result = testdir.runpytest(p, '-rxX')
+        if strict:
+            result.stdout.fnmatch_lines([
+                '*test_foo*',
+                '*XPASS(strict)*unsupported feature*',
+            ])
+        else:
+            result.stdout.fnmatch_lines([
+                '*test_strict_xfail*',
+                'XPASS test_strict_xfail.py::test_foo unsupported feature',
+            ])
+        assert result.ret == (1 if strict else 0)
+
+    @pytest.mark.parametrize('strict', [True, False])
+    def test_strict_xfail_condition(self, testdir, strict):
+        p = testdir.makepyfile("""
+            import pytest
+
+            @pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
+            def test_foo():
+                pass
+        """ % strict)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines('*1 passed*')
+        assert result.ret == 0
+
+    @pytest.mark.parametrize('strict_val', ['true', 'false'])
+    def test_strict_xfail_default_from_file(self, testdir, strict_val):
+        testdir.makeini('''
+            [pytest]
+            xfail_strict = %s
+        ''' % strict_val)
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(reason='unsupported feature')
+            def test_foo():
+                pass
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        strict = strict_val == 'true'
+        result.stdout.fnmatch_lines('*1 failed*' if strict else '*1 xpassed*')
+        assert result.ret == (1 if strict else 0)
+

 class TestXFailwithSetupTeardown:
     def test_failing_setup_issue9(self, testdir):