Merge pull request #1386 from nicoddemus/strict-xpass

Add a "strict" parameter to xfail
Ronny Pfannschmidt committed 2016-02-15 02:29:43 +01:00
commit 1d190dc618
5 changed files with 209 additions and 26 deletions

_pytest/config.py

@@ -502,13 +502,14 @@ class Parser:
         """ register an ini-file option.

         :name: name of the ini-variable
-        :type: type of the variable, can be ``pathlist``, ``args`` or ``linelist``.
+        :type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
+               or ``bool``.
         :default: default value if no ini-file option exists but is queried.

         The value of ini-variables can be retrieved via a call to
         :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
         """
-        assert type in (None, "pathlist", "args", "linelist")
+        assert type in (None, "pathlist", "args", "linelist", "bool")
         self._inidict[name] = (help, type, default)
         self._ininames.append(name)
@@ -1011,6 +1012,8 @@ class Config(object):
             return shlex.split(value)
         elif type == "linelist":
             return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
+        elif type == "bool":
+            return bool(_strtobool(value.strip()))
         else:
             assert type is None
             return value
@@ -1164,3 +1167,21 @@ def create_terminal_writer(config, *args, **kwargs):
     if config.option.color == 'no':
         tw.hasmarkup = False
     return tw
+
+
+def _strtobool(val):
+    """Convert a string representation of truth to true (1) or false (0).
+
+    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+    are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+    'val' is anything else.
+
+    .. note:: copied from distutils.util
+    """
+    val = val.lower()
+    if val in ('y', 'yes', 't', 'true', 'on', '1'):
+        return 1
+    elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+        return 0
+    else:
+        raise ValueError("invalid truth value %r" % (val,))
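Taken together, these hunks give parser.addini a coerced boolean type: config.getini now routes type="bool" values through _strtobool. A minimal sketch of how a plugin's conftest.py might consume the new type (the announce_build option name is hypothetical, for illustration only):

    # conftest.py -- illustrative use of the new type="bool" ini option
    def pytest_addoption(parser):
        # the raw ini string is coerced via _strtobool inside config.getini()
        parser.addini("announce_build", "print a banner at startup (hypothetical)",
                      type="bool", default=False)

    def pytest_configure(config):
        # accepts 'yes'/'no', 'true'/'false', 'on'/'off', '1'/'0' in the ini file
        if config.getini("announce_build"):
            print("announce_build is enabled")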

_pytest/skipping.py

@@ -14,6 +14,12 @@ def pytest_addoption(parser):
         action="store_true", dest="runxfail", default=False,
         help="run tests even if they are marked xfail")
+    parser.addini("xfail_strict", "default for the strict parameter of xfail "
+                                  "markers when not given explicitly (default: "
+                                  "False)",
+                  default=False,
+                  type="bool")


 def pytest_configure(config):
     if config.option.runxfail:
@@ -178,6 +184,18 @@ def pytest_runtest_setup(item):

 def pytest_pyfunc_call(pyfuncitem):
     check_xfail_no_run(pyfuncitem)
+    evalxfail = pyfuncitem._evalxfail
+    if evalxfail.istrue() and _is_strict_xfail(evalxfail, pyfuncitem.config):
+        del pyfuncitem._evalxfail
+        explanation = evalxfail.getexplanation()
+        pytest.fail('[XPASS(strict)] ' + explanation,
+                    pytrace=False)
+
+
+def _is_strict_xfail(evalxfail, config):
+    default = config.getini('xfail_strict')
+    return evalxfail.get('strict', default)
+
+
 def check_xfail_no_run(item):
     if not item.config.option.runxfail:
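The pytest_pyfunc_call change above is what turns an unexpectedly passing strict test into a hard failure. A sketch of the behavior from the test author's side, assuming the hunks above are applied (module and test names are made up):

    # test_strict_demo.py -- hypothetical module demonstrating strict xfail
    import pytest

    @pytest.mark.xfail(reason='flaky on CI', strict=True)
    def test_passes_unexpectedly():
        assert True  # the test passes, so the strict XPASS check fires

Running pytest -rxX on this file would report one failure with the message [XPASS(strict)] flaky on CI and a nonzero exit status; with strict=False (the default, unless xfail_strict is set) the same run would merely list the test under XPASS.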

doc/en/skipping.rst

@@ -32,12 +32,17 @@ Marking a test function to be skipped

 .. versionadded:: 2.9

 The simplest way to skip a test function is to mark it with the `skip` decorator
-which may be passed an optional `reason`:
+which may be passed an optional ``reason``:
+
+.. code-block:: python

     @pytest.mark.skip(reason="no way of currently testing this")
     def test_the_unknown():
         ...

+``skipif``
+~~~~~~~~~~
+
 .. versionadded:: 2.0, 2.4

 If you wish to skip something conditionally then you can use `skipif` instead.
@@ -120,7 +125,7 @@ Mark a test function as expected to fail
 -------------------------------------------------------

 You can use the ``xfail`` marker to indicate that you
-expect the test to fail::
+expect a test to fail::

     @pytest.mark.xfail
     def test_function():
@@ -128,14 +133,36 @@ expect the test to fail::

 This test will be run but no traceback will be reported
 when it fails. Instead terminal reporting will list it in the
-"expected to fail" or "unexpectedly passing" sections.
+"expected to fail" (``XFAIL``) or "unexpectedly passing" (``XPASS``) sections.

-By specifying on the commandline::
+``strict`` parameter
+~~~~~~~~~~~~~~~~~~~~

-    pytest --runxfail
+.. versionadded:: 2.9

-you can force the running and reporting of an ``xfail`` marked test
-as if it weren't marked at all.
+Neither ``XFAIL`` nor ``XPASS`` fails the test suite unless the ``strict``
+keyword-only parameter is passed as ``True``:
+
+.. code-block:: python
+
+    @pytest.mark.xfail(strict=True)
+    def test_function():
+        ...
+
+This will make ``XPASS`` ("unexpectedly passing") results from this test fail
+the test suite.
+
+You can change the default value of the ``strict`` parameter using the
+``xfail_strict`` ini option:
+
+.. code-block:: ini
+
+    [pytest]
+    xfail_strict=true
+
+``reason`` parameter
+~~~~~~~~~~~~~~~~~~~~

 As with skipif_ you can also mark your expectation of a failure
 on a particular platform::
@@ -145,14 +172,51 @@ on a particular platform::

     def test_function():
         ...

+``raises`` parameter
+~~~~~~~~~~~~~~~~~~~~
+
 If you want to be more specific as to why the test is failing, you can specify
-a single exception, or a list of exceptions, in the ``raises`` argument. Then
-the test will be reported as a regular failure if it fails with an
+a single exception, or a list of exceptions, in the ``raises`` argument.
+
+.. code-block:: python
+
+    @pytest.mark.xfail(raises=RuntimeError)
+    def test_function():
+        ...
+
+Then the test will be reported as a regular failure if it fails with an
 exception not mentioned in ``raises``.

-You can furthermore prevent the running of an "xfail" test or
-specify a reason such as a bug ID or similar. Here is
-a simple test file with the several usages:
+``run`` parameter
+~~~~~~~~~~~~~~~~~
+
+If a test should be marked as xfail and reported as such but should not even
+be executed, pass the ``run`` parameter as ``False``:
+
+.. code-block:: python
+
+    @pytest.mark.xfail(run=False)
+    def test_function():
+        ...
+
+This is especially useful for marking crashing tests for later inspection.
+
+Ignoring xfail marks
+~~~~~~~~~~~~~~~~~~~~
+
+By specifying on the commandline::
+
+    pytest --runxfail
+
+you can force the running and reporting of an ``xfail`` marked test
+as if it weren't marked at all.
+
+Examples
+~~~~~~~~
+
+Here is a simple test file with several usages:

 .. literalinclude:: example/xfail_demo.py
@@ -181,6 +245,19 @@ Running it with the report-on-xfail option gives this output::

 ======= 7 xfailed in 0.12 seconds ========

+xfail signature summary
+~~~~~~~~~~~~~~~~~~~~~~~
+
+Here's the signature of the ``xfail`` marker, using Python 3 keyword-only
+argument syntax:
+
+.. code-block:: python
+
+    def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False):
+

 .. _`skip/xfail with parametrize`:

 Skip/xfail with parametrize
@@ -189,19 +266,19 @@ Skip/xfail with parametrize

 It is possible to apply markers like skip and xfail to individual
 test instances when using parametrize::

     import pytest

     @pytest.mark.parametrize(("n", "expected"), [
         (1, 2),
         pytest.mark.xfail((1, 0)),
         pytest.mark.xfail(reason="some bug")((1, 3)),
         (2, 3),
         (3, 4),
         (4, 5),
         pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
     ])
     def test_increment(n, expected):
         assert n + 1 == expected


 Imperative xfail from within a test or setup function

testing/test_config.py

@@ -231,6 +231,21 @@ class TestConfigAPI:
         l = config.getini("a2")
         assert l == []

+    @pytest.mark.parametrize('str_val, bool_val',
+                             [('True', True), ('no', False), ('no-ini', True)])
+    def test_addini_bool(self, testdir, str_val, bool_val):
+        testdir.makeconftest("""
+            def pytest_addoption(parser):
+                parser.addini("strip", "", type="bool", default=True)
+        """)
+        if str_val != 'no-ini':
+            testdir.makeini("""
+                [pytest]
+                strip=%s
+            """ % str_val)
+        config = testdir.parseconfig()
+        assert config.getini("strip") is bool_val
+
     def test_addinivalue_line_existing(self, testdir):
         testdir.makeconftest("""
             def pytest_addoption(parser):

testing/test_skipping.py

@@ -350,6 +350,58 @@ class TestXFail:
             matchline,
         ])

+    @pytest.mark.parametrize('strict', [True, False])
+    def test_strict_xfail(self, testdir, strict):
+        p = testdir.makepyfile("""
+            import pytest
+
+            @pytest.mark.xfail(reason='unsupported feature', strict=%s)
+            def test_foo():
+                pass
+        """ % strict)
+        result = testdir.runpytest(p, '-rxX')
+        if strict:
+            result.stdout.fnmatch_lines([
+                '*test_foo*',
+                '*XPASS(strict)*unsupported feature*',
+            ])
+        else:
+            result.stdout.fnmatch_lines([
+                '*test_strict_xfail*',
+                'XPASS test_strict_xfail.py::test_foo unsupported feature',
+            ])
+        assert result.ret == (1 if strict else 0)
+
+    @pytest.mark.parametrize('strict', [True, False])
+    def test_strict_xfail_condition(self, testdir, strict):
+        p = testdir.makepyfile("""
+            import pytest
+
+            @pytest.mark.xfail(False, reason='unsupported feature', strict=%s)
+            def test_foo():
+                pass
+        """ % strict)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines('*1 passed*')
+        assert result.ret == 0
+
+    @pytest.mark.parametrize('strict_val', ['true', 'false'])
+    def test_strict_xfail_default_from_file(self, testdir, strict_val):
+        testdir.makeini('''
+            [pytest]
+            xfail_strict = %s
+        ''' % strict_val)
+        p = testdir.makepyfile("""
+            import pytest
+
+            @pytest.mark.xfail(reason='unsupported feature')
+            def test_foo():
+                pass
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        strict = strict_val == 'true'
+        result.stdout.fnmatch_lines('*1 failed*' if strict else '*1 xpassed*')
+        assert result.ret == (1 if strict else 0)
+
 class TestXFailwithSetupTeardown:
     def test_failing_setup_issue9(self, testdir):
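Note in _is_strict_xfail above that evalxfail.get('strict', default) consults the xfail_strict ini value only as a fallback, so an explicit strict argument on the marker always wins. A sketch of that precedence, using a hypothetical test file and assuming an ini file containing xfail_strict = true:

    # test_precedence_demo.py -- illustrative only; assumes xfail_strict = true
    import pytest

    @pytest.mark.xfail(reason='known bug')  # no strict given: inherits True from the ini
    def test_inherits_ini_default():
        pass  # unexpected pass -> fails the suite as [XPASS(strict)]

    @pytest.mark.xfail(reason='known bug', strict=False)  # marker overrides the ini
    def test_overrides_ini_default():
        pass  # unexpected pass -> plain XPASS, suite stays green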