Merge pull request #1389 from nicoddemus/fix-strict-xfail

Fix bug in strict xfail: the test was not actually being called
Bruno Oliveira 2016-02-17 17:58:49 -02:00
commit 6940f82235
4 changed files with 48 additions and 15 deletions
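
Before this fix, the skipping plugin raised the ``[XPASS(strict)]`` failure from its ``pytest_pyfunc_call`` hook before the test function was ever invoked, so a strict xfail test never ran and a genuinely failing test could not be reported as a plain XFAIL. The intended behaviour, shown as a minimal illustrative sketch (the file and test names are made up; only the ``@pytest.mark.xfail(strict=True)`` API comes from pytest):

    # test_strict_demo.py -- illustrative sketch only, not part of this commit
    import pytest

    @pytest.mark.xfail(reason='not implemented yet', strict=True)
    def test_still_broken():
        assert 0  # body runs and fails -> reported as a normal XFAIL

    @pytest.mark.xfail(reason='not implemented yet', strict=True)
    def test_quietly_fixed():
        pass      # body runs and passes -> fails the suite as [XPASS(strict)]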


@@ -17,6 +17,9 @@
   ``xfail_strict`` ini option that can be used to configure it project-wise.
   Thanks `@rabbbit`_ for the request and `@nicoddemus`_ for the PR (`#1355`_).

+* ``Parser.addini`` now supports options of type ``bool``. Thanks
+  `@nicoddemus`_ for the PR.
+
 * New ``ALLOW_BYTES`` doctest option strips ``b`` prefixes from byte strings
   in doctest output (similar to ``ALLOW_UNICODE``).
   Thanks `@jaraco`_ for the request and `@nicoddemus`_ for the PR (`#1287`_).
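
Two of the entries above describe features the fixed code depends on rather than the fix itself: ``xfail_strict`` provides a project-wide default for ``strict``, and ``Parser.addini`` accepting ``type="bool"`` is presumably what lets such an option be declared as a real boolean. A hedged sketch of declaring a boolean ini option in a conftest (``pytest_addoption``, ``parser.addini`` and ``config.getini`` are real pytest APIs; the option name ``my_flag`` is invented for illustration):

    # conftest.py -- illustrative sketch, assuming a pytest with bool ini support
    def pytest_addoption(parser):
        # declared as type="bool", so ini values such as "true"/"false"
        # come back from config.getini() as Python booleans
        parser.addini('my_flag', 'example boolean ini option',
                      type='bool', default=False)

    def pytest_configure(config):
        if config.getini('my_flag'):
            pass  # react to the flag being enabled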


@@ -182,28 +182,37 @@ def pytest_runtest_setup(item):
     item._evalxfail = MarkEvaluator(item, 'xfail')
     check_xfail_no_run(item)


+@pytest.mark.hookwrapper
 def pytest_pyfunc_call(pyfuncitem):
     check_xfail_no_run(pyfuncitem)
-    evalxfail = pyfuncitem._evalxfail
-    if evalxfail.istrue() and _is_strict_xfail(evalxfail, pyfuncitem.config):
-        del pyfuncitem._evalxfail
-        explanation = evalxfail.getexplanation()
-        pytest.fail('[XPASS(strict)] ' + explanation,
-                    pytrace=False)
-
-
-def _is_strict_xfail(evalxfail, config):
-    default = config.getini('xfail_strict')
-    return evalxfail.get('strict', default)
+    outcome = yield
+    passed = outcome.excinfo is None
+    if passed:
+        check_strict_xfail(pyfuncitem)


 def check_xfail_no_run(item):
+    """check xfail(run=False)"""
     if not item.config.option.runxfail:
         evalxfail = item._evalxfail
         if evalxfail.istrue():
             if not evalxfail.get('run', True):
                 pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())


+def check_strict_xfail(pyfuncitem):
+    """check xfail(strict=True) for the given PASSING test"""
+    evalxfail = pyfuncitem._evalxfail
+    if evalxfail.istrue():
+        strict_default = pyfuncitem.config.getini('xfail_strict')
+        is_strict_xfail = evalxfail.get('strict', strict_default)
+        if is_strict_xfail:
+            del pyfuncitem._evalxfail
+            explanation = evalxfail.getexplanation()
+            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+
+
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
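
The rewritten hook relies on pytest's hookwrapper protocol: everything before the ``yield`` runs before the remaining ``pytest_pyfunc_call`` implementations (including the one that actually calls the test function), and the object returned by the ``yield`` exposes ``excinfo``, which is ``None`` when the wrapped call did not raise. A stripped-down restatement of that pattern, separate from the plugin code above (the hook name and decorator style come from the diff; the body is illustrative):

    # illustrative sketch of the hookwrapper pattern used above
    import pytest

    @pytest.mark.hookwrapper
    def pytest_pyfunc_call(pyfuncitem):
        # code here runs before the test function is called
        outcome = yield          # the actual test call happens during the yield
        # code here runs after the test function returned or raised
        if outcome.excinfo is None:
            pass  # the test passed; this is where check_strict_xfail() hooks in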


@@ -497,6 +497,7 @@ class TestFunction:
                 return True
         config.pluginmanager.register(MyPlugin1())
         config.pluginmanager.register(MyPlugin2())
+        config.hook.pytest_runtest_setup(item=item)
        config.hook.pytest_pyfunc_call(pyfuncitem=item)

     def test_multiple_parametrize(self, testdir):
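
The single added line above is needed because the wrapper version of ``pytest_pyfunc_call`` reads ``item._evalxfail``, and that attribute is only set by ``pytest_runtest_setup``; a test that drives the hooks by hand therefore has to invoke setup first. Condensed, the required calling order looks like this (a sketch assuming pytest's ``pytester`` plugin, with an item collected via ``testdir.getitem``; names other than the hooks are illustrative):

    # sketch of the hook calling order the new wrapper expects
    def test_hook_call_order(testdir):
        item = testdir.getitem("def test_func(): pass")
        config = item.config
        config.hook.pytest_runtest_setup(item=item)      # sets item._evalxfail
        config.hook.pytest_pyfunc_call(pyfuncitem=item)  # wrapper can now read it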


@@ -127,13 +127,15 @@ class TestEvaluator:
 class TestXFail:
-    def test_xfail_simple(self, testdir):
+    @pytest.mark.parametrize('strict', [True, False])
+    def test_xfail_simple(self, testdir, strict):
         item = testdir.getitem("""
             import pytest
-            @pytest.mark.xfail
+            @pytest.mark.xfail(strict=%s)
             def test_func():
                 assert 0
-        """)
+        """ % strict)
         reports = runtestprotocol(item, log=False)
         assert len(reports) == 3
         callreport = reports[1]
@@ -350,6 +352,23 @@ class TestXFail:
             matchline,
         ])

+    def test_strict_sanity(self, testdir):
+        """sanity check for xfail(strict=True): a failing test should behave
+        exactly like a normal xfail.
+        """
+        p = testdir.makepyfile("""
+            import pytest
+            @pytest.mark.xfail(reason='unsupported feature', strict=True)
+            def test_foo():
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            '*XFAIL*',
+            '*unsupported feature*',
+        ])
+        assert result.ret == 0
+
     @pytest.mark.parametrize('strict', [True, False])
     def test_strict_xfail(self, testdir, strict):
         p = testdir.makepyfile("""
@@ -357,7 +376,7 @@ class TestXFail:
             @pytest.mark.xfail(reason='unsupported feature', strict=%s)
             def test_foo():
-                pass
+                with open('foo_executed', 'w'): pass  # make sure test executes
         """ % strict)
         result = testdir.runpytest(p, '-rxX')
         if strict:
@@ -371,6 +390,7 @@ class TestXFail:
                 'XPASS test_strict_xfail.py::test_foo unsupported feature',
             ])
         assert result.ret == (1 if strict else 0)
+        assert testdir.tmpdir.join('foo_executed').isfile()

     @pytest.mark.parametrize('strict', [True, False])
     def test_strict_xfail_condition(self, testdir, strict):
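
The ``foo_executed`` file written inside ``test_strict_xfail`` is the regression guard for this bug: the final assertion only holds if the test body really ran, which is exactly what the old implementation failed to guarantee. The same sentinel-file trick works in any ``testdir``-based test; a minimal sketch (the test and file names are illustrative, the ``testdir`` APIs are the ones used above):

    # illustrative sketch of the sentinel-file trick used in the test above
    def test_body_really_runs(testdir):
        testdir.makepyfile("""
            def test_writes_sentinel():
                with open('sentinel', 'w'):  # side effect proves the body ran
                    pass
        """)
        result = testdir.runpytest()
        assert result.ret == 0
        assert testdir.tmpdir.join('sentinel').isfile()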