fix skip/xfail confusion, reported and discussed on
http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get
parent 65cbf591d8
commit 6e0c30d67d
CHANGELOG: 12 lines changed
@@ -1,21 +1,33 @@
 Changes between 2.2.4 and 2.2.5.dev
 -----------------------------------

+- fix xfail/skip confusion: a skip-mark or an imperative pytest.skip
+  will now take precedence over xfail-markers because we
+  can't determine xfail/xpass status in case of a skip. see also:
+  http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get
+
 - always report installed 3rd party plugins

 - fix issue160: a failing setup of an xfail-marked test should
   be reported as xfail (not xpass)

 - fix issue128: show captured output when capsys/capfd are used

 - pluginmanager.register(...) now raises ValueError if the
   plugin has already been registered or the name is taken

 - fix issue159: improve http://pytest.org/latest/faq.html
   especially with respect to the "magic" history, also mention
   pytest-django, trial and unittest integration.

 - reporting refinements:
   - pytest_report_header now receives a "startdir" so that
     you can use startdir.bestrelpath(yourpath) to show
     nice relative paths

   - allow plugins to implement both pytest_report_header and
     pytest_sessionstart (sessionstart is invoked first).

   - don't show a deselected-reason line if there is none

 Changes between 2.2.3 and 2.2.4
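To illustrate the pytest_report_header refinement listed above, here is a minimal conftest.py sketch; the hook signature with "startdir" and the bestrelpath() call come from the entry itself, while the "data" directory and the header wording are made-up examples::

    # conftest.py -- sketch only: shows the hook with the new "startdir" argument,
    # a py.path.local object for the directory pytest was invoked from.
    def pytest_report_header(config, startdir):
        datadir = startdir.join("data")  # hypothetical project-specific path
        # bestrelpath() renders a short path relative to the invocation directory
        return "using test data from: %s" % startdir.bestrelpath(datadir)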
ISSUES.txt: 28 lines changed
@@ -355,3 +355,31 @@ def test_run(pytester, fslayout):
     result = pytester.runpytest(p)
     assert result.ret == 0
     assert result.passed == 1
+
+Another idea is to allow defining a full scenario, including the run,
+in one content string::
+
+    runscenario("""
+        test_{TESTNAME}.py:
+            import pytest
+            @pytest.mark.xfail
+            def test_that_fails():
+                assert 0
+
+            @pytest.mark.skipif("True")
+            def test_hello():
+                pass
+
+        conftest.py:
+            import pytest
+            def pytest_runtest_setup(item):
+                pytest.skip("abc")
+
+        runpytest -rsxX
+        *SKIP*{TESTNAME}*
+        *1 skipped*
+    """)
+
+This could be run with at least three different ways of invoking pytest:
+through the shell, through "python -m pytest", and inlined. As inlined
+would be the fastest, it could be run first (or in a "--fast" mode).
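The runscenario() helper sketched in ISSUES.txt above does not exist yet; the following is a rough illustration of how such a scenario string could be split into files, a pytest invocation, and expected-output patterns. The format rules and the function name are assumptions, not pytest API::

    # Sketch of a parser for the hypothetical runscenario() format above:
    # "<filename>:" headings introduce indented file bodies, a "runpytest ..."
    # line gives the command-line arguments, and the lines after it are
    # fnmatch-style patterns for the expected output.
    import textwrap

    def parse_scenario(text):
        files, run_args, patterns = {}, None, []
        current, body = None, []
        for line in textwrap.dedent(text).splitlines():
            if line.strip().endswith(":") and not line.startswith(" "):
                if current:
                    files[current] = textwrap.dedent("\n".join(body))
                current, body = line.strip().rstrip(":"), []
            elif line.strip().startswith("runpytest"):
                if current:
                    files[current] = textwrap.dedent("\n".join(body))
                    current = None
                run_args = line.strip().split()[1:]
            elif run_args is not None and line.strip():
                patterns.append(line.strip())
            elif current is not None:
                body.append(line)
        if current:
            files[current] = textwrap.dedent("\n".join(body))
        return files, run_args, patterns

A driver built on this could write each file via testdir.makefile, invoke testdir.runpytest(*run_args) and match the patterns with fnmatch_lines, much like the test added at the end of this commit does by hand.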
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.5.dev3'
+__version__ = '2.2.5.dev4'
@@ -114,7 +114,7 @@ class LogXML(object):

     def append_failure(self, report):
         #msg = str(report.longrepr.reprtraceback.extraline)
-        if "xfail" in report.keywords:
+        if hasattr(report, "wasxfail"):
             self.append(
                 Junit.skipped(message="xfail-marked test passes unexpectedly"))
             self.skipped += 1
@@ -148,8 +148,8 @@ class LogXML(object):
         self.errors += 1

     def append_skipped(self, report):
-        if "xfail" in report.keywords:
-            self.append(Junit.skipped(str(report.keywords['xfail']),
+        if hasattr(report, "wasxfail"):
+            self.append(Junit.skipped(str(report.wasxfail),
                                       message="expected test failure"))
         else:
             filename, lineno, skipreason = report.longrepr
@@ -387,7 +387,7 @@ class Session(FSCollector):
             raise self.Interrupted(self.shouldstop)

     def pytest_runtest_logreport(self, report):
-        if report.failed and 'xfail' not in getattr(report, 'keywords', []):
+        if report.failed and not hasattr(report, 'wasxfail'):
             self._testsfailed += 1
             maxfail = self.config.getvalue("maxfail")
             if maxfail and self._testsfailed >= maxfail:
@@ -50,7 +50,7 @@ def pytest_make_collect_report(__multicall__, collector):

 def pytest_runtest_makereport():
     pytestPDB.item = None


 class PdbInvoke:
     @pytest.mark.tryfirst
     def pytest_runtest_makereport(self, item, call, __multicall__):
@@ -59,7 +59,7 @@ class PdbInvoke:
             call.excinfo.errisinstance(pytest.skip.Exception) or \
             call.excinfo.errisinstance(py.std.bdb.BdbQuit):
             return rep
-        if "xfail" in rep.keywords:
+        if hasattr(rep, "wasxfail"):
             return rep
         # we assume that the above execute() suspended capturing
         # XXX we re-use the TerminalReporter's terminalwriter
@@ -138,7 +138,7 @@ def pytest_runtest_makereport(__multicall__, item, call):
         rep = __multicall__.execute()
         if rep.when == "call":
             # we need to translate into how py.test encodes xpass
-            rep.keywords['xfail'] = "reason: " + repr(item._unexpectedsuccess)
+            rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
             rep.outcome = "failed"
         return rep
     if not (call.excinfo and
@@ -149,27 +149,27 @@ def pytest_runtest_makereport(__multicall__, item, call):
     if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception):
         if not item.config.getvalue("runxfail"):
             rep = __multicall__.execute()
-            rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
+            rep.wasxfail = "reason: " + call.excinfo.value.msg
             rep.outcome = "skipped"
             return rep
     rep = __multicall__.execute()
     evalxfail = item._evalxfail
-    if not item.config.option.runxfail:
-        if evalxfail.wasvalid() and evalxfail.istrue():
-            if call.excinfo:
-                rep.outcome = "skipped"
-                rep.keywords['xfail'] = evalxfail.getexplanation()
-            elif call.when == "call":
-                rep.outcome = "failed"
-                rep.keywords['xfail'] = evalxfail.getexplanation()
-            return rep
-    if 'xfail' in rep.keywords:
-        del rep.keywords['xfail']
+    if not rep.skipped:
+        if not item.config.option.runxfail:
+            if evalxfail.wasvalid() and evalxfail.istrue():
+                if call.excinfo:
+                    rep.outcome = "skipped"
+                elif call.when == "call":
+                    rep.outcome = "failed"
+                else:
+                    return rep
+                rep.wasxfail = evalxfail.getexplanation()
+                return rep
     return rep

 # called by terminalreporter progress reporting
 def pytest_report_teststatus(report):
-    if 'xfail' in report.keywords:
+    if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "xfail"
         elif report.failed:
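For plugin authors, the practical consequence of the hunks above is that the xfail explanation now lives on a report attribute instead of report.keywords['xfail']. A minimal conftest.py sketch of a hook consuming it; the wasxfail attribute is set as shown in the diff, while the choice of hook and the summary wording are just an example::

    # conftest.py -- sketch: list xfail-related outcomes in the terminal summary.
    # Reports produced by the skipping plugin now carry "wasxfail" with the
    # explanation string; report.keywords['xfail'] is no longer set.
    def pytest_terminal_summary(terminalreporter):
        for reports in terminalreporter.stats.values():
            for rep in reports:
                if hasattr(rep, "wasxfail"):
                    terminalreporter.write_line(
                        "xfail-related: %s %s" % (rep.nodeid, rep.wasxfail))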
@@ -216,7 +216,7 @@ def show_xfailed(terminalreporter, lines):
     if xfailed:
         for rep in xfailed:
             pos = rep.nodeid
-            reason = rep.keywords['xfail']
+            reason = rep.wasxfail
             lines.append("XFAIL %s" % (pos,))
             if reason:
                 lines.append(" " + str(reason))
@@ -226,7 +226,7 @@ def show_xpassed(terminalreporter, lines):
     if xpassed:
         for rep in xpassed:
             pos = rep.nodeid
-            reason = rep.keywords['xfail']
+            reason = rep.wasxfail
             lines.append("XPASS %s %s" %(pos, reason))

 def cached_eval(config, expr, d):
setup.py: 2 lines changed
@@ -24,7 +24,7 @@ def main():
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.2.5.dev3',
+        version='2.2.5.dev4',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -138,7 +138,7 @@ class TestPython:
             sys.stderr.write("hello-stderr\\n")
             raise ValueError(42)
         """)

         result, dom = runandparse(testdir)
         assert result.ret
         node = dom.getElementsByTagName("testsuite")[0]
@@ -366,7 +366,7 @@ def test_invalid_xml_escape():
                27, # issue #126
                0xD800, 0xDFFF, 0xFFFE, 0x0FFFF) #, 0x110000)
     valid = (0x9, 0xA, 0x20,) # 0xD, 0xD7FF, 0xE000, 0xFFFD, 0x10000, 0x10FFFF)

     from _pytest.junitxml import bin_xml_escape

@@ -113,8 +113,7 @@ class TestXFail:
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.skipped
-        expl = callreport.keywords['xfail']
-        assert expl == ""
+        assert callreport.wasxfail == ""

     def test_xfail_xpassed(self, testdir):
         item = testdir.getitem("""
@@ -127,8 +126,7 @@ class TestXFail:
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.failed
-        expl = callreport.keywords['xfail']
-        assert expl == ""
+        assert callreport.wasxfail == ""

     def test_xfail_run_anyway(self, testdir):
         testdir.makepyfile("""
@@ -155,7 +153,8 @@ class TestXFail:
         reports = runtestprotocol(item, log=False)
         callreport = reports[1]
         assert callreport.failed
-        assert 'xfail' not in callreport.keywords
+        assert not hasattr(callreport, "wasxfail")
+        assert 'xfail' in callreport.keywords

     def test_xfail_not_report_default(self, testdir):
         p = testdir.makepyfile(test_one="""
@@ -572,3 +571,28 @@ def test_xfail_test_setup_exception(testdir):
     assert result.ret == 0
     assert 'xfailed' in result.stdout.str()
     assert 'xpassed' not in result.stdout.str()
+
+def test_imperativeskip_on_xfail_test(testdir):
+    testdir.makepyfile("""
+        import pytest
+        @pytest.mark.xfail
+        def test_that_fails():
+            assert 0
+
+        @pytest.mark.skipif("True")
+        def test_hello():
+            pass
+    """)
+    testdir.makeconftest("""
+        import pytest
+        def pytest_runtest_setup(item):
+            pytest.skip("abc")
+    """)
+    result = testdir.runpytest("-rsxX")
+    result.stdout.fnmatch_lines_random("""
+        *SKIP*abc*
+        *SKIP*condition: True*
+        *2 skipped*
+    """)