fix skip/xfail confusion, reported and discussed on
http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get

commit 6e0c30d67d
parent 65cbf591d8

CHANGELOG | 12

@@ -1,21 +1,33 @@
 Changes between 2.2.4 and 2.2.5.dev
 -----------------------------------
 
+- fix xfail/skip confusion: a skip-mark or an imperative pytest.skip
+  will now take precedence before xfail-markers because we
+  can't determine xfail/xpass status in case of a skip. see also:
+  http://stackoverflow.com/questions/11105828/in-py-test-when-i-explicitly-skip-a-test-that-is-marked-as-xfail-how-can-i-get
+
 - always report installed 3rd party plugins
 
 - fix issue160: a failing setup of an xfail-marked tests should
   be reported as xfail (not xpass)
 
 - fix issue128: show captured output when capsys/capfd are used
 
 - pluginmanager.register(...) now raises ValueError if the
   plugin has been already registered or the name is taken
 
 - fix issue159: improve http://pytest.org/latest/faq.html
   especially with respect to the "magic" history, also mention
   pytest-django, trial and unittest integration.
 
 - reporting refinements:
   - pytest_report_header now receives a "startdir" so that
     you can use startdir.bestrelpath(yourpath) to show
     nice relative path
 
   - allow plugins to implement both pytest_report_header and
     pytest_sessionstart (sessionstart is invoked first).
 
   - don't show deselected reason line if there is none
 
 Changes between 2.2.3 and 2.2.4

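To make the new precedence rule concrete, here is a minimal illustrative
example (not part of this commit; the file and test names are made up), in
the spirit of the new test added further below::

    # test_skip_wins.py -- with this fix, the skip-mark takes precedence
    # over the xfail-mark, so the test is reported as skipped rather than
    # as xfailed or xpassed.
    import pytest

    @pytest.mark.skipif("True")
    @pytest.mark.xfail
    def test_skip_wins_over_xfail():
        assert 0

Running "pytest -rsxX" on this file lists the test in the skip summary
("SKIP ... condition: True"), not under XFAIL or XPASS.
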
ISSUES.txt | 28

@@ -355,3 +355,31 @@ def test_run(pytester, fslayout):
     result = pytester.runpytest(p)
     assert result.ret == 0
     assert result.passed == 1
+
+Another idea is to allow to define a full scenario including the run
+in one content string::
+
+    runscenario("""
+        test_{TESTNAME}.py:
+            import pytest
+            @pytest.mark.xfail
+            def test_that_fails():
+                assert 0
+
+            @pytest.mark.skipif("True")
+            def test_hello():
+                pass
+
+        conftest.py:
+            import pytest
+            def pytest_runsetup_setup(item):
+                pytest.skip("abc")
+
+        runpytest -rsxX
+        *SKIP*{TESTNAME}*
+        *1 skipped*
+    """)
+
+This could be run with at least three different ways to invoke pytest:
+through the shell, through "python -m pytest" and inlined. As inlined
+would be the fastest it could be run first (or "--fast" mode).

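The runscenario() helper sketched in that note does not exist; a rough
illustration of how it might be built on top of the existing testdir/pytester
helper could look like the following (the grammar, names and parsing are all
hypothetical, not part of this commit)::

    import textwrap

    def runscenario(testdir, content, testname="scenario"):
        # Hypothetical sketch: "NAME.py:" starts a file section,
        # "runpytest ARGS" runs pytest, and remaining non-blank lines are
        # fnmatch patterns expected in the output.
        content = textwrap.dedent(content).format(TESTNAME=testname)
        files, run_args, patterns, current = {}, [], [], None
        for line in content.splitlines():
            stripped = line.strip()
            if stripped.endswith(".py:"):
                current = stripped[:-1]          # start collecting a file
                files[current] = []
            elif stripped.startswith("runpytest"):
                current = None                   # switch to expectations
                run_args = stripped.split()[1:]
            elif current is not None:
                files[current].append(line)      # body line of current file
            elif stripped:
                patterns.append(stripped)        # expected-output pattern
        for name, lines in files.items():
            testdir.tmpdir.join(name).write(textwrap.dedent("\n".join(lines)))
        result = testdir.runpytest(*run_args)
        result.stdout.fnmatch_lines_random(patterns)
        return result
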
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.5.dev3'
+__version__ = '2.2.5.dev4'

@@ -114,7 +114,7 @@ class LogXML(object):
 
     def append_failure(self, report):
         #msg = str(report.longrepr.reprtraceback.extraline)
-        if "xfail" in report.keywords:
+        if hasattr(report, "wasxfail"):
             self.append(
                 Junit.skipped(message="xfail-marked test passes unexpectedly"))
             self.skipped += 1

@@ -148,8 +148,8 @@ class LogXML(object):
             self.errors += 1
 
     def append_skipped(self, report):
-        if "xfail" in report.keywords:
-            self.append(Junit.skipped(str(report.keywords['xfail']),
+        if hasattr(report, "wasxfail"):
+            self.append(Junit.skipped(str(report.wasxfail),
                 message="expected test failure"))
         else:
             filename, lineno, skipreason = report.longrepr

@@ -387,7 +387,7 @@ class Session(FSCollector):
             raise self.Interrupted(self.shouldstop)
 
     def pytest_runtest_logreport(self, report):
-        if report.failed and 'xfail' not in getattr(report, 'keywords', []):
+        if report.failed and not hasattr(report, 'wasxfail'):
             self._testsfailed += 1
             maxfail = self.config.getvalue("maxfail")
             if maxfail and self._testsfailed >= maxfail:

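The guard above means that reports carrying wasxfail (xfail/xpass results)
are not counted as real failures for --maxfail. A small illustration of what
that guard is doing, not part of the commit (file and test names made up)::

    # test_maxfail_illustration.py -- run with "pytest -x": the unexpected
    # pass of test_unexpectedly_passes is reported as xpass but does not
    # count toward --maxfail, so test_following still runs.
    import pytest

    @pytest.mark.xfail
    def test_unexpectedly_passes():
        pass

    def test_following():
        pass
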
@@ -59,7 +59,7 @@ class PdbInvoke:
             call.excinfo.errisinstance(pytest.skip.Exception) or \
             call.excinfo.errisinstance(py.std.bdb.BdbQuit):
             return rep
-        if "xfail" in rep.keywords:
+        if hasattr(rep, "wasxfail"):
             return rep
         # we assume that the above execute() suspended capturing
         # XXX we re-use the TerminalReporter's terminalwriter

@@ -138,7 +138,7 @@ def pytest_runtest_makereport(__multicall__, item, call):
         rep = __multicall__.execute()
         if rep.when == "call":
             # we need to translate into how py.test encodes xpass
-            rep.keywords['xfail'] = "reason: " + repr(item._unexpectedsuccess)
+            rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
             rep.outcome = "failed"
         return rep
     if not (call.excinfo and

@@ -149,27 +149,27 @@ def pytest_runtest_makereport(__multicall__, item, call):
     if call.excinfo and call.excinfo.errisinstance(py.test.xfail.Exception):
         if not item.config.getvalue("runxfail"):
             rep = __multicall__.execute()
-            rep.keywords['xfail'] = "reason: " + call.excinfo.value.msg
+            rep.wasxfail = "reason: " + call.excinfo.value.msg
             rep.outcome = "skipped"
             return rep
     rep = __multicall__.execute()
     evalxfail = item._evalxfail
+    if not rep.skipped:
         if not item.config.option.runxfail:
             if evalxfail.wasvalid() and evalxfail.istrue():
                 if call.excinfo:
                     rep.outcome = "skipped"
-                    rep.keywords['xfail'] = evalxfail.getexplanation()
                 elif call.when == "call":
                     rep.outcome = "failed"
-                    rep.keywords['xfail'] = evalxfail.getexplanation()
+                else:
+                    return rep
+                rep.wasxfail = evalxfail.getexplanation()
                 return rep
-    if 'xfail' in rep.keywords:
-        del rep.keywords['xfail']
     return rep
 
 # called by terminalreporter progress reporting
 def pytest_report_teststatus(report):
-    if 'xfail' in report.keywords:
+    if hasattr(report, "wasxfail"):
         if report.skipped:
             return "xfailed", "x", "xfail"
         elif report.failed:

@@ -216,7 +216,7 @@ def show_xfailed(terminalreporter, lines):
     if xfailed:
         for rep in xfailed:
             pos = rep.nodeid
-            reason = rep.keywords['xfail']
+            reason = rep.wasxfail
             lines.append("XFAIL %s" % (pos,))
             if reason:
                 lines.append(" " + str(reason))

@@ -226,7 +226,7 @@ def show_xpassed(terminalreporter, lines):
     if xpassed:
         for rep in xpassed:
             pos = rep.nodeid
-            reason = rep.keywords['xfail']
+            reason = rep.wasxfail
             lines.append("XPASS %s %s" %(pos, reason))
 
 def cached_eval(config, expr, d):

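The pattern running through these hunks is the point of the refactor: code
that used to look at report.keywords['xfail'] now checks for a wasxfail
attribute on the report. A minimal sketch of how reporting code (for example
in a conftest.py) can use it after this change -- illustrative only, the hook
is the standard pytest_runtest_logreport hook::

    # conftest.py (sketch): classify xfail results via the new attribute.
    def pytest_runtest_logreport(report):
        if hasattr(report, "wasxfail"):
            if report.skipped:
                # expected failure (xfail); wasxfail holds the explanation
                print("xfailed: %s (%s)" % (report.nodeid, report.wasxfail))
            elif report.failed:
                # unexpectedly passing xfail-marked test (xpass)
                print("xpassed: %s" % report.nodeid)
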
setup.py | 2

@@ -24,7 +24,7 @@ def main():
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.2.5.dev3',
+        version='2.2.5.dev4',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],

@@ -113,8 +113,7 @@ class TestXFail:
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.skipped
-        expl = callreport.keywords['xfail']
-        assert expl == ""
+        assert callreport.wasxfail == ""
 
     def test_xfail_xpassed(self, testdir):
         item = testdir.getitem("""

@@ -127,8 +126,7 @@ class TestXFail:
         assert len(reports) == 3
         callreport = reports[1]
         assert callreport.failed
-        expl = callreport.keywords['xfail']
-        assert expl == ""
+        assert callreport.wasxfail == ""
 
     def test_xfail_run_anyway(self, testdir):
         testdir.makepyfile("""

@@ -155,7 +153,8 @@ class TestXFail:
         reports = runtestprotocol(item, log=False)
         callreport = reports[1]
         assert callreport.failed
-        assert 'xfail' not in callreport.keywords
+        assert not hasattr(callreport, "wasxfail")
+        assert 'xfail' in callreport.keywords
 
     def test_xfail_not_report_default(self, testdir):
         p = testdir.makepyfile(test_one="""

@@ -572,3 +571,28 @@ def test_xfail_test_setup_exception(testdir):
     assert result.ret == 0
     assert 'xfailed' in result.stdout.str()
     assert 'xpassed' not in result.stdout.str()
+
+def test_imperativeskip_on_xfail_test(testdir):
+    testdir.makepyfile("""
+        import pytest
+        @pytest.mark.xfail
+        def test_that_fails():
+            assert 0
+
+        @pytest.mark.skipif("True")
+        def test_hello():
+            pass
+    """)
+    testdir.makeconftest("""
+        import pytest
+        def pytest_runtest_setup(item):
+            pytest.skip("abc")
+    """)
+    result = testdir.runpytest("-rsxX")
+    result.stdout.fnmatch_lines_random("""
+        *SKIP*abc*
+        *SKIP*condition: True*
+        *2 skipped*
+    """)