parent 0431b8bb74
commit 9d4e0365da
@@ -10,6 +10,9 @@
 - fix issue735: assertion failures on debug versions of Python 3.4+
   Thanks Benjamin Peterson.
 
+- fix issue114: skipif marker reports to internal skipping plugin;
+  Thanks Floris Bruynooghe for reporting and Bruno Oliveira for the PR.
+
 2.7.1 (compared to 2.7.0)
 -----------------------------
 
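For context, the marker this changelog entry concerns: a minimal, hypothetical usage sketch (the test name and condition here are illustrative only, not from the commit):

    import sys
    import pytest

    # skipif: the test is skipped when the condition is true; with -rs
    # the skip reason shows up in the short test summary.
    @pytest.mark.skipif(sys.platform == "win32",
                        reason="requires a POSIX platform")
    def test_posix_only():
        assert True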
@@ -137,6 +137,7 @@ class MarkEvaluator:
 def pytest_runtest_setup(item):
     evalskip = MarkEvaluator(item, 'skipif')
     if evalskip.istrue():
+        item._evalskip = evalskip
         pytest.skip(evalskip.getexplanation())
     item._evalxfail = MarkEvaluator(item, 'xfail')
     check_xfail_no_run(item)
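The ordering in this hunk matters: pytest.skip() raises the Skipped exception at the call site, so the evaluator has to be stashed on the item before the call or the line would never run. A standalone sketch of that behavior:

    import pytest

    try:
        pytest.skip("demonstration only")
    except BaseException as exc:
        # control leaves skip() immediately via an exception
        print(type(exc).__name__)   # prints: Skipped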
@@ -156,6 +157,7 @@ def pytest_runtest_makereport(item, call):
     outcome = yield
     rep = outcome.get_result()
     evalxfail = getattr(item, '_evalxfail', None)
+    evalskip = getattr(item, '_evalskip', None)
     # unittest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
         # we need to translate into how pytest encodes xpass
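The surrounding hook uses the hookwrapper pattern: yield to let the default implementation build the report, then post-process it. A minimal conftest.py sketch, assuming a modern pytest spelling (on 2.7 the decorator was @pytest.mark.hookwrapper):

    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield              # inner hooks run and build the report
        rep = outcome.get_result()   # the TestReport instance
        # attach an extra (name, content) section to the report
        rep.sections.append(("sketch", "saw report for %s" % rep.nodeid))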
@@ -177,6 +179,13 @@ def pytest_runtest_makereport(item, call):
         elif call.when == "call":
             rep.outcome = "failed"  # xpass outcome
             rep.wasxfail = evalxfail.getexplanation()
+    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
+        # skipped by mark.skipif; change the location of the failure
+        # to point to the item definition, otherwise it will display
+        # the location of where the skip exception was raised within pytest
+        filename, line, reason = rep.longrepr
+        filename, line = item.location[:2]
+        rep.longrepr = filename, line, reason
 
 # called by terminalreporter progress reporting
 def pytest_report_teststatus(report):
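The new branch keeps only the reason string from rep.longrepr and substitutes the test's own file and line. A standalone illustration with invented values:

    # hypothetical values standing in for rep.longrepr and item.location
    rep_longrepr = ("_pytest/skipping.py", 140, "Skipped: hasattr(sys, 'platform')")
    item_location = ("test_foo.py", 2, "test_that")

    filename, line, reason = rep_longrepr    # where skip() was raised
    filename, line = item_location[:2]       # replace with the test's location
    print((filename, line, reason))
    # -> ('test_foo.py', 2, "Skipped: hasattr(sys, 'platform')")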
@@ -396,7 +396,7 @@ class TestSkipif:
 
 
     def test_skipif_reporting(self, testdir):
-        p = testdir.makepyfile("""
+        p = testdir.makepyfile(test_foo="""
             import pytest
             @pytest.mark.skipif("hasattr(sys, 'platform')")
             def test_that():
@@ -404,7 +404,7 @@ class TestSkipif:
         """)
         result = testdir.runpytest(p, '-s', '-rs')
         result.stdout.fnmatch_lines([
-            "*SKIP*1*platform*",
+            "*SKIP*1*test_foo.py*platform*",
             "*1 skipped*"
         ])
         assert result.ret == 0
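The makepyfile(test_foo=...) keyword form pins the generated module name to test_foo.py, which is what the tightened "*test_foo.py*" pattern above relies on. A small usage sketch, assuming the pytester plugin is enabled (pytest_plugins = "pytester" in a conftest.py):

    def test_named_module(testdir):
        # the keyword writes test_foo.py instead of a file named after this test
        p = testdir.makepyfile(test_foo="def test_ok(): pass")
        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines(["*1 passed*"])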