unittest's unexpectedSuccess should work as a non-strict xpass

Make sure the tests for this behavior obtain the same return code whether the same file is run with pytest or with unittest.
parent dfc659f781
commit 4ed412eb59
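The scenario this commit addresses is a test decorated with @unittest.expectedFailure that actually passes. Previously pytest reported such a test as a failure unconditionally, while unittest itself only started failing the run for unexpected successes in Python 3.4. A minimal file of the affected kind (illustrative only, not part of the diff; the file name is hypothetical):

# test_unexpected.py -- hypothetical example of the kind of test affected
import unittest

class MyTestCase(unittest.TestCase):
    @unittest.expectedFailure
    def test_passes_anyway(self):
        self.assertTrue(True)  # passes despite being marked as an expected failure

if __name__ == '__main__':
    unittest.main()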
_pytest/skipping.py

@@ -220,6 +220,18 @@ def check_strict_xfail(pyfuncitem):
             pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
 
 
+def _is_unittest_unexpected_success_a_failure():
+    """Return if the test suite should fail if a @expectedFailure unittest test PASSES.
+
+    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
+        Changed in version 3.4: Returns False if there were any
+        unexpectedSuccesses from tests marked with the expectedFailure() decorator.
+
+    TODO: this should be moved to the "compat" module.
+    """
+    return sys.version_info >= (3, 4)
+
+
 @pytest.hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
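The version check above mirrors the stdlib behavior quoted in the docstring: starting with Python 3.4, unittest.TestResult.wasSuccessful() returns False when there are unexpected successes. A quick way to confirm this on a given interpreter (illustrative sketch, not part of the diff):

import sys
import unittest

class Example(unittest.TestCase):
    @unittest.expectedFailure
    def test_unexpectedly_passes(self):
        pass  # passes although marked @expectedFailure

suite = unittest.TestLoader().loadTestsFromTestCase(Example)
result = unittest.TextTestRunner(verbosity=0).run(suite)
# Python >= 3.4: wasSuccessful() is False (one unexpected success);
# older versions: True -- which is what the helper's version check encodes.
print(sys.version_info[:2], result.wasSuccessful())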
@@ -228,13 +240,15 @@ def pytest_runtest_makereport(item, call):
     evalskip = getattr(item, '_evalskip', None)
     # unittest special case, see setting of _unexpectedsuccess
     if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
-        # unittest treats an 'unexpected success' as a failure
-        # which means pytest needs to handle it like a 'xfail(strict=True)'
-        rep.outcome = "failed"
         if item._unexpectedsuccess:
             rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
         else:
             rep.longrepr = "Unexpected success"
+        if _is_unittest_unexpected_success_a_failure():
+            rep.outcome = "failed"
+        else:
+            rep.outcome = "passed"
+        rep.wasxfail = rep.longrepr
     elif item.config.option.runxfail:
         pass  # don't interfere
     elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
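Setting rep.wasxfail is what routes the result through the xfail reporting path, so a passing @expectedFailure test shows up as an xpass in the summary (or, on Python >= 3.4, as a failure with the "Unexpected success" longrepr). Roughly, the status classification on the reporting side looks like this (simplified sketch of pytest's pytest_report_teststatus logic in the skipping plugin; details vary between versions):

def classify(report):
    # simplified: the reporting hook checks for the `wasxfail` attribute
    # set in pytest_runtest_makereport above
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed"
        if report.passed:
            return "xpassed"
    return report.outcome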
testing/test_unittest.py

@@ -419,8 +419,9 @@ class TestTrialUnittest:
                 def test_method(self):
                     pass
         """)
+        from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+        should_fail = _is_unittest_unexpected_success_a_failure()
         result = testdir.runpytest("-rxs")
-        assert result.ret == 0
         result.stdout.fnmatch_lines_random([
             "*XFAIL*test_trial_todo*",
             "*trialselfskip*",
@@ -429,8 +430,9 @@ class TestTrialUnittest:
             "*i2wanto*",
             "*sys.version_info*",
             "*skip_in_method*",
-            "*4 skipped*3 xfail*1 xpass*",
+            "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
         ])
+        assert result.ret == (1 if should_fail else 0)
 
     def test_trial_error(self, testdir):
         testdir.makepyfile("""
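The tests now derive their expected outcome from the helper instead of hard-coding it. The same check can be run interactively to see which behavior a given interpreter will get (the helper is private pytest API introduced by this commit, so subject to change):

from _pytest.skipping import _is_unittest_unexpected_success_a_failure

# True on Python >= 3.4: an unexpected success fails the run;
# False on older versions: it is reported as a non-strict xpass.
print(_is_unittest_unexpected_success_a_failure())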
@@ -587,39 +589,62 @@ def test_unittest_typerror_traceback(testdir):
     assert "TypeError" in result.stdout.str()
     assert result.ret == 1
 
 
 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_expected_failure_for_failing_test_is_xfail(testdir):
-    testdir.makepyfile("""
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
+    script = testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
             def test_failing_test_is_xfail(self):
                 assert False
+        if __name__ == '__main__':
+            unittest.main()
     """)
-    result = testdir.runpytest("-rxX")
-    result.stdout.fnmatch_lines([
-        "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
-        "*1 xfailed*",
-    ])
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
+            "*1 xfailed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*OK*(expected failures=1)*",
+        ])
     assert result.ret == 0
 
 
 @pytest.mark.skipif("sys.version_info < (2,7)")
-def test_unittest_expected_failure_for_passing_test_is_fail(testdir):
-    testdir.makepyfile("""
+@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
+def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
+    script = testdir.makepyfile("""
         import unittest
         class MyTestCase(unittest.TestCase):
             @unittest.expectedFailure
             def test_passing_test_is_fail(self):
                 assert True
+        if __name__ == '__main__':
+            unittest.main()
     """)
-    result = testdir.runpytest("-rxX")
-    result.stdout.fnmatch_lines([
-        "*FAILURES*",
-        "*MyTestCase*test_passing_test_is_fail*",
-        "*Unexpected success*",
-        "*1 failed*",
-    ])
-    assert result.ret == 1
+    from _pytest.skipping import _is_unittest_unexpected_success_a_failure
+    should_fail = _is_unittest_unexpected_success_a_failure()
+    if runner == 'pytest':
+        result = testdir.runpytest("-rxX")
+        result.stdout.fnmatch_lines([
+            "*MyTestCase*test_passing_test_is_fail*",
+            "*1 failed*" if should_fail else "*1 xpassed*",
+        ])
+    else:
+        result = testdir.runpython(script)
+        result.stderr.fnmatch_lines([
+            "*1 test in*",
+            "*(unexpected successes=1)*",
+        ])
+
+    assert result.ret == (1 if should_fail else 0)
 
 
 @pytest.mark.parametrize('fix_type, stmt', [
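As the commit message says, the point of the parametrized tests is that running the same file under either runner yields the same return code. A rough way to check that outside the test suite (illustrative sketch; the file name and layout are hypothetical):

import subprocess
import sys

SCRIPT = "test_unexpected.py"  # a file like the ones generated in the tests above

pytest_rc = subprocess.call([sys.executable, "-m", "pytest", "-q", SCRIPT])
unittest_rc = subprocess.call([sys.executable, SCRIPT])  # runs unittest.main() via __main__

# Expected to agree: 1 on Python >= 3.4 (the unexpected success fails the run),
# 0 on older versions (treated as a non-strict xpass / plain pass).
assert pytest_rc == unittest_rc, (pytest_rc, unittest_rc)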