Merge branch 'master' into merge-master-into-features
Preparing for 3.0
commit 463e6572c5
AUTHORS
@@ -28,6 +28,7 @@ Carl Friedrich Bolz
Charles Cloud
Charnjit SiNGH (CCSJ)
Chris Lamb
Christian Boelsen
Christian Theunert
Christian Tismer
Christopher Gilling
@@ -3,8 +3,15 @@

**Incompatible changes**

<<<<<<< HEAD
A number of incompatible changes were made in this release, with the intent of removing features
deprecated for a long time or changing existing behaviors in order to make them less surprising/more useful.
=======
* Improve error message with fixture lookup errors: add an 'E' to the first
  line and '>' to the rest. Fixes `#717`_. Thanks `@blueyed`_ for reporting and
  a PR, `@eolo999`_ for the initial PR and `@tomviner`_ for his guidance during
  the EuroPython 2016 sprint.
>>>>>>> master

* Reinterpretation mode has now been removed. Only plain and rewrite
  mode are available; consequently the ``--assert=reinterp`` option is
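To illustrate the fixture lookup error entry above, a minimal sketch of a test requesting a fixture that does not exist (the file and test names are invented for illustration); with this change the first line of the lookup error is expected to carry the ``E`` prefix and the following lines the ``>`` prefix, as exercised by ``test_lookup_error`` later in this diff::

    # test_lookup_sketch.py -- hypothetical example; the fixture 'unknown' is deliberately undefined
    def test_needs_missing_fixture(unknown):
        assert unknown  # never runs; pytest reports a fixture lookup error at setup

    # Expected shape of the setup error (abridged):
    #   E fixture 'unknown' not found
    #   > available fixtures: cache, capfd, capsys, monkeypatch, ...
    #   > use 'pytest --fixtures [testpath]' for help on them.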
@@ -159,6 +166,14 @@ time or change existing behaviors in order to make them less surprising/more use

* Plugins now benefit from assertion rewriting. Thanks
  `@sober7`_, `@nicoddemus`_ and `@flub`_ for the PR.

* Change ``report.outcome`` for ``xpassed`` tests to ``"passed"`` in non-strict
  mode and ``"failed"`` in strict mode (see the sketch below). Thanks to `@hackebrot`_ for the PR
  (`#1795`_) and `@gprasad84`_ for the report (`#1546`_).

* Tests marked with ``xfail(strict=False)`` (the default) now appear in
  JUnitXML reports as passing tests instead of skipped.
  Thanks to `@hackebrot`_ for the PR (`#1795`_).

* Highlight path of the file location in the error report to make it easier to copy/paste.
  Thanks `@suzaku`_ for the PR (`#1778`_).
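A minimal sketch of the two ``xpass``-related entries above (test names invented for illustration): in non-strict mode an unexpectedly passing ``xfail`` test is reported with outcome ``"passed"`` and shows up as a passing test in JUnitXML, while ``strict=True`` turns it into a failure::

    import pytest

    @pytest.mark.xfail(reason="known bug")  # strict defaults to False (or to the xfail_strict ini value)
    def test_xpass_non_strict():
        assert True  # passes unexpectedly -> XPASS, report.outcome == "passed"

    @pytest.mark.xfail(strict=True, reason="known bug")
    def test_xpass_strict():
        assert True  # passes unexpectedly -> [XPASS(strict)], report.outcome == "failed"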
@@ -322,11 +337,16 @@ time or change existing behaviors in order to make them less surprising/more use

* Fixed scope overriding inside metafunc.parametrize (`#634`_).
  Thanks to `@Stranger6667`_ for the PR.

*
* Fixed the total tests tally in junit xml output (`#1798`_).
  Thanks to `@cryporchild`_ for the PR.

* ``pytest_terminal_summary`` hook now receives the ``exitstatus``
  of the test session as argument (see the sketch below). Thanks `@blueyed`_ for the PR (`#1809`_).

*

*
* Fixed off-by-one error with lines from ``request.node.warn``.
  Thanks to `@blueyed`_ for the PR.

*
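For the ``pytest_terminal_summary`` entry above, a minimal ``conftest.py`` sketch of the extended hook signature (the message written here is only an example)::

    # conftest.py -- sketch assuming the exitstatus argument added in #1809
    def pytest_terminal_summary(terminalreporter, exitstatus):
        # exitstatus is the exit code the test session will return (0 == success)
        terminalreporter.write_line("session finished with exit status %d" % exitstatus)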
@@ -354,6 +374,7 @@ time or change existing behaviors in order to make them less surprising/more use
.. _#1526: https://github.com/pytest-dev/pytest/pull/1526
.. _#1539: https://github.com/pytest-dev/pytest/issues/1539
.. _#1544: https://github.com/pytest-dev/pytest/issues/1544
.. _#1546: https://github.com/pytest-dev/pytest/issues/1546
.. _#1553: https://github.com/pytest-dev/pytest/issues/1553
.. _#1562: https://github.com/pytest-dev/pytest/issues/1562
.. _#1579: https://github.com/pytest-dev/pytest/issues/1579
@@ -377,6 +398,9 @@ time or change existing behaviors in order to make them less surprising/more use
.. _#1740: https://github.com/pytest-dev/pytest/issues/1740
.. _#1749: https://github.com/pytest-dev/pytest/issues/1749
.. _#1778: https://github.com/pytest-dev/pytest/pull/1778
.. _#1795: https://github.com/pytest-dev/pytest/pull/1795
.. _#1798: https://github.com/pytest-dev/pytest/pull/1798
.. _#1809: https://github.com/pytest-dev/pytest/pull/1809
.. _#372: https://github.com/pytest-dev/pytest/issues/372
.. _#457: https://github.com/pytest-dev/pytest/issues/457
.. _#460: https://github.com/pytest-dev/pytest/pull/460
@@ -393,6 +417,7 @@ time or change existing behaviors in order to make them less surprising/more use
.. _@BeyondEvil: https://github.com/BeyondEvil
.. _@blueyed: https://github.com/blueyed
.. _@ceridwen: https://github.com/ceridwen
.. _@cryporchild: https://github.com/cryporchild
.. _@csaftoiu: https://github.com/csaftoiu
.. _@d6e: https://github.com/d6e
.. _@davehunt: https://github.com/davehunt
@@ -400,6 +425,7 @@ time or change existing behaviors in order to make them less surprising/more use
.. _@eolo999: https://github.com/eolo999
.. _@fengxx: https://github.com/fengxx
.. _@flub: https://github.com/flub
.. _@gprasad84: https://github.com/gprasad84
.. _@graingert: https://github.com/graingert
.. _@hartym: https://github.com/hartym
.. _@JonathonSonesen: https://github.com/JonathonSonesen
@@ -1,4 +1,7 @@
import sys

from py._code.code import FormattedExcinfo

import py
import pytest
import warnings
@@ -649,6 +652,7 @@ class FixtureLookupError(LookupError):

        return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)


class FixtureLookupErrorRepr(TerminalRepr):
    def __init__(self, filename, firstlineno, tblines, errorstring, argname):
        self.tblines = tblines
@@ -658,16 +662,16 @@ class FixtureLookupErrorRepr(TerminalRepr):
        self.argname = argname

    def toterminal(self, tw):
        #tw.line("FixtureLookupError: %s" %(self.argname), red=True)
        # tw.line("FixtureLookupError: %s" %(self.argname), red=True)
        for tbline in self.tblines:
            tw.line(tbline.rstrip())
        lines = self.errorstring.split("\n")
        for line in lines:
            if line == lines[0]:
                prefix = 'E '
            else:
                prefix = ' '
            tw.line(prefix + line.strip(), red=True)
        if lines:
            tw.line('{0} {1}'.format(FormattedExcinfo.fail_marker,
                                     lines[0].strip()), red=True)
            for line in lines[1:]:
                tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker,
                                         line.strip()), red=True)
        tw.line()
        tw.line("%s:%d" % (self.filename, self.firstlineno+1))
@@ -370,7 +370,7 @@ class LogXML(object):
        suite_stop_time = time.time()
        suite_time_delta = suite_stop_time - self.suite_start_time

        numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped']
        numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error']

        logfile.write('<?xml version="1.0" encoding="utf-8"?>')
@@ -292,7 +292,7 @@ class Node(object):
        if fslocation is None:
            fslocation = getattr(self, "fspath", None)
        else:
            fslocation = "%s:%s" % fslocation[:2]
            fslocation = "%s:%s" % (fslocation[0], fslocation[1] + 1)

        self.ihook.pytest_logwarning.call_historic(kwargs=dict(
            code=code, message=message,
@@ -1538,4 +1538,3 @@ class Function(FunctionMixin, pytest.Item, fixtures.FuncargnamesCompatAttr):
    def setup(self):
        super(Function, self).setup()
        fixtures.fillfixtures(self)
@@ -216,6 +216,18 @@ def check_strict_xfail(pyfuncitem):
            pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)


def _is_unittest_unexpected_success_a_failure():
    """Return True if the test suite should fail when an @expectedFailure unittest test PASSES.

    From https://docs.python.org/3/library/unittest.html?highlight=unittest#unittest.TestResult.wasSuccessful:
        Changed in version 3.4: Returns False if there were any
        unexpectedSuccesses from tests marked with the expectedFailure() decorator.

    TODO: this should be moved to the "compat" module.
    """
    return sys.version_info >= (3, 4)


@pytest.hookimpl(hookwrapper=True)
def pytest_runtest_makereport(item, call):
    outcome = yield
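For context on ``_is_unittest_unexpected_success_a_failure`` above, a minimal unittest case that takes the unexpected-success path (class and method names invented for illustration); on Python >= 3.4 the stock unittest runner also treats such a run as failed, which is what the helper mirrors::

    import unittest

    class ExpectedFailureExample(unittest.TestCase):
        @unittest.expectedFailure
        def test_unexpectedly_passes(self):
            self.assertTrue(True)  # passes despite expectedFailure -> "unexpected success"

    if __name__ == "__main__":
        unittest.main()  # on Python >= 3.4 this run is reported as a failure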
@@ -224,9 +236,15 @@ def pytest_runtest_makereport(item, call):
    evalskip = getattr(item, '_evalskip', None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
        # we need to translate into how pytest encodes xpass
        rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
        rep.outcome = "failed"
        if item._unexpectedsuccess:
            rep.longrepr = "Unexpected success: {0}".format(item._unexpectedsuccess)
        else:
            rep.longrepr = "Unexpected success"
        if _is_unittest_unexpected_success_a_failure():
            rep.outcome = "failed"
        else:
            rep.outcome = "passed"
        rep.wasxfail = rep.longrepr
    elif item.config.option.runxfail:
        pass  # don't interfere
    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
@@ -241,8 +259,15 @@ def pytest_runtest_makereport(item, call):
            rep.outcome = "skipped"
            rep.wasxfail = evalxfail.getexplanation()
        elif call.when == "call":
            rep.outcome = "failed"  # xpass outcome
            rep.wasxfail = evalxfail.getexplanation()
            strict_default = item.config.getini('xfail_strict')
            is_strict_xfail = evalxfail.get('strict', strict_default)
            explanation = evalxfail.getexplanation()
            if is_strict_xfail:
                rep.outcome = "failed"
                rep.longrepr = "[XPASS(strict)] {0}".format(explanation)
            else:
                rep.outcome = "passed"
                rep.wasxfail = explanation
    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
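The ``xfail_strict`` ini option read above supplies the project-wide default for the marker's ``strict`` argument, and an explicit ``strict=`` on a marker overrides it; a sketch assuming ``xfail_strict = true`` is set in the project's ini file::

    import pytest

    # With xfail_strict = true configured, a bare xfail marker behaves strictly...
    @pytest.mark.xfail(reason="tracked bug")
    def test_uses_ini_default():
        assert True  # xpass -> reported as failed

    # ...while an explicit strict=False opts this one test back out.
    @pytest.mark.xfail(strict=False, reason="tracked bug")
    def test_overrides_ini_default():
        assert True  # xpass -> reported as passed regardless of the ini default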
@@ -256,7 +281,7 @@ def pytest_report_teststatus(report):
    if hasattr(report, "wasxfail"):
        if report.skipped:
            return "xfailed", "x", "xfail"
        elif report.failed:
        elif report.passed:
            return "xpassed", "X", ("XPASS", {'yellow': True})

# called by the terminalreporter instance/plugin
@@ -425,11 +425,11 @@ class TestFillFixtures:
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*ERROR*test_lookup_error*",
            "*def test_lookup_error(unknown):*",
            "*fixture*unknown*not found*",
            # check if fixtures appear sorted
            "*available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*",
            "*ERROR at setup of test_lookup_error*",
            " def test_lookup_error(unknown):*",
            "E fixture 'unknown' not found",
            "> available fixtures:*a_fixture,*b_fixture,*c_fixture,*d_fixture*monkeypatch,*",  # sorted
            "> use 'py*test --fixtures *' for help on them.",
            "*1 error*",
        ])
        assert "INTERNAL" not in result.stdout.str()
@@ -1191,22 +1191,23 @@ class TestMarkersWithParametrization:
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=2, skipped=1)

    def test_xfail_passing_is_xpass(self, testdir):
    @pytest.mark.parametrize('strict', [True, False])
    def test_xfail_passing_is_xpass(self, testdir, strict):
        s = """
            import pytest

            @pytest.mark.parametrize(("n", "expected"), [
                (1, 2),
                pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
                pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})((2, 3)),
                (3, 4),
            ])
            def test_increment(n, expected):
                assert n + 1 == expected
        """
        """.format(strict=strict)
        testdir.makepyfile(s)
        reprec = testdir.inline_run()
        # xpass is fail, obviously :)
        reprec.assertoutcome(passed=2, failed=1)
        passed, failed = (2, 1) if strict else (3, 0)
        reprec.assertoutcome(passed=passed, failed=failed)

    def test_parametrize_called_in_generate_tests(self, testdir):
        s = """
@@ -548,13 +548,14 @@ class TestWarning:
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_warn_on_test_item_from_request(self, testdir):
    def test_warn_on_test_item_from_request(self, testdir, request):
        testdir.makepyfile("""
            import pytest

            @pytest.fixture
            def fix(request):
                request.node.warn("T1", "hello")

            def test_hello(fix):
                pass
        """)
@@ -565,7 +566,7 @@ class TestWarning:
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("""
            ===*pytest-warning summary*===
            *WT1*test_warn_on_test_item*:5*hello*
            *WT1*test_warn_on_test_item*:7 hello*
        """)


class TestRootdir:
@@ -625,6 +626,7 @@ class TestRootdir:
        rootdir, inifile, inicfg = determine_setup(inifile, [tmpdir])
        assert rootdir == tmpdir


class TestOverrideIniArgs:
    @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split())
    def test_override_ini_names(self, testdir, name):
@@ -99,7 +99,31 @@ class TestPython:
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(name="pytest", errors=0, failures=1, skips=3, tests=5)
        node.assert_attr(name="pytest", errors=0, failures=1, skips=2, tests=5)

    def test_summing_simple_with_errors(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.fixture
            def fixture():
                raise Exception()
            def test_pass():
                pass
            def test_fail():
                assert 0
            def test_error(fixture):
                pass
            @pytest.mark.xfail
            def test_xfail():
                assert False
            @pytest.mark.xfail(strict=True)
            def test_xpass():
                assert True
        """)
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(name="pytest", errors=1, failures=2, skips=1, tests=5)

    def test_timing_function(self, testdir):
        testdir.makepyfile("""
@@ -130,7 +154,7 @@ class TestPython:
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(errors=1, tests=0)
        node.assert_attr(errors=1, tests=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_setup_error.py",
@@ -238,7 +262,7 @@ class TestPython:
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(errors=1, tests=0)
        node.assert_attr(errors=1, tests=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(classname="pytest", name="internal")
        fnode = tnode.find_first_by_tag("error")
@@ -368,23 +392,40 @@ class TestPython:
        result, dom = runandparse(testdir)
        # assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(skips=1, tests=1)
        node.assert_attr(skips=0, tests=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_xfailure_xpass.py",
            line="1",
            classname="test_xfailure_xpass",
            name="test_xpass")
        fnode = tnode.find_first_by_tag("skipped")
        fnode.assert_attr(message="xfail-marked test passes unexpectedly")
        # assert "ValueError" in fnode.toxml()

    def test_xfailure_xpass_strict(self, testdir):
        testdir.makepyfile("""
            import pytest
            @pytest.mark.xfail(strict=True, reason="This needs to fail!")
            def test_xpass():
                pass
        """)
        result, dom = runandparse(testdir)
        # assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(skips=0, tests=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_xfailure_xpass_strict.py",
            line="1",
            classname="test_xfailure_xpass_strict",
            name="test_xpass")
        fnode = tnode.find_first_by_tag("failure")
        fnode.assert_attr(message="[XPASS(strict)] This needs to fail!")

    def test_collect_error(self, testdir):
        testdir.makepyfile("syntax error")
        result, dom = runandparse(testdir)
        assert result.ret
        node = dom.find_first_by_tag("testsuite")
        node.assert_attr(errors=1, tests=0)
        node.assert_attr(errors=1, tests=1)
        tnode = node.find_first_by_tag("testcase")
        tnode.assert_attr(
            file="test_collect_error.py",
@@ -145,7 +145,20 @@ class TestXFail:
    def test_xfail_xpassed(self, testdir):
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xfail
            @pytest.mark.xfail(reason="this is an xfail")
            def test_func():
                assert 1
        """)
        reports = runtestprotocol(item, log=False)
        assert len(reports) == 3
        callreport = reports[1]
        assert callreport.passed
        assert callreport.wasxfail == "this is an xfail"

    def test_xfail_xpassed_strict(self, testdir):
        item = testdir.getitem("""
            import pytest
            @pytest.mark.xfail(strict=True, reason="nope")
            def test_func():
                assert 1
        """)
@@ -153,7 +166,8 @@ class TestXFail:
        assert len(reports) == 3
        callreport = reports[1]
        assert callreport.failed
        assert callreport.wasxfail == ""
        assert callreport.longrepr == "[XPASS(strict)] nope"
        assert not hasattr(callreport, "wasxfail")

    def test_xfail_run_anyway(self, testdir):
        testdir.makepyfile("""
@@ -419,8 +419,9 @@ class TestTrialUnittest:
            def test_method(self):
                pass
        """)
        from _pytest.skipping import _is_unittest_unexpected_success_a_failure
        should_fail = _is_unittest_unexpected_success_a_failure()
        result = testdir.runpytest("-rxs")
        assert result.ret == 0
        result.stdout.fnmatch_lines_random([
            "*XFAIL*test_trial_todo*",
            "*trialselfskip*",
@@ -429,8 +430,9 @@ class TestTrialUnittest:
            "*i2wanto*",
            "*sys.version_info*",
            "*skip_in_method*",
            "*4 skipped*3 xfail*1 xpass*",
            "*1 failed*4 skipped*3 xfailed*" if should_fail else "*4 skipped*3 xfail*1 xpass*",
        ])
        assert result.ret == (1 if should_fail else 0)

    def test_trial_error(self, testdir):
        testdir.makepyfile("""
@@ -587,24 +589,62 @@ def test_unittest_typerror_traceback(testdir):
    assert "TypeError" in result.stdout.str()
    assert result.ret == 1


@pytest.mark.skipif("sys.version_info < (2,7)")
def test_unittest_unexpected_failure(testdir):
    testdir.makepyfile("""
@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
def test_unittest_expected_failure_for_failing_test_is_xfail(testdir, runner):
    script = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            @unittest.expectedFailure
            def test_func1(self):
                assert 0
            @unittest.expectedFailure
            def test_func2(self):
                assert 1
            def test_failing_test_is_xfail(self):
                assert False
        if __name__ == '__main__':
            unittest.main()
    """)
    result = testdir.runpytest("-rxX")
    result.stdout.fnmatch_lines([
        "*XFAIL*MyTestCase*test_func1*",
        "*XPASS*MyTestCase*test_func2*",
        "*1 xfailed*1 xpass*",
    ])
    if runner == 'pytest':
        result = testdir.runpytest("-rxX")
        result.stdout.fnmatch_lines([
            "*XFAIL*MyTestCase*test_failing_test_is_xfail*",
            "*1 xfailed*",
        ])
    else:
        result = testdir.runpython(script)
        result.stderr.fnmatch_lines([
            "*1 test in*",
            "*OK*(expected failures=1)*",
        ])
    assert result.ret == 0


@pytest.mark.skipif("sys.version_info < (2,7)")
@pytest.mark.parametrize('runner', ['pytest', 'unittest'])
def test_unittest_expected_failure_for_passing_test_is_fail(testdir, runner):
    script = testdir.makepyfile("""
        import unittest
        class MyTestCase(unittest.TestCase):
            @unittest.expectedFailure
            def test_passing_test_is_fail(self):
                assert True
        if __name__ == '__main__':
            unittest.main()
    """)
    from _pytest.skipping import _is_unittest_unexpected_success_a_failure
    should_fail = _is_unittest_unexpected_success_a_failure()
    if runner == 'pytest':
        result = testdir.runpytest("-rxX")
        result.stdout.fnmatch_lines([
            "*MyTestCase*test_passing_test_is_fail*",
            "*1 failed*" if should_fail else "*1 xpassed*",
        ])
    else:
        result = testdir.runpython(script)
        result.stderr.fnmatch_lines([
            "*1 test in*",
            "*(unexpected successes=1)*",
        ])

    assert result.ret == (1 if should_fail else 0)


@pytest.mark.parametrize('fix_type, stmt', [
tox.ini
@@ -82,13 +82,12 @@ commands=
[testenv:py27-trial]
deps=twisted
commands=
    pytest -rsxf {posargs:testing/test_unittest.py}
    py.test -ra {posargs:testing/test_unittest.py}

[testenv:py35-trial]
platform=linux|darwin
deps={[testenv:py27-trial]deps}
commands=
    pytest -rsxf {posargs:testing/test_unittest.py}
    py.test -ra {posargs:testing/test_unittest.py}

[testenv:doctest]
commands=pytest --doctest-modules _pytest