Internal refactorings in order to support the new pytest-subtests plugin

Related to #1367

parent 951e07d71d
commit 1a119a22d1
@@ -0,0 +1,6 @@
+Internal refactorings have been made in order to make the implementation of the
+`pytest-subtests <https://github.com/pytest-dev/pytest-subtests>`__ plugin
+possible, which adds unittest sub-test support and a new ``subtests`` fixture as discussed in
+`#1367 <https://github.com/pytest-dev/pytest/issues/1367>`__.
+
+For details on the internal refactorings, please see the related PR.
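For context, the kind of test the ``subtests`` fixture is meant to enable looks
roughly like this (a minimal sketch based on the plugin's stated goal; the
fixture API belongs to pytest-subtests, not to this commit):

    # Hypothetical usage of the ``subtests`` fixture from pytest-subtests.
    def test_several_inputs(subtests):
        for i in range(5):
            with subtests.test(msg="custom message", i=i):
                # Each failing sub-test is reported on its own instead of
                # aborting the whole test at the first failure.
                assert i % 2 == 0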
@@ -1,6 +1,8 @@
 import py
 
+from _pytest._code.code import ExceptionInfo
 from _pytest._code.code import TerminalRepr
+from _pytest.outcomes import skip
 
 
 def getslaveinfoline(node):
@@ -20,6 +22,7 @@ def getslaveinfoline(node):
 
 class BaseReport(object):
     when = None
+    location = None
 
     def __init__(self, **kw):
         self.__dict__.update(kw)
@@ -97,6 +100,43 @@ class BaseReport(object):
     def fspath(self):
         return self.nodeid.split("::")[0]
 
+    @property
+    def count_towards_summary(self):
+        """
+        **Experimental**
+
+        Returns True if this report should be counted towards the totals shown at the end of the
+        test session: "1 passed, 1 failure, etc".
+
+        .. note::
+
+            This function is considered **experimental**, so beware that it is subject to changes
+            even in patch releases.
+        """
+        return True
+
+    @property
+    def head_line(self):
+        """
+        **Experimental**
+
+        Returns the head line shown with longrepr output for this report, more commonly during
+        traceback representation during failures::
+
+            ________ Test.foo ________
+
+        In the example above, the head_line is "Test.foo".
+
+        .. note::
+
+            This function is considered **experimental**, so beware that it is subject to changes
+            even in patch releases.
+        """
+        if self.location is not None:
+            fspath, lineno, domain = self.location
+            return domain
+
 
 class TestReport(BaseReport):
     """ Basic test report object (also used for setup and teardown calls if
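These two properties are extension points: a report subclass can override them
to change how it is counted and displayed. A minimal sketch (``QuietReport`` is
an invented name, not part of this commit):

    from _pytest.reports import BaseReport

    class QuietReport(BaseReport):  # hypothetical subclass
        @property
        def count_towards_summary(self):
            return False  # excluded from the "1 passed, 1 failed, ..." totals

        @property
        def head_line(self):
            return "my custom headline"  # rendered as ____ my custom headline ____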
@@ -159,6 +199,49 @@ class TestReport(BaseReport):
             self.outcome,
         )
 
+    @classmethod
+    def from_item_and_call(cls, item, call):
+        """
+        Factory method to create and fill a TestReport with standard item and call info.
+        """
+        when = call.when
+        duration = call.stop - call.start
+        keywords = {x: 1 for x in item.keywords}
+        excinfo = call.excinfo
+        sections = []
+        if not call.excinfo:
+            outcome = "passed"
+            longrepr = None
+        else:
+            if not isinstance(excinfo, ExceptionInfo):
+                outcome = "failed"
+                longrepr = excinfo
+            elif excinfo.errisinstance(skip.Exception):
+                outcome = "skipped"
+                r = excinfo._getreprcrash()
+                longrepr = (str(r.path), r.lineno, r.message)
+            else:
+                outcome = "failed"
+                if call.when == "call":
+                    longrepr = item.repr_failure(excinfo)
+                else:  # exception in setup or teardown
+                    longrepr = item._repr_failure_py(
+                        excinfo, style=item.config.option.tbstyle
+                    )
+        for rwhen, key, content in item._report_sections:
+            sections.append(("Captured %s %s" % (key, rwhen), content))
+        return cls(
+            item.nodeid,
+            item.location,
+            keywords,
+            outcome,
+            longrepr,
+            when,
+            sections,
+            duration,
+            user_properties=item.user_properties,
+        )
+
 
 class CollectReport(BaseReport):
     when = "collect"
@@ -246,43 +246,7 @@ class CallInfo(object):
 
 
 def pytest_runtest_makereport(item, call):
-    when = call.when
-    duration = call.stop - call.start
-    keywords = {x: 1 for x in item.keywords}
-    excinfo = call.excinfo
-    sections = []
-    if not call.excinfo:
-        outcome = "passed"
-        longrepr = None
-    else:
-        if not isinstance(excinfo, ExceptionInfo):
-            outcome = "failed"
-            longrepr = excinfo
-        elif excinfo.errisinstance(skip.Exception):
-            outcome = "skipped"
-            r = excinfo._getreprcrash()
-            longrepr = (str(r.path), r.lineno, r.message)
-        else:
-            outcome = "failed"
-            if call.when == "call":
-                longrepr = item.repr_failure(excinfo)
-            else:  # exception in setup or teardown
-                longrepr = item._repr_failure_py(
-                    excinfo, style=item.config.option.tbstyle
-                )
-    for rwhen, key, content in item._report_sections:
-        sections.append(("Captured %s %s" % (key, rwhen), content))
-    return TestReport(
-        item.nodeid,
-        item.location,
-        keywords,
-        outcome,
-        longrepr,
-        when,
-        sections,
-        duration,
-        user_properties=item.user_properties,
-    )
+    return TestReport.from_item_and_call(item, call)
 
 
 def pytest_make_collect_report(collector):
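Because the hook now returns a report built by ``TestReport.from_item_and_call``,
plugins can wrap the hook and adjust the finished report instead of rebuilding it.
A minimal sketch of such a hookwrapper (the added section is purely illustrative):

    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        report = outcome.get_result()  # a TestReport from from_item_and_call
        # Illustrative tweak: attach an extra captured-output section.
        report.sections.append(("my-plugin", "extra diagnostic text"))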
@@ -197,6 +197,7 @@ class WarningReport(object):
     message = attr.ib()
     nodeid = attr.ib(default=None)
     fslocation = attr.ib(default=None)
+    count_towards_summary = True
 
     def get_location(self, config):
         """
@@ -383,6 +384,7 @@ class TerminalReporter(object):
         self.write_fspath_result(fsid, "")
 
     def pytest_runtest_logreport(self, report):
+        self._tests_ran = True
         rep = report
         res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
         category, letter, word = res
@@ -391,7 +393,6 @@ class TerminalReporter(object):
         else:
             markup = None
         self.stats.setdefault(category, []).append(rep)
-        self._tests_ran = True
         if not letter and not word:
             # probably passed setup/teardown
            return
@@ -724,9 +725,8 @@ class TerminalReporter(object):
         return res + " "
 
     def _getfailureheadline(self, rep):
-        if hasattr(rep, "location"):
-            fspath, lineno, domain = rep.location
-            return domain
+        if rep.head_line:
+            return rep.head_line
         else:
             return "test session"  # XXX?
 
@@ -874,18 +874,23 @@ class TerminalReporter(object):
 
 
 def build_summary_stats_line(stats):
-    keys = ("failed passed skipped deselected xfailed xpassed warnings error").split()
-    unknown_key_seen = False
-    for key in stats.keys():
-        if key not in keys:
-            if key:  # setup/teardown reports have an empty key, ignore them
-                keys.append(key)
-                unknown_key_seen = True
+    known_types = (
+        "failed passed skipped deselected xfailed xpassed warnings error".split()
+    )
+    unknown_type_seen = False
+    for found_type in stats:
+        if found_type not in known_types:
+            if found_type:  # setup/teardown reports have an empty key, ignore them
+                known_types.append(found_type)
+                unknown_type_seen = True
     parts = []
-    for key in keys:
-        val = stats.get(key, None)
-        if val:
-            parts.append("%d %s" % (len(val), key))
+    for key in known_types:
+        reports = stats.get(key, None)
+        if reports:
+            count = sum(
+                1 for rep in reports if getattr(rep, "count_towards_summary", True)
+            )
+            parts.append("%d %s" % (count, key))
 
     if parts:
         line = ", ".join(parts)
@@ -894,14 +899,14 @@ def build_summary_stats_line(stats):
 
     if "failed" in stats or "error" in stats:
         color = "red"
-    elif "warnings" in stats or unknown_key_seen:
+    elif "warnings" in stats or unknown_type_seen:
         color = "yellow"
     elif "passed" in stats:
         color = "green"
     else:
         color = "yellow"
 
-    return (line, color)
+    return line, color
 
 
 def _plugin_nameversions(plugininfo):
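Taken together, the summary line now honors per-report opt-outs and still
surfaces plugin-defined stats categories, which also turn the line yellow. A
small sketch against these internals (``SubtestReport`` and the "subtests"
category are invented for illustration):

    from _pytest.reports import BaseReport
    from _pytest.terminal import build_summary_stats_line

    class SubtestReport(BaseReport):  # hypothetical report type
        count_towards_summary = True  # class attribute shadows the base property

    line, color = build_summary_stats_line(
        {"passed": [SubtestReport()], "subtests": [SubtestReport()]}
    )
    assert line == "1 passed, 1 subtests"
    assert color == "yellow"  # unknown category sets unknown_type_seen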
@@ -15,6 +15,7 @@ import py
 
 import pytest
 from _pytest.main import EXIT_NOTESTSCOLLECTED
+from _pytest.reports import BaseReport
 from _pytest.terminal import _plugin_nameversions
 from _pytest.terminal import build_summary_stats_line
 from _pytest.terminal import getreportopt
@@ -1228,6 +1229,20 @@ def test_summary_stats(exp_line, exp_color, stats_arg):
     assert color == exp_color
 
 
+def test_skip_counting_towards_summary():
+    class DummyReport(BaseReport):
+        count_towards_summary = True
+
+    r1 = DummyReport()
+    r2 = DummyReport()
+    res = build_summary_stats_line({"failed": (r1, r2)})
+    assert res == ("2 failed", "red")
+
+    r1.count_towards_summary = False
+    res = build_summary_stats_line({"failed": (r1, r2)})
+    assert res == ("1 failed", "red")
+
+
 class TestClassicOutputStyle(object):
     """Ensure classic output style works as expected (#3883)"""