_get_main_color: no yellow ("boring") for non-last item

- refactor _get_main_color/build_summary_stats_line
- factor out property _is_last_item; test_summary_stats: tr._is_last_item
- _write_progress_information_filling_space: remove color arg
- use setter for stats, handling main color
- _get_main_color: skip cache for last item
- Handle random order in test for py35.
Daniel Hahler 2020-01-06 15:09:09 +01:00
parent 2da331ea9c
commit 57512aa997
4 changed files with 127 additions and 73 deletions
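
The bullets above are terse, so here is a rough standalone sketch of the pattern they describe. Names such as ColorCache and _reported are illustrative and hypothetical, not the actual pytest code (the real changes follow in the diff below): every stats update goes through one helper that refreshes a cached main color when a new outcome category appears, and the getter bypasses the cache for the last item so the final progress marker and summary line get the definitive color.

    # Illustrative sketch (not pytest's implementation) of the caching pattern
    # described in the commit message; the real code is in the diff below.
    from typing import Dict, List, Optional


    class ColorCache:
        def __init__(self, testscollected: int) -> None:
            self.stats = {}  # type: Dict[str, List[object]]
            self._reported = 0
            self._testscollected = testscollected
            self._main_color = None  # type: Optional[str]

        @property
        def _is_last_item(self) -> bool:
            return self._reported == self._testscollected

        def _add_stats(self, category: str, items: List[object]) -> None:
            # Refresh the cached color only when a new outcome category shows up.
            new_category = category not in self.stats
            self.stats.setdefault(category, []).extend(items)
            if new_category:
                self._set_main_color()

        def _set_main_color(self) -> None:
            if "failed" in self.stats or "error" in self.stats:
                self._main_color = "red"
            elif "warnings" in self.stats or "xpassed" in self.stats:
                self._main_color = "yellow"
            elif "passed" in self.stats or not self._is_last_item:
                # No yellow ("boring") while the run is still in progress.
                self._main_color = "green"
            else:
                self._main_color = "yellow"

        def _get_main_color(self) -> str:
            # Recompute for the last item so the final color reflects the whole run.
            if self._main_color is None or self._is_last_item:
                self._set_main_color()
            assert self._main_color is not None
            return self._main_color

Caching matters because the color is consulted on every progress write; recomputing it only when a new category appears, plus once more for the last item, keeps that cheap while still giving non-last items the new green fallback.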


@@ -0,0 +1 @@
+Fallback to green (instead of yellow) for non-last items without previous passes with colored terminal progress indicator.
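
Concretely (with a color-capable terminal): a module whose only outcome so far is an expected failure used to flip the in-progress percentage to yellow even though later modules still pass; with this change, non-last items fall back to green, and only the last item and the summary line pick up yellow. The new test module this commit adds to the colored-progress test boils down to:

    # Mirrors the test_axfail module added in the test diff below.
    import pytest


    @pytest.mark.xfail
    def test_axfail():
        assert 0

Run alongside passing files, the xfail now leaves the intermediate progress markers green; run on its own, it is the last item, so the indicator and the "1 xfailed" summary stay yellow, which is what the adjusted test asserts.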


@@ -247,6 +247,8 @@ class TerminalReporter:
         self._showfspath = None

         self.stats = {}  # type: Dict[str, List[Any]]
+        self._main_color = None  # type: Optional[str]
+        self._known_types = None  # type: Optional[List]
         self.startdir = config.invocation_dir
         if file is None:
             file = sys.stdout
@@ -365,6 +367,12 @@ class TerminalReporter:
     def line(self, msg, **kw):
         self._tw.line(msg, **kw)

+    def _add_stats(self, category: str, items: List) -> None:
+        set_main_color = category not in self.stats
+        self.stats.setdefault(category, []).extend(items[:])
+        if set_main_color:
+            self._set_main_color()
+
     def pytest_internalerror(self, excrepr):
         for line in str(excrepr).split("\n"):
             self.write_line("INTERNALERROR> " + line)
@@ -374,7 +382,6 @@ class TerminalReporter:
         # from _pytest.nodes import get_fslocation_from_item
         from _pytest.warnings import warning_record_to_str

-        warnings = self.stats.setdefault("warnings", [])
         fslocation = warning_message.filename, warning_message.lineno
         message = warning_record_to_str(warning_message)

@@ -382,7 +389,7 @@ class TerminalReporter:
         warning_report = WarningReport(
             fslocation=fslocation, message=message, nodeid=nodeid
         )
-        warnings.append(warning_report)
+        self._add_stats("warnings", [warning_report])

     def pytest_plugin_registered(self, plugin):
         if self.config.option.traceconfig:
@@ -393,7 +400,7 @@ class TerminalReporter:
             self.write_line(msg)

     def pytest_deselected(self, items):
-        self.stats.setdefault("deselected", []).extend(items)
+        self._add_stats("deselected", items)

     def pytest_runtest_logstart(self, nodeid, location):
         # ensure that the path is printed before the
@@ -414,7 +421,7 @@ class TerminalReporter:
             word, markup = word
         else:
             markup = None
-        self.stats.setdefault(category, []).append(rep)
+        self._add_stats(category, [rep])
         if not letter and not word:
             # probably passed setup/teardown
             return
@@ -456,6 +463,10 @@ class TerminalReporter:
                 self._tw.write(" " + line)
                 self.currentfspath = -2

+    @property
+    def _is_last_item(self):
+        return len(self._progress_nodeids_reported) == self._session.testscollected
+
     def pytest_runtest_logfinish(self, nodeid):
         assert self._session
         if self.verbosity <= 0 and self._show_progress_info:
@@ -465,15 +476,12 @@ class TerminalReporter:
             else:
                 progress_length = len(" [100%]")

-            main_color, _ = _get_main_color(self.stats)
-
             self._progress_nodeids_reported.add(nodeid)
-            is_last_item = (
-                len(self._progress_nodeids_reported) == self._session.testscollected
-            )
-            if is_last_item:
-                self._write_progress_information_filling_space(color=main_color)
+
+            if self._is_last_item:
+                self._write_progress_information_filling_space()
             else:
+                main_color, _ = self._get_main_color()
                 w = self._width_of_current_line
                 past_edge = w + progress_length + 1 >= self._screen_width
                 if past_edge:
@@ -497,9 +505,8 @@ class TerminalReporter:
             )
         return " [100%]"

-    def _write_progress_information_filling_space(self, color=None):
-        if not color:
-            color, _ = _get_main_color(self.stats)
+    def _write_progress_information_filling_space(self):
+        color, _ = self._get_main_color()
         msg = self._get_progress_information_message()
         w = self._width_of_current_line
         fill = self._tw.fullwidth - w - 1
@@ -524,9 +531,9 @@ class TerminalReporter:
     def pytest_collectreport(self, report: CollectReport) -> None:
         if report.failed:
-            self.stats.setdefault("error", []).append(report)
+            self._add_stats("error", [report])
         elif report.skipped:
-            self.stats.setdefault("skipped", []).append(report)
+            self._add_stats("skipped", [report])
         items = [x for x in report.result if isinstance(x, pytest.Item)]
         self._numcollected += len(items)
         if self.isatty:
@@ -909,7 +916,7 @@ class TerminalReporter:
             return

         session_duration = time.time() - self._sessionstarttime
-        (parts, main_color) = build_summary_stats_line(self.stats)
+        (parts, main_color) = self.build_summary_stats_line()
         line_parts = []

         display_sep = self.verbosity >= 0
@@ -1012,6 +1019,56 @@ class TerminalReporter:
             for line in lines:
                 self.write_line(line)

+    def _get_main_color(self) -> Tuple[str, List[str]]:
+        if self._main_color is None or self._known_types is None or self._is_last_item:
+            self._set_main_color()
+            assert self._main_color
+            assert self._known_types
+        return self._main_color, self._known_types
+
+    def _set_main_color(self) -> Tuple[str, List[str]]:
+        stats = self.stats
+        known_types = (
+            "failed passed skipped deselected xfailed xpassed warnings error".split()
+        )
+        unknown_type_seen = False
+        for found_type in stats.keys():
+            if found_type not in known_types:
+                if found_type:  # setup/teardown reports have an empty key, ignore them
+                    known_types.append(found_type)
+                    unknown_type_seen = True
+
+        # main color
+        if "failed" in stats or "error" in stats:
+            main_color = "red"
+        elif "warnings" in stats or "xpassed" in stats or unknown_type_seen:
+            main_color = "yellow"
+        elif "passed" in stats or not self._is_last_item:
+            main_color = "green"
+        else:
+            main_color = "yellow"
+        self._main_color, self._known_types = main_color, known_types
+        return main_color, known_types
+
+    def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
+        main_color, known_types = self._get_main_color()
+
+        parts = []
+        for key in known_types:
+            reports = self.stats.get(key, None)
+            if reports:
+                count = sum(
+                    1 for rep in reports if getattr(rep, "count_towards_summary", True)
+                )
+                color = _color_for_type.get(key, _color_for_type_default)
+                markup = {color: True, "bold": color == main_color}
+                parts.append(("%d %s" % _make_plural(count, key), markup))
+
+        if not parts:
+            parts = [("no tests ran", {_color_for_type_default: True})]
+
+        return parts, main_color
+

 def _get_pos(config, rep):
     nodeid = config.cwd_relative_nodeid(rep.nodeid)
@@ -1100,50 +1157,6 @@ def _make_plural(count, noun):
     return count, noun + "s" if count != 1 else noun


-def _get_main_color(stats) -> Tuple[str, List[str]]:
-    known_types = (
-        "failed passed skipped deselected xfailed xpassed warnings error".split()
-    )
-    unknown_type_seen = False
-    for found_type in stats.keys():
-        if found_type not in known_types:
-            if found_type:  # setup/teardown reports have an empty key, ignore them
-                known_types.append(found_type)
-                unknown_type_seen = True
-
-    # main color
-    if "failed" in stats or "error" in stats:
-        main_color = "red"
-    elif "warnings" in stats or unknown_type_seen:
-        main_color = "yellow"
-    elif "passed" in stats:
-        main_color = "green"
-    else:
-        main_color = "yellow"
-
-    return main_color, known_types
-
-
-def build_summary_stats_line(stats):
-    main_color, known_types = _get_main_color(stats)
-
-    parts = []
-    for key in known_types:
-        reports = stats.get(key, None)
-        if reports:
-            count = sum(
-                1 for rep in reports if getattr(rep, "count_towards_summary", True)
-            )
-            color = _color_for_type.get(key, _color_for_type_default)
-            markup = {color: True, "bold": color == main_color}
-            parts.append(("%d %s" % _make_plural(count, key), markup))
-
-    if not parts:
-        parts = [("no tests ran", {_color_for_type_default: True})]
-
-    return parts, main_color
-
-
 def _plugin_nameversions(plugininfo) -> List[str]:
     values = []  # type: List[str]
     for plugin, dist in plugininfo:


@@ -506,7 +506,7 @@ class TestPDB:
         rest = child.read().decode("utf8")
         assert "! _pytest.outcomes.Exit: Quitting debugger !" in rest
-        assert "= \x1b[33mno tests ran\x1b[0m\x1b[33m in" in rest
+        assert "= \x1b[33mno tests ran\x1b[0m\x1b[32m in" in rest
         assert "BdbQuit" not in rest
         assert "UNEXPECTED EXCEPTION" not in rest


@@ -11,13 +11,13 @@ from io import StringIO
 import pluggy
 import py

+import _pytest.config
 import pytest
 from _pytest.main import ExitCode
 from _pytest.reports import BaseReport
 from _pytest.terminal import _folded_skips
 from _pytest.terminal import _get_line_with_reprcrash_message
 from _pytest.terminal import _plugin_nameversions
-from _pytest.terminal import build_summary_stats_line
 from _pytest.terminal import getreportopt
 from _pytest.terminal import TerminalReporter
@@ -1344,6 +1344,12 @@ def test_terminal_summary_warnings_header_once(testdir):
     assert stdout.count("=== warnings summary ") == 1


+@pytest.fixture(scope="session")
+def tr():
+    config = _pytest.config._prepareconfig()
+    return TerminalReporter(config)
+
+
 @pytest.mark.parametrize(
     "exp_color, exp_line, stats_arg",
     [
@@ -1431,10 +1437,10 @@ def test_terminal_summary_warnings_header_once(testdir):
         ),
         ("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": (1,)}),
         (
-            "green",
+            "yellow",
             [
-                ("1 passed", {"bold": True, "green": True}),
-                ("1 xpassed", {"bold": False, "yellow": True}),
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 xpassed", {"bold": True, "yellow": True}),
             ],
             {"xpassed": (1,), "passed": (1,)},
         ),
@@ -1474,26 +1480,42 @@ def test_terminal_summary_warnings_header_once(testdir):
         ),
     ],
 )
-def test_summary_stats(exp_line, exp_color, stats_arg):
+def test_summary_stats(tr, exp_line, exp_color, stats_arg):
+    tr.stats = stats_arg
+
+    # Fake "_is_last_item" to be True.
+    class fake_session:
+        testscollected = 0
+
+    tr._session = fake_session
+    assert tr._is_last_item
+
+    # Reset cache.
+    tr._main_color = None
+
     print("Based on stats: %s" % stats_arg)
     print('Expect summary: "{}"; with color "{}"'.format(exp_line, exp_color))
-    (line, color) = build_summary_stats_line(stats_arg)
+    (line, color) = tr.build_summary_stats_line()
     print('Actually got: "{}"; with color "{}"'.format(line, color))
     assert line == exp_line
     assert color == exp_color


-def test_skip_counting_towards_summary():
+def test_skip_counting_towards_summary(tr):
     class DummyReport(BaseReport):
         count_towards_summary = True

     r1 = DummyReport()
     r2 = DummyReport()
-    res = build_summary_stats_line({"failed": (r1, r2)})
+    tr.stats = {"failed": (r1, r2)}
+    tr._main_color = None
+    res = tr.build_summary_stats_line()
     assert res == ([("2 failed", {"bold": True, "red": True})], "red")

     r1.count_towards_summary = False
-    res = build_summary_stats_line({"failed": (r1, r2)})
+    tr.stats = {"failed": (r1, r2)}
+    tr._main_color = None
+    res = tr.build_summary_stats_line()
     assert res == ([("1 failed", {"bold": True, "red": True})], "red")
@@ -1595,6 +1617,11 @@ class TestProgressOutputStyle:
     def test_colored_progress(self, testdir, monkeypatch):
         monkeypatch.setenv("PY_COLORS", "1")
         testdir.makepyfile(
+            test_axfail="""
+                import pytest
+                @pytest.mark.xfail
+                def test_axfail(): assert 0
+            """,
             test_bar="""
                 import pytest
                 @pytest.mark.parametrize('i', range(10))
@@ -1619,13 +1646,26 @@ class TestProgressOutputStyle:
             [
                 line.format(**RE_COLORS)
                 for line in [
-                    r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 50%\]{reset}",
-                    r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 75%\]{reset}",
+                    r"test_axfail.py {yellow}x{reset}{green} \s+ \[  4%\]{reset}",
+                    r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 52%\]{reset}",
+                    r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 76%\]{reset}",
                     r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}",
                 ]
             ]
         )

+        # Only xfail should have yellow progress indicator.
+        result = testdir.runpytest("test_axfail.py")
+        result.stdout.re_match_lines(
+            [
+                line.format(**RE_COLORS)
+                for line in [
+                    r"test_axfail.py {yellow}x{reset}{yellow} \s+ \[100%\]{reset}",
+                    r"^{yellow}=+ ({yellow}{bold}|{bold}{yellow})1 xfailed{reset}{yellow} in ",
+                ]
+            ]
+        )
+
     def test_count(self, many_tests_files, testdir):
         testdir.makeini(
             """