diff --git a/changelog/5061.feature.rst b/changelog/5061.feature.rst
new file mode 100644
index 000000000..9eb0c1cd3
--- /dev/null
+++ b/changelog/5061.feature.rst
@@ -0,0 +1 @@
+Use multiple colors with terminal summary statistics.
diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py
index 2fd81ea2b..fd30d8572 100644
--- a/src/_pytest/terminal.py
+++ b/src/_pytest/terminal.py
@@ -864,15 +864,41 @@ class TerminalReporter:
             self._tw.line(content)
 
     def summary_stats(self):
-        session_duration = time.time() - self._sessionstarttime
-        (line, color) = build_summary_stats_line(self.stats)
-        msg = "{} in {}".format(line, format_session_duration(session_duration))
-        markup = {color: True, "bold": True}
+        if self.verbosity < -1:
+            return
 
-        if self.verbosity >= 0:
-            self.write_sep("=", msg, **markup)
-        if self.verbosity == -1:
-            self.write_line(msg, **markup)
+        session_duration = time.time() - self._sessionstarttime
+        (parts, main_color) = build_summary_stats_line(self.stats)
+        line_parts = []
+
+        display_sep = self.verbosity >= 0
+        if display_sep:
+            fullwidth = self._tw.fullwidth
+        for text, markup in parts:
+            with_markup = self._tw.markup(text, **markup)
+            if display_sep:
+                fullwidth += len(with_markup) - len(text)
+            line_parts.append(with_markup)
+        msg = ", ".join(line_parts)
+
+        main_markup = {main_color: True}
+        duration = " in {}".format(format_session_duration(session_duration))
+        duration_with_markup = self._tw.markup(duration, **main_markup)
+        if display_sep:
+            fullwidth += len(duration_with_markup) - len(duration)
+        msg += duration_with_markup
+
+        if display_sep:
+            markup_for_end_sep = self._tw.markup("", **main_markup)
+            if markup_for_end_sep.endswith("\x1b[0m"):
+                markup_for_end_sep = markup_for_end_sep[:-4]
+            fullwidth += len(markup_for_end_sep)
+            msg += markup_for_end_sep
+
+        if display_sep:
+            self.write_sep("=", msg, fullwidth=fullwidth, **main_markup)
+        else:
+            self.write_line(msg, **main_markup)
 
     def short_test_summary(self):
         if not self.reportchars:
@@ -1011,6 +1037,15 @@ def _folded_skips(skipped):
     return values
 
 
+_color_for_type = {
+    "failed": "red",
+    "error": "red",
+    "warnings": "yellow",
+    "passed": "green",
+}
+_color_for_type_default = "yellow"
+
+
 def build_summary_stats_line(stats):
     known_types = (
         "failed passed skipped deselected xfailed xpassed warnings error".split()
@@ -1021,6 +1056,17 @@ def build_summary_stats_line(stats):
         if found_type:  # setup/teardown reports have an empty key, ignore them
             known_types.append(found_type)
             unknown_type_seen = True
+
+    # main color
+    if "failed" in stats or "error" in stats:
+        main_color = "red"
+    elif "warnings" in stats or unknown_type_seen:
+        main_color = "yellow"
+    elif "passed" in stats:
+        main_color = "green"
+    else:
+        main_color = "yellow"
+
     parts = []
     for key in known_types:
         reports = stats.get(key, None)
@@ -1028,23 +1074,14 @@ def build_summary_stats_line(stats):
             count = sum(
                 1 for rep in reports if getattr(rep, "count_towards_summary", True)
             )
-            parts.append("%d %s" % (count, key))
+            color = _color_for_type.get(key, _color_for_type_default)
+            markup = {color: True, "bold": color == main_color}
+            parts.append(("%d %s" % (count, key), markup))
 
-    if parts:
-        line = ", ".join(parts)
-    else:
-        line = "no tests ran"
+    if not parts:
+        parts = [("no tests ran", {_color_for_type_default: True})]
 
-    if "failed" in stats or "error" in stats:
-        color = "red"
-    elif "warnings" in stats or unknown_type_seen:
-        color = "yellow"
-    elif "passed" in stats:
-        color = "green"
-    else:
-        color = "yellow"
-
-    return line, color
+    return parts, main_color
 
 
 def _plugin_nameversions(plugininfo):
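Note on the width accounting in summary_stats() above: ANSI escape sequences occupy characters in msg but zero columns on screen, so fullwidth is inflated by exactly the length of the invisible markup before the line reaches write_sep(); the reset-stripped markup_for_end_sep is appended so the trailing "=" padding inherits the main color. A minimal sketch of that arithmetic (variable names illustrative only; the escape strings are the ones asserted in the tests below):

    text = "1 passed"
    with_markup = "\x1b[32m\x1b[1m" + text + "\x1b[0m"  # green, bold, then reset
    # the markup adds 13 invisible characters: 5 + 4 + 4
    assert len(with_markup) - len(text) == 13
    assert len("\x1b[32m") + len("\x1b[1m") + len("\x1b[0m") == 13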
diff --git a/testing/test_pdb.py b/testing/test_pdb.py
index 7d9436313..53475078e 100644
--- a/testing/test_pdb.py
+++ b/testing/test_pdb.py
@@ -193,7 +193,7 @@ class TestPDB:
         )
         child = testdir.spawn_pytest("-rs --pdb %s" % p1)
         child.expect("Skipping also with pdb active")
-        child.expect("1 skipped in")
+        child.expect_exact("= \x1b[33m\x1b[1m1 skipped\x1b[0m\x1b[33m in")
         child.sendeof()
         self.flush(child)
 
@@ -221,7 +221,7 @@ class TestPDB:
         child.sendeof()
         rest = child.read().decode("utf8")
         assert "Exit: Quitting debugger" in rest
-        assert "= 1 failed in" in rest
+        assert "= \x1b[31m\x1b[1m1 failed\x1b[0m\x1b[31m in" in rest
         assert "def test_1" not in rest
         assert "get rekt" not in rest
         self.flush(child)
@@ -725,7 +725,7 @@ class TestPDB:
             assert "> PDB continue (IO-capturing resumed) >" in rest
         else:
             assert "> PDB continue >" in rest
-        assert "1 passed in" in rest
+        assert "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in" in rest
 
     def test_pdb_used_outside_test(self, testdir):
         p1 = testdir.makepyfile(
@@ -1041,7 +1041,7 @@ class TestTraceOption:
         child.sendline("q")
         child.expect_exact("Exit: Quitting debugger")
         rest = child.read().decode("utf8")
-        assert "2 passed in" in rest
+        assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
         assert "reading from stdin while output" not in rest
         # Only printed once - not on stderr.
         assert "Exit: Quitting debugger" not in child.before.decode("utf8")
@@ -1152,7 +1152,7 @@ def test_pdb_suspends_fixture_capturing(testdir, fixture):
     TestPDB.flush(child)
 
     assert child.exitstatus == 0
-    assert "= 1 passed in " in rest
+    assert "= \x1b[32m\x1b[1m1 passed\x1b[0m\x1b[32m in" in rest
     assert "> PDB continue (IO-capturing resumed for fixture %s) >" % (fixture) in rest
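For reading the expectations above and below: "\x1b[31m", "\x1b[32m" and "\x1b[33m" open red, green and yellow, "\x1b[1m" opens bold, and "\x1b[0m" resets all attributes. A hedged sketch of where these strings come from, assuming a py.io TerminalWriter with markup forced on (the reporter's self._tw.markup() is the same method; on modern Pythons the keyword order determines the escape order):

    import py

    tw = py.io.TerminalWriter()
    tw.hasmarkup = True  # force escape sequences even without a tty
    assert tw.markup("1 passed", green=True, bold=True) == "\x1b[32m\x1b[1m1 passed\x1b[0m"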
diff --git a/testing/test_terminal.py b/testing/test_terminal.py
index 3bdabc5de..a624be3b4 100644
--- a/testing/test_terminal.py
+++ b/testing/test_terminal.py
@@ -164,7 +164,7 @@ class TestTerminal:
         child.expect(r"collecting 2 items")
         child.expect(r"collected 2 items")
         rest = child.read().decode("utf8")
-        assert "2 passed in" in rest
+        assert "= \x1b[32m\x1b[1m2 passed\x1b[0m\x1b[32m in" in rest
 
     def test_itemreport_subclasses_show_subclassed_file(self, testdir):
         testdir.makepyfile(
@@ -1252,42 +1252,123 @@ def test_terminal_summary_warnings_header_once(testdir):
         # dict value, not the actual contents, so tuples of anything
         # suffice
         # Important statuses -- the highest priority of these always wins
-        ("red", "1 failed", {"failed": (1,)}),
-        ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
-        ("red", "1 error", {"error": (1,)}),
-        ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
+        ("red", [("1 failed", {"bold": True, "red": True})], {"failed": (1,)}),
+        (
+            "red",
+            [
+                ("1 failed", {"bold": True, "red": True}),
+                ("1 passed", {"bold": False, "green": True}),
+            ],
+            {"failed": (1,), "passed": (1,)},
+        ),
+        ("red", [("1 error", {"bold": True, "red": True})], {"error": (1,)}),
+        (
+            "red",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 error", {"bold": True, "red": True}),
+            ],
+            {"error": (1,), "passed": (1,)},
+        ),
         # (a status that's not known to the code)
-        ("yellow", "1 weird", {"weird": (1,)}),
-        ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
-        ("yellow", "1 warnings", {"warnings": (1,)}),
-        ("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}),
-        ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}),
+        ("yellow", [("1 weird", {"bold": True, "yellow": True})], {"weird": (1,)}),
+        (
+            "yellow",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 weird", {"bold": True, "yellow": True}),
+            ],
+            {"weird": (1,), "passed": (1,)},
+        ),
+        (
+            "yellow",
+            [("1 warnings", {"bold": True, "yellow": True})],
+            {"warnings": (1,)},
+        ),
+        (
+            "yellow",
+            [
+                ("1 passed", {"bold": False, "green": True}),
+                ("1 warnings", {"bold": True, "yellow": True}),
+            ],
+            {"warnings": (1,), "passed": (1,)},
+        ),
+        (
+            "green",
+            [("5 passed", {"bold": True, "green": True})],
+            {"passed": (1, 2, 3, 4, 5)},
+        ),
         # "Boring" statuses. These have no effect on the color of the summary
         # line. Thus, if *every* test has a boring status, the summary line stays
         # at its default color, i.e. yellow, to warn the user that the test run
         # produced no useful information
-        ("yellow", "1 skipped", {"skipped": (1,)}),
-        ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),
-        ("yellow", "1 deselected", {"deselected": (1,)}),
-        ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),
-        ("yellow", "1 xfailed", {"xfailed": (1,)}),
-        ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),
-        ("yellow", "1 xpassed", {"xpassed": (1,)}),
-        ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),
+        ("yellow", [("1 skipped", {"bold": True, "yellow": True})], {"skipped": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 skipped", {"bold": False, "yellow": True}),
+            ],
+            {"skipped": (1,), "passed": (1,)},
+        ),
+        (
+            "yellow",
+            [("1 deselected", {"bold": True, "yellow": True})],
+            {"deselected": (1,)},
+        ),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 deselected", {"bold": False, "yellow": True}),
+            ],
+            {"deselected": (1,), "passed": (1,)},
+        ),
+        ("yellow", [("1 xfailed", {"bold": True, "yellow": True})], {"xfailed": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 xfailed", {"bold": False, "yellow": True}),
+            ],
+            {"xfailed": (1,), "passed": (1,)},
+        ),
+        ("yellow", [("1 xpassed", {"bold": True, "yellow": True})], {"xpassed": (1,)}),
+        (
+            "green",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("1 xpassed", {"bold": False, "yellow": True}),
+            ],
+            {"xpassed": (1,), "passed": (1,)},
+        ),
         # Likewise if no tests were found at all
-        ("yellow", "no tests ran", {}),
+        ("yellow", [("no tests ran", {"yellow": True})], {}),
         # Test the empty-key special case
-        ("yellow", "no tests ran", {"": (1,)}),
-        ("green", "1 passed", {"": (1,), "passed": (1,)}),
+        ("yellow", [("no tests ran", {"yellow": True})], {"": (1,)}),
+        (
+            "green",
+            [("1 passed", {"bold": True, "green": True})],
+            {"": (1,), "passed": (1,)},
+        ),
         # A couple more complex combinations
         (
             "red",
-            "1 failed, 2 passed, 3 xfailed",
+            [
+                ("1 failed", {"bold": True, "red": True}),
+                ("2 passed", {"bold": False, "green": True}),
+                ("3 xfailed", {"bold": False, "yellow": True}),
+            ],
             {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)},
         ),
         (
             "green",
-            "1 passed, 2 skipped, 3 deselected, 2 xfailed",
+            [
+                ("1 passed", {"bold": True, "green": True}),
+                ("2 skipped", {"bold": False, "yellow": True}),
+                ("3 deselected", {"bold": False, "yellow": True}),
+                ("2 xfailed", {"bold": False, "yellow": True}),
+            ],
             {
                 "passed": (1,),
                 "skipped": (1, 2),
@@ -1313,11 +1394,11 @@ def test_skip_counting_towards_summary():
     r1 = DummyReport()
     r2 = DummyReport()
     res = build_summary_stats_line({"failed": (r1, r2)})
-    assert res == ("2 failed", "red")
+    assert res == ([("2 failed", {"bold": True, "red": True})], "red")
 
     r1.count_towards_summary = False
     res = build_summary_stats_line({"failed": (r1, r2)})
-    assert res == ("1 failed", "red")
+    assert res == ([("1 failed", {"bold": True, "red": True})], "red")
 
 
 class TestClassicOutputStyle:
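Taken together, the new contract of build_summary_stats_line() is a list of (text, markup) parts plus the main color, with "bold" set only on parts whose color matches the main color. A minimal usage sketch mirroring the tests above (as the parametrize comment notes, only the length of each stats value matters, so plain tuples suffice):

    from _pytest.terminal import build_summary_stats_line

    parts, main_color = build_summary_stats_line({"failed": (1,), "passed": (1,)})
    assert main_color == "red"
    assert parts == [
        ("1 failed", {"red": True, "bold": True}),
        ("1 passed", {"green": True, "bold": False}),
    ]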