Merge branch 'esiegerman/summary_colors' of github.com:esiegerman/pytest
commit d10054a38d

AUTHORS
@@ -25,6 +25,7 @@ Daniel Nuri
 Dave Hunt
 David Mohr
 Eduardo Schettino
+Eric Siegerman
 Florian Bruhin
 Edison Gustavo Muenz
 Floris Bruynooghe
CHANGELOG
@@ -1,6 +1,9 @@
 2.8.0.dev (compared to 2.7.X)
 -----------------------------
 
+- partial fix for issue500: color the summary bar yellow for warning
+  situations, including the case where no tests were run
+
 - fix issue713: JUnit XML reports for doctest failures.
   Thanks Punyashloka Biswal.
 
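The changelog entry above is the user-visible rule: the summary bar stays red for failures or errors, turns green only when at least one test passed, and otherwise falls back to yellow, including the case where no tests ran. A minimal sketch of that behavior, assuming a pytest checkout containing this commit is importable, calling the helper the diff below adds to _pytest/terminal.py:

    # Illustration only: exercises the build_summary_stats_line helper added below.
    from _pytest.terminal import build_summary_stats_line

    print(build_summary_stats_line({"failed": (1,)}))                   # ('1 failed', 'red')
    print(build_summary_stats_line({"passed": (1, 2)}))                 # ('2 passed', 'green')
    print(build_summary_stats_line({"warnings": (1,), "passed": (1,)})) # ('1 passed, 1 warnings', 'yellow')
    print(build_summary_stats_line({}))                                 # ('', 'yellow'), i.e. no tests run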
_pytest/terminal.py
@@ -487,26 +487,9 @@ class TerminalReporter:
 
     def summary_stats(self):
         session_duration = time.time() - self._sessionstarttime
-
-        keys = ("failed passed skipped deselected "
-               "xfailed xpassed warnings").split()
-        for key in self.stats.keys():
-            if key not in keys:
-                keys.append(key)
-        parts = []
-        for key in keys:
-            if key: # setup/teardown reports have an empty key, ignore them
-                val = self.stats.get(key, None)
-                if val:
-                    parts.append("%d %s" % (len(val), key))
-        line = ", ".join(parts)
+        (line, color) = build_summary_stats_line(self.stats)
         msg = "%s in %.2f seconds" % (line, session_duration)
-
-        markup = {'bold': True}
-        if 'failed' in self.stats or 'error' in self.stats:
-            markup = {'red': True, 'bold': True}
-        else:
-            markup = {'green': True, 'bold': True}
+        markup = {color: True, 'bold': True}
 
         if self.verbosity >= 0:
             self.write_sep("=", msg, **markup)
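After this refactor, summary_stats() keeps only the terminal side of the job: it takes the (line, color) pair from build_summary_stats_line() (added in the next hunk) and turns the color name into the markup keywords it hands to write_sep(). A small self-contained sketch of that keyword pattern; fake_write_sep below is a stand-in for illustration, not part of pytest:

    # Stand-in writer to show how the markup dict unpacks into keyword flags.
    def fake_write_sep(sepchar, title, **markup):
        flags = ", ".join(name for name, on in sorted(markup.items()) if on)
        print("%s %s %s  [%s]" % (sepchar * 10, title, sepchar * 10, flags))

    color = "yellow"                       # as returned by build_summary_stats_line()
    markup = {color: True, 'bold': True}   # same construction as in summary_stats() above
    fake_write_sep("=", "1 warnings in 0.12 seconds", **markup)

In pytest itself, write_sep() passes these flags (bold plus red, green, or yellow) through to the terminal writer, which applies the color when it prints the bar.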
@@ -542,3 +525,29 @@ def flatten(l):
         else:
             yield x
 
+def build_summary_stats_line(stats):
+    keys = ("failed passed skipped deselected "
+           "xfailed xpassed warnings error").split()
+    unknown_key_seen = False
+    for key in stats.keys():
+        if key not in keys:
+            if key: # setup/teardown reports have an empty key, ignore them
+                keys.append(key)
+                unknown_key_seen = True
+    parts = []
+    for key in keys:
+        val = stats.get(key, None)
+        if val:
+            parts.append("%d %s" % (len(val), key))
+    line = ", ".join(parts)
+
+    if 'failed' in stats or 'error' in stats:
+        color = 'red'
+    elif 'warnings' in stats or unknown_key_seen:
+        color = 'yellow'
+    elif 'passed' in stats:
+        color = 'green'
+    else:
+        color = 'yellow'
+
+    return (line, color)
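One detail of build_summary_stats_line() worth noting: a stats category the list above does not name (for example one a third-party plugin might record; 'rerun' here is just a hypothetical name) is still counted in the line, and unknown_key_seen forces the bar to yellow, the same treatment the new tests below give their made-up 'weird' status. A quick illustration, again assuming this commit's pytest is importable:

    # 'rerun' is a hypothetical plugin-style status, not something core pytest emits.
    from _pytest.terminal import build_summary_stats_line

    line, color = build_summary_stats_line({"passed": (1, 2), "rerun": (1,)})
    print(line, color)   # prints: 2 passed, 1 rerun yellow  (unknown key beats green)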
testing/test_terminal.py
@@ -7,6 +7,7 @@ import pluggy
 import sys
 
 from _pytest.terminal import TerminalReporter, repr_pythonversion, getreportopt
+from _pytest.terminal import build_summary_stats_line
 from _pytest import runner
 
 def basic_run_report(item):
@@ -718,3 +719,68 @@ def test_terminal_summary(testdir):
         *==== hello ====*
         world
     """)
+
+@pytest.mark.parametrize("exp_color, exp_line, stats_arg", [
+    # The method under test only cares about the length of each
+    # dict value, not the actual contents, so tuples of anything
+    # suffice
+
+    # Important statuses -- the highest priority of these always wins
+    ("red", "1 failed", {"failed": (1,)}),
+    ("red", "1 failed, 1 passed", {"failed": (1,), "passed": (1,)}),
+
+    ("red", "1 error", {"error": (1,)}),
+    ("red", "1 passed, 1 error", {"error": (1,), "passed": (1,)}),
+
+    # (a status that's not known to the code)
+    ("yellow", "1 weird", {"weird": (1,)}),
+    ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}),
+
+    ("yellow", "1 warnings", {"warnings": (1,)}),
+    ("yellow", "1 passed, 1 warnings", {"warnings": (1,), "passed": (1,)}),
+
+    ("green", "5 passed", {"passed": (1,2,3,4,5)}),
+
+
+    # "Boring" statuses. These have no effect on the color of the summary
+    # line. Thus, if *every* test has a boring status, the summary line stays
+    # at its default color, i.e. yellow, to warn the user that the test run
+    # produced no useful information
+    ("yellow", "1 skipped", {"skipped": (1,)}),
+    ("green", "1 passed, 1 skipped", {"skipped": (1,), "passed": (1,)}),
+
+    ("yellow", "1 deselected", {"deselected": (1,)}),
+    ("green", "1 passed, 1 deselected", {"deselected": (1,), "passed": (1,)}),
+
+    ("yellow", "1 xfailed", {"xfailed": (1,)}),
+    ("green", "1 passed, 1 xfailed", {"xfailed": (1,), "passed": (1,)}),
+
+    ("yellow", "1 xpassed", {"xpassed": (1,)}),
+    ("green", "1 passed, 1 xpassed", {"xpassed": (1,), "passed": (1,)}),
+
+    # Likewise if no tests were found at all
+    ("yellow", "", {}),
+
+    # Test the empty-key special case
+    ("yellow", "", {"": (1,)}),
+    ("green", "1 passed", {"": (1,), "passed": (1,)}),
+
+
+    # A couple more complex combinations
+    ("red", "1 failed, 2 passed, 3 xfailed",
+        {"passed": (1,2), "failed": (1,), "xfailed": (1,2,3)}),
+
+    ("green", "1 passed, 2 skipped, 3 deselected, 2 xfailed",
+        {"passed": (1,),
+         "skipped": (1,2),
+         "deselected": (1,2,3),
+         "xfailed": (1,2)}),
+])
+def test_summary_stats(exp_line, exp_color, stats_arg):
+    print("Based on stats: %s" % stats_arg)
+    print("Expect summary: \"%s\"; with color \"%s\"" % (exp_line, exp_color))
+    (line, color) = build_summary_stats_line(stats_arg)
+    print("Actually got: \"%s\"; with color \"%s\"" % (line, color))
+    assert line == exp_line
+    assert color == exp_color
+