Improve summary stats when using '--collect-only' (#7875)

Co-authored-by: Bruno Oliveira <nicoddemus@gmail.com>
Hugo Martins 2020-11-08 14:45:10 +00:00 committed by GitHub
parent 29f2f4e854
commit 5b2e5e8a40
10 changed files with 128 additions and 21 deletions


@ -0,0 +1 @@
Improved reporting when using ``--collect-only``. It will now show the number of collected tests in the summary stats.
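As a quick illustration of the new behaviour, here is a minimal sketch using the ``testdir`` fixture (the same API the tests in this commit use); the test and file names are made up:

    def test_collect_only_shows_count(testdir):
        # Two trivial tests; the names are illustrative only.
        testdir.makepyfile("def test_one(): pass\ndef test_two(): pass")
        result = testdir.runpytest("--collect-only")
        # The summary line used to read "no tests ran in ...s"; it now reports the count.
        result.stdout.fnmatch_lines(["*= 2 tests collected in *"])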


@ -102,4 +102,4 @@ interesting to just look at the collection tree:
<YamlItem hello>
<YamlItem ok>
========================== no tests ran in 0.12s ===========================
========================== 2 tests collected in 0.12s ===========================


@ -175,7 +175,7 @@ objects, they are still using the default pytest representation:
<Function test_timedistance_v3[forward]>
<Function test_timedistance_v3[backward]>
========================== no tests ran in 0.12s ===========================
========================== 8 tests collected in 0.12s ===========================
In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
together with the actual data, instead of listing them separately.
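For readers unfamiliar with that pattern, a sketch of such a parametrization (reconstructed from the IDs shown above, not part of this diff) looks like:

    from datetime import datetime, timedelta

    import pytest

    @pytest.mark.parametrize(
        "a, b, expected",
        [
            pytest.param(datetime(2001, 12, 12), datetime(2001, 12, 11), timedelta(1), id="forward"),
            pytest.param(datetime(2001, 12, 11), datetime(2001, 12, 12), timedelta(-1), id="backward"),
        ],
    )
    def test_timedistance_v3(a, b, expected):
        assert a - b == expected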
@ -252,7 +252,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
<Function test_demo1[advanced]>
<Function test_demo2[advanced]>
========================== no tests ran in 0.12s ===========================
========================== 4 tests collected in 0.12s ===========================
Note that we told ``metafunc.parametrize()`` that your scenario values
should be considered class-scoped. With pytest-2.3 this leads to a
@ -328,7 +328,7 @@ Let's first see how it looks like at collection time:
<Function test_db_initialized[d1]>
<Function test_db_initialized[d2]>
========================== no tests ran in 0.12s ===========================
========================== 2 tests collected in 0.12s ===========================
And then when we run the test:


@ -157,7 +157,7 @@ The test collection would look like this:
<Function simple_check>
<Function complex_check>
========================== no tests ran in 0.12s ===========================
========================== 2 tests collected in 0.12s ===========================
You can check for multiple glob patterns by adding a space between the patterns:
@ -220,7 +220,7 @@ You can always peek at the collection tree without running tests like this:
<Function test_method>
<Function test_anothermethod>
========================== no tests ran in 0.12s ===========================
========================== 3 tests collected in 0.12s ===========================
.. _customizing-test-collection:
@ -282,7 +282,7 @@ leave out the ``setup.py`` file:
<Module 'pkg/module_py2.py'>
<Function 'test_only_on_python2'>
====== no tests ran in 0.04 seconds ======
====== 1 test collected in 0.04 seconds ======
If you run with a Python 3 interpreter both the one test and the ``setup.py``
file will be left out:
@ -296,7 +296,7 @@ file will be left out:
rootdir: $REGENDOC_TMPDIR, configfile: pytest.ini
collected 0 items
========================== no tests ran in 0.12s ===========================
========================== no tests collected in 0.12s ===========================
It's also possible to ignore files based on Unix shell-style wildcards by adding
patterns to :globalvar:`collect_ignore_glob`.
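A minimal sketch of that approach (the glob pattern and version check are illustrative, mirroring the surrounding example):

    # conftest.py
    import sys

    collect_ignore_glob = []
    if sys.version_info[0] > 2:
        collect_ignore_glob.append("*_py2.py")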


@ -919,7 +919,7 @@ Running the above tests results in the following test IDs being used:
<Function test_ehlo[mail.python.org]>
<Function test_noop[mail.python.org]>
========================== no tests ran in 0.12s ===========================
========================== 10 tests collected in 0.12s ===========================
.. _`fixture-parametrize-marks`:


@ -1163,15 +1163,45 @@ class TerminalReporter:
        self._main_color = self._determine_main_color(bool(unknown_types))

    def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
        main_color, known_types = self._get_main_color()
        """
        Build the parts used in the last summary stats line.

        The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===".

        This function builds a list of the "parts" that make up the text in that line; in
        the example above it would be:

            [
                ("12 passed", {"green": True}),
                ("2 errors", {"red": True})
            ]

        That last dict for each part is a "markup dictionary", used by TerminalWriter to
        color output.

        The final color of the line is also determined by this function, and is the second
        element of the returned tuple.
        """
        if self.config.getoption("collectonly"):
            return self._build_collect_only_summary_stats_line()
        else:
            return self._build_normal_summary_stats_line()

    def _get_reports_to_display(self, key: str) -> List[Any]:
        """Get test/collection reports for the given status key, such as `passed` or `error`."""
        reports = self.stats.get(key, [])
        return [x for x in reports if getattr(x, "count_towards_summary", True)]

    def _build_normal_summary_stats_line(
        self,
    ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
        main_color, known_types = self._get_main_color()

        parts = []
        for key in known_types:
            reports = self.stats.get(key, None)
            reports = self._get_reports_to_display(key)
            if reports:
                count = sum(
                    1 for rep in reports if getattr(rep, "count_towards_summary", True)
                )
                count = len(reports)
                color = _color_for_type.get(key, _color_for_type_default)
                markup = {color: True, "bold": color == main_color}
                parts.append(("%d %s" % _make_plural(count, key), markup))
@ -1181,6 +1211,40 @@ class TerminalReporter:
        return parts, main_color

    def _build_collect_only_summary_stats_line(
        self,
    ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
        deselected = len(self._get_reports_to_display("deselected"))
        errors = len(self._get_reports_to_display("error"))

        if self._numcollected == 0:
            parts = [("no tests collected", {"yellow": True})]
            main_color = "yellow"
        elif deselected == 0:
            main_color = "green"
            collected_output = "%d %s collected" % _make_plural(
                self._numcollected, "test"
            )
            parts = [(collected_output, {main_color: True})]
        else:
            all_tests_were_deselected = self._numcollected == deselected
            if all_tests_were_deselected:
                main_color = "yellow"
                collected_output = f"no tests collected ({deselected} deselected)"
            else:
                main_color = "green"
                selected = self._numcollected - deselected
                collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"

            parts = [(collected_output, {main_color: True})]

        if errors:
            main_color = _color_for_type["error"]
            parts += [("%d %s" % _make_plural(errors, "error"), {main_color: True})]

        return parts, main_color


def _get_pos(config: Config, rep: BaseReport):
    nodeid = config.cwd_relative_nodeid(rep.nodeid)
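To make the branches in ``_build_collect_only_summary_stats_line`` concrete, here is a standalone sketch (not pytest's own code; the helper name and signature are invented) that reproduces the summary text for given counts, with worked examples matching the tests added further below:

    def collect_only_summary(collected: int, deselected: int, errors: int) -> str:
        """Mirror the branches above, returning only the text (markup colors omitted)."""
        if collected == 0:
            text = "no tests collected"
        elif deselected == 0:
            text = "%d test%s collected" % (collected, "" if collected == 1 else "s")
        elif collected == deselected:
            text = f"no tests collected ({deselected} deselected)"
        else:
            text = f"{collected - deselected}/{collected} tests collected ({deselected} deselected)"
        if errors:
            text += ", %d error%s" % (errors, "" if errors == 1 else "s")
        return text

    assert collect_only_summary(3, 0, 0) == "3 tests collected"
    assert collect_only_summary(3, 1, 0) == "2/3 tests collected (1 deselected)"
    assert collect_only_summary(3, 3, 0) == "no tests collected (3 deselected)"
    assert collect_only_summary(3, 1, 1) == "2/3 tests collected (1 deselected), 1 error"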
@ -1267,7 +1331,7 @@ _color_for_type_default = "yellow"
def _make_plural(count: int, noun: str) -> Tuple[int, str]:
    # No need to pluralize words such as `failed` or `passed`.
    if noun not in ["error", "warnings"]:
    if noun not in ["error", "warnings", "test"]:
        return count, noun

    # The `warnings` key is plural. To avoid API breakage, we keep it that way but
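With ``"test"`` added to that list, the helper now pluralizes it by count. A sketch of the expected behaviour (it imports pytest's private helper, so treat it as illustrative only):

    from _pytest.terminal import _make_plural

    assert _make_plural(1, "test") == (1, "test")
    assert _make_plural(2, "test") == (2, "tests")
    assert _make_plural(2, "passed") == (2, "passed")  # other nouns pass through unchanged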


@ -889,7 +889,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
            [
                "*collected 1 item*",
                "*<Module test_collection_collect_only_live_logging.py>*",
                "*no tests ran*",
                "*1 test collected*",
            ]
        )
    elif verbose == "-q":
@ -897,7 +897,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
        expected_lines.extend(
            [
                "*test_collection_collect_only_live_logging.py::test_simple*",
                "no tests ran in [0-9].[0-9][0-9]s",
                "1 test collected in [0-9].[0-9][0-9]s",
            ]
        )
    elif verbose == "-qq":


@ -1417,7 +1417,7 @@ class TestMetafuncFunctional:
                ' @pytest.mark.parametrise("x", range(2))',
                "E Failed: Unknown 'parametrise' mark, did you mean 'parametrize'?",
                "*! Interrupted: 1 error during collection !*",
                "*= 1 error in *",
                "*= no tests collected, 1 error in *",
            ]
        )


@ -909,7 +909,7 @@ class TestLastFailed:
"",
"<Module pkg1/test_1.py>",
" <Function test_fail>",
"*= 1 deselected in *",
"*= 1/2 tests collected (1 deselected) in *",
],
)
@ -942,7 +942,7 @@ class TestLastFailed:
" <Function test_fail>",
" <Function test_other>",
"",
"*= 1 deselected in *",
"*= 2/3 tests collected (1 deselected) in *",
],
consecutive=True,
)
@ -977,7 +977,7 @@ class TestLastFailed:
"<Module pkg1/test_1.py>",
" <Function test_pass>",
"",
"*= no tests ran in*",
"*= 1 test collected in*",
],
consecutive=True,
)


@ -458,6 +458,48 @@ class TestCollectonly:
        result = testdir.runpytest("--collect-only", "-qq")
        result.stdout.fnmatch_lines(["*test_fun.py: 1*"])

    def test_collect_only_summary_status(self, testdir):
        """Custom status depending on test selection using -k or -m. #7701."""
        testdir.makepyfile(
            test_collect_foo="""
            def test_foo(): pass
            """,
            test_collect_bar="""
            def test_foobar(): pass
            def test_bar(): pass
            """,
        )

        result = testdir.runpytest("--collect-only")
        result.stdout.fnmatch_lines("*== 3 tests collected in * ==*")

        result = testdir.runpytest("--collect-only", "test_collect_foo.py")
        result.stdout.fnmatch_lines("*== 1 test collected in * ==*")

        result = testdir.runpytest("--collect-only", "-k", "foo")
        result.stdout.fnmatch_lines("*== 2/3 tests collected (1 deselected) in * ==*")

        result = testdir.runpytest("--collect-only", "-k", "test_bar")
        result.stdout.fnmatch_lines("*== 1/3 tests collected (2 deselected) in * ==*")

        result = testdir.runpytest("--collect-only", "-k", "invalid")
        result.stdout.fnmatch_lines("*== no tests collected (3 deselected) in * ==*")

        testdir.mkdir("no_tests_here")
        result = testdir.runpytest("--collect-only", "no_tests_here")
        result.stdout.fnmatch_lines("*== no tests collected in * ==*")

        testdir.makepyfile(
            test_contains_error="""
            raise RuntimeError
            """,
        )
        result = testdir.runpytest("--collect-only")
        result.stdout.fnmatch_lines("*== 3 tests collected, 1 error in * ==*")
        result = testdir.runpytest("--collect-only", "-k", "foo")
        result.stdout.fnmatch_lines(
            "*== 2/3 tests collected (1 deselected), 1 error in * ==*"
        )


class TestFixtureReporting:
    def test_setup_fixture_error(self, testdir):