diff --git a/changelog/7701.improvement.rst b/changelog/7701.improvement.rst
new file mode 100644
index 000000000..e214be9e3
--- /dev/null
+++ b/changelog/7701.improvement.rst
@@ -0,0 +1 @@
+Improved reporting when using ``--collect-only``. It will now show the number of collected tests in the summary stats.
diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst
index 464a6c6ce..558c56772 100644
--- a/doc/en/example/nonpython.rst
+++ b/doc/en/example/nonpython.rst
@@ -102,4 +102,4 @@ interesting to just look at the collection tree:
 
-    ========================== no tests ran in 0.12s ===========================
+    ========================== 2 tests collected in 0.12s ===========================
diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst
index f1c98d449..d5a11b451 100644
--- a/doc/en/example/parametrize.rst
+++ b/doc/en/example/parametrize.rst
@@ -175,7 +175,7 @@ objects, they are still using the default pytest representation:
 
-    ========================== no tests ran in 0.12s ===========================
+    ========================== 8 tests collected in 0.12s ===========================
 
 In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
 together with the actual data, instead of listing them separately.
@@ -252,7 +252,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
 
-    ========================== no tests ran in 0.12s ===========================
+    ========================== 4 tests collected in 0.12s ===========================
 
 Note that we told ``metafunc.parametrize()`` that your scenario
 values should be considered class-scoped.  With pytest-2.3 this leads to a
@@ -328,7 +328,7 @@ Let's first see how it looks like at collection time:
 
-    ========================== no tests ran in 0.12s ===========================
+    ========================== 2 tests collected in 0.12s ===========================
 
 And then when we run the test:
diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst
index c2f034839..f7917b790 100644
--- a/doc/en/example/pythoncollection.rst
+++ b/doc/en/example/pythoncollection.rst
@@ -157,7 +157,7 @@ The test collection would look like this:
 
-    ========================== no tests ran in 0.12s ===========================
+    ========================== 2 tests collected in 0.12s ===========================
 
 You can check for multiple glob patterns by adding a space between the patterns:
@@ -220,7 +220,7 @@ You can always peek at the collection tree without running tests like this:
 
-    ========================== no tests ran in 0.12s ===========================
+    ========================== 3 tests collected in 0.12s ===========================
 
 .. _customizing-test-collection:
@@ -282,7 +282,7 @@ leave out the ``setup.py`` file:
 
-    ====== no tests ran in 0.04 seconds ======
+    ====== 1 test collected in 0.04 seconds ======
 
 If you run with a Python 3 interpreter both the one test and the ``setup.py``
 file will be left out:
@@ -296,7 +296,7 @@ file will be left out:
     rootdir: $REGENDOC_TMPDIR, configfile: pytest.ini
     collected 0 items
 
-    ========================== no tests ran in 0.12s ===========================
+    ========================== no tests collected in 0.12s ===========================
 
 It's also possible to ignore files based on Unix shell-style wildcards by adding
 patterns to :globalvar:`collect_ignore_glob`.
diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst
index 90e88d876..a0411902c 100644
--- a/doc/en/fixture.rst
+++ b/doc/en/fixture.rst
@@ -919,7 +919,7 @@ Running the above tests results in the following test IDs being used:
 
-    ========================== no tests ran in 0.12s ===========================
+    ========================== 10 tests collected in 0.12s ===========================
 
 .. _`fixture-parametrize-marks`:
diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py
index 8ea67f3b5..f1736ee43 100644
--- a/src/_pytest/terminal.py
+++ b/src/_pytest/terminal.py
@@ -1163,15 +1163,45 @@ class TerminalReporter:
         self._main_color = self._determine_main_color(bool(unknown_types))
 
     def build_summary_stats_line(self) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
-        main_color, known_types = self._get_main_color()
+        """
+        Build the parts used in the last summary stats line.
+        The summary stats line is the line shown at the end, "=== 12 passed, 2 errors in Xs ===".
+
+        This function builds a list of the "parts" that make up the text of that line, in
+        the example above it would be:
+
+            [
+                ("12 passed", {"green": True}),
+                ("2 errors", {"red": True}),
+            ]
+
+        The dict in each part is a "markup dictionary", used by TerminalWriter to
+        color output.
+
+        The final color of the line is also determined by this function, and is the second
+        element of the returned tuple.
+        """
+        if self.config.getoption("collectonly"):
+            return self._build_collect_only_summary_stats_line()
+        else:
+            return self._build_normal_summary_stats_line()
+
+    def _get_reports_to_display(self, key: str) -> List[Any]:
+        """Get test/collection reports for the given status key, such as `passed` or `error`."""
+        reports = self.stats.get(key, [])
+        return [x for x in reports if getattr(x, "count_towards_summary", True)]
+
+    def _build_normal_summary_stats_line(
+        self,
+    ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
+        main_color, known_types = self._get_main_color()
         parts = []
+
         for key in known_types:
-            reports = self.stats.get(key, None)
+            reports = self._get_reports_to_display(key)
             if reports:
-                count = sum(
-                    1 for rep in reports if getattr(rep, "count_towards_summary", True)
-                )
+                count = len(reports)
                 color = _color_for_type.get(key, _color_for_type_default)
                 markup = {color: True, "bold": color == main_color}
                 parts.append(("%d %s" % _make_plural(count, key), markup))
@@ -1181,6 +1211,40 @@
 
         return parts, main_color
 
+    def _build_collect_only_summary_stats_line(
+        self,
+    ) -> Tuple[List[Tuple[str, Dict[str, bool]]], str]:
+        deselected = len(self._get_reports_to_display("deselected"))
+        errors = len(self._get_reports_to_display("error"))
+
+        if self._numcollected == 0:
+            parts = [("no tests collected", {"yellow": True})]
+            main_color = "yellow"
+
+        elif deselected == 0:
+            main_color = "green"
+            collected_output = "%d %s collected" % _make_plural(
+                self._numcollected, "test"
+            )
+            parts = [(collected_output, {main_color: True})]
+        else:
+            all_tests_were_deselected = self._numcollected == deselected
+            if all_tests_were_deselected:
+                main_color = "yellow"
+                collected_output = f"no tests collected ({deselected} deselected)"
+            else:
+                main_color = "green"
+                selected = self._numcollected - deselected
+                collected_output = f"{selected}/{self._numcollected} tests collected ({deselected} deselected)"
+
+            parts = [(collected_output, {main_color: True})]
+
+        if errors:
+            main_color = _color_for_type["error"]
+            parts += [("%d %s" % _make_plural(errors, "error"), {main_color: True})]
+
+        return parts, main_color
+
 
 def _get_pos(config: Config, rep: BaseReport):
     nodeid = config.cwd_relative_nodeid(rep.nodeid)
@@ -1267,7 +1331,7 @@ _color_for_type_default = "yellow"
 
 def _make_plural(count: int, noun: str) -> Tuple[int, str]:
     # No need to pluralize words such as `failed` or `passed`.
-    if noun not in ["error", "warnings"]:
+    if noun not in ["error", "warnings", "test"]:
         return count, noun
 
     # The `warnings` key is plural. To avoid API breakage, we keep it that way but
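For reference, here is a minimal standalone sketch of how the branches in ``_build_collect_only_summary_stats_line`` above turn the collected/deselected/error counts into the wording of the summary line. The function name and the plain-string return value are illustrative only; the real method returns markup parts plus a main color, as shown in the hunk.

def collect_only_summary(numcollected: int, deselected: int, errors: int) -> str:
    # Mirrors the branching of _build_collect_only_summary_stats_line above,
    # but returns a plain string and ignores colors/markup.
    if numcollected == 0:
        line = "no tests collected"
    elif deselected == 0:
        line = "%d test%s collected" % (numcollected, "" if numcollected == 1 else "s")
    elif numcollected == deselected:
        line = "no tests collected (%d deselected)" % deselected
    else:
        line = "%d/%d tests collected (%d deselected)" % (
            numcollected - deselected,
            numcollected,
            deselected,
        )
    if errors:
        line += ", %d error%s" % (errors, "" if errors == 1 else "s")
    return line

assert collect_only_summary(3, 0, 0) == "3 tests collected"
assert collect_only_summary(1, 0, 0) == "1 test collected"
assert collect_only_summary(3, 1, 0) == "2/3 tests collected (1 deselected)"
assert collect_only_summary(3, 3, 0) == "no tests collected (3 deselected)"
assert collect_only_summary(0, 0, 1) == "no tests collected, 1 error"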
diff --git a/testing/logging/test_reporting.py b/testing/logging/test_reporting.py
index 7545d016d..fc9f10823 100644
--- a/testing/logging/test_reporting.py
+++ b/testing/logging/test_reporting.py
@@ -889,7 +889,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
             [
                 "*collected 1 item*",
                 "**",
-                "*no tests ran*",
+                "*1 test collected*",
             ]
         )
     elif verbose == "-q":
@@ -897,7 +897,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
         expected_lines.extend(
             [
                 "*test_collection_collect_only_live_logging.py::test_simple*",
-                "no tests ran in [0-9].[0-9][0-9]s",
+                "1 test collected in [0-9].[0-9][0-9]s",
             ]
         )
     elif verbose == "-qq":
diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py
index 2a6b3dc54..676f1d988 100644
--- a/testing/python/metafunc.py
+++ b/testing/python/metafunc.py
@@ -1417,7 +1417,7 @@ class TestMetafuncFunctional:
                 ' @pytest.mark.parametrise("x", range(2))',
                 "E Failed: Unknown 'parametrise' mark, did you mean 'parametrize'?",
                 "*! Interrupted: 1 error during collection !*",
-                "*= 1 error in *",
+                "*= no tests collected, 1 error in *",
             ]
         )
diff --git a/testing/test_cacheprovider.py b/testing/test_cacheprovider.py
index 54e657b27..37253b8b5 100644
--- a/testing/test_cacheprovider.py
+++ b/testing/test_cacheprovider.py
@@ -909,7 +909,7 @@ class TestLastFailed:
                "",
                "",
                " ",
-                "*= 1 deselected in *",
+                "*= 1/2 tests collected (1 deselected) in *",
             ],
         )
@@ -942,7 +942,7 @@ class TestLastFailed:
                " ",
                " ",
                "",
-                "*= 1 deselected in *",
+                "*= 2/3 tests collected (1 deselected) in *",
             ],
             consecutive=True,
         )
@@ -977,7 +977,7 @@ class TestLastFailed:
                "",
                " ",
                "",
-                "*= no tests ran in*",
+                "*= 1 test collected in*",
             ],
             consecutive=True,
         )
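The singular and plural forms asserted above ("1 test collected" vs. "1/2 tests collected") come from adding "test" to the nouns handled by ``_make_plural`` in the ``terminal.py`` hunk. A standalone restatement for illustration; the suffixing logic after the truncated comment is assumed from the surrounding helper and is not shown in full in the hunk.

from typing import Tuple


def make_plural(count: int, noun: str) -> Tuple[int, str]:
    # Nouns such as "failed" or "passed" pass through unchanged; "error",
    # "warnings" and (with this patch) "test" gain an "s" when count != 1.
    if noun not in ["error", "warnings", "test"]:
        return count, noun
    noun = noun.replace("warnings", "warning")
    return count, noun + "s" if count != 1 else noun


assert make_plural(1, "test") == (1, "test")
assert make_plural(3, "test") == (3, "tests")
assert make_plural(1, "passed") == (1, "passed")
assert make_plural(2, "warnings") == (2, "warnings")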
#7701.""" + testdir.makepyfile( + test_collect_foo=""" + def test_foo(): pass + """, + test_collect_bar=""" + def test_foobar(): pass + def test_bar(): pass + """, + ) + result = testdir.runpytest("--collect-only") + result.stdout.fnmatch_lines("*== 3 tests collected in * ==*") + + result = testdir.runpytest("--collect-only", "test_collect_foo.py") + result.stdout.fnmatch_lines("*== 1 test collected in * ==*") + + result = testdir.runpytest("--collect-only", "-k", "foo") + result.stdout.fnmatch_lines("*== 2/3 tests collected (1 deselected) in * ==*") + + result = testdir.runpytest("--collect-only", "-k", "test_bar") + result.stdout.fnmatch_lines("*== 1/3 tests collected (2 deselected) in * ==*") + + result = testdir.runpytest("--collect-only", "-k", "invalid") + result.stdout.fnmatch_lines("*== no tests collected (3 deselected) in * ==*") + + testdir.mkdir("no_tests_here") + result = testdir.runpytest("--collect-only", "no_tests_here") + result.stdout.fnmatch_lines("*== no tests collected in * ==*") + + testdir.makepyfile( + test_contains_error=""" + raise RuntimeError + """, + ) + result = testdir.runpytest("--collect-only") + result.stdout.fnmatch_lines("*== 3 tests collected, 1 error in * ==*") + result = testdir.runpytest("--collect-only", "-k", "foo") + result.stdout.fnmatch_lines( + "*== 2/3 tests collected (1 deselected), 1 error in * ==*" + ) + class TestFixtureReporting: def test_setup_fixture_error(self, testdir):