diff --git a/AUTHORS b/AUTHORS
index e11400c1f..def641c95 100644
--- a/AUTHORS
+++ b/AUTHORS
@@ -160,6 +160,7 @@ Manuel Krebber
 Marc Schlaich
 Marcelo Duarte Trevisani
 Marcin Bachry
+Marco Gorelli
 Mark Abramowitz
 Markus Unterwaditzer
 Martijn Faassen
diff --git a/changelog/5990.improvement.rst b/changelog/5990.improvement.rst
new file mode 100644
index 000000000..6f5ad648e
--- /dev/null
+++ b/changelog/5990.improvement.rst
@@ -0,0 +1 @@
+Fix plurality mismatch in test summary (e.g. display "1 error" instead of "1 errors").
diff --git a/src/_pytest/main.py b/src/_pytest/main.py
index ad65ed299..7b3855e6c 100644
--- a/src/_pytest/main.py
+++ b/src/_pytest/main.py
@@ -248,7 +248,10 @@ def pytest_collection(session):
 
 def pytest_runtestloop(session):
     if session.testsfailed and not session.config.option.continue_on_collection_errors:
-        raise session.Interrupted("%d errors during collection" % session.testsfailed)
+        raise session.Interrupted(
+            "%d error%s during collection"
+            % (session.testsfailed, "s" if session.testsfailed != 1 else "")
+        )
 
     if session.config.option.collectonly:
         return True
diff --git a/src/_pytest/terminal.py b/src/_pytest/terminal.py
index 35f6d324b..228fc4219 100644
--- a/src/_pytest/terminal.py
+++ b/src/_pytest/terminal.py
@@ -539,7 +539,7 @@ class TerminalReporter:
             str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
         )
         if errors:
-            line += " / %d errors" % errors
+            line += " / %d error%s" % (errors, "s" if errors != 1 else "")
         if deselected:
             line += " / %d deselected" % deselected
         if skipped:
@@ -1056,6 +1056,19 @@ _color_for_type = {
 _color_for_type_default = "yellow"
 
 
+def _make_plural(count, noun):
+    # No need to pluralize words such as `failed` or `passed`.
+    if noun not in ["error", "warnings"]:
+        return count, noun
+
+    # The `warnings` key is plural. To avoid API breakage, we keep it that way but
+    # set it to singular here so we can determine plurality in the same way as we do
+    # for `error`.
+    noun = noun.replace("warnings", "warning")
+
+    return count, noun + "s" if count != 1 else noun
+
+
 def build_summary_stats_line(stats):
     known_types = (
         "failed passed skipped deselected xfailed xpassed warnings error".split()
@@ -1086,7 +1099,7 @@ def build_summary_stats_line(stats):
             )
             color = _color_for_type.get(key, _color_for_type_default)
             markup = {color: True, "bold": color == main_color}
-            parts.append(("%d %s" % (count, key), markup))
+            parts.append(("%d %s" % _make_plural(count, key), markup))
 
     if not parts:
         parts = [("no tests ran", {_color_for_type_default: True})]
diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py
index 2bf56cb80..82c727fc6 100644
--- a/testing/acceptance_test.py
+++ b/testing/acceptance_test.py
@@ -628,7 +628,7 @@ class TestInvocationVariants:
 
         result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
         assert result.ret != 0
-        result.stdout.fnmatch_lines(["collected*0*items*/*1*errors"])
+        result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])
 
     def test_pyargs_only_imported_once(self, testdir):
         pkg = testdir.mkpydir("foo")
@@ -956,7 +956,7 @@ class TestDurations:
         testdir.makepyfile(test_collecterror="""xyz""")
         result = testdir.runpytest("--durations=2", "-k test_1")
         assert result.ret == 2
-        result.stdout.fnmatch_lines(["*Interrupted: 1 errors during collection*"])
+        result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
         # Collection errors abort test execution, therefore no duration is
         # output
         result.stdout.no_fnmatch_line("*duration*")
diff --git a/testing/python/collect.py b/testing/python/collect.py
index 537047119..30f9841b5 100644
--- a/testing/python/collect.py
+++ b/testing/python/collect.py
@@ -1167,7 +1167,7 @@ def test_dont_collect_non_function_callable(testdir):
         [
             "*collected 1 item*",
             "*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
-            "*1 passed, 1 warnings in *",
+            "*1 passed, 1 warning in *",
         ]
     )
 
diff --git a/testing/python/fixtures.py b/testing/python/fixtures.py
index 6399863c7..6dca793e0 100644
--- a/testing/python/fixtures.py
+++ b/testing/python/fixtures.py
@@ -3081,7 +3081,7 @@ class TestErrors:
             *KeyError*
             *ERROR*teardown*test_2*
             *KeyError*
-            *3 pass*2 error*
+            *3 pass*2 errors*
             """
         )
 
diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py
index 6c5de5c03..3aab3ac2a 100644
--- a/testing/test_assertrewrite.py
+++ b/testing/test_assertrewrite.py
@@ -122,7 +122,7 @@ class TestAssertionRewrite:
         }
         testdir.makepyfile(**contents)
         result = testdir.runpytest_subprocess()
-        assert "warnings" not in "".join(result.outlines)
+        assert "warning" not in "".join(result.outlines)
 
     def test_rewrites_plugin_as_a_package(self, testdir):
         pkgdir = testdir.mkpydir("plugin")
diff --git a/testing/test_capture.py b/testing/test_capture.py
index 4320a7cae..67aa0c77e 100644
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -453,7 +453,7 @@ class TestCaptureFixture:
                 "E*capfd*capsys*same*time*",
                 "*ERROR*setup*test_two*",
                 "E*capsys*capfd*same*time*",
-                "*2 error*",
+                "*2 errors*",
             ]
         )
 
diff --git a/testing/test_collection.py b/testing/test_collection.py
index 7a5cf795b..259868357 100644
--- a/testing/test_collection.py
+++ b/testing/test_collection.py
@@ -892,7 +892,7 @@ def test_continue_on_collection_errors(testdir):
 
     assert res.ret == 1
     res.stdout.fnmatch_lines(
-        ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
+        ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 errors*"]
     )
 
 
@@ -909,7 +909,7 @@ def test_continue_on_collection_errors_maxfail(testdir):
 
     res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
     assert res.ret == 1
-    res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
+    res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 errors*"])
 
 
 def test_fixture_scope_sibling_conftests(testdir):
@@ -1253,7 +1253,7 @@ def test_collector_respects_tbstyle(testdir):
             '  File "*/test_collector_respects_tbstyle.py", line 1, in <module>',
             "    assert 0",
             "AssertionError: assert 0",
-            "*! Interrupted: 1 errors during collection !*",
+            "*! Interrupted: 1 error during collection !*",
             "*= 1 error in *",
         ]
     )
diff --git a/testing/test_doctest.py b/testing/test_doctest.py
index 37b3988f7..79095e3e7 100644
--- a/testing/test_doctest.py
+++ b/testing/test_doctest.py
@@ -334,7 +334,7 @@ class TestDoctests:
             [
                 "*ERROR collecting hello.py*",
                 "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
-                "*Interrupted: 1 errors during collection*",
+                "*Interrupted: 1 error during collection*",
             ]
         )
 
diff --git a/testing/test_mark.py b/testing/test_mark.py
index c8d5851ac..93bc77a16 100644
--- a/testing/test_mark.py
+++ b/testing/test_mark.py
@@ -891,7 +891,7 @@ def test_parameterset_for_fail_at_collect(testdir):
     result = testdir.runpytest(str(p1))
     result.stdout.fnmatch_lines(
         [
-            "collected 0 items / 1 errors",
+            "collected 0 items / 1 error",
             "* ERROR collecting test_parameterset_for_fail_at_collect.py *",
             "Empty parameter set in 'test' at line 3",
             "*= 1 error in *",
diff --git a/testing/test_runner_xunit.py b/testing/test_runner_xunit.py
index 1e63bbf49..0ff508d2c 100644
--- a/testing/test_runner_xunit.py
+++ b/testing/test_runner_xunit.py
@@ -234,7 +234,7 @@ def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
             "*ValueError*42*",
             "*function2*",
             "*ValueError*42*",
-            "*2 error*",
+            "*2 errors*",
         ]
     )
     result.stdout.no_fnmatch_line("*xyz43*")
diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index 51b1bbdd6..8ba77ba12 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -886,7 +886,7 @@ def test_errors_in_xfail_skip_expressions(testdir):
             "    syntax error",
             markline,
             "SyntaxError: invalid syntax",
-            "*1 pass*2 error*",
+            "*1 pass*2 errors*",
         ]
     )
 
diff --git a/testing/test_stepwise.py b/testing/test_stepwise.py
index f61425b6b..3e4f86f21 100644
--- a/testing/test_stepwise.py
+++ b/testing/test_stepwise.py
@@ -164,7 +164,7 @@ def test_stop_on_collection_errors(broken_testdir, broken_first):
     if broken_first:
         files.reverse()
     result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
-    result.stdout.fnmatch_lines("*errors during collection*")
+    result.stdout.fnmatch_lines("*error during collection*")
 
 
 def test_xfail_handling(testdir):
diff --git a/testing/test_terminal.py b/testing/test_terminal.py
index c53b9f2ec..ba1844fed 100644
--- a/testing/test_terminal.py
+++ b/testing/test_terminal.py
@@ -1237,7 +1237,7 @@ def test_terminal_summary_warnings_header_once(testdir):
             "*= warnings summary =*",
             "*warning_from_test*",
             "*= short test summary info =*",
-            "*== 1 failed, 1 warnings in *",
+            "*== 1 failed, 1 warning in *",
         ]
     )
     result.stdout.no_fnmatch_line("*None*")
@@ -1263,6 +1263,7 @@ def test_terminal_summary_warnings_header_once(testdir):
             {"failed": (1,), "passed": (1,)},
         ),
         ("red", [("1 error", {"bold": True, "red": True})], {"error": (1,)}),
+        ("red", [("2 errors", {"bold": True, "red": True})], {"error": (1, 2)}),
         (
             "red",
             [
@@ -1281,16 +1282,12 @@ def test_terminal_summary_warnings_header_once(testdir):
             ],
             {"weird": (1,), "passed": (1,)},
         ),
-        (
-            "yellow",
-            [("1 warnings", {"bold": True, "yellow": True})],
-            {"warnings": (1,)},
-        ),
+        ("yellow", [("1 warning", {"bold": True, "yellow": True})], {"warnings": (1,)}),
         (
             "yellow",
             [
                 ("1 passed", {"bold": False, "green": True}),
-                ("1 warnings", {"bold": True, "yellow": True}),
+                ("1 warning", {"bold": True, "yellow": True}),
             ],
             {"warnings": (1,), "passed": (1,)},
         ),
diff --git a/testing/test_warnings.py b/testing/test_warnings.py
index 077636c52..bbcf87e5a 100644
--- a/testing/test_warnings.py
+++ b/testing/test_warnings.py
@@ -142,7 +142,7 @@ def test_unicode(testdir, pyfile_with_warnings):
         [
             "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
             "*test_unicode.py:7: UserWarning: \u6d4b\u8bd5*",
-            "* 1 passed, 1 warnings*",
+            "* 1 passed, 1 warning*",
         ]
     )
 
@@ -201,7 +201,7 @@ def test_filterwarnings_mark(testdir, default_config):
         """
     )
     result = testdir.runpytest("-W always" if default_config == "cmdline" else "")
-    result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warnings in *"])
+    result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warning in *"])
 
 
 def test_non_string_warning_argument(testdir):
@@ -216,7 +216,7 @@ def test_non_string_warning_argument(testdir):
         """
     )
     result = testdir.runpytest("-W", "always")
-    result.stdout.fnmatch_lines(["*= 1 passed, 1 warnings in *"])
+    result.stdout.fnmatch_lines(["*= 1 passed, 1 warning in *"])
 
 
 def test_filterwarnings_mark_registration(testdir):
@@ -302,7 +302,7 @@ def test_collection_warnings(testdir):
             "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
             "  *collection_warnings.py:3: UserWarning: collection warning",
             '    warnings.warn(UserWarning("collection warning"))',
-            "* 1 passed, 1 warnings*",
+            "* 1 passed, 1 warning*",
         ]
    )
 
@@ -358,7 +358,7 @@ def test_hide_pytest_internal_warnings(testdir, ignore_pytest_warnings):
         [
             "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
             "*test_hide_pytest_internal_warnings.py:4: PytestWarning: some internal warning",
-            "* 1 passed, 1 warnings *",
+            "* 1 passed, 1 warning *",
         ]
     )
 
@@ -476,7 +476,7 @@ class TestDeprecationWarningsByDefault:
             [
                 "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
                 "*test_hidden_by_mark.py:3: DeprecationWarning: collection",
-                "* 1 passed, 1 warnings*",
+                "* 1 passed, 1 warning*",
             ]
         )
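
For reference, the pluralization rule introduced in src/_pytest/terminal.py can be exercised on its own. The following is a minimal standalone sketch: the _make_plural body is reproduced from the hunk above, while the assert lines are illustrative and not part of this diff. Note that the final return parses as (count, (noun + "s") if count != 1 else noun), because a conditional expression binds tighter than the tuple comma.

def _make_plural(count, noun):
    # Words such as `failed` or `passed` need no pluralization.
    if noun not in ["error", "warnings"]:
        return count, noun

    # The `warnings` stats key is plural; normalize it to singular so plurality
    # is decided the same way as for `error`.
    noun = noun.replace("warnings", "warning")

    # Append "s" to the noun only when count != 1; count passes through unchanged.
    return count, noun + "s" if count != 1 else noun

# Illustrative checks (not part of the diff):
assert _make_plural(1, "error") == (1, "error")
assert _make_plural(2, "error") == (2, "errors")
assert _make_plural(1, "warnings") == (1, "warning")
assert _make_plural(2, "warnings") == (2, "warnings")
assert _make_plural(3, "passed") == (3, "passed")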