Merge pull request #5990 from MarcoGorelli/plurality-matching
Plurality matching

commit cdc53da19c

AUTHORS
@@ -160,6 +160,7 @@ Manuel Krebber
 Marc Schlaich
 Marcelo Duarte Trevisani
 Marcin Bachry
+Marco Gorelli
 Mark Abramowitz
 Markus Unterwaditzer
 Martijn Faassen

@@ -0,0 +1 @@
+Fix plurality mismatch in test summary (e.g. display "1 error" instead of "1 errors").

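Note: the whole change boils down to one idiom, appending "s" only when the count is not one. A minimal standalone sketch of the wording before and after (illustrative only; the helper below is not part of the PR):

def summary_fragment(count):
    # Old wording: always plural, e.g. "1 errors".
    old = "%d errors" % count
    # New wording: pluralize only when count != 1, e.g. "1 error" / "2 errors".
    new = "%d error%s" % (count, "s" if count != 1 else "")
    return old, new

assert summary_fragment(1) == ("1 errors", "1 error")
assert summary_fragment(2) == ("2 errors", "2 errors")
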
@@ -248,7 +248,10 @@ def pytest_collection(session):

 def pytest_runtestloop(session):
     if session.testsfailed and not session.config.option.continue_on_collection_errors:
-        raise session.Interrupted("%d errors during collection" % session.testsfailed)
+        raise session.Interrupted(
+            "%d error%s during collection"
+            % (session.testsfailed, "s" if session.testsfailed != 1 else "")
+        )

     if session.config.option.collectonly:
         return True

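The Interrupted message above now reads correctly in both the singular and the plural case. A quick sketch of the expected wording (the wrapper function is hypothetical; the format string is the one from the hunk):

def interrupted_message(testsfailed):
    # Hypothetical wrapper around the format string used in pytest_runtestloop above.
    return "%d error%s during collection" % (
        testsfailed,
        "s" if testsfailed != 1 else "",
    )

assert interrupted_message(1) == "1 error during collection"
assert interrupted_message(2) == "2 errors during collection"
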
@@ -539,7 +539,7 @@ class TerminalReporter:
             str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
         )
         if errors:
-            line += " / %d errors" % errors
+            line += " / %d error%s" % (errors, "s" if errors != 1 else "")
         if deselected:
             line += " / %d deselected" % deselected
         if skipped:

@@ -1056,6 +1056,19 @@ _color_for_type = {
 _color_for_type_default = "yellow"


+def _make_plural(count, noun):
+    # No need to pluralize words such as `failed` or `passed`.
+    if noun not in ["error", "warnings"]:
+        return count, noun
+
+    # The `warnings` key is plural. To avoid API breakage, we keep it that way but
+    # set it to singular here so we can determine plurality in the same way as we do
+    # for `error`.
+    noun = noun.replace("warnings", "warning")
+
+    return count, noun + "s" if count != 1 else noun
+
+
 def build_summary_stats_line(stats):
     known_types = (
         "failed passed skipped deselected xfailed xpassed warnings error".split()

@@ -1086,7 +1099,7 @@ def build_summary_stats_line(stats):
             )
             color = _color_for_type.get(key, _color_for_type_default)
             markup = {color: True, "bold": color == main_color}
-            parts.append(("%d %s" % (count, key), markup))
+            parts.append(("%d %s" % _make_plural(count, key), markup))

     if not parts:
         parts = [("no tests ran", {_color_for_type_default: True})]

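To make the interaction between _make_plural and the "%d %s" template above concrete, here is a small usage sketch. The function body is copied from the hunk above; the assertions are illustrative expectations mirroring the parametrized cases further down in this diff:

def _make_plural(count, noun):
    # Copied from the diff above: only "error" and "warnings" need special handling;
    # words such as "failed" or "passed" read the same either way.
    if noun not in ["error", "warnings"]:
        return count, noun

    # The stats key is "warnings" (plural); normalize it to the singular form so the
    # same count-based rule applies as for "error".
    noun = noun.replace("warnings", "warning")

    return count, noun + "s" if count != 1 else noun


assert "%d %s" % _make_plural(1, "error") == "1 error"
assert "%d %s" % _make_plural(2, "error") == "2 errors"
assert "%d %s" % _make_plural(1, "warnings") == "1 warning"
assert "%d %s" % _make_plural(3, "warnings") == "3 warnings"
assert "%d %s" % _make_plural(5, "passed") == "5 passed"
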
@@ -628,7 +628,7 @@ class TestInvocationVariants:
         result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
         assert result.ret != 0

-        result.stdout.fnmatch_lines(["collected*0*items*/*1*errors"])
+        result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])

     def test_pyargs_only_imported_once(self, testdir):
         pkg = testdir.mkpydir("foo")

@@ -956,7 +956,7 @@ class TestDurations:
         testdir.makepyfile(test_collecterror="""xyz""")
         result = testdir.runpytest("--durations=2", "-k test_1")
         assert result.ret == 2
-        result.stdout.fnmatch_lines(["*Interrupted: 1 errors during collection*"])
+        result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
         # Collection errors abort test execution, therefore no duration is
         # output
         result.stdout.no_fnmatch_line("*duration*")

@@ -1167,7 +1167,7 @@ def test_dont_collect_non_function_callable(testdir):
         [
             "*collected 1 item*",
             "*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
-            "*1 passed, 1 warnings in *",
+            "*1 passed, 1 warning in *",
         ]
     )

@@ -3081,7 +3081,7 @@ class TestErrors:
             *KeyError*
             *ERROR*teardown*test_2*
             *KeyError*
-            *3 pass*2 error*
+            *3 pass*2 errors*
             """
         )

@@ -122,7 +122,7 @@ class TestAssertionRewrite:
         }
         testdir.makepyfile(**contents)
         result = testdir.runpytest_subprocess()
-        assert "warnings" not in "".join(result.outlines)
+        assert "warning" not in "".join(result.outlines)

     def test_rewrites_plugin_as_a_package(self, testdir):
         pkgdir = testdir.mkpydir("plugin")

@@ -453,7 +453,7 @@ class TestCaptureFixture:
                 "E*capfd*capsys*same*time*",
                 "*ERROR*setup*test_two*",
                 "E*capsys*capfd*same*time*",
-                "*2 error*",
+                "*2 errors*",
             ]
         )

@@ -892,7 +892,7 @@ def test_continue_on_collection_errors(testdir):
     assert res.ret == 1

     res.stdout.fnmatch_lines(
-        ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
+        ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 errors*"]
     )

@@ -909,7 +909,7 @@ def test_continue_on_collection_errors_maxfail(testdir):
     res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
     assert res.ret == 1

-    res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
+    res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 errors*"])


 def test_fixture_scope_sibling_conftests(testdir):

@@ -1253,7 +1253,7 @@ def test_collector_respects_tbstyle(testdir):
             '  File "*/test_collector_respects_tbstyle.py", line 1, in <module>',
             "    assert 0",
             "AssertionError: assert 0",
-            "*! Interrupted: 1 errors during collection !*",
+            "*! Interrupted: 1 error during collection !*",
             "*= 1 error in *",
         ]
     )

@@ -334,7 +334,7 @@ class TestDoctests:
             [
                 "*ERROR collecting hello.py*",
                 "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
-                "*Interrupted: 1 errors during collection*",
+                "*Interrupted: 1 error during collection*",
             ]
         )

@@ -891,7 +891,7 @@ def test_parameterset_for_fail_at_collect(testdir):
     result = testdir.runpytest(str(p1))
     result.stdout.fnmatch_lines(
         [
-            "collected 0 items / 1 errors",
+            "collected 0 items / 1 error",
             "* ERROR collecting test_parameterset_for_fail_at_collect.py *",
             "Empty parameter set in 'test' at line 3",
             "*= 1 error in *",

@@ -234,7 +234,7 @@ def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
             "*ValueError*42*",
             "*function2*",
             "*ValueError*42*",
-            "*2 error*",
+            "*2 errors*",
         ]
     )
     result.stdout.no_fnmatch_line("*xyz43*")

@@ -886,7 +886,7 @@ def test_errors_in_xfail_skip_expressions(testdir):
             "    syntax error",
             markline,
             "SyntaxError: invalid syntax",
-            "*1 pass*2 error*",
+            "*1 pass*2 errors*",
         ]
     )

@@ -164,7 +164,7 @@ def test_stop_on_collection_errors(broken_testdir, broken_first):
     if broken_first:
         files.reverse()
     result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
-    result.stdout.fnmatch_lines("*errors during collection*")
+    result.stdout.fnmatch_lines("*error during collection*")


 def test_xfail_handling(testdir):

@@ -1237,7 +1237,7 @@ def test_terminal_summary_warnings_header_once(testdir):
             "*= warnings summary =*",
             "*warning_from_test*",
             "*= short test summary info =*",
-            "*== 1 failed, 1 warnings in *",
+            "*== 1 failed, 1 warning in *",
         ]
     )
     result.stdout.no_fnmatch_line("*None*")

@@ -1263,6 +1263,7 @@ def test_terminal_summary_warnings_header_once(testdir):
             {"failed": (1,), "passed": (1,)},
         ),
         ("red", [("1 error", {"bold": True, "red": True})], {"error": (1,)}),
+        ("red", [("2 errors", {"bold": True, "red": True})], {"error": (1, 2)}),
         (
             "red",
             [

@@ -1281,16 +1282,12 @@ def test_terminal_summary_warnings_header_once(testdir):
             ],
             {"weird": (1,), "passed": (1,)},
         ),
-        (
-            "yellow",
-            [("1 warnings", {"bold": True, "yellow": True})],
-            {"warnings": (1,)},
-        ),
+        ("yellow", [("1 warning", {"bold": True, "yellow": True})], {"warnings": (1,)}),
         (
             "yellow",
             [
                 ("1 passed", {"bold": False, "green": True}),
-                ("1 warnings", {"bold": True, "yellow": True}),
+                ("1 warning", {"bold": True, "yellow": True}),
             ],
             {"warnings": (1,), "passed": (1,)},
         ),

@@ -142,7 +142,7 @@ def test_unicode(testdir, pyfile_with_warnings):
         [
             "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
             "*test_unicode.py:7: UserWarning: \u6d4b\u8bd5*",
-            "* 1 passed, 1 warnings*",
+            "* 1 passed, 1 warning*",
         ]
     )

@@ -201,7 +201,7 @@ def test_filterwarnings_mark(testdir, default_config):
         """
     )
     result = testdir.runpytest("-W always" if default_config == "cmdline" else "")
-    result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warnings in *"])
+    result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warning in *"])


 def test_non_string_warning_argument(testdir):

@@ -216,7 +216,7 @@ def test_non_string_warning_argument(testdir):
         """
     )
     result = testdir.runpytest("-W", "always")
-    result.stdout.fnmatch_lines(["*= 1 passed, 1 warnings in *"])
+    result.stdout.fnmatch_lines(["*= 1 passed, 1 warning in *"])


 def test_filterwarnings_mark_registration(testdir):

@@ -302,7 +302,7 @@ def test_collection_warnings(testdir):
             "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
             " *collection_warnings.py:3: UserWarning: collection warning",
             ' warnings.warn(UserWarning("collection warning"))',
-            "* 1 passed, 1 warnings*",
+            "* 1 passed, 1 warning*",
         ]
     )

@@ -358,7 +358,7 @@ def test_hide_pytest_internal_warnings(testdir, ignore_pytest_warnings):
         [
             "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
             "*test_hide_pytest_internal_warnings.py:4: PytestWarning: some internal warning",
-            "* 1 passed, 1 warnings *",
+            "* 1 passed, 1 warning *",
         ]
     )

@@ -476,7 +476,7 @@ class TestDeprecationWarningsByDefault:
             [
                 "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
                 "*test_hidden_by_mark.py:3: DeprecationWarning: collection",
-                "* 1 passed, 1 warnings*",
+                "* 1 passed, 1 warning*",
             ]
         )