Fix plurality mismatch for "error" and "warnings" in pytest terminal summary
parent cbc39dd86e
commit d863c30c74
AUTHORS
@@ -160,6 +160,7 @@ Manuel Krebber
 Marc Schlaich
 Marcelo Duarte Trevisani
 Marcin Bachry
+Marco Gorelli
 Mark Abramowitz
 Markus Unterwaditzer
 Martijn Faassen
@@ -0,0 +1 @@
+Fix plurality mismatch in test summary (e.g. display "1 error" instead of "1 errors").
@@ -248,7 +248,10 @@ def pytest_collection(session):

 def pytest_runtestloop(session):
     if session.testsfailed and not session.config.option.continue_on_collection_errors:
-        raise session.Interrupted("%d errors during collection" % session.testsfailed)
+        raise session.Interrupted(
+            "%d error%s during collection"
+            % (session.testsfailed, "s" if session.testsfailed != 1 else "")
+        )

     if session.config.option.collectonly:
         return True
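Both the collection-abort message above and the collected-items line in the next hunk use the same inline idiom instead of a helper: append "s" only when the count is not exactly one. A minimal standalone sketch of the pattern (the counts below are made up for illustration):

# Sketch of the inline pluralization idiom used in the diff; counts are illustrative.
for n in (0, 1, 2):
    print("%d error%s during collection" % (n, "s" if n != 1 else ""))
# 0 errors during collection
# 1 error during collection
# 2 errors during collection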
@@ -539,7 +539,7 @@ class TerminalReporter:
             str(self._numcollected) + " item" + ("" if self._numcollected == 1 else "s")
         )
         if errors:
-            line += " / %d errors" % errors
+            line += " / %d error%s" % (errors, "s" if errors != 1 else "")
         if deselected:
             line += " / %d deselected" % deselected
         if skipped:
@@ -1056,6 +1056,19 @@ _color_for_type = {
 _color_for_type_default = "yellow"


+def _make_plural(count, noun):
+    # No need to pluralize words such as `failed` or `passed`.
+    if noun not in ["error", "warnings"]:
+        return count, noun
+
+    # The `warnings` key is plural. To avoid API breakage, we keep it that way but
+    # set it to singular here so we can determine plurality in the same way as we do
+    # for `error`.
+    noun = noun.replace("warnings", "warning")
+
+    return count, noun + "s" if count != 1 else noun
+
+
 def build_summary_stats_line(stats):
     known_types = (
         "failed passed skipped deselected xfailed xpassed warnings error".split()
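For the summary stats line, the same decision is factored into the `_make_plural` helper added above: outcome words such as `failed` or `passed` pass through unchanged, while `error` and the historically plural `warnings` key are normalized and pluralized by count. A small standalone demonstration of the helper's behaviour (the function is copied from the hunk; the inputs are illustrative):

def _make_plural(count, noun):
    # Only "error" and the (plural-named) "warnings" stats key are adjusted.
    if noun not in ["error", "warnings"]:
        return count, noun
    noun = noun.replace("warnings", "warning")
    return count, noun + "s" if count != 1 else noun

print(_make_plural(1, "error"))     # (1, 'error')
print(_make_plural(2, "error"))     # (2, 'errors')
print(_make_plural(1, "warnings"))  # (1, 'warning')
print(_make_plural(3, "warnings"))  # (3, 'warnings')
print(_make_plural(5, "passed"))    # (5, 'passed')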
@@ -1086,7 +1099,7 @@ def build_summary_stats_line(stats):
             )
             color = _color_for_type.get(key, _color_for_type_default)
             markup = {color: True, "bold": color == main_color}
-            parts.append(("%d %s" % (count, key), markup))
+            parts.append(("%d %s" % _make_plural(count, key), markup))

     if not parts:
         parts = [("no tests ran", {_color_for_type_default: True})]
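Each `parts` entry is a `(text, markup)` pair that the reporter later renders into the final summary line, so routing the text through `_make_plural` is all that is needed to fix wording such as "2 error" or "1 warnings". A rough sketch of how such parts become one visible line (the comma-joining below is a simplification for illustration, not the reporter's actual rendering code):

# Hypothetical parts list shaped like the (text, markup) tuples built above.
parts = [
    ("1 failed", {"red": True, "bold": True}),
    ("1 passed", {"green": True, "bold": False}),
    ("2 errors", {"red": True, "bold": True}),
]
# Ignoring markup/colors, the visible summary text is roughly:
print(", ".join(text for text, _ in parts))  # 1 failed, 1 passed, 2 errors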
@@ -628,7 +628,7 @@ class TestInvocationVariants:
         result = testdir.runpytest("--pyargs", "tpkg.test_hello", syspathinsert=True)
         assert result.ret != 0

-        result.stdout.fnmatch_lines(["collected*0*items*/*1*errors"])
+        result.stdout.fnmatch_lines(["collected*0*items*/*1*error"])

     def test_pyargs_only_imported_once(self, testdir):
         pkg = testdir.mkpydir("foo")
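The remaining hunks update test expectations. These patterns are matched with fnmatch-style globs, where `*` absorbs whatever varies between runs; a pattern that ends in `error` with no trailing `*` no longer matches output that still says `errors`. A stdlib-only illustration of that matching behaviour (the sample lines are made up):

import fnmatch

assert fnmatch.fnmatch("collected 0 items / 1 error", "collected*0*items*/*1*error")
assert not fnmatch.fnmatch("collected 0 items / 1 errors", "collected*0*items*/*1*error")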
@@ -956,7 +956,7 @@ class TestDurations:
         testdir.makepyfile(test_collecterror="""xyz""")
         result = testdir.runpytest("--durations=2", "-k test_1")
         assert result.ret == 2
-        result.stdout.fnmatch_lines(["*Interrupted: 1 errors during collection*"])
+        result.stdout.fnmatch_lines(["*Interrupted: 1 error during collection*"])
         # Collection errors abort test execution, therefore no duration is
         # output
         result.stdout.no_fnmatch_line("*duration*")
@@ -1167,7 +1167,7 @@ def test_dont_collect_non_function_callable(testdir):
        [
            "*collected 1 item*",
            "*test_dont_collect_non_function_callable.py:2: *cannot collect 'test_a' because it is not a function*",
-            "*1 passed, 1 warnings in *",
+            "*1 passed, 1 warning in *",
        ]
    )

@@ -3081,7 +3081,7 @@ class TestErrors:
            *KeyError*
            *ERROR*teardown*test_2*
            *KeyError*
-            *3 pass*2 error*
+            *3 pass*2 errors*
            """
        )

@@ -122,7 +122,7 @@ class TestAssertionRewrite:
        }
        testdir.makepyfile(**contents)
        result = testdir.runpytest_subprocess()
-        assert "warnings" not in "".join(result.outlines)
+        assert "warning" not in "".join(result.outlines)

    def test_rewrites_plugin_as_a_package(self, testdir):
        pkgdir = testdir.mkpydir("plugin")
@@ -453,7 +453,7 @@ class TestCaptureFixture:
                "E*capfd*capsys*same*time*",
                "*ERROR*setup*test_two*",
                "E*capsys*capfd*same*time*",
-                "*2 error*",
+                "*2 errors*",
            ]
        )

@@ -892,7 +892,7 @@ def test_continue_on_collection_errors(testdir):
    assert res.ret == 1

    res.stdout.fnmatch_lines(
-        ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 error*"]
+        ["collected 2 items / 2 errors", "*1 failed, 1 passed, 2 errors*"]
    )

@@ -909,7 +909,7 @@ def test_continue_on_collection_errors_maxfail(testdir):
    res = testdir.runpytest("--continue-on-collection-errors", "--maxfail=3")
    assert res.ret == 1

-    res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 error*"])
+    res.stdout.fnmatch_lines(["collected 2 items / 2 errors", "*1 failed, 2 errors*"])


def test_fixture_scope_sibling_conftests(testdir):
@@ -1253,7 +1253,7 @@ def test_collector_respects_tbstyle(testdir):
            ' File "*/test_collector_respects_tbstyle.py", line 1, in <module>',
            " assert 0",
            "AssertionError: assert 0",
-            "*! Interrupted: 1 errors during collection !*",
+            "*! Interrupted: 1 error during collection !*",
            "*= 1 error in *",
        ]
    )
@@ -334,7 +334,7 @@ class TestDoctests:
            [
                "*ERROR collecting hello.py*",
                "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR),
-                "*Interrupted: 1 errors during collection*",
+                "*Interrupted: 1 error during collection*",
            ]
        )

@@ -891,7 +891,7 @@ def test_parameterset_for_fail_at_collect(testdir):
    result = testdir.runpytest(str(p1))
    result.stdout.fnmatch_lines(
        [
-            "collected 0 items / 1 errors",
+            "collected 0 items / 1 error",
            "* ERROR collecting test_parameterset_for_fail_at_collect.py *",
            "Empty parameter set in 'test' at line 3",
            "*= 1 error in *",
@@ -234,7 +234,7 @@ def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
            "*ValueError*42*",
            "*function2*",
            "*ValueError*42*",
-            "*2 error*",
+            "*2 errors*",
        ]
    )
    result.stdout.no_fnmatch_line("*xyz43*")
@@ -886,7 +886,7 @@ def test_errors_in_xfail_skip_expressions(testdir):
            " syntax error",
            markline,
            "SyntaxError: invalid syntax",
-            "*1 pass*2 error*",
+            "*1 pass*2 errors*",
        ]
    )

@@ -164,7 +164,7 @@ def test_stop_on_collection_errors(broken_testdir, broken_first):
    if broken_first:
        files.reverse()
    result = broken_testdir.runpytest("-v", "--strict-markers", "--stepwise", *files)
-    result.stdout.fnmatch_lines("*errors during collection*")
+    result.stdout.fnmatch_lines("*error during collection*")


def test_xfail_handling(testdir):
@@ -1237,7 +1237,7 @@ def test_terminal_summary_warnings_header_once(testdir):
            "*= warnings summary =*",
            "*warning_from_test*",
            "*= short test summary info =*",
-            "*== 1 failed, 1 warnings in *",
+            "*== 1 failed, 1 warning in *",
        ]
    )
    result.stdout.no_fnmatch_line("*None*")
@@ -1263,6 +1263,7 @@ def test_terminal_summary_warnings_header_once(testdir):
            {"failed": (1,), "passed": (1,)},
        ),
        ("red", [("1 error", {"bold": True, "red": True})], {"error": (1,)}),
+        ("red", [("2 errors", {"bold": True, "red": True})], {"error": (1, 2)}),
        (
            "red",
            [
@@ -1281,16 +1282,12 @@ def test_terminal_summary_warnings_header_once(testdir):
            ],
            {"weird": (1,), "passed": (1,)},
        ),
-        (
-            "yellow",
-            [("1 warnings", {"bold": True, "yellow": True})],
-            {"warnings": (1,)},
-        ),
+        ("yellow", [("1 warning", {"bold": True, "yellow": True})], {"warnings": (1,)}),
        (
            "yellow",
            [
                ("1 passed", {"bold": False, "green": True}),
-                ("1 warnings", {"bold": True, "yellow": True}),
+                ("1 warning", {"bold": True, "yellow": True}),
            ],
            {"warnings": (1,), "passed": (1,)},
        ),
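The parametrized cases above pin down the exact singular and plural fragments expected from `build_summary_stats_line` for a given stats mapping. As a loose, self-contained analogue of this table-driven style (the helper and test below are invented for illustration and are not part of the pytest suite):

import pytest


def pluralize(count, noun):
    # Illustrative helper mirroring the behaviour under test.
    return "%d %s%s" % (count, noun, "" if count == 1 else "s")


@pytest.mark.parametrize(
    "count, expected",
    [(0, "0 errors"), (1, "1 error"), (2, "2 errors")],
)
def test_pluralize_error(count, expected):
    assert pluralize(count, "error") == expected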
@@ -142,7 +142,7 @@ def test_unicode(testdir, pyfile_with_warnings):
        [
            "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
            "*test_unicode.py:7: UserWarning: \u6d4b\u8bd5*",
-            "* 1 passed, 1 warnings*",
+            "* 1 passed, 1 warning*",
        ]
    )

@@ -201,7 +201,7 @@ def test_filterwarnings_mark(testdir, default_config):
        """
    )
    result = testdir.runpytest("-W always" if default_config == "cmdline" else "")
-    result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warnings in *"])
+    result.stdout.fnmatch_lines(["*= 1 failed, 2 passed, 1 warning in *"])


def test_non_string_warning_argument(testdir):
@@ -216,7 +216,7 @@ def test_non_string_warning_argument(testdir):
        """
    )
    result = testdir.runpytest("-W", "always")
-    result.stdout.fnmatch_lines(["*= 1 passed, 1 warnings in *"])
+    result.stdout.fnmatch_lines(["*= 1 passed, 1 warning in *"])


def test_filterwarnings_mark_registration(testdir):
@@ -302,7 +302,7 @@ def test_collection_warnings(testdir):
            "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
            " *collection_warnings.py:3: UserWarning: collection warning",
            ' warnings.warn(UserWarning("collection warning"))',
-            "* 1 passed, 1 warnings*",
+            "* 1 passed, 1 warning*",
        ]
    )

@@ -358,7 +358,7 @@ def test_hide_pytest_internal_warnings(testdir, ignore_pytest_warnings):
        [
            "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
            "*test_hide_pytest_internal_warnings.py:4: PytestWarning: some internal warning",
-            "* 1 passed, 1 warnings *",
+            "* 1 passed, 1 warning *",
        ]
    )

@@ -476,7 +476,7 @@ class TestDeprecationWarningsByDefault:
            [
                "*== %s ==*" % WARNINGS_SUMMARY_HEADER,
                "*test_hidden_by_mark.py:3: DeprecationWarning: collection",
-                "* 1 passed, 1 warnings*",
+                "* 1 passed, 1 warning*",
            ]
        )
