Merge pull request #7396 from gnikonorov/issue_7295
Refactor src/_pytest/config/__init__.py to use the warnings module instead of stderr for warnings
This change is contained in commit e6e300e729.
@ -0,0 +1 @@
|
|||
``src/_pytest/config/__init__.py`` now uses the ``warnings`` module to report warnings instead of ``sys.stderr.write``.
|
|
@ -1038,7 +1038,7 @@ class Config:
|
|||
mode = "plain"
|
||||
else:
|
||||
self._mark_plugins_for_rewrite(hook)
|
||||
_warn_about_missing_assertion(mode)
|
||||
self._warn_about_missing_assertion(mode)
|
||||
|
||||
def _mark_plugins_for_rewrite(self, hook) -> None:
|
||||
"""
|
||||
|
@ -1182,7 +1182,12 @@ class Config:
|
|||
def _warn_or_fail_if_strict(self, message: str) -> None:
|
||||
if self.known_args_namespace.strict_config:
|
||||
fail(message, pytrace=False)
|
||||
sys.stderr.write("WARNING: {}".format(message))
|
||||
|
||||
from _pytest.warnings import _issue_warning_captured
|
||||
|
||||
_issue_warning_captured(
|
||||
PytestConfigWarning(message), self.hook, stacklevel=3,
|
||||
)
|
||||
|
||||
def _get_unknown_ini_keys(self) -> List[str]:
|
||||
parser_inicfg = self._parser._inidict
|
||||
|
@ -1351,6 +1356,28 @@ class Config:
|
|||
""" (deprecated, use getoption(skip=True)) """
|
||||
return self.getoption(name, skip=True)
|
||||
|
||||
def _warn_about_missing_assertion(self, mode: str) -> None:
|
||||
if not _assertion_supported():
|
||||
from _pytest.warnings import _issue_warning_captured
|
||||
|
||||
if mode == "plain":
|
||||
warning_text = (
|
||||
"ASSERTIONS ARE NOT EXECUTED"
|
||||
" and FAILING TESTS WILL PASS. Are you"
|
||||
" using python -O?"
|
||||
)
|
||||
else:
|
||||
warning_text = (
|
||||
"assertions not in test modules or"
|
||||
" plugins will be ignored"
|
||||
" because assert statements are not executed "
|
||||
"by the underlying Python interpreter "
|
||||
"(are you using python -O?)\n"
|
||||
)
|
||||
_issue_warning_captured(
|
||||
PytestConfigWarning(warning_text), self.hook, stacklevel=3,
|
||||
)
|
||||
|
||||
|
||||
def _assertion_supported() -> bool:
|
||||
try:
|
||||
|
@ -1361,24 +1388,6 @@ def _assertion_supported() -> bool:
|
|||
return False
|
||||
|
||||
|
||||
def _warn_about_missing_assertion(mode) -> None:
|
||||
if not _assertion_supported():
|
||||
if mode == "plain":
|
||||
sys.stderr.write(
|
||||
"WARNING: ASSERTIONS ARE NOT EXECUTED"
|
||||
" and FAILING TESTS WILL PASS. Are you"
|
||||
" using python -O?"
|
||||
)
|
||||
else:
|
||||
sys.stderr.write(
|
||||
"WARNING: assertions not in test modules or"
|
||||
" plugins will be ignored"
|
||||
" because assert statements are not executed "
|
||||
"by the underlying Python interpreter "
|
||||
"(are you using python -O?)\n"
|
||||
)
|
||||
|
||||
|
||||
def create_terminal_writer(
|
||||
config: Config, file: Optional[TextIO] = None
|
||||
) -> TerminalWriter:
|
||||
|
|
|
@ -1384,12 +1384,35 @@ def test_exception_handling_no_traceback(testdir):
|
|||
|
||||
|
||||
@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names")
|
||||
def test_warn_missing(testdir):
|
||||
@pytest.mark.parametrize(
|
||||
"cmdline_args, warning_output",
|
||||
[
|
||||
(
|
||||
["-OO", "-m", "pytest", "-h"],
|
||||
["warning :*PytestConfigWarning:*assert statements are not executed*"],
|
||||
),
|
||||
(
|
||||
["-OO", "-m", "pytest"],
|
||||
[
|
||||
"=*= warnings summary =*=",
|
||||
"*PytestConfigWarning:*assert statements are not executed*",
|
||||
],
|
||||
),
|
||||
(
|
||||
["-OO", "-m", "pytest", "--assert=plain"],
|
||||
[
|
||||
"=*= warnings summary =*=",
|
||||
"*PytestConfigWarning: ASSERTIONS ARE NOT EXECUTED and FAILING TESTS WILL PASS. "
|
||||
"Are you using python -O?",
|
||||
],
|
||||
),
|
||||
],
|
||||
)
|
||||
def test_warn_missing(testdir, cmdline_args, warning_output):
|
||||
testdir.makepyfile("")
|
||||
result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h")
|
||||
result.stderr.fnmatch_lines(["*WARNING*assert statements are not executed*"])
|
||||
result = testdir.run(sys.executable, "-OO", "-m", "pytest")
|
||||
result.stderr.fnmatch_lines(["*WARNING*assert statements are not executed*"])
|
||||
|
||||
result = testdir.run(sys.executable, *cmdline_args)
|
||||
result.stdout.fnmatch_lines(warning_output)
|
||||
|
||||
|
||||
def test_recursion_source_decode(testdir):
|
||||
|
|
|
@ -164,7 +164,7 @@ class TestParseIni:
|
|||
assert result.ret == 0
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
"ini_file_text, invalid_keys, stderr_output, exception_text",
|
||||
"ini_file_text, invalid_keys, warning_output, exception_text",
|
||||
[
|
||||
(
|
||||
"""
|
||||
|
@ -174,8 +174,9 @@ class TestParseIni:
|
|||
""",
|
||||
["unknown_ini", "another_unknown_ini"],
|
||||
[
|
||||
"WARNING: Unknown config ini key: another_unknown_ini",
|
||||
"WARNING: Unknown config ini key: unknown_ini",
|
||||
"=*= warnings summary =*=",
|
||||
"*PytestConfigWarning:*Unknown config ini key: another_unknown_ini",
|
||||
"*PytestConfigWarning:*Unknown config ini key: unknown_ini",
|
||||
],
|
||||
"Unknown config ini key: another_unknown_ini",
|
||||
),
|
||||
|
@ -186,7 +187,10 @@ class TestParseIni:
|
|||
minversion = 5.0.0
|
||||
""",
|
||||
["unknown_ini"],
|
||||
["WARNING: Unknown config ini key: unknown_ini"],
|
||||
[
|
||||
"=*= warnings summary =*=",
|
||||
"*PytestConfigWarning:*Unknown config ini key: unknown_ini",
|
||||
],
|
||||
"Unknown config ini key: unknown_ini",
|
||||
),
|
||||
(
|
||||
|
@ -221,7 +225,7 @@ class TestParseIni:
|
|||
],
|
||||
)
|
||||
def test_invalid_ini_keys(
|
||||
self, testdir, ini_file_text, invalid_keys, stderr_output, exception_text
|
||||
self, testdir, ini_file_text, invalid_keys, warning_output, exception_text
|
||||
):
|
||||
testdir.makeconftest(
|
||||
"""
|
||||
|
@ -235,7 +239,7 @@ class TestParseIni:
|
|||
assert sorted(config._get_unknown_ini_keys()) == sorted(invalid_keys)
|
||||
|
||||
result = testdir.runpytest()
|
||||
result.stderr.fnmatch_lines(stderr_output)
|
||||
result.stdout.fnmatch_lines(warning_output)
|
||||
|
||||
if exception_text:
|
||||
with pytest.raises(pytest.fail.Exception, match=exception_text):
|
||||
|
|
Loading…
Reference in New Issue