New --xfail-tb flag (#12280)

Fix #12231
Brian Okken 2024-05-07 10:37:00 -07:00 committed by GitHub
parent 4080459f04
commit 8efbefb2d7
3 changed files with 83 additions and 34 deletions

changelog/12231.feature.rst

@@ -0,0 +1,11 @@
+Added `--xfail-tb` flag, which turns on traceback output for XFAIL results.
+
+* If the `--xfail-tb` flag is not given, tracebacks for XFAIL results are NOT shown.
+* The traceback style for XFAIL is set with `--tb`, and can be `auto|long|short|line|native|no`.
+* Note: even with `--xfail-tb` set, tracebacks are still suppressed if `--tb=no`.
+
+Some history:
+
+With pytest 8.0, `-rx` or `-ra` would not only turn on summary reports for xfail, but also report tracebacks for xfail results. This caused issues for some projects that use xfail but don't want to see all of the xfail tracebacks.
+
+This change detaches xfail tracebacks from `-rx`; xfail tracebacks are now turned on with `--xfail-tb`. With this, the default `-rx`/`-ra` behavior is identical to pre-8.0 with respect to xfail tracebacks. While this is a behavior change, restoring the pre-8.0 default was ultimately considered the better course of action.
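
To illustrate the behavior described above, here is a minimal sketch; the demo file and test names are hypothetical, not part of this commit:

    # test_xfail_demo.py -- hypothetical demo file, not part of this commit
    import pytest

    @pytest.mark.xfail
    def test_expected_failure():
        assert 1 == 2

    # Assumed invocations, per the changelog above:
    #   pytest test_xfail_demo.py -rx                  -> XFAIL summary line, no traceback
    #   pytest test_xfail_demo.py --xfail-tb           -> XFAILURES section, styled by --tb
    #   pytest test_xfail_demo.py --xfail-tb --tb=no   -> tracebacks suppressed despite --xfail-tb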

src/_pytest/terminal.py

@@ -216,6 +216,13 @@ def pytest_addoption(parser: Parser) -> None:
         choices=["auto", "long", "short", "no", "line", "native"],
         help="Traceback print mode (auto/long/short/line/native/no)",
     )
+    group._addoption(
+        "--xfail-tb",
+        action="store_true",
+        dest="xfail_tb",
+        default=False,
+        help="Show tracebacks for xfail (as long as --tb != no)",
+    )
     group._addoption(
         "--show-capture",
         action="store",
@@ -1086,21 +1093,29 @@ class TerminalReporter:
             self._tw.line(content)
 
     def summary_failures(self) -> None:
-        self.summary_failures_combined("failed", "FAILURES")
+        style = self.config.option.tbstyle
+        self.summary_failures_combined("failed", "FAILURES", style=style)
 
     def summary_xfailures(self) -> None:
-        self.summary_failures_combined("xfailed", "XFAILURES", "x")
+        show_tb = self.config.option.xfail_tb
+        style = self.config.option.tbstyle if show_tb else "no"
+        self.summary_failures_combined("xfailed", "XFAILURES", style=style)
 
     def summary_failures_combined(
-        self, which_reports: str, sep_title: str, needed_opt: Optional[str] = None
+        self,
+        which_reports: str,
+        sep_title: str,
+        *,
+        style: str,
+        needed_opt: Optional[str] = None,
     ) -> None:
-        if self.config.option.tbstyle != "no":
+        if style != "no":
            if not needed_opt or self.hasopt(needed_opt):
                 reports: List[BaseReport] = self.getreports(which_reports)
                 if not reports:
                     return
                 self.write_sep("=", sep_title)
-                if self.config.option.tbstyle == "line":
+                if style == "line":
                     for rep in reports:
                         line = self._getcrashline(rep)
                         self.write_line(line)
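
The heart of the change is the style resolution in summary_xfailures(): --xfail-tb decides whether the xfail summary uses the configured --tb style or is suppressed entirely. A standalone sketch of that decision (the helper name is illustrative, not from this commit):

    def resolve_xfail_style(tbstyle: str, xfail_tb: bool) -> str:
        # Mirrors summary_xfailures() above: without --xfail-tb,
        # xfail tracebacks are forced to "no" regardless of --tb.
        return tbstyle if xfail_tb else "no"

    assert resolve_xfail_style("auto", False) == "no"     # default: no xfail tracebacks
    assert resolve_xfail_style("short", True) == "short"  # --xfail-tb honors --tb style
    assert resolve_xfail_style("no", True) == "no"        # --tb=no still suppresses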

testing/test_terminal.py

@@ -2909,54 +2909,77 @@ def test_summary_xfail_reason(pytester: Pytester) -> None:
     assert result.stdout.lines.count(expect2) == 1
 
 
-def test_summary_xfail_tb(pytester: Pytester) -> None:
-    pytester.makepyfile(
+@pytest.fixture()
+def xfail_testfile(pytester: Pytester) -> Path:
+    return pytester.makepyfile(
         """
         import pytest
 
-        @pytest.mark.xfail
-        def test_xfail():
+        def test_fail():
             a, b = 1, 2
             assert a == b
+
+        @pytest.mark.xfail
+        def test_xfail():
+            c, d = 3, 4
+            assert c == d
         """
     )
-    result = pytester.runpytest("-rx")
+
+
+def test_xfail_tb_default(xfail_testfile, pytester: Pytester) -> None:
+    result = pytester.runpytest(xfail_testfile)
+
+    # test_fail, show traceback
     result.stdout.fnmatch_lines(
         [
+            "*= FAILURES =*",
+            "*_ test_fail _*",
+            "*def test_fail():*",
+            "* a, b = 1, 2*",
+            "*> assert a == b*",
+            "*E assert 1 == 2*",
+        ]
+    )
+
+    # test_xfail, don't show traceback
+    result.stdout.no_fnmatch_line("*= XFAILURES =*")
+
+
+def test_xfail_tb_true(xfail_testfile, pytester: Pytester) -> None:
+    result = pytester.runpytest(xfail_testfile, "--xfail-tb")
+
+    # both test_fail and test_xfail, show traceback
+    result.stdout.fnmatch_lines(
+        [
+            "*= FAILURES =*",
+            "*_ test_fail _*",
+            "*def test_fail():*",
+            "* a, b = 1, 2*",
+            "*> assert a == b*",
+            "*E assert 1 == 2*",
             "*= XFAILURES =*",
             "*_ test_xfail _*",
-            "* @pytest.mark.xfail*",
-            "* def test_xfail():*",
-            "* a, b = 1, 2*",
-            "> *assert a == b*",
-            "E *assert 1 == 2*",
-            "test_summary_xfail_tb.py:6: AssertionError*",
-            "*= short test summary info =*",
-            "XFAIL test_summary_xfail_tb.py::test_xfail",
-            "*= 1 xfailed in * =*",
+            "*def test_xfail():*",
+            "* c, d = 3, 4*",
+            "*> assert c == d*",
+            "*E assert 3 == 4*",
+            "*short test summary info*",
         ]
     )
 
 
-def test_xfail_tb_line(pytester: Pytester) -> None:
-    pytester.makepyfile(
-        """
-        import pytest
-
-        @pytest.mark.xfail
-        def test_xfail():
-            a, b = 1, 2
-            assert a == b
-        """
-    )
-    result = pytester.runpytest("-rx", "--tb=line")
+def test_xfail_tb_line(xfail_testfile, pytester: Pytester) -> None:
+    result = pytester.runpytest(xfail_testfile, "--xfail-tb", "--tb=line")
+
+    # both test_fail and test_xfail, show line
     result.stdout.fnmatch_lines(
         [
+            "*= FAILURES =*",
+            "*test_xfail_tb_line.py:5: assert 1 == 2",
             "*= XFAILURES =*",
-            "*test_xfail_tb_line.py:6: assert 1 == 2",
-            "*= short test summary info =*",
-            "XFAIL test_xfail_tb_line.py::test_xfail",
-            "*= 1 xfailed in * =*",
+            "*test_xfail_tb_line.py:10: assert 3 == 4",
+            "*short test summary info*",
         ]
     )