diff --git a/_pytest/skipping.py b/_pytest/skipping.py
index 07359b5c2..55a24ddb9 100644
--- a/_pytest/skipping.py
+++ b/_pytest/skipping.py
@@ -43,13 +43,13 @@ def pytest_configure(config):
         "http://pytest.org/latest/skipping.html"
     )
     config.addinivalue_line("markers",
-        "xfail(condition, reason=None, run=True, raises=None): mark the the test function "
-        "as an expected failure if eval(condition) has a True value. "
-        "Optionally specify a reason for better reporting and run=False if "
-        "you don't even want to execute the test function. If only specific "
-        "exception(s) are expected, you can list them in raises, and if the test fails "
-        "in other ways, it will be reported as a true failure. "
-        "See http://pytest.org/latest/skipping.html"
+        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
+        "mark the test function as an expected failure if eval(condition) "
+        "has a True value. Optionally specify a reason for better reporting "
+        "and run=False if you don't even want to execute the test function. "
+        "If only specific exception(s) are expected, you can list them in "
+        "raises, and if the test fails in other ways, it will be reported as "
+        "a true failure. See http://pytest.org/latest/skipping.html"
     )


diff --git a/testing/test_skipping.py b/testing/test_skipping.py
index b872c8b6e..194c8692b 100644
--- a/testing/test_skipping.py
+++ b/testing/test_skipping.py
@@ -825,7 +825,7 @@ def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines([
         "*skipif(*condition)*skip*",
-        "*xfail(*condition, reason=None, run=True, raises=None)*expected failure*",
+        "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
     ])

 def test_xfail_test_setup_exception(testdir):
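
For reference, a minimal usage sketch of the marker whose help text this diff updates. With strict=True an unexpected pass (XPASS) is reported as a failure; the test name, reason, and exception below are illustrative only, not part of the change.

    import pytest

    # Expected to fail with ValueError; strict=True turns an XPASS into a failure.
    @pytest.mark.xfail(reason="known bug in the parser", raises=ValueError, strict=True)
    def test_parse_invalid_input():
        raise ValueError("parser cannot handle this input yet")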