Use new no-match functions to replace previous idiom

Bruno Oliveira 2019-10-05 14:18:51 -03:00
parent 0c18e24433
commit 47c2091ecd
21 changed files with 77 additions and 76 deletions
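
The change is mechanical across the suite: the old idiom asserts that a substring occurs nowhere in the captured output, while the new LineMatcher helpers check line by line and, on failure, report the offending line rather than a bare AssertionError. A minimal before/after sketch of the pattern applied throughout this commit (both replacement calls below appear in the hunks that follow):

# Old idiom: plain substring check against the whole captured output.
assert "INTERNALERROR" not in result.stdout.str()

# New helpers: checked per line, with a more useful failure message.
result.stdout.no_fnmatch_line("*INTERNALERROR*")  # fnmatch-style glob pattern
result.stdout.no_re_match_line("DEBUG")           # regular expression variant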

View File

@@ -246,7 +246,7 @@ class TestGeneralUsage:
)
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
assert "should not be seen" not in result.stdout.str()
result.stdout.no_fnmatch_line("*should not be seen*")
assert "stderr42" not in result.stderr.str()
def test_conftest_printing_shows_if_error(self, testdir):
@@ -954,7 +954,7 @@ class TestDurations:
result.stdout.fnmatch_lines(["*Interrupted: 1 errors during collection*"])
# Collection errors abort test execution, therefore no duration is
# output
assert "duration" not in result.stdout.str()
result.stdout.no_fnmatch_line("*duration*")
def test_with_not(self, testdir):
testdir.makepyfile(self.source)
@@ -1008,7 +1008,7 @@ def test_zipimport_hook(testdir, tmpdir):
result = testdir.runpython(target)
assert result.ret == 0
result.stderr.fnmatch_lines(["*not found*foo*"])
assert "INTERNALERROR>" not in result.stdout.str()
result.stdout.no_fnmatch_line("*INTERNALERROR>*")
def test_import_plugin_unicode_name(testdir):
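
Note the wildcard wrapping in the converted calls above: no_fnmatch_line matches each output line as a whole, so the old substrings are wrapped in "*...*" to preserve the match-anywhere semantics. A quick illustration with the standard library's fnmatch module (which the helper's name suggests it is built on):

import fnmatch

line = "some output - should not be seen - more text"
fnmatch.fnmatch(line, "should not be seen")    # False: pattern must cover the whole line
fnmatch.fnmatch(line, "*should not be seen*")  # True: wildcards allow a partial match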

View File

@@ -399,7 +399,7 @@ def test_match_raises_error(testdir):
result = testdir.runpytest()
assert result.ret != 0
result.stdout.fnmatch_lines(["*AssertionError*Pattern*[123]*not found*"])
assert "__tracebackhide__ = True" not in result.stdout.str()
result.stdout.no_fnmatch_line("*__tracebackhide__ = True*")
result = testdir.runpytest("--fulltrace")
assert result.ret != 0
@@ -1343,7 +1343,8 @@ def test_cwd_deleted(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["* 1 failed in *"])
assert "INTERNALERROR" not in result.stdout.str() + result.stderr.str()
result.stdout.no_fnmatch_line("*INTERNALERROR*")
result.stderr.no_fnmatch_line("*INTERNALERROR*")
@pytest.mark.usefixtures("limited_recursion_depth")

View File

@@ -46,7 +46,7 @@ def test_change_level_undo(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*log from test1*", "*2 failed in *"])
assert "log from test2" not in result.stdout.str()
result.stdout.no_fnmatch_line("*log from test2*")
def test_with_statement(caplog):

View File

@@ -109,7 +109,7 @@ def test_log_cli_level_log_level_interaction(testdir):
"=* 1 failed in *=",
]
)
assert "DEBUG" not in result.stdout.str()
result.stdout.no_re_match_line("DEBUG")
def test_setup_logging(testdir):
@@ -282,7 +282,7 @@ def test_log_cli_default_level(testdir):
"WARNING*test_log_cli_default_level.py* message will be shown*",
]
)
assert "INFO message won't be shown" not in result.stdout.str()
result.stdout.no_fnmatch_line("*INFO message won't be shown*")
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
@@ -566,7 +566,7 @@ def test_log_cli_level(testdir):
"PASSED", # 'PASSED' on its own line because the log message prints a new line
]
)
assert "This log message won't be shown" not in result.stdout.str()
result.stdout.no_fnmatch_line("*This log message won't be shown*")
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
@@ -580,7 +580,7 @@ def test_log_cli_level(testdir):
"PASSED", # 'PASSED' on its own line because the log message prints a new line
]
)
assert "This log message won't be shown" not in result.stdout.str()
result.stdout.no_fnmatch_line("*This log message won't be shown*")
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
@@ -616,7 +616,7 @@ def test_log_cli_ini_level(testdir):
"PASSED", # 'PASSED' on its own line because the log message prints a new line
]
)
assert "This log message won't be shown" not in result.stdout.str()
result.stdout.no_fnmatch_line("*This log message won't be shown*")
# make sure that we get a '0' exit code for the testsuite
assert result.ret == 0
@@ -942,7 +942,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
]
)
elif verbose == "-q":
assert "collected 1 item*" not in result.stdout.str()
result.stdout.no_fnmatch_line("*collected 1 item**")
expected_lines.extend(
[
"*test_collection_collect_only_live_logging.py::test_simple*",
@@ -950,7 +950,7 @@ def test_collection_collect_only_live_logging(testdir, verbose):
]
)
elif verbose == "-qq":
assert "collected 1 item*" not in result.stdout.str()
result.stdout.no_fnmatch_line("*collected 1 item**")
expected_lines.extend(["*test_collection_collect_only_live_logging.py: 1*"])
result.stdout.fnmatch_lines(expected_lines)
@@ -983,7 +983,7 @@ def test_collection_logging_to_file(testdir):
result = testdir.runpytest()
assert "--- live log collection ---" not in result.stdout.str()
result.stdout.no_fnmatch_line("*--- live log collection ---*")
assert result.ret == 0
assert os.path.isfile(log_file)
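
One conversion in this file uses no_re_match_line("DEBUG") rather than the fnmatch variant. Assuming it mirrors the existing re_match_lines helper and applies re.match to each line, the pattern is anchored at the start of every line, which suffices here because live-log lines begin with the level name:

import re

line = "DEBUG some debug message"
bool(re.match("DEBUG", line))    # True: re.match anchors at the start of the string
bool(re.match("message", line))  # False: a mid-line match would need ".*message"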

View File

@@ -1139,7 +1139,7 @@ def test_unorderable_types(testdir):
"""
)
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
result.stdout.no_fnmatch_line("*TypeError*")
assert result.ret == ExitCode.NO_TESTS_COLLECTED

View File

@@ -455,7 +455,7 @@ class TestFillFixtures:
"*1 error*",
]
)
assert "INTERNAL" not in result.stdout.str()
result.stdout.no_fnmatch_line("*INTERNAL*")
def test_fixture_excinfo_leak(self, testdir):
# on python2 sys.excinfo would leak into fixture executions
@@ -2647,7 +2647,7 @@ class TestFixtureMarker:
*3 passed*
"""
)
assert "error" not in result.stdout.str()
result.stdout.no_fnmatch_line("*error*")
def test_fixture_finalizer(self, testdir):
testdir.makeconftest(
@@ -3151,7 +3151,7 @@ class TestShowFixtures:
*hello world*
"""
)
assert "arg0" not in result.stdout.str()
result.stdout.no_fnmatch_line("*arg0*")
@pytest.mark.parametrize("testmod", [True, False])
def test_show_fixtures_conftest(self, testdir, testmod):

View File

@@ -27,7 +27,7 @@ def test_show_only_active_fixtures(testdir, mode, dummy_yaml_custom_test):
result.stdout.fnmatch_lines(
["*SETUP F arg1*", "*test_arg1 (fixtures used: arg1)*", "*TEARDOWN F arg1*"]
)
assert "_arg0" not in result.stdout.str()
result.stdout.no_fnmatch_line("*_arg0*")
def test_show_different_scopes(testdir, mode):

View File

@@ -1,6 +1,6 @@
def test_no_items_should_not_show_output(testdir):
result = testdir.runpytest("--fixtures-per-test")
assert "fixtures used by" not in result.stdout.str()
result.stdout.no_fnmatch_line("*fixtures used by*")
assert result.ret == 0
@@ -30,7 +30,7 @@ def test_fixtures_in_module(testdir):
" arg1 docstring",
]
)
assert "_arg0" not in result.stdout.str()
result.stdout.no_fnmatch_line("*_arg0*")
def test_fixtures_in_conftest(testdir):

View File

@@ -1034,7 +1034,7 @@ def test_assertion_options(testdir):
result = testdir.runpytest()
assert "3 == 4" in result.stdout.str()
result = testdir.runpytest_subprocess("--assert=plain")
assert "3 == 4" not in result.stdout.str()
result.stdout.no_fnmatch_line("*3 == 4*")
def test_triple_quoted_string_issue113(testdir):
@@ -1046,7 +1046,7 @@ def test_triple_quoted_string_issue113(testdir):
)
result = testdir.runpytest("--fulltrace")
result.stdout.fnmatch_lines(["*1 failed*"])
assert "SyntaxError" not in result.stdout.str()
result.stdout.no_fnmatch_line("*SyntaxError*")
def test_traceback_failure(testdir):

View File

@@ -914,7 +914,7 @@ def test_rewritten():
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed in *=*"])
assert "pytest-warning summary" not in result.stdout.str()
result.stdout.no_fnmatch_line("*pytest-warning summary*")
def test_rewrite_warning_using_pytest_plugins_env_var(self, testdir, monkeypatch):
monkeypatch.setenv("PYTEST_PLUGINS", "plugin")
@@ -932,7 +932,7 @@ def test_rewritten():
testdir.chdir()
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*= 1 passed in *=*"])
assert "pytest-warning summary" not in result.stdout.str()
result.stdout.no_fnmatch_line("*pytest-warning summary*")
class TestAssertionRewriteHookDetails:
@@ -1124,7 +1124,7 @@ def test_issue731(testdir):
"""
)
result = testdir.runpytest()
assert "unbalanced braces" not in result.stdout.str()
result.stdout.no_fnmatch_line("*unbalanced braces*")
class TestIssue925:

View File

@@ -327,7 +327,7 @@ class TestLastFailed:
result = testdir.runpytest("--lf", "--ff")
# Test order will be failing tests first
result.stdout.fnmatch_lines(["test_b.py*"])
assert "test_a.py" not in result.stdout.str()
result.stdout.no_fnmatch_line("*test_a.py*")
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", "1")
@@ -660,11 +660,11 @@ class TestLastFailed:
if quiet:
args.append("-q")
result = testdir.runpytest(*args)
assert "run all" not in result.stdout.str()
result.stdout.no_fnmatch_line("*run all*")
result = testdir.runpytest(*args)
if quiet:
assert "run all" not in result.stdout.str()
result.stdout.no_fnmatch_line("*run all*")
else:
assert "rerun previous" in result.stdout.str()

View File

@@ -609,12 +609,12 @@ class TestCaptureFixture:
*while capture is disabled*
"""
)
assert "captured before" not in result.stdout.str()
assert "captured after" not in result.stdout.str()
result.stdout.no_fnmatch_line("*captured before*")
result.stdout.no_fnmatch_line("*captured after*")
if no_capture:
assert "test_normal executed" in result.stdout.str()
else:
assert "test_normal executed" not in result.stdout.str()
result.stdout.no_fnmatch_line("*test_normal executed*")
@pytest.mark.parametrize("fixture", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures(self, testdir, fixture):
@@ -650,8 +650,8 @@ class TestCaptureFixture:
)
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines(["*1 passed*"])
assert "stdout contents begin" not in result.stdout.str()
assert "stderr contents begin" not in result.stdout.str()
result.stdout.no_fnmatch_line("*stdout contents begin*")
result.stdout.no_fnmatch_line("*stderr contents begin*")
@pytest.mark.parametrize("cap", ["capsys", "capfd"])
def test_fixture_use_by_other_fixtures_teardown(self, testdir, cap):
@@ -721,7 +721,7 @@ def test_capture_conftest_runtest_setup(testdir):
testdir.makepyfile("def test_func(): pass")
result = testdir.runpytest()
assert result.ret == 0
assert "hello19" not in result.stdout.str()
result.stdout.no_fnmatch_line("*hello19*")
def test_capture_badoutput_issue412(testdir):
@@ -1388,7 +1388,7 @@ def test_crash_on_closing_tmpfile_py27(testdir):
result = testdir.runpytest_subprocess(str(p))
assert result.ret == 0
assert result.stderr.str() == ""
assert "IOError" not in result.stdout.str()
result.stdout.no_fnmatch_line("*IOError*")
def test_pickling_and_unpickling_encoded_file():

View File

@@ -139,7 +139,7 @@ class TestCollectFS:
# by default, ignore tests inside a virtualenv
result = testdir.runpytest()
assert "test_invenv" not in result.stdout.str()
result.stdout.no_fnmatch_line("*test_invenv*")
# allow test collection if user insists
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" in result.stdout.str()
@@ -165,7 +165,7 @@ class TestCollectFS:
testfile = testdir.tmpdir.ensure(".virtual", "test_invenv.py")
testfile.write("def test_hello(): pass")
result = testdir.runpytest("--collect-in-virtualenv")
assert "test_invenv" not in result.stdout.str()
result.stdout.no_fnmatch_line("*test_invenv*")
# ...unless the virtualenv is explicitly given on the CLI
result = testdir.runpytest("--collect-in-virtualenv", ".virtual")
assert "test_invenv" in result.stdout.str()
@@ -364,7 +364,7 @@ class TestCustomConftests:
testdir.makepyfile(test_world="def test_hello(): pass")
result = testdir.runpytest()
assert result.ret == ExitCode.NO_TESTS_COLLECTED
assert "passed" not in result.stdout.str()
result.stdout.no_fnmatch_line("*passed*")
result = testdir.runpytest("--XX")
assert result.ret == 0
assert "passed" in result.stdout.str()
@@ -857,7 +857,7 @@ def test_exit_on_collection_with_maxfail_smaller_than_n_errors(testdir):
["*ERROR collecting test_02_import_error.py*", "*No module named *asdfa*"]
)
assert "test_03" not in res.stdout.str()
res.stdout.no_fnmatch_line("*test_03*")
def test_exit_on_collection_with_maxfail_bigger_than_n_errors(testdir):
@@ -996,12 +996,12 @@ def test_collect_init_tests(testdir):
result.stdout.fnmatch_lines(
["<Package */tests>", " <Module test_foo.py>", " <Function test_foo>"]
)
assert "test_init" not in result.stdout.str()
result.stdout.no_fnmatch_line("*test_init*")
result = testdir.runpytest("./tests/__init__.py", "--collect-only")
result.stdout.fnmatch_lines(
["<Package */tests>", " <Module __init__.py>", " <Function test_init>"]
)
assert "test_foo" not in result.stdout.str()
result.stdout.no_fnmatch_line("*test_foo*")
def test_collect_invalid_signature_message(testdir):

View File

@@ -187,7 +187,7 @@ def test_conftest_confcutdir(testdir):
)
result = testdir.runpytest("-h", "--confcutdir=%s" % x, x)
result.stdout.fnmatch_lines(["*--xyz*"])
assert "warning: could not load initial" not in result.stdout.str()
result.stdout.no_fnmatch_line("*warning: could not load initial*")
@pytest.mark.skipif(
@@ -648,5 +648,5 @@ def test_required_option_help(testdir):
)
)
result = testdir.runpytest("-h", x)
assert "argument --xyz is required" not in result.stdout.str()
result.stdout.no_fnmatch_line("*argument --xyz is required*")
assert "general:" in result.stdout.str()

View File

@@ -239,8 +239,8 @@ class TestDoctests:
]
)
# lines below should be trimmed out
assert "text-line-2" not in result.stdout.str()
assert "text-line-after" not in result.stdout.str()
result.stdout.no_fnmatch_line("*text-line-2*")
result.stdout.no_fnmatch_line("*text-line-after*")
def test_docstring_full_context_around_error(self, testdir):
"""Test that we show the whole context before the actual line of a failing
@@ -1177,7 +1177,7 @@ class TestDoctestAutoUseFixtures:
"""
)
result = testdir.runpytest("--doctest-modules")
assert "FAILURES" not in str(result.stdout.str())
result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])
@pytest.mark.parametrize("scope", SCOPES)
@@ -1209,7 +1209,7 @@ class TestDoctestAutoUseFixtures:
"""
)
result = testdir.runpytest("--doctest-modules")
assert "FAILURES" not in str(result.stdout.str())
result.stdout.no_fnmatch_line("*FAILURES*")
result.stdout.fnmatch_lines(["*=== 1 passed in *"])

View File

@@ -1216,7 +1216,7 @@ def test_runs_twice(testdir, run_and_parse):
)
result, dom = run_and_parse(f, f)
assert "INTERNALERROR" not in result.stdout.str()
result.stdout.no_fnmatch_line("*INTERNALERROR*")
first, second = [x["classname"] for x in dom.find_by_tag("testcase")]
assert first == second
@@ -1231,7 +1231,7 @@ def test_runs_twice_xdist(testdir, run_and_parse):
)
result, dom = run_and_parse(f, "--dist", "each", "--tx", "2*popen")
assert "INTERNALERROR" not in result.stdout.str()
result.stdout.no_fnmatch_line("*INTERNALERROR*")
first, second = [x["classname"] for x in dom.find_by_tag("testcase")]
assert first == second
@@ -1271,7 +1271,7 @@ def test_fancy_items_regression(testdir, run_and_parse):
result, dom = run_and_parse()
assert "INTERNALERROR" not in result.stdout.str()
result.stdout.no_fnmatch_line("*INTERNALERROR*")
items = sorted("%(classname)s %(name)s" % x for x in dom.find_by_tag("testcase"))
import pprint

View File

@@ -615,7 +615,7 @@ def test_pytest_fail_notrace_runtest(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["world", "hello"])
assert "def teardown_function" not in result.stdout.str()
result.stdout.no_fnmatch_line("*def teardown_function*")
def test_pytest_fail_notrace_collection(testdir):
@@ -630,7 +630,7 @@ def test_pytest_fail_notrace_collection(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["hello"])
assert "def some_internal_function()" not in result.stdout.str()
result.stdout.no_fnmatch_line("*def some_internal_function()*")
def test_pytest_fail_notrace_non_ascii(testdir):
@@ -648,7 +648,7 @@ def test_pytest_fail_notrace_non_ascii(testdir):
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*test_hello*", "oh oh: ☺"])
assert "def test_hello" not in result.stdout.str()
result.stdout.no_fnmatch_line("*def test_hello*")
def test_pytest_no_tests_collected_exit_status(testdir):
@@ -813,7 +813,7 @@ def test_failure_in_setup(testdir):
"""
)
result = testdir.runpytest("--tb=line")
assert "def setup_module" not in result.stdout.str()
result.stdout.no_fnmatch_line("*def setup_module*")
def test_makereport_getsource(testdir):
@@ -825,7 +825,7 @@ def test_makereport_getsource(testdir):
"""
)
result = testdir.runpytest()
assert "INTERNALERROR" not in result.stdout.str()
result.stdout.no_fnmatch_line("*INTERNALERROR*")
result.stdout.fnmatch_lines(["*else: assert False*"])
@@ -856,7 +856,7 @@ def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
"""
)
result = testdir.runpytest("-vv")
assert "INTERNALERROR" not in result.stdout.str()
result.stdout.no_fnmatch_line("*INTERNALERROR*")
result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"])

View File

@@ -237,7 +237,7 @@ def test_setup_funcarg_setup_when_outer_scope_fails(testdir):
"*2 error*",
]
)
assert "xyz43" not in result.stdout.str()
result.stdout.no_fnmatch_line("*xyz43*")
@pytest.mark.parametrize("arg", ["", "arg"])

View File

@@ -949,7 +949,7 @@ def test_xfail_test_setup_exception(testdir):
result = testdir.runpytest(p)
assert result.ret == 0
assert "xfailed" in result.stdout.str()
assert "xpassed" not in result.stdout.str()
result.stdout.no_fnmatch_line("*xpassed*")
def test_imperativeskip_on_xfail_test(testdir):

View File

@@ -204,7 +204,7 @@ class TestTerminal:
result = testdir.runpytest("-vv")
assert result.ret == 0
result.stdout.fnmatch_lines(["*a123/test_hello123.py*PASS*"])
assert " <- " not in result.stdout.str()
result.stdout.no_fnmatch_line("* <- *")
def test_keyboard_interrupt(self, testdir, option):
testdir.makepyfile(
@@ -559,7 +559,7 @@ class TestTerminalFunctional:
"*= 2 passed, 1 deselected in * =*",
]
)
assert "= 1 deselected =" not in result.stdout.str()
result.stdout.no_fnmatch_line("*= 1 deselected =*")
assert result.ret == 0
def test_no_skip_summary_if_failure(self, testdir):
@@ -759,7 +759,7 @@ def test_fail_extra_reporting(testdir, monkeypatch):
monkeypatch.setenv("COLUMNS", "80")
testdir.makepyfile("def test_this(): assert 0, 'this_failed' * 100")
result = testdir.runpytest()
assert "short test summary" not in result.stdout.str()
result.stdout.no_fnmatch_line("*short test summary*")
result = testdir.runpytest("-rf")
result.stdout.fnmatch_lines(
[
@@ -772,13 +772,13 @@ def test_fail_extra_reporting(testdir, monkeypatch):
def test_fail_reporting_on_pass(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("-rf")
assert "short test summary" not in result.stdout.str()
result.stdout.no_fnmatch_line("*short test summary*")
def test_pass_extra_reporting(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest()
assert "short test summary" not in result.stdout.str()
result.stdout.no_fnmatch_line("*short test summary*")
result = testdir.runpytest("-rp")
result.stdout.fnmatch_lines(["*test summary*", "PASS*test_pass_extra_reporting*"])
@@ -786,7 +786,7 @@ def test_pass_reporting_on_fail(testdir):
def test_pass_reporting_on_fail(testdir):
testdir.makepyfile("def test_this(): assert 0")
result = testdir.runpytest("-rp")
assert "short test summary" not in result.stdout.str()
result.stdout.no_fnmatch_line("*short test summary*")
def test_pass_output_reporting(testdir):
@@ -829,7 +829,7 @@ def test_color_no(testdir):
testdir.makepyfile("def test_this(): assert 1")
result = testdir.runpytest("--color=no")
assert "test session starts" in result.stdout.str()
assert "\x1b[1m" not in result.stdout.str()
result.stdout.no_fnmatch_line("*\x1b[1m*")
@pytest.mark.parametrize("verbose", [True, False])
@@ -851,7 +851,7 @@ def test_color_yes_collection_on_non_atty(testdir, verbose):
result = testdir.runpytest(*args)
assert "test session starts" in result.stdout.str()
assert "\x1b[1m" in result.stdout.str()
assert "collecting 10 items" not in result.stdout.str()
result.stdout.no_fnmatch_line("*collecting 10 items*")
if verbose:
assert "collecting ..." in result.stdout.str()
assert "collected 10 items" in result.stdout.str()
@@ -1214,7 +1214,7 @@ def test_terminal_summary_warnings_are_displayed(testdir):
"*== 1 failed, 2 warnings in *",
]
)
assert "None" not in result.stdout.str()
result.stdout.no_fnmatch_line("*None*")
stdout = result.stdout.str()
assert stdout.count("warning_from_test") == 1
assert stdout.count("=== warnings summary ") == 2
@@ -1239,7 +1239,7 @@ def test_terminal_summary_warnings_header_once(testdir):
"*== 1 failed, 1 warnings in *",
]
)
assert "None" not in result.stdout.str()
result.stdout.no_fnmatch_line("*None*")
stdout = result.stdout.str()
assert stdout.count("warning_from_test") == 1
assert stdout.count("=== warnings summary ") == 1
@@ -1402,7 +1402,7 @@ class TestProgressOutputStyle:
"""
)
output = testdir.runpytest()
assert "ZeroDivisionError" not in output.stdout.str()
output.stdout.no_fnmatch_line("*ZeroDivisionError*")
output.stdout.fnmatch_lines(["=* 2 passed in *="])
def test_normal(self, many_tests_files, testdir):
@@ -1494,7 +1494,7 @@ class TestProgressOutputStyle:
)
output = testdir.runpytest("--capture=no")
assert "%]" not in output.stdout.str()
output.stdout.no_fnmatch_line("*%]*")
class TestProgressWithTeardown:

View File

@@ -270,7 +270,7 @@ def test_setup_failure_is_shown(testdir):
result = testdir.runpytest("-s")
assert result.ret == 1
result.stdout.fnmatch_lines(["*setUp*", "*assert 0*down1*", "*1 failed*"])
assert "never42" not in result.stdout.str()
result.stdout.no_fnmatch_line("*never42*")
def test_setup_setUpClass(testdir):
@@ -342,7 +342,7 @@ def test_testcase_adderrorandfailure_defers(testdir, type):
% (type, type)
)
result = testdir.runpytest()
assert "should not raise" not in result.stdout.str()
result.stdout.no_fnmatch_line("*should not raise*")
@pytest.mark.parametrize("type", ["Error", "Failure"])
@@ -684,7 +684,7 @@ def test_unittest_not_shown_in_traceback(testdir):
"""
)
res = testdir.runpytest()
assert "failUnlessEqual" not in res.stdout.str()
res.stdout.no_fnmatch_line("*failUnlessEqual*")
def test_unorderable_types(testdir):
@@ -703,7 +703,7 @@ def test_unorderable_types(testdir):
"""
)
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
result.stdout.no_fnmatch_line("*TypeError*")
assert result.ret == ExitCode.NO_TESTS_COLLECTED
@@ -1020,7 +1020,7 @@ def test_testcase_handles_init_exceptions(testdir):
)
result = testdir.runpytest()
assert "should raise this exception" in result.stdout.str()
assert "ERROR at teardown of MyTestCase.test_hello" not in result.stdout.str()
result.stdout.no_fnmatch_line("*ERROR at teardown of MyTestCase.test_hello*")
def test_error_message_with_parametrized_fixtures(testdir):
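
For context, the essential idea behind the adopted helpers is small. A simplified stand-in (not pytest's actual implementation) shows why such a helper fails more informatively than the old substring assert, since it can point at the exact line that matched:

import fnmatch

def no_fnmatch_line(lines, pat):
    """Fail if any captured line matches the fnmatch pattern pat."""
    for i, line in enumerate(lines):
        if fnmatch.fnmatch(line, pat):
            raise AssertionError(
                "fnmatch: %r\n   with: %r (line %d)" % (pat, line, i)
            )

# Usage sketch against a LineMatcher's captured lines:
# no_fnmatch_line(result.stdout.lines, "*INTERNALERROR*")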