Merge pull request #7944 from symonk/refactor-test-mark-to-use-pytester
Refactor test_warning_types.py & test_mark.py to use pytester
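This PR migrates both modules from the legacy `testdir` fixture (backed by `py.path.local`) to the `pytester` fixture (backed by `pathlib.Path`), tightening type annotations along the way. Most of the diff is a mechanical rename; a minimal sketch of the recurring pattern (the test bodies here are illustrative, not taken from the diff):

```python
import os

from _pytest.pytester import Pytester


# Before: testdir.makepyfile() returns a py.path.local, so .strpath
# was needed to get a plain string path.
def test_example_old(testdir):
    py_file = testdir.makepyfile("def test_ok(): pass")
    file_name = os.path.basename(py_file.strpath)
    testdir.inline_run(file_name).assertoutcome(passed=1)


# After: pytester.makepyfile() returns a pathlib.Path, which os.path
# accepts directly, and the fixture itself is type-annotated.
def test_example_new(pytester: Pytester) -> None:
    py_file = pytester.makepyfile("def test_ok(): pass")
    file_name = os.path.basename(py_file)
    pytester.inline_run(file_name).assertoutcome(passed=1)
```

The pathlib switch also accounts for the few non-mechanical edits below: `testdir.tmpdir.join(...).move(...)` becomes `pytester.path.joinpath(...).replace(...)`, and `p.dirpath()` becomes `p.parent`.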
commit 78c09b9931
testing/test_mark.py
@@ -1,13 +1,16 @@
 import os
 import sys
+from typing import List
+from typing import Optional
 from unittest import mock
 
 import pytest
 from _pytest.config import ExitCode
-from _pytest.mark import MarkGenerator as Mark
+from _pytest.mark import MarkGenerator
 from _pytest.mark.structures import EMPTY_PARAMETERSET_OPTION
 from _pytest.nodes import Collector
 from _pytest.nodes import Node
+from _pytest.pytester import Pytester
 
 
 class TestMark:
@@ -18,7 +21,7 @@ class TestMark:
             assert attr in module.__all__  # type: ignore
 
     def test_pytest_mark_notcallable(self) -> None:
-        mark = Mark()
+        mark = MarkGenerator()
         with pytest.raises(TypeError):
             mark()  # type: ignore[operator]
 
@@ -36,17 +39,17 @@ class TestMark:
         assert pytest.mark.foo(SomeClass) is SomeClass
         assert pytest.mark.foo.with_args(SomeClass) is not SomeClass  # type: ignore[comparison-overlap]
 
-    def test_pytest_mark_name_starts_with_underscore(self):
-        mark = Mark()
+    def test_pytest_mark_name_starts_with_underscore(self) -> None:
+        mark = MarkGenerator()
         with pytest.raises(AttributeError):
             mark._some_name
 
 
-def test_marked_class_run_twice(testdir):
+def test_marked_class_run_twice(pytester: Pytester) -> None:
     """Test fails file is run twice that contains marked class.
     See issue#683.
     """
-    py_file = testdir.makepyfile(
+    py_file = pytester.makepyfile(
         """
         import pytest
         @pytest.mark.parametrize('abc', [1, 2, 3])
@@ -55,13 +58,13 @@ def test_marked_class_run_twice(testdir):
                 assert abc in [1, 2, 3]
     """
     )
-    file_name = os.path.basename(py_file.strpath)
-    rec = testdir.inline_run(file_name, file_name)
+    file_name = os.path.basename(py_file)
+    rec = pytester.inline_run(file_name, file_name)
     rec.assertoutcome(passed=6)
 
 
-def test_ini_markers(testdir):
-    testdir.makeini(
+def test_ini_markers(pytester: Pytester) -> None:
+    pytester.makeini(
         """
         [pytest]
         markers =
@@ -69,7 +72,7 @@ def test_ini_markers(testdir):
             a2: this is a smoke marker
     """
     )
-    testdir.makepyfile(
+    pytester.makepyfile(
         """
         def test_markers(pytestconfig):
             markers = pytestconfig.getini("markers")
@@ -79,12 +82,12 @@ def test_ini_markers(testdir):
            assert markers[1].startswith("a2:")
    """
    )
-    rec = testdir.inline_run()
+    rec = pytester.inline_run()
     rec.assertoutcome(passed=1)
 
 
-def test_markers_option(testdir):
-    testdir.makeini(
+def test_markers_option(pytester: Pytester) -> None:
+    pytester.makeini(
         """
         [pytest]
         markers =
@@ -93,21 +96,21 @@ def test_markers_option(testdir):
            nodescription
    """
    )
-    result = testdir.runpytest("--markers")
+    result = pytester.runpytest("--markers")
     result.stdout.fnmatch_lines(
         ["*a1*this is a webtest*", "*a1some*another marker", "*nodescription*"]
     )
 
 
-def test_ini_markers_whitespace(testdir):
-    testdir.makeini(
+def test_ini_markers_whitespace(pytester: Pytester) -> None:
+    pytester.makeini(
         """
         [pytest]
         markers =
             a1 : this is a whitespace marker
    """
    )
-    testdir.makepyfile(
+    pytester.makepyfile(
         """
         import pytest
 
@@ -116,33 +119,33 @@ def test_ini_markers_whitespace(testdir):
            assert True
    """
    )
-    rec = testdir.inline_run("--strict-markers", "-m", "a1")
+    rec = pytester.inline_run("--strict-markers", "-m", "a1")
     rec.assertoutcome(passed=1)
 
 
-def test_marker_without_description(testdir):
-    testdir.makefile(
+def test_marker_without_description(pytester: Pytester) -> None:
+    pytester.makefile(
         ".cfg",
         setup="""
         [tool:pytest]
         markers=slow
    """,
     )
-    testdir.makeconftest(
+    pytester.makeconftest(
         """
         import pytest
         pytest.mark.xfail('FAIL')
    """
    )
-    ftdir = testdir.mkdir("ft1_dummy")
-    testdir.tmpdir.join("conftest.py").move(ftdir.join("conftest.py"))
-    rec = testdir.runpytest("--strict-markers")
+    ftdir = pytester.mkdir("ft1_dummy")
+    pytester.path.joinpath("conftest.py").replace(ftdir.joinpath("conftest.py"))
+    rec = pytester.runpytest("--strict-markers")
     rec.assert_outcomes()
 
 
-def test_markers_option_with_plugin_in_current_dir(testdir):
-    testdir.makeconftest('pytest_plugins = "flip_flop"')
-    testdir.makepyfile(
+def test_markers_option_with_plugin_in_current_dir(pytester: Pytester) -> None:
+    pytester.makeconftest('pytest_plugins = "flip_flop"')
+    pytester.makepyfile(
         flip_flop="""\
         def pytest_configure(config):
             config.addinivalue_line("markers", "flip:flop")
@@ -154,7 +157,7 @@ def test_markers_option_with_plugin_in_current_dir(testdir):
                return
            metafunc.parametrize("x", (10, 20))"""
    )
-    testdir.makepyfile(
+    pytester.makepyfile(
         """\
         import pytest
         @pytest.mark.flipper
@@ -162,12 +165,12 @@ def test_markers_option_with_plugin_in_current_dir(testdir):
            assert x"""
    )
 
-    result = testdir.runpytest("--markers")
+    result = pytester.runpytest("--markers")
     result.stdout.fnmatch_lines(["*flip*flop*"])
 
 
-def test_mark_on_pseudo_function(testdir):
-    testdir.makepyfile(
+def test_mark_on_pseudo_function(pytester: Pytester) -> None:
+    pytester.makepyfile(
         """
         import pytest
 
@@ -176,13 +179,15 @@ def test_mark_on_pseudo_function(testdir):
            pass
    """
    )
-    reprec = testdir.inline_run()
+    reprec = pytester.inline_run()
     reprec.assertoutcome(passed=1)
 
 
 @pytest.mark.parametrize("option_name", ["--strict-markers", "--strict"])
-def test_strict_prohibits_unregistered_markers(testdir, option_name):
-    testdir.makepyfile(
+def test_strict_prohibits_unregistered_markers(
+    pytester: Pytester, option_name: str
+) -> None:
+    pytester.makepyfile(
         """
         import pytest
         @pytest.mark.unregisteredmark
@@ -190,7 +195,7 @@ def test_strict_prohibits_unregistered_markers(testdir, option_name):
            pass
    """
    )
-    result = testdir.runpytest(option_name)
+    result = pytester.runpytest(option_name)
     assert result.ret != 0
     result.stdout.fnmatch_lines(
         ["'unregisteredmark' not found in `markers` configuration option"]
@@ -208,8 +213,10 @@ def test_strict_prohibits_unregistered_markers(testdir, option_name):
        ("xyz or xyz2", ["test_one", "test_two"]),
    ],
 )
-def test_mark_option(expr: str, expected_passed: str, testdir) -> None:
-    testdir.makepyfile(
+def test_mark_option(
+    expr: str, expected_passed: List[Optional[str]], pytester: Pytester
+) -> None:
+    pytester.makepyfile(
         """
         import pytest
         @pytest.mark.xyz
@@ -220,18 +227,20 @@ def test_mark_option(expr: str, expected_passed: str, testdir) -> None:
            pass
    """
    )
-    rec = testdir.inline_run("-m", expr)
+    rec = pytester.inline_run("-m", expr)
     passed, skipped, fail = rec.listoutcomes()
-    passed = [x.nodeid.split("::")[-1] for x in passed]
-    assert passed == expected_passed
+    passed_str = [x.nodeid.split("::")[-1] for x in passed]
+    assert passed_str == expected_passed
 
 
 @pytest.mark.parametrize(
     ("expr", "expected_passed"),
     [("interface", ["test_interface"]), ("not interface", ["test_nointer"])],
 )
-def test_mark_option_custom(expr: str, expected_passed: str, testdir) -> None:
-    testdir.makeconftest(
+def test_mark_option_custom(
+    expr: str, expected_passed: List[str], pytester: Pytester
+) -> None:
+    pytester.makeconftest(
         """
         import pytest
         def pytest_collection_modifyitems(items):
@@ -240,7 +249,7 @@ def test_mark_option_custom(expr: str, expected_passed: str, testdir) -> None:
                item.add_marker(pytest.mark.interface)
    """
    )
-    testdir.makepyfile(
+    pytester.makepyfile(
         """
         def test_interface():
             pass
@@ -248,10 +257,10 @@ def test_mark_option_custom(expr: str, expected_passed: str, testdir) -> None:
            pass
    """
    )
-    rec = testdir.inline_run("-m", expr)
+    rec = pytester.inline_run("-m", expr)
     passed, skipped, fail = rec.listoutcomes()
-    passed = [x.nodeid.split("::")[-1] for x in passed]
-    assert passed == expected_passed
+    passed_str = [x.nodeid.split("::")[-1] for x in passed]
+    assert passed_str == expected_passed
 
 
 @pytest.mark.parametrize(
@@ -266,8 +275,10 @@ def test_mark_option_custom(expr: str, expected_passed: str, testdir) -> None:
        ("not (1 or 2)", ["test_interface", "test_nointer", "test_pass"]),
    ],
 )
-def test_keyword_option_custom(expr: str, expected_passed: str, testdir) -> None:
-    testdir.makepyfile(
+def test_keyword_option_custom(
+    expr: str, expected_passed: List[str], pytester: Pytester
+) -> None:
+    pytester.makepyfile(
         """
         def test_interface():
             pass
@@ -281,15 +292,15 @@ def test_keyword_option_custom(expr: str, expected_passed: str, testdir) -> None
            pass
    """
    )
-    rec = testdir.inline_run("-k", expr)
+    rec = pytester.inline_run("-k", expr)
     passed, skipped, fail = rec.listoutcomes()
-    passed = [x.nodeid.split("::")[-1] for x in passed]
-    assert passed == expected_passed
+    passed_str = [x.nodeid.split("::")[-1] for x in passed]
+    assert passed_str == expected_passed
 
 
-def test_keyword_option_considers_mark(testdir):
-    testdir.copy_example("marks/marks_considered_keywords")
-    rec = testdir.inline_run("-k", "foo")
+def test_keyword_option_considers_mark(pytester: Pytester) -> None:
+    pytester.copy_example("marks/marks_considered_keywords")
+    rec = pytester.inline_run("-k", "foo")
     passed = rec.listoutcomes()[0]
     assert len(passed) == 1
 
@@ -302,8 +313,10 @@ def test_keyword_option_considers_mark(testdir):
        ("2-3", ["test_func[2-3]"]),
    ],
 )
-def test_keyword_option_parametrize(expr: str, expected_passed: str, testdir) -> None:
-    testdir.makepyfile(
+def test_keyword_option_parametrize(
+    expr: str, expected_passed: List[str], pytester: Pytester
+) -> None:
+    pytester.makepyfile(
         """
         import pytest
         @pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
@@ -311,14 +324,14 @@ def test_keyword_option_parametrize(expr: str, expected_passed: str, testdir) ->
            pass
    """
    )
-    rec = testdir.inline_run("-k", expr)
+    rec = pytester.inline_run("-k", expr)
     passed, skipped, fail = rec.listoutcomes()
-    passed = [x.nodeid.split("::")[-1] for x in passed]
-    assert passed == expected_passed
+    passed_str = [x.nodeid.split("::")[-1] for x in passed]
+    assert passed_str == expected_passed
 
 
-def test_parametrize_with_module(testdir):
-    testdir.makepyfile(
+def test_parametrize_with_module(pytester: Pytester) -> None:
+    pytester.makepyfile(
         """
         import pytest
         @pytest.mark.parametrize("arg", [pytest,])
@@ -326,7 +339,7 @@ def test_parametrize_with_module(testdir):
            pass
    """
    )
-    rec = testdir.inline_run()
+    rec = pytester.inline_run()
     passed, skipped, fail = rec.listoutcomes()
     expected_id = "test_func[" + pytest.__name__ + "]"
     assert passed[0].nodeid.split("::")[-1] == expected_id
@@ -356,23 +369,23 @@ def test_parametrize_with_module(testdir):
    ],
 )
 def test_keyword_option_wrong_arguments(
-    expr: str, expected_error: str, testdir, capsys
+    expr: str, expected_error: str, pytester: Pytester, capsys
 ) -> None:
-    testdir.makepyfile(
+    pytester.makepyfile(
         """
         def test_func(arg):
             pass
    """
    )
-    testdir.inline_run("-k", expr)
+    pytester.inline_run("-k", expr)
     err = capsys.readouterr().err
     assert expected_error in err
 
 
-def test_parametrized_collected_from_command_line(testdir):
+def test_parametrized_collected_from_command_line(pytester: Pytester) -> None:
     """Parametrized test not collected if test named specified in command
     line issue#649."""
-    py_file = testdir.makepyfile(
+    py_file = pytester.makepyfile(
         """
         import pytest
         @pytest.mark.parametrize("arg", [None, 1.3, "2-3"])
@@ -380,14 +393,14 @@ def test_parametrized_collected_from_command_line(testdir):
            pass
    """
    )
-    file_name = os.path.basename(py_file.strpath)
-    rec = testdir.inline_run(file_name + "::" + "test_func")
+    file_name = os.path.basename(py_file)
+    rec = pytester.inline_run(file_name + "::" + "test_func")
     rec.assertoutcome(passed=3)
 
 
-def test_parametrized_collect_with_wrong_args(testdir):
+def test_parametrized_collect_with_wrong_args(pytester: Pytester) -> None:
     """Test collect parametrized func with wrong number of args."""
-    py_file = testdir.makepyfile(
+    py_file = pytester.makepyfile(
         """
         import pytest
 
@@ -397,7 +410,7 @@ def test_parametrized_collect_with_wrong_args(testdir):
    """
    )
 
-    result = testdir.runpytest(py_file)
+    result = pytester.runpytest(py_file)
     result.stdout.fnmatch_lines(
         [
             'test_parametrized_collect_with_wrong_args.py::test_func: in "parametrize" the number of names (2):',
@@ -408,9 +421,9 @@ def test_parametrized_collect_with_wrong_args(testdir):
    )
 
 
-def test_parametrized_with_kwargs(testdir):
+def test_parametrized_with_kwargs(pytester: Pytester) -> None:
     """Test collect parametrized func with wrong number of args."""
-    py_file = testdir.makepyfile(
+    py_file = pytester.makepyfile(
         """
         import pytest
 
@@ -424,13 +437,13 @@ def test_parametrized_with_kwargs(testdir):
    """
    )
 
-    result = testdir.runpytest(py_file)
+    result = pytester.runpytest(py_file)
     assert result.ret == 0
 
 
-def test_parametrize_iterator(testdir):
+def test_parametrize_iterator(pytester: Pytester) -> None:
     """`parametrize` should work with generators (#5354)."""
-    py_file = testdir.makepyfile(
+    py_file = pytester.makepyfile(
         """\
         import pytest
 
@@ -444,16 +457,16 @@ def test_parametrize_iterator(testdir):
            assert a >= 1
    """
    )
-    result = testdir.runpytest(py_file)
+    result = pytester.runpytest(py_file)
     assert result.ret == 0
     # should not skip any tests
     result.stdout.fnmatch_lines(["*3 passed*"])
 
 
 class TestFunctional:
-    def test_merging_markers_deep(self, testdir):
+    def test_merging_markers_deep(self, pytester: Pytester) -> None:
         # issue 199 - propagate markers into nested classes
-        p = testdir.makepyfile(
+        p = pytester.makepyfile(
             """
             import pytest
             class TestA(object):
@@ -466,13 +479,15 @@ class TestFunctional:
                    assert True
        """
        )
-        items, rec = testdir.inline_genitems(p)
+        items, rec = pytester.inline_genitems(p)
         for item in items:
             print(item, item.keywords)
             assert [x for x in item.iter_markers() if x.name == "a"]
 
-    def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir):
-        p = testdir.makepyfile(
+    def test_mark_decorator_subclass_does_not_propagate_to_base(
+        self, pytester: Pytester
+    ) -> None:
+        p = pytester.makepyfile(
             """
             import pytest
 
@@ -487,12 +502,12 @@ class TestFunctional:
            def test_bar(self): pass
        """
        )
-        items, rec = testdir.inline_genitems(p)
+        items, rec = pytester.inline_genitems(p)
         self.assert_markers(items, test_foo=("a", "b"), test_bar=("a",))
 
-    def test_mark_should_not_pass_to_siebling_class(self, testdir):
+    def test_mark_should_not_pass_to_siebling_class(self, pytester: Pytester) -> None:
         """#568"""
-        p = testdir.makepyfile(
+        p = pytester.makepyfile(
             """
             import pytest
 
@@ -510,7 +525,7 @@ class TestFunctional:
 
        """
        )
-        items, rec = testdir.inline_genitems(p)
+        items, rec = pytester.inline_genitems(p)
         base_item, sub_item, sub_item_other = items
         print(items, [x.nodeid for x in items])
         # new api segregates
@@ -518,8 +533,8 @@ class TestFunctional:
        assert not list(sub_item_other.iter_markers(name="b"))
        assert list(sub_item.iter_markers(name="b"))
 
-    def test_mark_decorator_baseclasses_merged(self, testdir):
-        p = testdir.makepyfile(
+    def test_mark_decorator_baseclasses_merged(self, pytester: Pytester) -> None:
+        p = pytester.makepyfile(
             """
             import pytest
 
@@ -538,11 +553,11 @@ class TestFunctional:
            def test_bar(self): pass
        """
        )
-        items, rec = testdir.inline_genitems(p)
+        items, rec = pytester.inline_genitems(p)
         self.assert_markers(items, test_foo=("a", "b", "c"), test_bar=("a", "b", "d"))
 
-    def test_mark_closest(self, testdir):
-        p = testdir.makepyfile(
+    def test_mark_closest(self, pytester: Pytester) -> None:
+        p = pytester.makepyfile(
             """
             import pytest
 
@@ -557,14 +572,18 @@ class TestFunctional:
 
        """
        )
-        items, rec = testdir.inline_genitems(p)
+        items, rec = pytester.inline_genitems(p)
         has_own, has_inherited = items
-        assert has_own.get_closest_marker("c").kwargs == {"location": "function"}
-        assert has_inherited.get_closest_marker("c").kwargs == {"location": "class"}
+        has_own_marker = has_own.get_closest_marker("c")
+        has_inherited_marker = has_inherited.get_closest_marker("c")
+        assert has_own_marker is not None
+        assert has_inherited_marker is not None
+        assert has_own_marker.kwargs == {"location": "function"}
+        assert has_inherited_marker.kwargs == {"location": "class"}
         assert has_own.get_closest_marker("missing") is None
 
-    def test_mark_with_wrong_marker(self, testdir):
-        reprec = testdir.inline_runsource(
+    def test_mark_with_wrong_marker(self, pytester: Pytester) -> None:
+        reprec = pytester.inline_runsource(
             """
             import pytest
             class pytestmark(object):
@@ -577,8 +596,8 @@ class TestFunctional:
        assert len(values) == 1
        assert "TypeError" in str(values[0].longrepr)
 
-    def test_mark_dynamically_in_funcarg(self, testdir):
-        testdir.makeconftest(
+    def test_mark_dynamically_in_funcarg(self, pytester: Pytester) -> None:
+        pytester.makeconftest(
             """
             import pytest
             @pytest.fixture
@@ -589,17 +608,17 @@ class TestFunctional:
                terminalreporter._tw.line("keyword: %s" % values[0].keywords)
        """
        )
-        testdir.makepyfile(
+        pytester.makepyfile(
             """
             def test_func(arg):
                 pass
        """
        )
-        result = testdir.runpytest()
+        result = pytester.runpytest()
         result.stdout.fnmatch_lines(["keyword: *hello*"])
 
-    def test_no_marker_match_on_unmarked_names(self, testdir):
-        p = testdir.makepyfile(
+    def test_no_marker_match_on_unmarked_names(self, pytester: Pytester) -> None:
+        p = pytester.makepyfile(
             """
             import pytest
             @pytest.mark.shouldmatch
@@ -610,15 +629,15 @@ class TestFunctional:
                assert 1
        """
        )
-        reprec = testdir.inline_run("-m", "test_unmarked", p)
+        reprec = pytester.inline_run("-m", "test_unmarked", p)
         passed, skipped, failed = reprec.listoutcomes()
         assert len(passed) + len(skipped) + len(failed) == 0
         dlist = reprec.getcalls("pytest_deselected")
         deselected_tests = dlist[0].items
         assert len(deselected_tests) == 2
 
-    def test_keywords_at_node_level(self, testdir):
-        testdir.makepyfile(
+    def test_keywords_at_node_level(self, pytester: Pytester) -> None:
+        pytester.makepyfile(
             """
             import pytest
             @pytest.fixture(scope="session", autouse=True)
@@ -636,11 +655,11 @@ class TestFunctional:
                pass
        """
        )
-        reprec = testdir.inline_run()
+        reprec = pytester.inline_run()
         reprec.assertoutcome(passed=1)
 
-    def test_keyword_added_for_session(self, testdir):
-        testdir.makeconftest(
+    def test_keyword_added_for_session(self, pytester: Pytester) -> None:
+        pytester.makeconftest(
             """
             import pytest
             def pytest_collection_modifyitems(session):
@@ -651,7 +670,7 @@ class TestFunctional:
                    session.add_marker(10))
        """
        )
-        testdir.makepyfile(
+        pytester.makepyfile(
             """
             def test_some(request):
                 assert "mark1" in request.keywords
@@ -664,14 +683,14 @@ class TestFunctional:
                assert marker.kwargs == {}
        """
        )
-        reprec = testdir.inline_run("-m", "mark1")
+        reprec = pytester.inline_run("-m", "mark1")
         reprec.assertoutcome(passed=1)
 
-    def assert_markers(self, items, **expected):
+    def assert_markers(self, items, **expected) -> None:
         """Assert that given items have expected marker names applied to them.
         expected should be a dict of (item name -> seq of expected marker names).
 
-        Note: this could be moved to ``testdir`` if proven to be useful
+        Note: this could be moved to ``pytester`` if proven to be useful
         to other modules.
         """
         items = {x.name: x for x in items}
@@ -680,9 +699,9 @@ class TestFunctional:
            assert markers == set(expected_markers)
 
     @pytest.mark.filterwarnings("ignore")
-    def test_mark_from_parameters(self, testdir):
+    def test_mark_from_parameters(self, pytester: Pytester) -> None:
         """#1540"""
-        testdir.makepyfile(
+        pytester.makepyfile(
             """
             import pytest
 
@@ -701,12 +720,12 @@ class TestFunctional:
                assert True
        """
        )
-        reprec = testdir.inline_run()
+        reprec = pytester.inline_run()
         reprec.assertoutcome(skipped=1)
 
-    def test_reevaluate_dynamic_expr(self, testdir):
+    def test_reevaluate_dynamic_expr(self, pytester: Pytester) -> None:
         """#7360"""
-        py_file1 = testdir.makepyfile(
+        py_file1 = pytester.makepyfile(
             test_reevaluate_dynamic_expr1="""
             import pytest
 
@@ -717,7 +736,7 @@ class TestFunctional:
                assert True
        """
        )
-        py_file2 = testdir.makepyfile(
+        py_file2 = pytester.makepyfile(
             test_reevaluate_dynamic_expr2="""
             import pytest
 
@@ -729,15 +748,15 @@ class TestFunctional:
        """
        )
 
-        file_name1 = os.path.basename(py_file1.strpath)
-        file_name2 = os.path.basename(py_file2.strpath)
-        reprec = testdir.inline_run(file_name1, file_name2)
+        file_name1 = os.path.basename(py_file1)
+        file_name2 = os.path.basename(py_file2)
+        reprec = pytester.inline_run(file_name1, file_name2)
         reprec.assertoutcome(passed=1, skipped=1)
 
 
 class TestKeywordSelection:
-    def test_select_simple(self, testdir):
-        file_test = testdir.makepyfile(
+    def test_select_simple(self, pytester: Pytester) -> None:
+        file_test = pytester.makepyfile(
             """
             def test_one():
                 assert 0
@@ -748,7 +767,7 @@ class TestKeywordSelection:
        )
 
        def check(keyword, name):
-            reprec = testdir.inline_run("-s", "-k", keyword, file_test)
+            reprec = pytester.inline_run("-s", "-k", keyword, file_test)
             passed, skipped, failed = reprec.listoutcomes()
             assert len(failed) == 1
             assert failed[0].nodeid.split("::")[-1] == name
@@ -769,8 +788,8 @@ class TestKeywordSelection:
            "xxx and TestClass and test_2",
        ],
    )
-    def test_select_extra_keywords(self, testdir, keyword):
-        p = testdir.makepyfile(
+    def test_select_extra_keywords(self, pytester: Pytester, keyword) -> None:
+        p = pytester.makepyfile(
             test_select="""
             def test_1():
                 pass
@@ -779,7 +798,7 @@ class TestKeywordSelection:
                pass
        """
        )
-        testdir.makepyfile(
+        pytester.makepyfile(
             conftest="""
             import pytest
             @pytest.hookimpl(hookwrapper=True)
@@ -790,7 +809,7 @@ class TestKeywordSelection:
                    item.extra_keyword_matches.add("xxx")
        """
        )
-        reprec = testdir.inline_run(p.dirpath(), "-s", "-k", keyword)
+        reprec = pytester.inline_run(p.parent, "-s", "-k", keyword)
         print("keyword", repr(keyword))
         passed, skipped, failed = reprec.listoutcomes()
         assert len(passed) == 1
@@ -799,15 +818,15 @@ class TestKeywordSelection:
        assert len(dlist) == 1
        assert dlist[0].items[0].name == "test_1"
 
-    def test_select_starton(self, testdir):
-        threepass = testdir.makepyfile(
+    def test_select_starton(self, pytester: Pytester) -> None:
+        threepass = pytester.makepyfile(
             test_threepass="""
             def test_one(): assert 1
             def test_two(): assert 1
             def test_three(): assert 1
        """
        )
-        reprec = testdir.inline_run("-k", "test_two:", threepass)
+        reprec = pytester.inline_run("-k", "test_two:", threepass)
         passed, skipped, failed = reprec.listoutcomes()
         assert len(passed) == 2
         assert not failed
@@ -816,21 +835,21 @@ class TestKeywordSelection:
        item = dlist[0].items[0]
        assert item.name == "test_one"
 
-    def test_keyword_extra(self, testdir):
-        p = testdir.makepyfile(
+    def test_keyword_extra(self, pytester: Pytester) -> None:
+        p = pytester.makepyfile(
             """
             def test_one():
                 assert 0
             test_one.mykeyword = True
        """
        )
-        reprec = testdir.inline_run("-k", "mykeyword", p)
+        reprec = pytester.inline_run("-k", "mykeyword", p)
         passed, skipped, failed = reprec.countoutcomes()
         assert failed == 1
 
     @pytest.mark.xfail
-    def test_keyword_extra_dash(self, testdir):
-        p = testdir.makepyfile(
+    def test_keyword_extra_dash(self, pytester: Pytester) -> None:
+        p = pytester.makepyfile(
             """
             def test_one():
                 assert 0
@@ -839,42 +858,42 @@ class TestKeywordSelection:
        )
        # with argparse the argument to an option cannot
        # start with '-'
-        reprec = testdir.inline_run("-k", "-mykeyword", p)
+        reprec = pytester.inline_run("-k", "-mykeyword", p)
         passed, skipped, failed = reprec.countoutcomes()
         assert passed + skipped + failed == 0
 
     @pytest.mark.parametrize(
         "keyword", ["__", "+", ".."],
     )
-    def test_no_magic_values(self, testdir, keyword: str) -> None:
+    def test_no_magic_values(self, pytester: Pytester, keyword: str) -> None:
         """Make sure the tests do not match on magic values,
         no double underscored values, like '__dict__' and '+'.
         """
-        p = testdir.makepyfile(
+        p = pytester.makepyfile(
             """
             def test_one(): assert 1
        """
        )
 
-        reprec = testdir.inline_run("-k", keyword, p)
+        reprec = pytester.inline_run("-k", keyword, p)
         passed, skipped, failed = reprec.countoutcomes()
         dlist = reprec.getcalls("pytest_deselected")
         assert passed + skipped + failed == 0
         deselected_tests = dlist[0].items
         assert len(deselected_tests) == 1
 
-    def test_no_match_directories_outside_the_suite(self, testdir):
+    def test_no_match_directories_outside_the_suite(self, pytester: Pytester) -> None:
         """`-k` should not match against directories containing the test suite (#7040)."""
         test_contents = """
             def test_aaa(): pass
             def test_ddd(): pass
        """
-        testdir.makepyfile(
+        pytester.makepyfile(
             **{"ddd/tests/__init__.py": "", "ddd/tests/test_foo.py": test_contents}
        )
 
        def get_collected_names(*args):
-            _, rec = testdir.inline_genitems(*args)
+            _, rec = pytester.inline_genitems(*args)
             calls = rec.getcalls("pytest_collection_finish")
             assert len(calls) == 1
             return [x.name for x in calls[0].session.items]
@@ -883,7 +902,7 @@ class TestKeywordSelection:
        assert get_collected_names() == ["test_aaa", "test_ddd"]
 
        # do not collect anything based on names outside the collection tree
-        assert get_collected_names("-k", testdir.tmpdir.basename) == []
+        assert get_collected_names("-k", pytester._name) == []
 
        # "-k ddd" should only collect "test_ddd", but not
        # 'test_aaa' just because one of its parent directories is named "ddd";
@@ -902,7 +921,7 @@ class TestMarkDecorator:
            ("foo", pytest.mark.bar(), False),
        ],
    )
-    def test__eq__(self, lhs, rhs, expected):
+    def test__eq__(self, lhs, rhs, expected) -> None:
         assert (lhs == rhs) == expected
 
     def test_aliases(self) -> None:
@@ -913,9 +932,11 @@ class TestMarkDecorator:
 
 
 @pytest.mark.parametrize("mark", [None, "", "skip", "xfail"])
-def test_parameterset_for_parametrize_marks(testdir, mark):
+def test_parameterset_for_parametrize_marks(
+    pytester: Pytester, mark: Optional[str]
+) -> None:
     if mark is not None:
-        testdir.makeini(
+        pytester.makeini(
             """
             [pytest]
             {}={}
@@ -924,7 +945,7 @@ def test_parameterset_for_parametrize_marks(testdir, mark):
            )
        )
 
-    config = testdir.parseconfig()
+    config = pytester.parseconfig()
     from _pytest.mark import pytest_configure, get_empty_parameterset_mark
 
     pytest_configure(config)
@@ -938,8 +959,8 @@ def test_parameterset_for_parametrize_marks(testdir, mark):
    assert result_mark.kwargs.get("run") is False
 
 
-def test_parameterset_for_fail_at_collect(testdir):
-    testdir.makeini(
+def test_parameterset_for_fail_at_collect(pytester: Pytester) -> None:
+    pytester.makeini(
         """
         [pytest]
         {}=fail_at_collect
@@ -948,7 +969,7 @@ def test_parameterset_for_fail_at_collect(testdir):
        )
    )
 
-    config = testdir.parseconfig()
+    config = pytester.parseconfig()
     from _pytest.mark import pytest_configure, get_empty_parameterset_mark
 
     pytest_configure(config)
@@ -959,7 +980,7 @@ def test_parameterset_for_fail_at_collect(testdir):
    ):
        get_empty_parameterset_mark(config, ["a"], pytest_configure)
 
-    p1 = testdir.makepyfile(
+    p1 = pytester.makepyfile(
         """
         import pytest
 
@@ -968,7 +989,7 @@ def test_parameterset_for_fail_at_collect(testdir):
            pass
    """
    )
-    result = testdir.runpytest(str(p1))
+    result = pytester.runpytest(str(p1))
     result.stdout.fnmatch_lines(
         [
             "collected 0 items / 1 error",
@@ -980,13 +1001,13 @@ def test_parameterset_for_fail_at_collect(testdir):
    assert result.ret == ExitCode.INTERRUPTED
 
 
-def test_parameterset_for_parametrize_bad_markname(testdir):
+def test_parameterset_for_parametrize_bad_markname(pytester: Pytester) -> None:
     with pytest.raises(pytest.UsageError):
-        test_parameterset_for_parametrize_marks(testdir, "bad")
+        test_parameterset_for_parametrize_marks(pytester, "bad")
 
 
-def test_mark_expressions_no_smear(testdir):
-    testdir.makepyfile(
+def test_mark_expressions_no_smear(pytester: Pytester) -> None:
+    pytester.makepyfile(
         """
         import pytest
 
@@ -1004,7 +1025,7 @@ def test_mark_expressions_no_smear(testdir):
    """
    )
 
-    reprec = testdir.inline_run("-m", "FOO")
+    reprec = pytester.inline_run("-m", "FOO")
     passed, skipped, failed = reprec.countoutcomes()
     dlist = reprec.getcalls("pytest_deselected")
     assert passed == 1
@@ -1014,13 +1035,13 @@ def test_mark_expressions_no_smear(testdir):
 
     # todo: fixed
     # keywords smear - expected behaviour
-    # reprec_keywords = testdir.inline_run("-k", "FOO")
+    # reprec_keywords = pytester.inline_run("-k", "FOO")
     # passed_k, skipped_k, failed_k = reprec_keywords.countoutcomes()
     # assert passed_k == 2
     # assert skipped_k == failed_k == 0
 
 
-def test_addmarker_order():
+def test_addmarker_order() -> None:
     session = mock.Mock()
     session.own_markers = []
     session.parent = None
@@ -1034,9 +1055,9 @@ def test_addmarker_order():
 
 
 @pytest.mark.filterwarnings("ignore")
-def test_markers_from_parametrize(testdir):
+def test_markers_from_parametrize(pytester: Pytester) -> None:
     """#3605"""
-    testdir.makepyfile(
+    pytester.makepyfile(
         """
         import pytest
 
@@ -1067,7 +1088,7 @@ def test_markers_from_parametrize(testdir):
    """
    )
 
-    result = testdir.runpytest()
+    result = pytester.runpytest()
     result.assert_outcomes(passed=4)
 
 
@@ -1079,13 +1100,13 @@ def test_pytest_param_id_requires_string() -> None:
 
 
 @pytest.mark.parametrize("s", (None, "hello world"))
-def test_pytest_param_id_allows_none_or_string(s):
+def test_pytest_param_id_allows_none_or_string(s) -> None:
     assert pytest.param(id=s)
 
 
 @pytest.mark.parametrize("expr", ("NOT internal_err", "NOT (internal_err)", "bogus/"))
-def test_marker_expr_eval_failure_handling(testdir, expr):
-    foo = testdir.makepyfile(
+def test_marker_expr_eval_failure_handling(pytester: Pytester, expr) -> None:
+    foo = pytester.makepyfile(
         """
         import pytest
 
@@ -1095,6 +1116,6 @@ def test_marker_expr_eval_failure_handling(testdir, expr):
    """
    )
    expected = f"ERROR: Wrong expression passed to '-m': {expr}: *"
-    result = testdir.runpytest(foo, "-m", expr)
+    result = pytester.runpytest(foo, "-m", expr)
     result.stderr.fnmatch_lines([expected])
     assert result.ret == ExitCode.USAGE_ERROR

testing/test_warning_types.py
@@ -1,18 +1,19 @@
 import inspect
 
-import _pytest.warning_types
 import pytest
+from _pytest import warning_types
+from _pytest.pytester import Pytester
 
 
 @pytest.mark.parametrize(
     "warning_class",
     [
         w
-        for n, w in vars(_pytest.warning_types).items()
+        for n, w in vars(warning_types).items()
         if inspect.isclass(w) and issubclass(w, Warning)
     ],
 )
-def test_warning_types(warning_class):
+def test_warning_types(warning_class: UserWarning) -> None:
     """Make sure all warnings declared in _pytest.warning_types are displayed as coming
     from 'pytest' instead of the internal module (#5452).
     """
@@ -20,11 +21,11 @@ def test_warning_types(warning_class):
 
 
 @pytest.mark.filterwarnings("error::pytest.PytestWarning")
-def test_pytest_warnings_repr_integration_test(testdir):
+def test_pytest_warnings_repr_integration_test(pytester: Pytester) -> None:
     """Small integration test to ensure our small hack of setting the __module__ attribute
     of our warnings actually works (#5452).
     """
-    testdir.makepyfile(
+    pytester.makepyfile(
         """
         import pytest
         import warnings
@@ -33,5 +34,5 @@ def test_pytest_warnings_repr_integration_test(testdir):
            warnings.warn(pytest.PytestWarning("some warning"))
    """
    )
-    result = testdir.runpytest()
+    result = pytester.runpytest()
     result.stdout.fnmatch_lines(["E pytest.PytestWarning: some warning"])