Merge remote-tracking branch 'origin/master' into issue_7295
commit 0dd77b3e39

AUTHORS | 1 +
@@ -233,6 +233,7 @@ Pulkit Goyal
 Punyashloka Biswal
 Quentin Pradet
 Ralf Schmitt
+Ram Rachum
 Ralph Giles
 Ran Benita
 Raphael Castaneda
@@ -0,0 +1 @@
+Fixed exception causes all over the codebase, i.e. use `raise new_exception from old_exception` when wrapping an exception.
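A minimal sketch of the pattern this changelog entry describes (the function and names below are illustrative, not taken from the pytest code):

.. code-block:: python

    def parse_port(raw: str) -> int:
        try:
            return int(raw)
        except ValueError as old_exception:
            # Chaining makes the original error the new one's __cause__, so the
            # traceback reads "The above exception was the direct cause of ...".
            raise RuntimeError("invalid port: {!r}".format(raw)) from old_exception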
@@ -0,0 +1,13 @@
+``--junitxml`` now includes the exception cause in the ``message`` XML attribute for failures during setup and teardown.
+
+Previously:
+
+.. code-block:: xml
+
+    <error message="test setup failure">
+
+Now:
+
+.. code-block:: xml
+
+    <error message="failed on setup with "ValueError: Some error during setup"">
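For context, a sketch of a test whose fixture raises during setup; run with ``--junitxml=out.xml`` it should produce an ``<error>`` element like the "Now" example above (test and fixture names are illustrative):

.. code-block:: python

    import pytest

    @pytest.fixture
    def resource():
        raise ValueError("Some error during setup")

    def test_uses_resource(resource):
        pass  # never reached; reported as an error during setup, not a failure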
@@ -6,6 +6,7 @@ from .code import Frame
+from .code import getfslineno
 from .code import getrawcode
 from .code import Traceback
 from .code import TracebackEntry
 from .source import compile_ as compile
 from .source import Source

@@ -17,6 +18,7 @@ __all__ = [
+    "getfslineno",
     "getrawcode",
     "Traceback",
     "TracebackEntry",
     "compile",
     "Source",
 ]
@@ -213,7 +213,7 @@ class TracebackEntry:
         return source.getstatement(self.lineno)

     @property
-    def path(self):
+    def path(self) -> Union[py.path.local, str]:
         """ path to the source code """
         return self.frame.code.path

@@ -215,7 +215,7 @@ class Source:
             newex.offset = ex.offset
             newex.lineno = ex.lineno
             newex.text = ex.text
-            raise newex
+            raise newex from ex
         else:
             if flag & ast.PyCF_ONLY_AST:
                 assert isinstance(co, ast.AST)
@@ -1,5 +1,6 @@
 """ command line options, ini-file and conftest.py processing. """
 import argparse
+import collections.abc
 import contextlib
 import copy
 import enum
@@ -15,10 +16,13 @@ from typing import Any
 from typing import Callable
 from typing import Dict
 from typing import IO
+from typing import Iterable
+from typing import Iterator
 from typing import List
 from typing import Optional
 from typing import Sequence
 from typing import Set
+from typing import TextIO
 from typing import Tuple
 from typing import Union

@@ -42,6 +46,7 @@ from _pytest.compat import TYPE_CHECKING
 from _pytest.outcomes import fail
 from _pytest.outcomes import Skipped
 from _pytest.pathlib import import_path
+from _pytest.pathlib import ImportMode
 from _pytest.pathlib import Path
 from _pytest.store import Store
 from _pytest.warning_types import PytestConfigWarning
@@ -50,6 +55,7 @@ if TYPE_CHECKING:
     from typing import Type

     from _pytest._code.code import _TracebackStyle
+    from _pytest.terminal import TerminalReporter
     from .argparsing import Argument

@@ -88,18 +94,24 @@ class ExitCode(enum.IntEnum):


 class ConftestImportFailure(Exception):
-    def __init__(self, path, excinfo):
+    def __init__(
+        self,
+        path: py.path.local,
+        excinfo: Tuple["Type[Exception]", Exception, TracebackType],
+    ) -> None:
         super().__init__(path, excinfo)
         self.path = path
-        self.excinfo = excinfo  # type: Tuple[Type[Exception], Exception, TracebackType]
+        self.excinfo = excinfo

-    def __str__(self):
+    def __str__(self) -> str:
         return "{}: {} (from {})".format(
             self.excinfo[0].__name__, self.excinfo[1], self.path
         )


-def filter_traceback_for_conftest_import_failure(entry) -> bool:
+def filter_traceback_for_conftest_import_failure(
+    entry: _pytest._code.TracebackEntry,
+) -> bool:
     """filters tracebacks entries which point to pytest internals or importlib.

     Make a special case for importlib because we use it to import test modules and conftest files
@@ -108,7 +120,10 @@ def filter_traceback_for_conftest_import_failure(entry) -> bool:
     return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep)


-def main(args=None, plugins=None) -> Union[int, ExitCode]:
+def main(
+    args: Optional[List[str]] = None,
+    plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
+) -> Union[int, ExitCode]:
     """ return exit code, after performing an in-process test run.

     :arg args: list of command line arguments.
@@ -177,7 +192,7 @@ class cmdline:  # compatibility namespace
     main = staticmethod(main)


-def filename_arg(path, optname):
+def filename_arg(path: str, optname: str) -> str:
     """ Argparse type validator for filename arguments.

     :path: path of filename
@@ -188,7 +203,7 @@ def filename_arg(path, optname):
     return path


-def directory_arg(path, optname):
+def directory_arg(path: str, optname: str) -> str:
     """Argparse type validator for directory arguments.

     :path: path of directory
@@ -239,13 +254,16 @@ builtin_plugins = set(default_plugins)
 builtin_plugins.add("pytester")


-def get_config(args=None, plugins=None):
+def get_config(
+    args: Optional[List[str]] = None,
+    plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
+) -> "Config":
     # subsequent calls to main will create a fresh instance
     pluginmanager = PytestPluginManager()
     config = Config(
         pluginmanager,
         invocation_params=Config.InvocationParams(
-            args=args or (), plugins=plugins, dir=Path.cwd()
+            args=args or (), plugins=plugins, dir=Path.cwd(),
         ),
     )

@@ -255,10 +273,11 @@ def get_config(args=None, plugins=None):
     for spec in default_plugins:
         pluginmanager.import_plugin(spec)

     return config


-def get_plugin_manager():
+def get_plugin_manager() -> "PytestPluginManager":
     """
     Obtain a new instance of the
     :py:class:`_pytest.config.PytestPluginManager`, with default plugins
@@ -271,8 +290,9 @@ def get_plugin_manager():


 def _prepareconfig(
-    args: Optional[Union[py.path.local, List[str]]] = None, plugins=None
-):
+    args: Optional[Union[py.path.local, List[str]]] = None,
+    plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
+) -> "Config":
     if args is None:
         args = sys.argv[1:]
     elif isinstance(args, py.path.local):
@@ -290,9 +310,10 @@ def _prepareconfig(
                    pluginmanager.consider_pluginarg(plugin)
                else:
                    pluginmanager.register(plugin)
-        return pluginmanager.hook.pytest_cmdline_parse(
+        config = pluginmanager.hook.pytest_cmdline_parse(
             pluginmanager=pluginmanager, args=args
         )
+        return config
     except BaseException:
         config._ensure_unconfigure()
         raise
@@ -313,13 +334,11 @@ class PytestPluginManager(PluginManager):

         super().__init__("pytest")
         # The objects are module objects, only used generically.
-        self._conftest_plugins = set()  # type: Set[object]
+        self._conftest_plugins = set()  # type: Set[types.ModuleType]

-        # state related to local conftest plugins
-        # Maps a py.path.local to a list of module objects.
-        self._dirpath2confmods = {}  # type: Dict[Any, List[object]]
-        # Maps a py.path.local to a module object.
-        self._conftestpath2mod = {}  # type: Dict[Any, object]
+        # State related to local conftest plugins.
+        self._dirpath2confmods = {}  # type: Dict[py.path.local, List[types.ModuleType]]
+        self._conftestpath2mod = {}  # type: Dict[Path, types.ModuleType]
         self._confcutdir = None  # type: Optional[py.path.local]
         self._noconftest = False
         self._duplicatepaths = set()  # type: Set[py.path.local]
@@ -328,7 +347,7 @@ class PytestPluginManager(PluginManager):
         self.register(self)
         if os.environ.get("PYTEST_DEBUG"):
             err = sys.stderr  # type: IO[str]
-            encoding = getattr(err, "encoding", "utf8")
+            encoding = getattr(err, "encoding", "utf8")  # type: str
             try:
                 err = open(
                     os.dup(err.fileno()), mode=err.mode, buffering=1, encoding=encoding,
@@ -343,7 +362,7 @@ class PytestPluginManager(PluginManager):
         # Used to know when we are importing conftests after the pytest_configure stage
         self._configured = False

-    def parse_hookimpl_opts(self, plugin, name):
+    def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str):
         # pytest hooks are always prefixed with pytest_
         # so we avoid accessing possibly non-readable attributes
         # (see issue #1073)
@@ -372,7 +391,7 @@ class PytestPluginManager(PluginManager):
             opts.setdefault(name, hasattr(method, name) or name in known_marks)
         return opts

-    def parse_hookspec_opts(self, module_or_class, name):
+    def parse_hookspec_opts(self, module_or_class, name: str):
         opts = super().parse_hookspec_opts(module_or_class, name)
         if opts is None:
             method = getattr(module_or_class, name)
@@ -389,7 +408,9 @@ class PytestPluginManager(PluginManager):
         }
         return opts

-    def register(self, plugin: _PluggyPlugin, name: Optional[str] = None):
+    def register(
+        self, plugin: _PluggyPlugin, name: Optional[str] = None
+    ) -> Optional[str]:
         if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS:
             warnings.warn(
                 PytestConfigWarning(
@@ -399,8 +420,8 @@ class PytestPluginManager(PluginManager):
                     )
                 )
             )
-            return
-        ret = super().register(plugin, name)
+            return None
+        ret = super().register(plugin, name)  # type: Optional[str]
         if ret:
             self.hook.pytest_plugin_registered.call_historic(
                 kwargs=dict(plugin=plugin, manager=self)
@@ -410,11 +431,12 @@ class PytestPluginManager(PluginManager):
             self.consider_module(plugin)
         return ret

-    def getplugin(self, name):
+    def getplugin(self, name: str):
         # support deprecated naming because plugins (xdist e.g.) use it
-        return self.get_plugin(name)
+        plugin = self.get_plugin(name)  # type: Optional[_PluggyPlugin]
+        return plugin

-    def hasplugin(self, name):
+    def hasplugin(self, name: str) -> bool:
         """Return True if the plugin with the given name is registered."""
         return bool(self.get_plugin(name))

@@ -436,7 +458,7 @@ class PytestPluginManager(PluginManager):
     #
     # internal API for local conftest plugin handling
     #
-    def _set_initial_conftests(self, namespace):
+    def _set_initial_conftests(self, namespace: argparse.Namespace) -> None:
         """ load initial conftest files given a preparsed "namespace".
         As conftest files may add their own command line options
         which have arguments ('--my-opt somepath') we might get some
@@ -454,8 +476,8 @@ class PytestPluginManager(PluginManager):
         self._using_pyargs = namespace.pyargs
         testpaths = namespace.file_or_dir
         foundanchor = False
-        for path in testpaths:
-            path = str(path)
+        for testpath in testpaths:
+            path = str(testpath)
             # remove node-id syntax
             i = path.find("::")
             if i != -1:
@@ -467,7 +489,9 @@ class PytestPluginManager(PluginManager):
         if not foundanchor:
             self._try_load_conftest(current, namespace.importmode)

-    def _try_load_conftest(self, anchor, importmode):
+    def _try_load_conftest(
+        self, anchor: py.path.local, importmode: Union[str, ImportMode]
+    ) -> None:
         self._getconftestmodules(anchor, importmode)
         # let's also consider test* subdirs
         if anchor.check(dir=1):
@@ -476,7 +500,9 @@ class PytestPluginManager(PluginManager):
                 self._getconftestmodules(x, importmode)

     @lru_cache(maxsize=128)
-    def _getconftestmodules(self, path, importmode):
+    def _getconftestmodules(
+        self, path: py.path.local, importmode: Union[str, ImportMode],
+    ) -> List[types.ModuleType]:
         if self._noconftest:
             return []

@@ -499,7 +525,9 @@ class PytestPluginManager(PluginManager):
             self._dirpath2confmods[directory] = clist
         return clist

-    def _rget_with_confmod(self, name, path, importmode):
+    def _rget_with_confmod(
+        self, name: str, path: py.path.local, importmode: Union[str, ImportMode],
+    ) -> Tuple[types.ModuleType, Any]:
         modules = self._getconftestmodules(path, importmode)
         for mod in reversed(modules):
             try:
@@ -508,7 +536,9 @@ class PytestPluginManager(PluginManager):
                 continue
         raise KeyError(name)

-    def _importconftest(self, conftestpath, importmode):
+    def _importconftest(
+        self, conftestpath: py.path.local, importmode: Union[str, ImportMode],
+    ) -> types.ModuleType:
         # Use a resolved Path object as key to avoid loading the same conftest twice
         # with build systems that create build directories containing
         # symlinks to actual files.
@@ -526,7 +556,9 @@ class PytestPluginManager(PluginManager):
         try:
             mod = import_path(conftestpath, mode=importmode)
         except Exception as e:
-            raise ConftestImportFailure(conftestpath, sys.exc_info()) from e
+            assert e.__traceback__ is not None
+            exc_info = (type(e), e, e.__traceback__)
+            raise ConftestImportFailure(conftestpath, exc_info) from e

         self._check_non_top_pytest_plugins(mod, conftestpath)

@@ -542,7 +574,9 @@ class PytestPluginManager(PluginManager):
         self.consider_conftest(mod)
         return mod

-    def _check_non_top_pytest_plugins(self, mod, conftestpath):
+    def _check_non_top_pytest_plugins(
+        self, mod: types.ModuleType, conftestpath: py.path.local,
+    ) -> None:
         if (
             hasattr(mod, "pytest_plugins")
             and self._configured
@@ -564,7 +598,9 @@ class PytestPluginManager(PluginManager):
    #
    #

-    def consider_preparse(self, args, *, exclude_only: bool = False) -> None:
+    def consider_preparse(
+        self, args: Sequence[str], *, exclude_only: bool = False
+    ) -> None:
         i = 0
         n = len(args)
         while i < n:
@@ -585,7 +621,7 @@ class PytestPluginManager(PluginManager):
                 continue
             self.consider_pluginarg(parg)

-    def consider_pluginarg(self, arg) -> None:
+    def consider_pluginarg(self, arg: str) -> None:
         if arg.startswith("no:"):
             name = arg[3:]
             if name in essential_plugins:
@@ -610,7 +646,7 @@ class PytestPluginManager(PluginManager):
                 del self._name2plugin["pytest_" + name]
         self.import_plugin(arg, consider_entry_points=True)

-    def consider_conftest(self, conftestmodule) -> None:
+    def consider_conftest(self, conftestmodule: types.ModuleType) -> None:
         self.register(conftestmodule, name=conftestmodule.__file__)

     def consider_env(self) -> None:
@@ -619,7 +655,9 @@ class PytestPluginManager(PluginManager):
     def consider_module(self, mod: types.ModuleType) -> None:
         self._import_plugin_specs(getattr(mod, "pytest_plugins", []))

-    def _import_plugin_specs(self, spec):
+    def _import_plugin_specs(
+        self, spec: Union[None, types.ModuleType, str, Sequence[str]]
+    ) -> None:
         plugins = _get_plugin_specs_as_list(spec)
         for import_spec in plugins:
             self.import_plugin(import_spec)
@@ -636,7 +674,6 @@ class PytestPluginManager(PluginManager):
         assert isinstance(modname, str), (
             "module name as text required, got %r" % modname
         )
-        modname = str(modname)
         if self.is_blocked(modname) or self.get_plugin(modname) is not None:
             return

@@ -668,27 +705,29 @@ class PytestPluginManager(PluginManager):
         self.register(mod, modname)


-def _get_plugin_specs_as_list(specs):
-    """
-    Parses a list of "plugin specs" and returns a list of plugin names.
-
-    Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in
-    which case it is returned as a list. Specs can also be `None` in which case an
-    empty list is returned.
-    """
-    if specs is not None and not isinstance(specs, types.ModuleType):
-        if isinstance(specs, str):
-            specs = specs.split(",") if specs else []
-        if not isinstance(specs, (list, tuple)):
-            raise UsageError(
-                "Plugin specs must be a ','-separated string or a "
-                "list/tuple of strings for plugin names. Given: %r" % specs
-            )
+def _get_plugin_specs_as_list(
+    specs: Union[None, types.ModuleType, str, Sequence[str]]
+) -> List[str]:
+    """Parse a plugins specification into a list of plugin names."""
+    # None means empty.
+    if specs is None:
+        return []
+    # Workaround for #3899 - a submodule which happens to be called "pytest_plugins".
+    if isinstance(specs, types.ModuleType):
+        return []
+    # Comma-separated list.
+    if isinstance(specs, str):
+        return specs.split(",") if specs else []
+    # Direct specification.
+    if isinstance(specs, collections.abc.Sequence):
         return list(specs)
-    return []
+    raise UsageError(
+        "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %r"
+        % specs
+    )


-def _ensure_removed_sysmodule(modname):
+def _ensure_removed_sysmodule(modname: str) -> None:
     try:
         del sys.modules[modname]
     except KeyError:
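A sketch of how the rewritten helper responds to each accepted spec form (return values assume the implementation in the hunk above):

.. code-block:: python

    import types

    _get_plugin_specs_as_list(None)              # []  (None means empty)
    _get_plugin_specs_as_list("")                # []
    _get_plugin_specs_as_list("xdist,cov")       # ["xdist", "cov"]
    _get_plugin_specs_as_list(("xdist", "cov"))  # ["xdist", "cov"]
    _get_plugin_specs_as_list(types.ModuleType("pytest_plugins"))  # []  (#3899 workaround)
    _get_plugin_specs_as_list(42)                # raises UsageError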
@@ -703,7 +742,7 @@ class Notset:
 notset = Notset()


-def _iter_rewritable_modules(package_files):
+def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]:
     """
     Given an iterable of file names in a source distribution, return the "names" that should
     be marked for assertion rewrite (for example the package "pytest_mock/__init__.py" should
@@ -766,6 +805,10 @@ def _iter_rewritable_modules(package_files):
         yield from _iter_rewritable_modules(new_package_files)


+def _args_converter(args: Iterable[str]) -> Tuple[str, ...]:
+    return tuple(args)
+
+
 class Config:
     """
     Access to configuration values, pluginmanager and plugin hooks.
@@ -793,9 +836,9 @@ class Config:
         Plugins accessing ``InvocationParams`` must be aware of that.
         """

-        args = attr.ib(converter=tuple)
+        args = attr.ib(type=Tuple[str, ...], converter=_args_converter)
         """tuple of command-line arguments as passed to ``pytest.main()``."""
-        plugins = attr.ib()
+        plugins = attr.ib(type=Optional[Sequence[Union[str, _PluggyPlugin]]])
         """list of extra plugins, might be `None`."""
         dir = attr.ib(type=Path)
         """directory where ``pytest.main()`` was invoked from."""
@@ -855,7 +898,7 @@ class Config:
         """Backward compatibility"""
         return py.path.local(str(self.invocation_params.dir))

-    def add_cleanup(self, func) -> None:
+    def add_cleanup(self, func: Callable[[], None]) -> None:
         """ Add a function to be called when the config object gets out of
         use (usually coninciding with pytest_unconfigure)."""
         self._cleanup.append(func)
@@ -876,12 +919,15 @@ class Config:
             fin = self._cleanup.pop()
             fin()

-    def get_terminal_writer(self):
-        return self.pluginmanager.get_plugin("terminalreporter")._tw
+    def get_terminal_writer(self) -> TerminalWriter:
+        terminalreporter = self.pluginmanager.get_plugin(
+            "terminalreporter"
+        )  # type: TerminalReporter
+        return terminalreporter._tw

     def pytest_cmdline_parse(
         self, pluginmanager: PytestPluginManager, args: List[str]
-    ) -> object:
+    ) -> "Config":
         try:
             self.parse(args)
         except UsageError:
@@ -923,7 +969,7 @@ class Config:
             sys.stderr.write("INTERNALERROR> %s\n" % line)
         sys.stderr.flush()

-    def cwd_relative_nodeid(self, nodeid):
+    def cwd_relative_nodeid(self, nodeid: str) -> str:
         # nodeid's are relative to the rootpath, compute relative to cwd
         if self.invocation_dir != self.rootdir:
             fullpath = self.rootdir.join(nodeid)
@@ -931,7 +977,7 @@ class Config:
         return nodeid

     @classmethod
-    def fromdictargs(cls, option_dict, args):
+    def fromdictargs(cls, option_dict, args) -> "Config":
         """ constructor usable for subprocesses. """
         config = get_config(args)
         config.option.__dict__.update(option_dict)
@@ -949,7 +995,7 @@ class Config:
             setattr(self.option, opt.dest, opt.default)

     @hookimpl(trylast=True)
-    def pytest_load_initial_conftests(self, early_config):
+    def pytest_load_initial_conftests(self, early_config: "Config") -> None:
         self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)

     def _initini(self, args: Sequence[str]) -> None:
@@ -1078,7 +1124,7 @@ class Config:
             raise
         self._validate_keys()

-    def _checkversion(self):
+    def _checkversion(self) -> None:
         import pytest

         minver = self.inicfg.get("minversion", None)
@@ -1172,7 +1218,7 @@ class Config:
         except PrintHelp:
             pass

-    def addinivalue_line(self, name, line):
+    def addinivalue_line(self, name: str, line: str) -> None:
         """ add a line to an ini-file option. The option must have been
         declared but might not yet be set in which case the line becomes the
         the first line in its value. """
@@ -1191,11 +1237,11 @@ class Config:
         self._inicache[name] = val = self._getini(name)
         return val

-    def _getini(self, name: str) -> Any:
+    def _getini(self, name: str):
         try:
             description, type, default = self._parser._inidict[name]
-        except KeyError:
-            raise ValueError("unknown configuration value: {!r}".format(name))
+        except KeyError as e:
+            raise ValueError("unknown configuration value: {!r}".format(name)) from e
         override_value = self._get_override_ini_value(name)
         if override_value is None:
             try:
@@ -1236,12 +1282,14 @@ class Config:
             else:
                 return value
         elif type == "bool":
-            return bool(_strtobool(str(value).strip()))
+            return _strtobool(str(value).strip())
         else:
             assert type is None
             return value

-    def _getconftest_pathlist(self, name, path):
+    def _getconftest_pathlist(
+        self, name: str, path: py.path.local
+    ) -> Optional[List[py.path.local]]:
         try:
             mod, relroots = self.pluginmanager._rget_with_confmod(
                 name, path, self.getoption("importmode")
@@ -1249,7 +1297,7 @@ class Config:
         except KeyError:
             return None
         modpath = py.path.local(mod.__file__).dirpath()
-        values = []
+        values = []  # type: List[py.path.local]
         for relroot in relroots:
             if not isinstance(relroot, py.path.local):
                 relroot = relroot.replace("/", py.path.local.sep)
@@ -1291,20 +1339,20 @@ class Config:
             if val is None and skip:
                 raise AttributeError(name)
             return val
-        except AttributeError:
+        except AttributeError as e:
             if default is not notset:
                 return default
             if skip:
                 import pytest

                 pytest.skip("no {!r} option found".format(name))
-            raise ValueError("no option named {!r}".format(name))
+            raise ValueError("no option named {!r}".format(name)) from e

-    def getvalue(self, name, path=None):
+    def getvalue(self, name: str, path=None):
         """ (deprecated, use getoption()) """
         return self.getoption(name)

-    def getvalueorskip(self, name, path=None):
+    def getvalueorskip(self, name: str, path=None):
         """ (deprecated, use getoption(skip=True)) """
         return self.getoption(name, skip=True)

@@ -1330,7 +1378,7 @@ class Config:
     )


-def _assertion_supported():
+def _assertion_supported() -> bool:
     try:
         assert False
     except AssertionError:
@@ -1339,12 +1387,14 @@ def _assertion_supported():
     return False


-def create_terminal_writer(config: Config, *args, **kwargs) -> TerminalWriter:
+def create_terminal_writer(
+    config: Config, file: Optional[TextIO] = None
+) -> TerminalWriter:
     """Create a TerminalWriter instance configured according to the options
     in the config object. Every code which requires a TerminalWriter object
     and has access to a config object should use this function.
     """
-    tw = TerminalWriter(*args, **kwargs)
+    tw = TerminalWriter(file=file)
     if config.option.color == "yes":
         tw.hasmarkup = True
     if config.option.color == "no":
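Callers that previously forwarded arbitrary ``*args``/``**kwargs`` now have exactly one knob; a sketch of the two supported call shapes (assuming a configured ``config`` object):

.. code-block:: python

    import sys

    tw = create_terminal_writer(config)                       # default output
    tw_err = create_terminal_writer(config, file=sys.stderr)  # explicit stream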
@@ -1352,8 +1402,8 @@ def create_terminal_writer(config: Config, *args, **kwargs) -> TerminalWriter:
     return tw


-def _strtobool(val):
-    """Convert a string representation of truth to true (1) or false (0).
+def _strtobool(val: str) -> bool:
+    """Convert a string representation of truth to True or False.

     True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
     are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
@@ -1363,8 +1413,8 @@ def _strtobool(val):
     """
     val = val.lower()
     if val in ("y", "yes", "t", "true", "on", "1"):
-        return 1
+        return True
     elif val in ("n", "no", "f", "false", "off", "0"):
-        return 0
+        return False
     else:
         raise ValueError("invalid truth value {!r}".format(val))
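The semantics are unchanged; only the return type becomes a proper ``bool``. A sketch:

.. code-block:: python

    _strtobool("YES")    # True  -- input is lowercased first
    _strtobool("off")    # False
    _strtobool("maybe")  # raises ValueError: invalid truth value 'maybe'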
@@ -265,9 +265,9 @@ class Argument:
         else:
             try:
                 self.dest = self._short_opts[0][1:]
-            except IndexError:
+            except IndexError as e:
                 self.dest = "???"  # Needed for the error repr.
-                raise ArgumentError("need a long or short option", self)
+                raise ArgumentError("need a long or short option", self) from e

     def names(self) -> List[str]:
         return self._short_opts + self._long_opts
@@ -26,7 +26,7 @@ def _parse_ini_config(path: py.path.local) -> iniconfig.IniConfig:
     try:
         return iniconfig.IniConfig(path)
     except iniconfig.ParseError as exc:
-        raise UsageError(str(exc))
+        raise UsageError(str(exc)) from exc


 def load_config_dict_from_file(
@@ -28,10 +28,10 @@ def _validate_usepdb_cls(value: str) -> Tuple[str, str]:
     """Validate syntax of --pdbcls option."""
     try:
         modname, classname = value.split(":")
-    except ValueError:
+    except ValueError as e:
         raise argparse.ArgumentTypeError(
             "{!r} is not in the format 'modname:classname'".format(value)
-        )
+        ) from e
     return (modname, classname)

@@ -130,7 +130,7 @@ class pytestPDB:
                 value = ":".join((modname, classname))
                 raise UsageError(
                     "--pdbcls: could not import {!r}: {}".format(value, exc)
-                )
+                ) from exc
             else:
                 import pdb

@@ -938,13 +938,13 @@ def _eval_scope_callable(
         # Type ignored because there is no typing mechanism to specify
         # keyword arguments, currently.
        result = scope_callable(fixture_name=fixture_name, config=config)  # type: ignore[call-arg]  # noqa: F821
-    except Exception:
+    except Exception as e:
         raise TypeError(
             "Error evaluating {} while defining fixture '{}'.\n"
             "Expected a function with the signature (*, fixture_name, config)".format(
                 scope_callable, fixture_name
             )
-        )
+        ) from e
     if not isinstance(result, str):
         fail(
             "Expected {} to return a 'str' while defining fixture '{}', but it returned:\n"
@@ -96,7 +96,7 @@ def pytest_addoption(parser: Parser) -> None:
 @pytest.hookimpl(hookwrapper=True)
 def pytest_cmdline_parse():
     outcome = yield
-    config = outcome.get_result()
+    config = outcome.get_result()  # type: Config
     if config.option.debug:
         path = os.path.abspath("pytestdebug.log")
         debugfile = open(path, "w")
@@ -124,7 +124,7 @@ def pytest_cmdline_parse():
     config.add_cleanup(unset_tracing)


-def showversion(config):
+def showversion(config: Config) -> None:
     if config.option.version > 1:
         sys.stderr.write(
             "This is pytest version {}, imported from {}\n".format(
@@ -224,7 +224,7 @@ def showhelp(config: Config) -> None:
     conftest_options = [("pytest_plugins", "list of plugin names to load")]


-def getpluginversioninfo(config):
+def getpluginversioninfo(config: Config) -> List[str]:
     lines = []
     plugininfo = config.pluginmanager.list_plugin_distinfo()
     if plugininfo:
@@ -143,7 +143,7 @@ def pytest_configure(config: "Config") -> None:
 @hookspec(firstresult=True)
 def pytest_cmdline_parse(
     pluginmanager: "PytestPluginManager", args: List[str]
-) -> Optional[object]:
+) -> Optional["Config"]:
     """return initialized config object, parsing the specified args.

     Stops at first non-None result, see :ref:`firstresult`
@@ -236,10 +236,16 @@ class _NodeReporter:
         self._add_simple(Junit.skipped, "collection skipped", report.longrepr)

     def append_error(self, report: TestReport) -> None:
-        if report.when == "teardown":
-            msg = "test teardown failure"
+        assert report.longrepr is not None
+        if getattr(report.longrepr, "reprcrash", None) is not None:
+            reason = report.longrepr.reprcrash.message
         else:
-            msg = "test setup failure"
+            reason = str(report.longrepr)
+
+        if report.when == "teardown":
+            msg = 'failed on teardown with "{}"'.format(reason)
+        else:
+            msg = 'failed on setup with "{}"'.format(reason)
         self._add_simple(Junit.error, msg, report.longrepr)

     def append_skipped(self, report: TestReport) -> None:
@@ -141,9 +141,14 @@ class PercentStyleMultiline(logging.PercentStyle):

         if auto_indent_option is None:
             return 0
-        elif type(auto_indent_option) is int:
+        elif isinstance(auto_indent_option, bool):
+            if auto_indent_option:
+                return -1
+            else:
+                return 0
+        elif isinstance(auto_indent_option, int):
             return int(auto_indent_option)
-        elif type(auto_indent_option) is str:
+        elif isinstance(auto_indent_option, str):
             try:
                 return int(auto_indent_option)
             except ValueError:
@@ -153,9 +158,6 @@ class PercentStyleMultiline(logging.PercentStyle):
                     return -1
                 except ValueError:
                     return 0
-        elif type(auto_indent_option) is bool:
-            if auto_indent_option:
-                return -1

         return 0

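The reordering matters because ``bool`` is a subclass of ``int`` in Python, so the bool branch must run before the int branch or ``True``/``False`` would be treated as the indent widths 1 and 0. A sketch of the resulting mapping, assuming this helper is fed by pytest's auto-indent option (e.g. the ``log_auto_indent`` setting):

.. code-block:: python

    isinstance(True, int)   # True -- why the bool check now comes first
    # None      -> 0   (no auto-indentation)
    # True      -> -1  (indent to match the message prefix), False -> 0
    # 8 or "8"  -> 8   (explicit indent width)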
@@ -487,13 +489,13 @@ def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[int]:
     log_level = log_level.upper()
     try:
         return int(getattr(logging, log_level, log_level))
-    except ValueError:
+    except ValueError as e:
         # Python logging does not recognise this as a logging level
         raise pytest.UsageError(
             "'{}' is not recognized as a logging level name for "
             "'{}'. Please consider passing the "
             "logging level num instead.".format(log_level, setting_name)
-        )
+        ) from e


 # run after terminalreporter/capturemanager are configured
@@ -1,124 +0,0 @@
-import os
-import platform
-import sys
-import traceback
-from typing import Any
-from typing import Dict
-from typing import List
-from typing import Optional
-
-from ..outcomes import fail
-from ..outcomes import TEST_OUTCOME
-from .structures import Mark
-from _pytest.nodes import Item
-
-
-def compiled_eval(expr: str, d: Dict[str, object]) -> Any:
-    import _pytest._code
-
-    exprcode = _pytest._code.compile(expr, mode="eval")
-    return eval(exprcode, d)
-
-
-class MarkEvaluator:
-    def __init__(self, item: Item, name: str) -> None:
-        self.item = item
-        self._marks = None  # type: Optional[List[Mark]]
-        self._mark = None  # type: Optional[Mark]
-        self._mark_name = name
-
-    def __bool__(self) -> bool:
-        # don't cache here to prevent staleness
-        return bool(self._get_marks())
-
-    def wasvalid(self) -> bool:
-        return not hasattr(self, "exc")
-
-    def _get_marks(self) -> List[Mark]:
-        return list(self.item.iter_markers(name=self._mark_name))
-
-    def invalidraise(self, exc) -> Optional[bool]:
-        raises = self.get("raises")
-        if not raises:
-            return None
-        return not isinstance(exc, raises)
-
-    def istrue(self) -> bool:
-        try:
-            return self._istrue()
-        except TEST_OUTCOME:
-            self.exc = sys.exc_info()
-            if isinstance(self.exc[1], SyntaxError):
-                # TODO: Investigate why SyntaxError.offset is Optional, and if it can be None here.
-                assert self.exc[1].offset is not None
-                msg = [" " * (self.exc[1].offset + 4) + "^"]
-                msg.append("SyntaxError: invalid syntax")
-            else:
-                msg = traceback.format_exception_only(*self.exc[:2])
-            fail(
-                "Error evaluating %r expression\n"
-                "    %s\n"
-                "%s" % (self._mark_name, self.expr, "\n".join(msg)),
-                pytrace=False,
-            )
-
-    def _getglobals(self) -> Dict[str, object]:
-        d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config}
-        if hasattr(self.item, "obj"):
-            d.update(self.item.obj.__globals__)  # type: ignore[attr-defined] # noqa: F821
-        return d
-
-    def _istrue(self) -> bool:
-        if hasattr(self, "result"):
-            result = getattr(self, "result")  # type: bool
-            return result
-        self._marks = self._get_marks()
-
-        if self._marks:
-            self.result = False
-            for mark in self._marks:
-                self._mark = mark
-                if "condition" not in mark.kwargs:
-                    args = mark.args
-                else:
-                    args = (mark.kwargs["condition"],)
-
-                for expr in args:
-                    self.expr = expr
-                    if isinstance(expr, str):
-                        d = self._getglobals()
-                        result = compiled_eval(expr, d)
-                    else:
-                        if "reason" not in mark.kwargs:
-                            # XXX better be checked at collection time
-                            msg = (
-                                "you need to specify reason=STRING "
-                                "when using booleans as conditions."
-                            )
-                            fail(msg)
-                        result = bool(expr)
-                    if result:
-                        self.result = True
-                        self.reason = mark.kwargs.get("reason", None)
-                        self.expr = expr
-                        return self.result
-
-                if not args:
-                    self.result = True
-                    self.reason = mark.kwargs.get("reason", None)
-                    return self.result
-        return False
-
-    def get(self, attr, default=None):
-        if self._mark is None:
-            return default
-        return self._mark.kwargs.get(attr, default)
-
-    def getexplanation(self):
-        expl = getattr(self, "reason", None) or self.get("reason", None)
-        if not expl:
-            if not hasattr(self, "expr"):
-                return ""
-            else:
-                return "condition: " + str(self.expr)
-        return expl
@@ -46,11 +46,19 @@ def get_empty_parameterset_mark(
 ) -> "MarkDecorator":
     from ..nodes import Collector

+    fs, lineno = getfslineno(func)
+    reason = "got empty parameter set %r, function %s at %s:%d" % (
+        argnames,
+        func.__name__,
+        fs,
+        lineno,
+    )
+
     requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
     if requested_mark in ("", None, "skip"):
-        mark = MARK_GEN.skip
+        mark = MARK_GEN.skip(reason=reason)
     elif requested_mark == "xfail":
-        mark = MARK_GEN.xfail(run=False)
+        mark = MARK_GEN.xfail(reason=reason, run=False)
     elif requested_mark == "fail_at_collect":
         f_name = func.__name__
         _, lineno = getfslineno(func)
@@ -59,14 +67,7 @@ def get_empty_parameterset_mark(
         )
     else:
         raise LookupError(requested_mark)
-    fs, lineno = getfslineno(func)
-    reason = "got empty parameter set %r, function %s at %s:%d" % (
-        argnames,
-        func.__name__,
-        fs,
-        lineno,
-    )
-    return mark(reason=reason)
+    return mark


 class ParameterSet(
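The net effect is that the reason string is computed once up front and baked into the mark when it is created, rather than when it is applied. For context, a sketch of the situation this code handles, an empty parameter set (test name is illustrative):

.. code-block:: python

    import pytest

    # With the default setting this test is skipped, with a reason naming the
    # argnames, the function, and its location; the ini option read via
    # EMPTY_PARAMETERSET_OPTION can select xfail or fail_at_collect instead.
    @pytest.mark.parametrize("x", [])
    def test_nothing(x):
        assert x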
@@ -379,6 +380,76 @@ def store_mark(obj, mark: Mark) -> None:
     obj.pytestmark = get_unpacked_marks(obj) + [mark]


+# Typing for builtin pytest marks. This is cheating; it gives builtin marks
+# special privilege, and breaks modularity. But practicality beats purity...
+if TYPE_CHECKING:
+    from _pytest.fixtures import _Scope
+
+    class _SkipMarkDecorator(MarkDecorator):
+        @overload  # type: ignore[override,misc]
+        def __call__(self, arg: _Markable) -> _Markable:
+            raise NotImplementedError()
+
+        @overload  # noqa: F811
+        def __call__(self, reason: str = ...) -> "MarkDecorator":  # noqa: F811
+            raise NotImplementedError()
+
+    class _SkipifMarkDecorator(MarkDecorator):
+        def __call__(  # type: ignore[override]
+            self,
+            condition: Union[str, bool] = ...,
+            *conditions: Union[str, bool],
+            reason: str = ...
+        ) -> MarkDecorator:
+            raise NotImplementedError()
+
+    class _XfailMarkDecorator(MarkDecorator):
+        @overload  # type: ignore[override,misc]
+        def __call__(self, arg: _Markable) -> _Markable:
+            raise NotImplementedError()
+
+        @overload  # noqa: F811
+        def __call__(  # noqa: F811
+            self,
+            condition: Union[str, bool] = ...,
+            *conditions: Union[str, bool],
+            reason: str = ...,
+            run: bool = ...,
+            raises: Union[BaseException, Tuple[BaseException, ...]] = ...,
+            strict: bool = ...
+        ) -> MarkDecorator:
+            raise NotImplementedError()
+
+    class _ParametrizeMarkDecorator(MarkDecorator):
+        def __call__(  # type: ignore[override]
+            self,
+            argnames: Union[str, List[str], Tuple[str, ...]],
+            argvalues: Iterable[Union[ParameterSet, Sequence[object], object]],
+            *,
+            indirect: Union[bool, Sequence[str]] = ...,
+            ids: Optional[
+                Union[
+                    Iterable[Union[None, str, float, int, bool]],
+                    Callable[[object], Optional[object]],
+                ]
+            ] = ...,
+            scope: Optional[_Scope] = ...
+        ) -> MarkDecorator:
+            raise NotImplementedError()
+
+    class _UsefixturesMarkDecorator(MarkDecorator):
+        def __call__(  # type: ignore[override]
+            self, *fixtures: str
+        ) -> MarkDecorator:
+            raise NotImplementedError()
+
+    class _FilterwarningsMarkDecorator(MarkDecorator):
+        def __call__(  # type: ignore[override]
+            self, *filters: str
+        ) -> MarkDecorator:
+            raise NotImplementedError()
+
+
 class MarkGenerator:
     """Factory for :class:`MarkDecorator` objects - exposed as
     a ``pytest.mark`` singleton instance.
@@ -397,6 +468,15 @@ class MarkGenerator:
     _config = None  # type: Optional[Config]
     _markers = set()  # type: Set[str]

+    # See TYPE_CHECKING above.
+    if TYPE_CHECKING:
+        skip = None  # type: _SkipMarkDecorator
+        skipif = None  # type: _SkipifMarkDecorator
+        xfail = None  # type: _XfailMarkDecorator
+        parametrize = None  # type: _ParametrizeMarkDecorator
+        usefixtures = None  # type: _UsefixturesMarkDecorator
+        filterwarnings = None  # type: _FilterwarningsMarkDecorator
+
     def __getattr__(self, name: str) -> MarkDecorator:
         if name[0] == "_":
             raise AttributeError("Marker name must NOT start with underscore")
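These stubs exist only for type checkers; at runtime ``__getattr__`` still builds every mark dynamically. A sketch of what they let a checker validate (assuming mypy sees the annotations above):

.. code-block:: python

    import sys
    import pytest

    @pytest.mark.skipif(sys.platform == "win32", reason="POSIX only")  # checked OK
    @pytest.mark.parametrize("n", [1, 2, 3])                           # checked OK
    def test_n(n: int) -> None:
        assert n > 0

    # Whereas e.g. pytest.mark.skipif(reasons="typo") would now be rejected
    # by the checker instead of failing silently.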
@@ -73,7 +73,7 @@ def resolve(name: str) -> object:
             if expected == used:
                 raise
             else:
-                raise ImportError("import error in {}: {}".format(used, ex))
+                raise ImportError("import error in {}: {}".format(used, ex)) from ex
         found = annotated_getattr(found, part, used)
     return found

@@ -81,12 +81,12 @@ def resolve(name: str) -> object:
 def annotated_getattr(obj: object, name: str, ann: str) -> object:
     try:
         obj = getattr(obj, name)
-    except AttributeError:
+    except AttributeError as e:
         raise AttributeError(
             "{!r} object at {} has no attribute {!r}".format(
                 type(obj).__name__, ann, name
             )
-        )
+        ) from e
     return obj

@@ -276,7 +276,7 @@ class Node(metaclass=NodeMeta):
             marker_ = getattr(MARK_GEN, marker)
         else:
             raise ValueError("is not a string or pytest.mark.* Marker")
-        self.keywords[marker_.name] = marker
+        self.keywords[marker_.name] = marker_
         if append:
             self.own_markers.append(marker_.mark)
         else:
@@ -466,7 +466,7 @@ def import_path(
     """
     mode = ImportMode(mode)

-    path = Path(p)
+    path = Path(str(p))

     if not path.exists():
         raise ImportError(path)
@@ -1054,7 +1054,7 @@ class Testdir:
             args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
         return args

-    def parseconfig(self, *args: Union[str, py.path.local]) -> Config:
+    def parseconfig(self, *args) -> Config:
         """Return a new pytest Config instance from given commandline args.

         This invokes the pytest bootstrapping code in _pytest.config to create
@@ -1070,14 +1070,14 @@ class Testdir:

         import _pytest.config

-        config = _pytest.config._prepareconfig(args, self.plugins)  # type: Config
+        config = _pytest.config._prepareconfig(args, self.plugins)  # type: ignore[arg-type]
         # we don't know what the test will do with this half-setup config
         # object and thus we make sure it gets unconfigured properly in any
         # case (otherwise capturing could still be active, for example)
         self.request.addfinalizer(config._ensure_unconfigure)
         return config

-    def parseconfigure(self, *args):
+    def parseconfigure(self, *args) -> Config:
         """Return a new pytest configured Config instance.

         This returns a new :py:class:`_pytest.config.Config` instance like
@@ -1318,7 +1318,7 @@ class Testdir:
         Returns a :py:class:`RunResult`.
         """
         __tracebackhide__ = True
-        p = make_numbered_dir(root=Path(self.tmpdir), prefix="runpytest-")
+        p = make_numbered_dir(root=Path(str(self.tmpdir)), prefix="runpytest-")
         args = ("--basetemp=%s" % p,) + args
         plugins = [x for x in self.plugins if isinstance(x, str)]
         if plugins:
@@ -551,8 +551,10 @@ class Module(nodes.File, PyCollector):
         importmode = self.config.getoption("--import-mode")
         try:
             mod = import_path(self.fspath, mode=importmode)
-        except SyntaxError:
-            raise self.CollectError(ExceptionInfo.from_current().getrepr(style="short"))
+        except SyntaxError as e:
+            raise self.CollectError(
+                ExceptionInfo.from_current().getrepr(style="short")
+            ) from e
         except ImportPathMismatchError as e:
             raise self.CollectError(
                 "import file mismatch:\n"
@@ -562,8 +564,8 @@ class Module(nodes.File, PyCollector):
                 "  %s\n"
                 "HINT: remove __pycache__ / .pyc files and/or use a "
                 "unique basename for your test file modules" % e.args
-            )
-        except ImportError:
+            ) from e
+        except ImportError as e:
             exc_info = ExceptionInfo.from_current()
             if self.config.getoption("verbose") < 2:
                 exc_info.traceback = exc_info.traceback.filter(filter_traceback)
@@ -578,7 +580,7 @@ class Module(nodes.File, PyCollector):
                 "Hint: make sure your test modules/packages have valid Python names.\n"
                 "Traceback:\n"
                 "{traceback}".format(fspath=self.fspath, traceback=formatted_tb)
-            )
+            ) from e
         except _pytest.runner.Skipped as e:
             if e.allow_module_level:
                 raise
@@ -587,7 +589,7 @@ class Module(nodes.File, PyCollector):
                 "To decorate a test function, use the @pytest.mark.skip "
                 "or @pytest.mark.skipif decorators instead, and to skip a "
                 "module use `pytestmark = pytest.mark.{skip,skipif}."
-            )
+            ) from e
         self.config.pluginmanager.consider_module(mod)
         return mod

@@ -836,8 +838,8 @@ class CallSpec2:
     def getparam(self, name: str) -> object:
         try:
             return self.params[name]
-        except KeyError:
-            raise ValueError(name)
+        except KeyError as e:
+            raise ValueError(name) from e

     @property
     def id(self) -> str:
@@ -1074,8 +1076,8 @@ class Metafunc:
         except TypeError:
             try:
                 iter(ids)
-            except TypeError:
-                raise TypeError("ids must be a callable or an iterable")
+            except TypeError as e:
+                raise TypeError("ids must be a callable or an iterable") from e
         num_ids = len(parameters)

         # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
@@ -1,24 +1,30 @@
 """ support for skip/xfail functions and markers. """
+import os
+import platform
+import sys
+import traceback
+from typing import Generator
 from typing import Optional
 from typing import Tuple

+import attr
+
+import _pytest._code
 from _pytest.compat import TYPE_CHECKING
 from _pytest.config import Config
 from _pytest.config import hookimpl
 from _pytest.config.argparsing import Parser
-from _pytest.mark.evaluate import MarkEvaluator
+from _pytest.mark.structures import Mark
 from _pytest.nodes import Item
 from _pytest.outcomes import fail
 from _pytest.outcomes import skip
 from _pytest.outcomes import xfail
-from _pytest.python import Function
 from _pytest.reports import BaseReport
 from _pytest.runner import CallInfo
 from _pytest.store import StoreKey


-skipped_by_mark_key = StoreKey[bool]()
-evalxfail_key = StoreKey[MarkEvaluator]()
-unexpectedsuccess_key = StoreKey[str]()
+if TYPE_CHECKING:
+    from typing import Type


 def pytest_addoption(parser: Parser) -> None:
@@ -62,81 +68,200 @@ def pytest_configure(config: Config) -> None:
     )
     config.addinivalue_line(
         "markers",
-        "skipif(condition): skip the given test function if eval(condition) "
-        "results in a True value. Evaluation happens within the "
-        "module global context. Example: skipif('sys.platform == \"win32\"') "
-        "skips the test if we are on the win32 platform. see "
-        "https://docs.pytest.org/en/latest/skipping.html",
+        "skipif(condition, ..., *, reason=...): "
+        "skip the given test function if any of the conditions evaluate to True. "
+        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
+        "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
     )
     config.addinivalue_line(
         "markers",
-        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
-        "mark the test function as an expected failure if eval(condition) "
-        "has a True value. Optionally specify a reason for better reporting "
+        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
+        "mark the test function as an expected failure if any of the conditions "
+        "evaluate to True. Optionally specify a reason for better reporting "
         "and run=False if you don't even want to execute the test function. "
         "If only specific exception(s) are expected, you can list them in "
         "raises, and if the test fails in other ways, it will be reported as "
-        "a true failure. See https://docs.pytest.org/en/latest/skipping.html",
+        "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
     )


+def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
+    """Evaluate a single skipif/xfail condition.
+
+    If an old-style string condition is given, it is eval()'d, otherwise the
+    condition is bool()'d. If this fails, an appropriately formatted pytest.fail
+    is raised.
+
+    Returns (result, reason). The reason is only relevant if the result is True.
+    """
+    # String condition.
+    if isinstance(condition, str):
+        globals_ = {
+            "os": os,
+            "sys": sys,
+            "platform": platform,
+            "config": item.config,
+        }
+        if hasattr(item, "obj"):
+            globals_.update(item.obj.__globals__)  # type: ignore[attr-defined]
+        try:
+            condition_code = _pytest._code.compile(condition, mode="eval")
+            result = eval(condition_code, globals_)
+        except SyntaxError as exc:
+            msglines = [
+                "Error evaluating %r condition" % mark.name,
+                "    " + condition,
+                "    " + " " * (exc.offset or 0) + "^",
+                "SyntaxError: invalid syntax",
+            ]
+            fail("\n".join(msglines), pytrace=False)
+        except Exception as exc:
+            msglines = [
+                "Error evaluating %r condition" % mark.name,
+                "    " + condition,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+
+    # Boolean condition.
+    else:
+        try:
+            result = bool(condition)
+        except Exception as exc:
+            msglines = [
+                "Error evaluating %r condition as a boolean" % mark.name,
+                *traceback.format_exception_only(type(exc), exc),
+            ]
+            fail("\n".join(msglines), pytrace=False)
+
+    reason = mark.kwargs.get("reason", None)
+    if reason is None:
+        if isinstance(condition, str):
+            reason = "condition: " + condition
+        else:
+            # XXX better be checked at collection time
+            msg = (
+                "Error evaluating %r: " % mark.name
+                + "you need to specify reason=STRING when using booleans as conditions."
+            )
+            fail(msg, pytrace=False)
+
+    return result, reason
+
+
+@attr.s(slots=True, frozen=True)
+class Skip:
+    """The result of evaluate_skip_marks()."""
+
+    reason = attr.ib(type=str)
+
+
+def evaluate_skip_marks(item: Item) -> Optional[Skip]:
+    """Evaluate skip and skipif marks on item, returning Skip if triggered."""
+    for mark in item.iter_markers(name="skipif"):
+        if "condition" not in mark.kwargs:
+            conditions = mark.args
+        else:
+            conditions = (mark.kwargs["condition"],)
+
+        # Unconditional.
+        if not conditions:
+            reason = mark.kwargs.get("reason", "")
+            return Skip(reason)
+
+        # If any of the conditions are true.
+        for condition in conditions:
+            result, reason = evaluate_condition(item, mark, condition)
+            if result:
+                return Skip(reason)
+
+    for mark in item.iter_markers(name="skip"):
+        if "reason" in mark.kwargs:
+            reason = mark.kwargs["reason"]
+        elif mark.args:
+            reason = mark.args[0]
+        else:
+            reason = "unconditional skip"
+        return Skip(reason)
+
+    return None
+
+
+@attr.s(slots=True, frozen=True)
+class Xfail:
+    """The result of evaluate_xfail_marks()."""
+
+    reason = attr.ib(type=str)
+    run = attr.ib(type=bool)
+    strict = attr.ib(type=bool)
+    raises = attr.ib(type=Optional[Tuple["Type[BaseException]", ...]])
+
+
+def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
+    """Evaluate xfail marks on item, returning Xfail if triggered."""
+    for mark in item.iter_markers(name="xfail"):
+        run = mark.kwargs.get("run", True)
+        strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
+        raises = mark.kwargs.get("raises", None)
+        if "condition" not in mark.kwargs:
+            conditions = mark.args
+        else:
+            conditions = (mark.kwargs["condition"],)
+
+        # Unconditional.
+        if not conditions:
+            reason = mark.kwargs.get("reason", "")
+            return Xfail(reason, run, strict, raises)
+
+        # If any of the conditions are true.
+        for condition in conditions:
+            result, reason = evaluate_condition(item, mark, condition)
+            if result:
+                return Xfail(reason, run, strict, raises)
+
+    return None
+
+
+# Whether skipped due to skip or skipif marks.
+skipped_by_mark_key = StoreKey[bool]()
+# Saves the xfail mark evaluation. Can be refreshed during call if None.
+xfailed_key = StoreKey[Optional[Xfail]]()
+unexpectedsuccess_key = StoreKey[str]()
+
+
 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item: Item) -> None:
-    # Check if skip or skipif are specified as pytest marks
     item._store[skipped_by_mark_key] = False
-    eval_skipif = MarkEvaluator(item, "skipif")
-    if eval_skipif.istrue():
-        item._store[skipped_by_mark_key] = True
-        skip(eval_skipif.getexplanation())

-    for skip_info in item.iter_markers(name="skip"):
+    skipped = evaluate_skip_marks(item)
+    if skipped:
         item._store[skipped_by_mark_key] = True
-        if "reason" in skip_info.kwargs:
-            skip(skip_info.kwargs["reason"])
-        elif skip_info.args:
-            skip(skip_info.args[0])
-        else:
-            skip("unconditional skip")
+        skip(skipped.reason)

-    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
-    check_xfail_no_run(item)
+    if not item.config.option.runxfail:
+        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+        if xfailed and not xfailed.run:
+            xfail("[NOTRUN] " + xfailed.reason)


 @hookimpl(hookwrapper=True)
-def pytest_pyfunc_call(pyfuncitem: Function):
-    check_xfail_no_run(pyfuncitem)
-    outcome = yield
-    passed = outcome.excinfo is None
-    if passed:
-        check_strict_xfail(pyfuncitem)
+def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
+    xfailed = item._store.get(xfailed_key, None)
+    if xfailed is None:
+        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)

-
-def check_xfail_no_run(item: Item) -> None:
-    """check xfail(run=False)"""
     if not item.config.option.runxfail:
-        evalxfail = item._store[evalxfail_key]
-        if evalxfail.istrue():
-            if not evalxfail.get("run", True):
-                xfail("[NOTRUN] " + evalxfail.getexplanation())
+        if xfailed and not xfailed.run:
+            xfail("[NOTRUN] " + xfailed.reason)

-
-def check_strict_xfail(pyfuncitem: Function) -> None:
-    """check xfail(strict=True) for the given PASSING test"""
-    evalxfail = pyfuncitem._store[evalxfail_key]
-    if evalxfail.istrue():
-        strict_default = pyfuncitem.config.getini("xfail_strict")
-        is_strict_xfail = evalxfail.get("strict", strict_default)
-        if is_strict_xfail:
-            del pyfuncitem._store[evalxfail_key]
-            explanation = evalxfail.getexplanation()
-            fail("[XPASS(strict)] " + explanation, pytrace=False)
+    yield


 @hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
     outcome = yield
     rep = outcome.get_result()
-    evalxfail = item._store.get(evalxfail_key, None)
+    xfailed = item._store.get(xfailed_key, None)
     # unittest special case, see setting of unexpectedsuccess_key
     if unexpectedsuccess_key in item._store and rep.when == "call":
         reason = item._store[unexpectedsuccess_key]
@ -145,30 +270,27 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
|
|||
else:
|
||||
rep.longrepr = "Unexpected success"
|
||||
rep.outcome = "failed"
|
||||
|
||||
elif item.config.option.runxfail:
|
||||
pass # don't interfere
|
||||
elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
|
||||
assert call.excinfo.value.msg is not None
|
||||
rep.wasxfail = "reason: " + call.excinfo.value.msg
|
||||
rep.outcome = "skipped"
|
||||
elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
|
||||
elif not rep.skipped and xfailed:
|
||||
if call.excinfo:
|
||||
if evalxfail.invalidraise(call.excinfo.value):
|
||||
raises = xfailed.raises
|
||||
if raises is not None and not isinstance(call.excinfo.value, raises):
|
||||
rep.outcome = "failed"
|
||||
else:
|
||||
rep.outcome = "skipped"
|
||||
rep.wasxfail = evalxfail.getexplanation()
|
||||
rep.wasxfail = xfailed.reason
|
||||
elif call.when == "call":
|
||||
strict_default = item.config.getini("xfail_strict")
|
||||
is_strict_xfail = evalxfail.get("strict", strict_default)
|
||||
explanation = evalxfail.getexplanation()
|
||||
if is_strict_xfail:
|
||||
if xfailed.strict:
|
||||
rep.outcome = "failed"
|
||||
rep.longrepr = "[XPASS(strict)] {}".format(explanation)
|
||||
rep.longrepr = "[XPASS(strict)] " + xfailed.reason
|
||||
else:
|
||||
rep.outcome = "passed"
|
||||
rep.wasxfail = explanation
|
||||
rep.wasxfail = xfailed.reason
|
||||
elif (
|
||||
item._store.get(skipped_by_mark_key, True)
|
||||
and rep.skipped
|
||||
|
@ -183,9 +305,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
|
|||
rep.longrepr = str(filename), line + 1, reason
|
||||
|
||||
|
||||
# called by terminalreporter progress reporting
|
||||
|
||||
|
||||
def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
|
||||
if hasattr(report, "wasxfail"):
|
||||
if report.skipped:
|
||||
|
|
|
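The hunks above replace the old ``MarkEvaluator``-driven hooks with up-front evaluation whose result is stored under ``xfailed_key``. A minimal sketch of what that result object could look like, inferred only from the fields the diff uses (``reason``, ``run``, ``strict``, ``raises``); the real class definition is outside this excerpt:

.. code-block:: python

    from dataclasses import dataclass
    from typing import Optional, Tuple, Type


    @dataclass(frozen=True)
    class Xfail:
        reason: str    # explanation shown in the report ("" when not given)
        run: bool      # run=False means "[NOTRUN]": the test body is not executed
        strict: bool   # strict=True turns an unexpected pass into a failure
        # when set, only these exception types count as the expected failure
        raises: Optional[Tuple[Type[BaseException], ...]]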
@@ -13,6 +13,7 @@ from .pathlib import LOCK_TIMEOUT
from .pathlib import make_numbered_dir
from .pathlib import make_numbered_dir_with_cleanup
from .pathlib import Path
from _pytest.config import Config
from _pytest.fixtures import FixtureRequest
from _pytest.monkeypatch import MonkeyPatch

@@ -135,7 +136,7 @@ def get_user() -> Optional[str]:
return None


def pytest_configure(config) -> None:
def pytest_configure(config: Config) -> None:
"""Create a TempdirFactory and attach it to the config object.

This is to comply with existing plugins which expect the handler to be
@@ -48,8 +48,8 @@ def _parse_filter(
lineno = int(lineno_)
if lineno < 0:
raise ValueError
except (ValueError, OverflowError):
raise warnings._OptionError("invalid lineno {!r}".format(lineno_))
except (ValueError, OverflowError) as e:
raise warnings._OptionError("invalid lineno {!r}".format(lineno_)) from e
else:
lineno = 0
return (action, message, category, module, lineno)
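This hunk shows the chaining pattern the commit applies across the codebase: bind the caught exception and re-raise ``from`` it, so the original error is kept as ``__cause__`` instead of an implicit ``__context__``. A standalone sketch of the same shape, with a hypothetical ``parse_lineno`` helper and ``RuntimeError`` standing in for ``warnings._OptionError``:

.. code-block:: python

    def parse_lineno(text: str) -> int:
        """Parse a warning-filter line number, chaining any parse error."""
        try:
            lineno = int(text)
            if lineno < 0:
                raise ValueError("lineno must be non-negative")
        except (ValueError, OverflowError) as e:
            # "from e" records the original exception as __cause__
            raise RuntimeError("invalid lineno {!r}".format(text)) from e
        return lineno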
@@ -585,11 +585,11 @@ class TestInvocationVariants:
# Type ignored because `py.test` is not and will not be typed.
assert pytest.main == py.test.cmdline.main # type: ignore[attr-defined]

def test_invoke_with_invalid_type(self):
def test_invoke_with_invalid_type(self) -> None:
with pytest.raises(
TypeError, match="expected to be a list of strings, got: '-h'"
):
pytest.main("-h")
pytest.main("-h") # type: ignore[arg-type]

def test_invoke_with_path(self, tmpdir, capsys):
retcode = pytest.main(tmpdir)
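For context, ``pytest.main`` expects a list of argument strings (or ``None`` to fall back to ``sys.argv``); the added ``# type: ignore[arg-type]`` marks that the test deliberately passes a bare string to exercise the error path. A correct invocation passes each token separately:

.. code-block:: python

    import pytest

    # "tests" is a placeholder path; each command-line token is its own element
    exit_code = pytest.main(["-q", "tests"])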
@@ -372,7 +372,7 @@ def test_excinfo_no_python_sourcecode(tmpdir):
for item in excinfo.traceback:
print(item) # XXX: for some reason jinja.Template.render is printed in full
item.source # shouldn't fail
if item.path.basename == "test.txt":
if isinstance(item.path, py.path.local) and item.path.basename == "test.txt":
assert str(item.source) == "{{ h()}}:"
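The added ``isinstance`` guard matches the widened ``TracebackEntry.path`` annotation (``Union[py.path.local, str]``): frames whose code does not come from a real file report a plain string, which has no ``basename``. A small sketch of the narrowing pattern:

.. code-block:: python

    from typing import Union

    import py


    def path_basename(path: Union[py.path.local, str]) -> str:
        # only py.path.local has .basename; fall back to the raw string
        if isinstance(path, py.path.local):
            return path.basename
        return path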
@@ -11,6 +11,7 @@ import py.path
import _pytest._code
import pytest
from _pytest.compat import importlib_metadata
from _pytest.config import _get_plugin_specs_as_list
from _pytest.config import _iter_rewritable_modules
from _pytest.config import Config
from _pytest.config import ConftestImportFailure

@@ -1119,21 +1120,17 @@ def test_load_initial_conftest_last_ordering(_config_for_test):
assert [x.function.__module__ for x in values] == expected


def test_get_plugin_specs_as_list():
from _pytest.config import _get_plugin_specs_as_list

def exp_match(val):
def test_get_plugin_specs_as_list() -> None:
def exp_match(val: object) -> str:
return (
"Plugin specs must be a ','-separated string"
" or a list/tuple of strings for plugin names. Given: {}".format(
re.escape(repr(val))
)
"Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %s"
% re.escape(repr(val))
)

with pytest.raises(pytest.UsageError, match=exp_match({"foo"})):
_get_plugin_specs_as_list({"foo"})
_get_plugin_specs_as_list({"foo"}) # type: ignore[arg-type]
with pytest.raises(pytest.UsageError, match=exp_match({})):
_get_plugin_specs_as_list(dict())
_get_plugin_specs_as_list(dict()) # type: ignore[arg-type]

assert _get_plugin_specs_as_list(None) == []
assert _get_plugin_specs_as_list("") == []

@@ -1782,5 +1779,7 @@ def test_conftest_import_error_repr(tmpdir):
):
try:
raise RuntimeError("some error")
except Exception:
raise ConftestImportFailure(path, sys.exc_info())
except Exception as exc:
assert exc.__traceback__ is not None
exc_info = (type(exc), exc, exc.__traceback__)
raise ConftestImportFailure(path, exc_info) from exc
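The updated test rebuilds the ``(type, value, traceback)`` triple from the bound exception rather than calling ``sys.exc_info()``, which also allows chaining with ``from exc``. The step is generic enough to sketch as a helper (hypothetical name):

.. code-block:: python

    from types import TracebackType
    from typing import Tuple, Type


    def exc_info_from(
        exc: BaseException,
    ) -> Tuple[Type[BaseException], BaseException, TracebackType]:
        """Rebuild a sys.exc_info()-style triple from a caught exception."""
        assert exc.__traceback__ is not None  # set once the exception was raised
        return type(exc), exc, exc.__traceback__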
@@ -266,7 +266,7 @@ class TestPython:

@pytest.fixture
def arg(request):
raise ValueError()
raise ValueError("Error reason")
def test_function(arg):
pass
"""

@@ -278,7 +278,7 @@ class TestPython:
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(classname="test_setup_error", name="test_function")
fnode = tnode.find_first_by_tag("error")
fnode.assert_attr(message="test setup failure")
fnode.assert_attr(message='failed on setup with "ValueError: Error reason"')
assert "ValueError" in fnode.toxml()

@parametrize_families

@@ -290,7 +290,7 @@ class TestPython:
@pytest.fixture
def arg():
yield
raise ValueError()
raise ValueError('Error reason')
def test_function(arg):
pass
"""

@@ -301,7 +301,7 @@ class TestPython:
tnode = node.find_first_by_tag("testcase")
tnode.assert_attr(classname="test_teardown_error", name="test_function")
fnode = tnode.find_first_by_tag("error")
fnode.assert_attr(message="test teardown failure")
fnode.assert_attr(message='failed on teardown with "ValueError: Error reason"')
assert "ValueError" in fnode.toxml()

@parametrize_families

@@ -328,7 +328,9 @@ class TestPython:
fnode = first.find_first_by_tag("failure")
fnode.assert_attr(message="Exception: Call Exception")
snode = second.find_first_by_tag("error")
snode.assert_attr(message="test teardown failure")
snode.assert_attr(
message='failed on teardown with "Exception: Teardown Exception"'
)

@parametrize_families
def test_skip_contains_name_reason(self, testdir, run_and_parse, xunit_family):
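These assertions pin down the richer ``--junitxml`` messages described in the changelog entry. As a hypothetical standalone reproduction, saving the snippet below as ``test_setup_error.py`` and running ``pytest --junitxml=report.xml`` should produce an ``<error>`` node whose ``message`` attribute reads ``failed on setup with "ValueError: Error reason"``:

.. code-block:: python

    import pytest


    @pytest.fixture
    def arg(request):
        # raising during fixture setup makes junitxml emit an <error> node
        raise ValueError("Error reason")


    def test_function(arg):
        pass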
@@ -534,8 +534,8 @@ def test_outcomeexception_passes_except_Exception() -> None:
with pytest.raises(outcomes.OutcomeException):
try:
raise outcomes.OutcomeException("test")
except Exception:
raise NotImplementedError()
except Exception as e:
raise NotImplementedError from e


def test_pytest_exit() -> None:
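With the explicit chaining, the wrapping ``NotImplementedError`` now carries the original exception as its ``__cause__``. A small companion check (not part of the diff, with ``ValueError`` as a stand-in for the outcome exception) could assert that directly:

.. code-block:: python

    import pytest


    def test_wrapping_preserves_cause() -> None:
        with pytest.raises(NotImplementedError) as excinfo:
            try:
                raise ValueError("original")
            except Exception as e:
                raise NotImplementedError from e
        # "from e" stores the original error on the wrapper
        assert isinstance(excinfo.value.__cause__, ValueError)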
@@ -2,68 +2,74 @@ import sys

import pytest
from _pytest.runner import runtestprotocol
from _pytest.skipping import MarkEvaluator
from _pytest.skipping import evaluate_skip_marks
from _pytest.skipping import evaluate_xfail_marks
from _pytest.skipping import pytest_runtest_setup


class TestEvaluator:
class TestEvaluation:
def test_no_marker(self, testdir):
item = testdir.getitem("def test_func(): pass")
evalskipif = MarkEvaluator(item, "skipif")
assert not evalskipif
assert not evalskipif.istrue()
skipped = evaluate_skip_marks(item)
assert not skipped

def test_marked_no_args(self, testdir):
def test_marked_xfail_no_args(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz
@pytest.mark.xfail
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == ""
assert not ev.get("run", False)
xfailed = evaluate_xfail_marks(item)
assert xfailed
assert xfailed.reason == ""
assert xfailed.run

def test_marked_skipif_no_args(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.skipif
def test_func():
pass
"""
)
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == ""

def test_marked_one_arg(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')")
@pytest.mark.skipif("hasattr(os, 'sep')")
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: hasattr(os, 'sep')"
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: hasattr(os, 'sep')"

def test_marked_one_arg_with_reason(self, testdir):
item = testdir.getitem(
"""
import pytest
@pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
@pytest.mark.skipif("hasattr(os, 'sep')", attr=2, reason="hello world")
def test_func():
pass
"""
)
ev = MarkEvaluator(item, "xyz")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "hello world"
assert ev.get("attr") == 2
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "hello world"

def test_marked_one_arg_twice(self, testdir):
lines = [
"""@pytest.mark.skipif("not hasattr(os, 'murks')")""",
"""@pytest.mark.skipif("hasattr(os, 'murks')")""",
"""@pytest.mark.skipif(condition="hasattr(os, 'murks')")""",
]
for i in range(0, 2):
item = testdir.getitem(

@@ -76,11 +82,9 @@ class TestEvaluator:
"""
% (lines[i], lines[(i + 1) % 2])
)
ev = MarkEvaluator(item, "skipif")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: not hasattr(os, 'murks')"

def test_marked_one_arg_twice2(self, testdir):
item = testdir.getitem(

@@ -92,13 +96,11 @@ class TestEvaluator:
pass
"""
)
ev = MarkEvaluator(item, "skipif")
assert ev
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: not hasattr(os, 'murks')"
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: not hasattr(os, 'murks')"

def test_marked_skip_with_not_string(self, testdir) -> None:
def test_marked_skipif_with_boolean_without_reason(self, testdir) -> None:
item = testdir.getitem(
"""
import pytest

@@ -107,14 +109,34 @@ class TestEvaluator:
pass
"""
)
ev = MarkEvaluator(item, "skipif")
exc = pytest.raises(pytest.fail.Exception, ev.istrue)
assert exc.value.msg is not None
with pytest.raises(pytest.fail.Exception) as excinfo:
evaluate_skip_marks(item)
assert excinfo.value.msg is not None
assert (
"""Failed: you need to specify reason=STRING when using booleans as conditions."""
in exc.value.msg
"""Error evaluating 'skipif': you need to specify reason=STRING when using booleans as conditions."""
in excinfo.value.msg
)

def test_marked_skipif_with_invalid_boolean(self, testdir) -> None:
item = testdir.getitem(
"""
import pytest

class InvalidBool:
def __bool__(self):
raise TypeError("INVALID")

@pytest.mark.skipif(InvalidBool(), reason="xxx")
def test_func():
pass
"""
)
with pytest.raises(pytest.fail.Exception) as excinfo:
evaluate_skip_marks(item)
assert excinfo.value.msg is not None
assert "Error evaluating 'skipif' condition as a boolean" in excinfo.value.msg
assert "INVALID" in excinfo.value.msg

def test_skipif_class(self, testdir):
(item,) = testdir.getitems(
"""

@@ -126,10 +148,9 @@ class TestEvaluator:
"""
)
item.config._hackxyz = 3
ev = MarkEvaluator(item, "skipif")
assert ev.istrue()
expl = ev.getexplanation()
assert expl == "condition: config._hackxyz"
skipped = evaluate_skip_marks(item)
assert skipped
assert skipped.reason == "condition: config._hackxyz"
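The two new boolean tests mirror the documented contract for ``skipif``: a condition may be a string evaluated in the item's namespace, while a non-string condition must come with an explicit ``reason=``. A minimal usage sketch:

.. code-block:: python

    import sys

    import pytest


    @pytest.mark.skipif("sys.platform == 'win32'")  # string: reason is derived
    def test_string_condition():
        pass


    @pytest.mark.skipif(sys.platform == "win32", reason="not supported on Windows")
    def test_boolean_condition():  # non-string: reason= is required
        pass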
class TestXFail:

@@ -895,10 +916,10 @@ def test_errors_in_xfail_skip_expressions(testdir) -> None:
result.stdout.fnmatch_lines(
[
"*ERROR*test_nameerror*",
"*evaluating*skipif*expression*",
"*evaluating*skipif*condition*",
"*asd*",
"*ERROR*test_syntax*",
"*evaluating*xfail*expression*",
"*evaluating*xfail*condition*",
" syntax error",
markline,
"SyntaxError: invalid syntax",

@@ -924,25 +945,12 @@ def test_xfail_skipif_with_globals(testdir):
result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"])


def test_direct_gives_error(testdir):
testdir.makepyfile(
"""
import pytest
@pytest.mark.skipif(True)
def test_skip1():
pass
"""
)
result = testdir.runpytest()
result.stdout.fnmatch_lines(["*1 error*"])


def test_default_markers(testdir):
result = testdir.runpytest("--markers")
result.stdout.fnmatch_lines(
[
"*skipif(*condition)*skip*",
"*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
"*skipif(condition, ..., [*], reason=...)*skip*",
"*xfail(condition, ..., [*], reason=..., run=True, raises=None, strict=xfail_strict)*expected failure*",
]
)

@@ -1137,7 +1145,9 @@ def test_mark_xfail_item(testdir):
class MyItem(pytest.Item):
nodeid = 'foo'
def setup(self):
marker = pytest.mark.xfail(True, reason="Expected failure")
marker = pytest.mark.xfail("1 == 2", reason="Expected failure - false")
self.add_marker(marker)
marker = pytest.mark.xfail(True, reason="Expected failure - true")
self.add_marker(marker)
def runtest(self):
assert False
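Taken together, these hunks exercise the full ``xfail`` signature that ``--markers`` now reports. A usage sketch of those parameters:

.. code-block:: python

    import pytest


    @pytest.mark.xfail(reason="known bug", raises=ValueError, strict=True)
    def test_known_bug():
        # counts as xfailed only if it raises ValueError; an unexpected
        # pass becomes [XPASS(strict)] and fails the run
        raise ValueError("expected failure")


    @pytest.mark.xfail(run=False, reason="would crash the interpreter")
    def test_not_run():
        # reported as [NOTRUN]; the body is never executed
        assert False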