Merge remote-tracking branch 'origin/master' into issue_7295

This commit is contained in:
Gleb Nikonorov 2020-06-23 23:21:36 -04:00
commit 0dd77b3e39
31 changed files with 587 additions and 423 deletions

View File

@ -233,6 +233,7 @@ Pulkit Goyal
Punyashloka Biswal
Quentin Pradet
Ralf Schmitt
+Ram Rachum
Ralph Giles
Ran Benita
Raphael Castaneda

View File

@ -0,0 +1 @@
Fixed exception causes all over the codebase, i.e. use `raise new_exception from old_exception` when wrapping an exception.
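For illustration, a minimal sketch of the pattern this entry describes (the names here are hypothetical, not taken from the pytest code base):

.. code-block:: python

    class ConfigError(Exception):
        """Wrapper exception used only for this illustration."""


    def parse_port(raw: str) -> int:
        try:
            return int(raw)
        except ValueError as exc:
            # "raise ... from ..." records the original error in __cause__, so the
            # traceback shows "The above exception was the direct cause of ...".
            raise ConfigError("invalid port value: {!r}".format(raw)) from exc


    try:
        parse_port("eighty")
    except ConfigError as err:
        assert isinstance(err.__cause__, ValueError)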

View File

@ -0,0 +1,13 @@
``--junitxml`` now includes the exception cause in the ``message`` XML attribute for failures during setup and teardown.

Previously:

.. code-block:: xml

    <error message="test setup failure">

Now:

.. code-block:: xml

    <error message="failed on setup with &quot;ValueError: Some error during setup&quot;">

View File

@ -6,6 +6,7 @@ from .code import Frame
from .code import getfslineno
from .code import getrawcode
from .code import Traceback
+from .code import TracebackEntry
from .source import compile_ as compile
from .source import Source
@ -17,6 +18,7 @@ __all__ = [
    "getfslineno",
    "getrawcode",
    "Traceback",
+    "TracebackEntry",
    "compile",
    "Source",
]

View File

@ -213,7 +213,7 @@ class TracebackEntry:
        return source.getstatement(self.lineno)
    @property
-    def path(self):
+    def path(self) -> Union[py.path.local, str]:
        """ path to the source code """
        return self.frame.code.path

View File

@ -215,7 +215,7 @@ class Source:
            newex.offset = ex.offset
            newex.lineno = ex.lineno
            newex.text = ex.text
-            raise newex
+            raise newex from ex
        else:
            if flag & ast.PyCF_ONLY_AST:
                assert isinstance(co, ast.AST)

View File

@ -1,5 +1,6 @@
""" command line options, ini-file and conftest.py processing. """
import argparse
+import collections.abc
import contextlib
import copy
import enum
@ -15,10 +16,13 @@ from typing import Any
from typing import Callable
from typing import Dict
from typing import IO
+from typing import Iterable
+from typing import Iterator
from typing import List
from typing import Optional
from typing import Sequence
from typing import Set
+from typing import TextIO
from typing import Tuple
from typing import Union
@ -42,6 +46,7 @@ from _pytest.compat import TYPE_CHECKING
from _pytest.outcomes import fail
from _pytest.outcomes import Skipped
from _pytest.pathlib import import_path
+from _pytest.pathlib import ImportMode
from _pytest.pathlib import Path
from _pytest.store import Store
from _pytest.warning_types import PytestConfigWarning
@ -50,6 +55,7 @@ if TYPE_CHECKING:
    from typing import Type
    from _pytest._code.code import _TracebackStyle
+    from _pytest.terminal import TerminalReporter
    from .argparsing import Argument
@ -88,18 +94,24 @@ class ExitCode(enum.IntEnum):
class ConftestImportFailure(Exception):
-    def __init__(self, path, excinfo):
+    def __init__(
+        self,
+        path: py.path.local,
+        excinfo: Tuple["Type[Exception]", Exception, TracebackType],
+    ) -> None:
        super().__init__(path, excinfo)
        self.path = path
-        self.excinfo = excinfo  # type: Tuple[Type[Exception], Exception, TracebackType]
+        self.excinfo = excinfo
-    def __str__(self):
+    def __str__(self) -> str:
        return "{}: {} (from {})".format(
            self.excinfo[0].__name__, self.excinfo[1], self.path
        )
-def filter_traceback_for_conftest_import_failure(entry) -> bool:
+def filter_traceback_for_conftest_import_failure(
+    entry: _pytest._code.TracebackEntry,
+) -> bool:
    """filters tracebacks entries which point to pytest internals or importlib.
    Make a special case for importlib because we use it to import test modules and conftest files
@ -108,7 +120,10 @@ def filter_traceback_for_conftest_import_failure(entry) -> bool:
    return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep)
-def main(args=None, plugins=None) -> Union[int, ExitCode]:
+def main(
+    args: Optional[List[str]] = None,
+    plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
+) -> Union[int, ExitCode]:
    """ return exit code, after performing an in-process test run.
    :arg args: list of command line arguments.
@ -177,7 +192,7 @@ class cmdline: # compatibility namespace
    main = staticmethod(main)
-def filename_arg(path, optname):
+def filename_arg(path: str, optname: str) -> str:
    """ Argparse type validator for filename arguments.
    :path: path of filename
@ -188,7 +203,7 @@ def filename_arg(path, optname):
    return path
-def directory_arg(path, optname):
+def directory_arg(path: str, optname: str) -> str:
    """Argparse type validator for directory arguments.
    :path: path of directory
@ -239,13 +254,16 @@ builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")
-def get_config(args=None, plugins=None):
+def get_config(
+    args: Optional[List[str]] = None,
+    plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
+) -> "Config":
    # subsequent calls to main will create a fresh instance
    pluginmanager = PytestPluginManager()
    config = Config(
        pluginmanager,
        invocation_params=Config.InvocationParams(
-            args=args or (), plugins=plugins, dir=Path.cwd()
+            args=args or (), plugins=plugins, dir=Path.cwd(),
        ),
    )
@ -255,10 +273,11 @@ def get_config(args=None, plugins=None):
    for spec in default_plugins:
        pluginmanager.import_plugin(spec)
    return config
-def get_plugin_manager():
+def get_plugin_manager() -> "PytestPluginManager":
    """
    Obtain a new instance of the
    :py:class:`_pytest.config.PytestPluginManager`, with default plugins
@ -271,8 +290,9 @@ def get_plugin_manager():
def _prepareconfig(
-    args: Optional[Union[py.path.local, List[str]]] = None, plugins=None
-):
+    args: Optional[Union[py.path.local, List[str]]] = None,
+    plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None,
+) -> "Config":
    if args is None:
        args = sys.argv[1:]
    elif isinstance(args, py.path.local):
@ -290,9 +310,10 @@ def _prepareconfig(
                    pluginmanager.consider_pluginarg(plugin)
                else:
                    pluginmanager.register(plugin)
-        return pluginmanager.hook.pytest_cmdline_parse(
+        config = pluginmanager.hook.pytest_cmdline_parse(
            pluginmanager=pluginmanager, args=args
        )
+        return config
    except BaseException:
        config._ensure_unconfigure()
        raise
@ -313,13 +334,11 @@ class PytestPluginManager(PluginManager):
        super().__init__("pytest")
        # The objects are module objects, only used generically.
-        self._conftest_plugins = set()  # type: Set[object]
+        self._conftest_plugins = set()  # type: Set[types.ModuleType]
-        # state related to local conftest plugins
-        # Maps a py.path.local to a list of module objects.
-        self._dirpath2confmods = {}  # type: Dict[Any, List[object]]
-        # Maps a py.path.local to a module object.
-        self._conftestpath2mod = {}  # type: Dict[Any, object]
+        # State related to local conftest plugins.
+        self._dirpath2confmods = {}  # type: Dict[py.path.local, List[types.ModuleType]]
+        self._conftestpath2mod = {}  # type: Dict[Path, types.ModuleType]
        self._confcutdir = None  # type: Optional[py.path.local]
        self._noconftest = False
        self._duplicatepaths = set()  # type: Set[py.path.local]
@ -328,7 +347,7 @@ class PytestPluginManager(PluginManager):
        self.register(self)
        if os.environ.get("PYTEST_DEBUG"):
            err = sys.stderr  # type: IO[str]
-            encoding = getattr(err, "encoding", "utf8")
+            encoding = getattr(err, "encoding", "utf8")  # type: str
            try:
                err = open(
                    os.dup(err.fileno()), mode=err.mode, buffering=1, encoding=encoding,
@ -343,7 +362,7 @@ class PytestPluginManager(PluginManager):
        # Used to know when we are importing conftests after the pytest_configure stage
        self._configured = False
-    def parse_hookimpl_opts(self, plugin, name):
+    def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str):
        # pytest hooks are always prefixed with pytest_
        # so we avoid accessing possibly non-readable attributes
        # (see issue #1073)
@ -372,7 +391,7 @@ class PytestPluginManager(PluginManager):
                opts.setdefault(name, hasattr(method, name) or name in known_marks)
        return opts
-    def parse_hookspec_opts(self, module_or_class, name):
+    def parse_hookspec_opts(self, module_or_class, name: str):
        opts = super().parse_hookspec_opts(module_or_class, name)
        if opts is None:
            method = getattr(module_or_class, name)
@ -389,7 +408,9 @@ class PytestPluginManager(PluginManager):
            }
        return opts
-    def register(self, plugin: _PluggyPlugin, name: Optional[str] = None):
+    def register(
+        self, plugin: _PluggyPlugin, name: Optional[str] = None
+    ) -> Optional[str]:
        if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS:
            warnings.warn(
                PytestConfigWarning(
@ -399,8 +420,8 @@ class PytestPluginManager(PluginManager):
                    )
                )
            )
-            return
+            return None
-        ret = super().register(plugin, name)
+        ret = super().register(plugin, name)  # type: Optional[str]
        if ret:
            self.hook.pytest_plugin_registered.call_historic(
                kwargs=dict(plugin=plugin, manager=self)
@ -410,11 +431,12 @@ class PytestPluginManager(PluginManager):
                self.consider_module(plugin)
        return ret
-    def getplugin(self, name):
+    def getplugin(self, name: str):
        # support deprecated naming because plugins (xdist e.g.) use it
-        return self.get_plugin(name)
+        plugin = self.get_plugin(name)  # type: Optional[_PluggyPlugin]
+        return plugin
-    def hasplugin(self, name):
+    def hasplugin(self, name: str) -> bool:
        """Return True if the plugin with the given name is registered."""
        return bool(self.get_plugin(name))
@ -436,7 +458,7 @@ class PytestPluginManager(PluginManager):
    #
    # internal API for local conftest plugin handling
    #
-    def _set_initial_conftests(self, namespace):
+    def _set_initial_conftests(self, namespace: argparse.Namespace) -> None:
        """ load initial conftest files given a preparsed "namespace".
        As conftest files may add their own command line options
        which have arguments ('--my-opt somepath') we might get some
@ -454,8 +476,8 @@ class PytestPluginManager(PluginManager):
        self._using_pyargs = namespace.pyargs
        testpaths = namespace.file_or_dir
        foundanchor = False
-        for path in testpaths:
-            path = str(path)
+        for testpath in testpaths:
+            path = str(testpath)
            # remove node-id syntax
            i = path.find("::")
            if i != -1:
@ -467,7 +489,9 @@ class PytestPluginManager(PluginManager):
        if not foundanchor:
            self._try_load_conftest(current, namespace.importmode)
-    def _try_load_conftest(self, anchor, importmode):
+    def _try_load_conftest(
+        self, anchor: py.path.local, importmode: Union[str, ImportMode]
+    ) -> None:
        self._getconftestmodules(anchor, importmode)
        # let's also consider test* subdirs
        if anchor.check(dir=1):
@ -476,7 +500,9 @@ class PytestPluginManager(PluginManager):
                    self._getconftestmodules(x, importmode)
    @lru_cache(maxsize=128)
-    def _getconftestmodules(self, path, importmode):
+    def _getconftestmodules(
+        self, path: py.path.local, importmode: Union[str, ImportMode],
+    ) -> List[types.ModuleType]:
        if self._noconftest:
            return []
@ -499,7 +525,9 @@ class PytestPluginManager(PluginManager):
            self._dirpath2confmods[directory] = clist
        return clist
-    def _rget_with_confmod(self, name, path, importmode):
+    def _rget_with_confmod(
+        self, name: str, path: py.path.local, importmode: Union[str, ImportMode],
+    ) -> Tuple[types.ModuleType, Any]:
        modules = self._getconftestmodules(path, importmode)
        for mod in reversed(modules):
            try:
@ -508,7 +536,9 @@ class PytestPluginManager(PluginManager):
                continue
        raise KeyError(name)
-    def _importconftest(self, conftestpath, importmode):
+    def _importconftest(
+        self, conftestpath: py.path.local, importmode: Union[str, ImportMode],
+    ) -> types.ModuleType:
        # Use a resolved Path object as key to avoid loading the same conftest twice
        # with build systems that create build directories containing
        # symlinks to actual files.
@ -526,7 +556,9 @@ class PytestPluginManager(PluginManager):
        try:
            mod = import_path(conftestpath, mode=importmode)
        except Exception as e:
-            raise ConftestImportFailure(conftestpath, sys.exc_info()) from e
+            assert e.__traceback__ is not None
+            exc_info = (type(e), e, e.__traceback__)
+            raise ConftestImportFailure(conftestpath, exc_info) from e
        self._check_non_top_pytest_plugins(mod, conftestpath)
@ -542,7 +574,9 @@ class PytestPluginManager(PluginManager):
        self.consider_conftest(mod)
        return mod
-    def _check_non_top_pytest_plugins(self, mod, conftestpath):
+    def _check_non_top_pytest_plugins(
+        self, mod: types.ModuleType, conftestpath: py.path.local,
+    ) -> None:
        if (
            hasattr(mod, "pytest_plugins")
            and self._configured
@ -564,7 +598,9 @@ class PytestPluginManager(PluginManager):
    #
    #
-    def consider_preparse(self, args, *, exclude_only: bool = False) -> None:
+    def consider_preparse(
+        self, args: Sequence[str], *, exclude_only: bool = False
+    ) -> None:
        i = 0
        n = len(args)
        while i < n:
@ -585,7 +621,7 @@ class PytestPluginManager(PluginManager):
                continue
            self.consider_pluginarg(parg)
-    def consider_pluginarg(self, arg) -> None:
+    def consider_pluginarg(self, arg: str) -> None:
        if arg.startswith("no:"):
            name = arg[3:]
            if name in essential_plugins:
@ -610,7 +646,7 @@ class PytestPluginManager(PluginManager):
                    del self._name2plugin["pytest_" + name]
            self.import_plugin(arg, consider_entry_points=True)
-    def consider_conftest(self, conftestmodule) -> None:
+    def consider_conftest(self, conftestmodule: types.ModuleType) -> None:
        self.register(conftestmodule, name=conftestmodule.__file__)
    def consider_env(self) -> None:
@ -619,7 +655,9 @@ class PytestPluginManager(PluginManager):
    def consider_module(self, mod: types.ModuleType) -> None:
        self._import_plugin_specs(getattr(mod, "pytest_plugins", []))
-    def _import_plugin_specs(self, spec):
+    def _import_plugin_specs(
+        self, spec: Union[None, types.ModuleType, str, Sequence[str]]
+    ) -> None:
        plugins = _get_plugin_specs_as_list(spec)
        for import_spec in plugins:
            self.import_plugin(import_spec)
@ -636,7 +674,6 @@ class PytestPluginManager(PluginManager):
        assert isinstance(modname, str), (
            "module name as text required, got %r" % modname
        )
-        modname = str(modname)
        if self.is_blocked(modname) or self.get_plugin(modname) is not None:
            return
@ -668,27 +705,29 @@ class PytestPluginManager(PluginManager):
            self.register(mod, modname)
-def _get_plugin_specs_as_list(specs):
-    """
-    Parses a list of "plugin specs" and returns a list of plugin names.
-    Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in
-    which case it is returned as a list. Specs can also be `None` in which case an
-    empty list is returned.
-    """
-    if specs is not None and not isinstance(specs, types.ModuleType):
-        if isinstance(specs, str):
-            specs = specs.split(",") if specs else []
-        if not isinstance(specs, (list, tuple)):
-            raise UsageError(
-                "Plugin specs must be a ','-separated string or a "
-                "list/tuple of strings for plugin names. Given: %r" % specs
-            )
-        return list(specs)
-    return []
+def _get_plugin_specs_as_list(
+    specs: Union[None, types.ModuleType, str, Sequence[str]]
+) -> List[str]:
+    """Parse a plugins specification into a list of plugin names."""
+    # None means empty.
+    if specs is None:
+        return []
+    # Workaround for #3899 - a submodule which happens to be called "pytest_plugins".
+    if isinstance(specs, types.ModuleType):
+        return []
+    # Comma-separated list.
+    if isinstance(specs, str):
+        return specs.split(",") if specs else []
+    # Direct specification.
+    if isinstance(specs, collections.abc.Sequence):
+        return list(specs)
+    raise UsageError(
+        "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %r"
+        % specs
+    )
-def _ensure_removed_sysmodule(modname):
+def _ensure_removed_sysmodule(modname: str) -> None:
    try:
        del sys.modules[modname]
    except KeyError:
@ -703,7 +742,7 @@ class Notset:
notset = Notset()
-def _iter_rewritable_modules(package_files):
+def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]:
    """
    Given an iterable of file names in a source distribution, return the "names" that should
    be marked for assertion rewrite (for example the package "pytest_mock/__init__.py" should
@ -766,6 +805,10 @@ def _iter_rewritable_modules(package_files):
        yield from _iter_rewritable_modules(new_package_files)
+def _args_converter(args: Iterable[str]) -> Tuple[str, ...]:
+    return tuple(args)
class Config:
    """
    Access to configuration values, pluginmanager and plugin hooks.
@ -793,9 +836,9 @@ class Config:
        Plugins accessing ``InvocationParams`` must be aware of that.
        """
-        args = attr.ib(converter=tuple)
+        args = attr.ib(type=Tuple[str, ...], converter=_args_converter)
        """tuple of command-line arguments as passed to ``pytest.main()``."""
-        plugins = attr.ib()
+        plugins = attr.ib(type=Optional[Sequence[Union[str, _PluggyPlugin]]])
        """list of extra plugins, might be `None`."""
        dir = attr.ib(type=Path)
        """directory where ``pytest.main()`` was invoked from."""
@ -855,7 +898,7 @@ class Config:
        """Backward compatibility"""
        return py.path.local(str(self.invocation_params.dir))
-    def add_cleanup(self, func) -> None:
+    def add_cleanup(self, func: Callable[[], None]) -> None:
        """ Add a function to be called when the config object gets out of
        use (usually coninciding with pytest_unconfigure)."""
        self._cleanup.append(func)
@ -876,12 +919,15 @@ class Config:
            fin = self._cleanup.pop()
            fin()
-    def get_terminal_writer(self):
-        return self.pluginmanager.get_plugin("terminalreporter")._tw
+    def get_terminal_writer(self) -> TerminalWriter:
+        terminalreporter = self.pluginmanager.get_plugin(
+            "terminalreporter"
+        )  # type: TerminalReporter
+        return terminalreporter._tw
    def pytest_cmdline_parse(
        self, pluginmanager: PytestPluginManager, args: List[str]
-    ) -> object:
+    ) -> "Config":
        try:
            self.parse(args)
        except UsageError:
@ -923,7 +969,7 @@ class Config:
            sys.stderr.write("INTERNALERROR> %s\n" % line)
            sys.stderr.flush()
-    def cwd_relative_nodeid(self, nodeid):
+    def cwd_relative_nodeid(self, nodeid: str) -> str:
        # nodeid's are relative to the rootpath, compute relative to cwd
        if self.invocation_dir != self.rootdir:
            fullpath = self.rootdir.join(nodeid)
@ -931,7 +977,7 @@ class Config:
        return nodeid
    @classmethod
-    def fromdictargs(cls, option_dict, args):
+    def fromdictargs(cls, option_dict, args) -> "Config":
        """ constructor usable for subprocesses. """
        config = get_config(args)
        config.option.__dict__.update(option_dict)
@ -949,7 +995,7 @@ class Config:
            setattr(self.option, opt.dest, opt.default)
    @hookimpl(trylast=True)
-    def pytest_load_initial_conftests(self, early_config):
+    def pytest_load_initial_conftests(self, early_config: "Config") -> None:
        self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
    def _initini(self, args: Sequence[str]) -> None:
@ -1078,7 +1124,7 @@ class Config:
                raise
        self._validate_keys()
-    def _checkversion(self):
+    def _checkversion(self) -> None:
        import pytest
        minver = self.inicfg.get("minversion", None)
@ -1172,7 +1218,7 @@ class Config:
        except PrintHelp:
            pass
-    def addinivalue_line(self, name, line):
+    def addinivalue_line(self, name: str, line: str) -> None:
        """ add a line to an ini-file option. The option must have been
        declared but might not yet be set in which case the line becomes the
        the first line in its value. """
@ -1191,11 +1237,11 @@ class Config:
            self._inicache[name] = val = self._getini(name)
            return val
-    def _getini(self, name: str) -> Any:
+    def _getini(self, name: str):
        try:
            description, type, default = self._parser._inidict[name]
-        except KeyError:
-            raise ValueError("unknown configuration value: {!r}".format(name))
+        except KeyError as e:
+            raise ValueError("unknown configuration value: {!r}".format(name)) from e
        override_value = self._get_override_ini_value(name)
        if override_value is None:
            try:
@ -1236,12 +1282,14 @@ class Config:
            else:
                return value
        elif type == "bool":
-            return bool(_strtobool(str(value).strip()))
+            return _strtobool(str(value).strip())
        else:
            assert type is None
            return value
-    def _getconftest_pathlist(self, name, path):
+    def _getconftest_pathlist(
+        self, name: str, path: py.path.local
+    ) -> Optional[List[py.path.local]]:
        try:
            mod, relroots = self.pluginmanager._rget_with_confmod(
                name, path, self.getoption("importmode")
@ -1249,7 +1297,7 @@ class Config:
        except KeyError:
            return None
        modpath = py.path.local(mod.__file__).dirpath()
-        values = []
+        values = []  # type: List[py.path.local]
        for relroot in relroots:
            if not isinstance(relroot, py.path.local):
                relroot = relroot.replace("/", py.path.local.sep)
@ -1291,20 +1339,20 @@ class Config:
            if val is None and skip:
                raise AttributeError(name)
            return val
-        except AttributeError:
+        except AttributeError as e:
            if default is not notset:
                return default
            if skip:
                import pytest
                pytest.skip("no {!r} option found".format(name))
-            raise ValueError("no option named {!r}".format(name))
+            raise ValueError("no option named {!r}".format(name)) from e
-    def getvalue(self, name, path=None):
+    def getvalue(self, name: str, path=None):
        """ (deprecated, use getoption()) """
        return self.getoption(name)
-    def getvalueorskip(self, name, path=None):
+    def getvalueorskip(self, name: str, path=None):
        """ (deprecated, use getoption(skip=True)) """
        return self.getoption(name, skip=True)
@ -1330,7 +1378,7 @@ class Config:
        )
-def _assertion_supported():
+def _assertion_supported() -> bool:
    try:
        assert False
    except AssertionError:
@ -1339,12 +1387,14 @@ def _assertion_supported():
        return False
-def create_terminal_writer(config: Config, *args, **kwargs) -> TerminalWriter:
+def create_terminal_writer(
+    config: Config, file: Optional[TextIO] = None
+) -> TerminalWriter:
    """Create a TerminalWriter instance configured according to the options
    in the config object. Every code which requires a TerminalWriter object
    and has access to a config object should use this function.
    """
-    tw = TerminalWriter(*args, **kwargs)
+    tw = TerminalWriter(file=file)
    if config.option.color == "yes":
        tw.hasmarkup = True
    if config.option.color == "no":
@ -1352,8 +1402,8 @@ def create_terminal_writer(config: Config, *args, **kwargs) -> TerminalWriter:
    return tw
-def _strtobool(val):
-    """Convert a string representation of truth to true (1) or false (0).
+def _strtobool(val: str) -> bool:
+    """Convert a string representation of truth to True or False.
    True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
    are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
@ -1363,8 +1413,8 @@ def _strtobool(val):
    """
    val = val.lower()
    if val in ("y", "yes", "t", "true", "on", "1"):
-        return 1
+        return True
    elif val in ("n", "no", "f", "false", "off", "0"):
-        return 0
+        return False
    else:
        raise ValueError("invalid truth value {!r}".format(val))

View File

@ -265,9 +265,9 @@ class Argument:
        else:
            try:
                self.dest = self._short_opts[0][1:]
-            except IndexError:
+            except IndexError as e:
                self.dest = "???"  # Needed for the error repr.
-                raise ArgumentError("need a long or short option", self)
+                raise ArgumentError("need a long or short option", self) from e
    def names(self) -> List[str]:
        return self._short_opts + self._long_opts

View File

@ -26,7 +26,7 @@ def _parse_ini_config(path: py.path.local) -> iniconfig.IniConfig:
    try:
        return iniconfig.IniConfig(path)
    except iniconfig.ParseError as exc:
-        raise UsageError(str(exc))
+        raise UsageError(str(exc)) from exc
def load_config_dict_from_file(
def load_config_dict_from_file( def load_config_dict_from_file(

View File

@ -28,10 +28,10 @@ def _validate_usepdb_cls(value: str) -> Tuple[str, str]:
    """Validate syntax of --pdbcls option."""
    try:
        modname, classname = value.split(":")
-    except ValueError:
+    except ValueError as e:
        raise argparse.ArgumentTypeError(
            "{!r} is not in the format 'modname:classname'".format(value)
-        )
+        ) from e
    return (modname, classname)
@ -130,7 +130,7 @@ class pytestPDB:
            value = ":".join((modname, classname))
            raise UsageError(
                "--pdbcls: could not import {!r}: {}".format(value, exc)
-            )
+            ) from exc
        else:
            import pdb
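For reference, the ``modname:classname`` form validated above is what ``--pdbcls`` expects on the command line, e.g. ``pytest --pdbcls=IPython.terminal.debugger:TerminalPdb``. A small sketch of the same parsing-and-chaining pattern (illustrative only, not the pytest implementation):

.. code-block:: python

    def parse_pdbcls(value: str):
        # A single "modname:classname" pair; anything else fails to unpack.
        try:
            modname, classname = value.split(":")
        except ValueError as e:
            raise ValueError(
                "{!r} is not in the format 'modname:classname'".format(value)
            ) from e
        return modname, classname


    assert parse_pdbcls("IPython.terminal.debugger:TerminalPdb") == (
        "IPython.terminal.debugger",
        "TerminalPdb",
    )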

View File

@ -938,13 +938,13 @@ def _eval_scope_callable(
        # Type ignored because there is no typing mechanism to specify
        # keyword arguments, currently.
        result = scope_callable(fixture_name=fixture_name, config=config)  # type: ignore[call-arg]  # noqa: F821
-    except Exception:
+    except Exception as e:
        raise TypeError(
            "Error evaluating {} while defining fixture '{}'.\n"
            "Expected a function with the signature (*, fixture_name, config)".format(
                scope_callable, fixture_name
            )
-        )
+        ) from e
    if not isinstance(result, str):
        fail(
            "Expected {} to return a 'str' while defining fixture '{}', but it returned:\n"

View File

@ -96,7 +96,7 @@ def pytest_addoption(parser: Parser) -> None:
@pytest.hookimpl(hookwrapper=True)
def pytest_cmdline_parse():
    outcome = yield
-    config = outcome.get_result()
+    config = outcome.get_result()  # type: Config
    if config.option.debug:
        path = os.path.abspath("pytestdebug.log")
        debugfile = open(path, "w")
@ -124,7 +124,7 @@ def pytest_cmdline_parse():
    config.add_cleanup(unset_tracing)
-def showversion(config):
+def showversion(config: Config) -> None:
    if config.option.version > 1:
        sys.stderr.write(
            "This is pytest version {}, imported from {}\n".format(
@ -224,7 +224,7 @@ def showhelp(config: Config) -> None:
    conftest_options = [("pytest_plugins", "list of plugin names to load")]
-def getpluginversioninfo(config):
+def getpluginversioninfo(config: Config) -> List[str]:
    lines = []
    plugininfo = config.pluginmanager.list_plugin_distinfo()
    if plugininfo:

View File

@ -143,7 +143,7 @@ def pytest_configure(config: "Config") -> None:
@hookspec(firstresult=True)
def pytest_cmdline_parse(
    pluginmanager: "PytestPluginManager", args: List[str]
-) -> Optional[object]:
+) -> Optional["Config"]:
    """return initialized config object, parsing the specified args.
    Stops at first non-None result, see :ref:`firstresult`

View File

@ -236,10 +236,16 @@ class _NodeReporter:
        self._add_simple(Junit.skipped, "collection skipped", report.longrepr)
    def append_error(self, report: TestReport) -> None:
-        if report.when == "teardown":
-            msg = "test teardown failure"
+        assert report.longrepr is not None
+        if getattr(report.longrepr, "reprcrash", None) is not None:
+            reason = report.longrepr.reprcrash.message
        else:
-            msg = "test setup failure"
+            reason = str(report.longrepr)
+        if report.when == "teardown":
+            msg = 'failed on teardown with "{}"'.format(reason)
+        else:
+            msg = 'failed on setup with "{}"'.format(reason)
        self._add_simple(Junit.error, msg, report.longrepr)
    def append_skipped(self, report: TestReport) -> None:

View File

@ -141,9 +141,14 @@ class PercentStyleMultiline(logging.PercentStyle):
        if auto_indent_option is None:
            return 0
-        elif type(auto_indent_option) is int:
+        elif isinstance(auto_indent_option, bool):
+            if auto_indent_option:
+                return -1
+            else:
+                return 0
+        elif isinstance(auto_indent_option, int):
            return int(auto_indent_option)
-        elif type(auto_indent_option) is str:
+        elif isinstance(auto_indent_option, str):
            try:
                return int(auto_indent_option)
            except ValueError:
@ -153,9 +158,6 @@ class PercentStyleMultiline(logging.PercentStyle):
                        return -1
                except ValueError:
                    return 0
-        elif type(auto_indent_option) is bool:
-            if auto_indent_option:
-                return -1
        return 0
@ -487,13 +489,13 @@ def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[i
        log_level = log_level.upper()
    try:
        return int(getattr(logging, log_level, log_level))
-    except ValueError:
+    except ValueError as e:
        # Python logging does not recognise this as a logging level
        raise pytest.UsageError(
            "'{}' is not recognized as a logging level name for "
            "'{}'. Please consider passing the "
            "logging level num instead.".format(log_level, setting_name)
-        )
+        ) from e
# run after terminalreporter/capturemanager are configured
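The reordering above matters because ``bool`` is a subclass of ``int`` in Python, so once ``isinstance`` checks are used the ``bool`` branch has to run before the ``int`` one. A simplified sketch of the resolution rules (not the pytest implementation; the real code also accepts "on"/"off" style strings):

.. code-block:: python

    def resolve_auto_indent(option):
        # None -> auto-indentation off (0 columns of extra indent).
        if option is None:
            return 0
        # bool before int: isinstance(True, int) is True, so order matters.
        if isinstance(option, bool):
            return -1 if option else 0  # -1 means "detect the indent automatically"
        if isinstance(option, int):
            return int(option)  # a fixed number of columns
        if isinstance(option, str):
            try:
                return int(option)
            except ValueError:
                return 0
        return 0


    assert resolve_auto_indent(True) == -1
    assert resolve_auto_indent(4) == 4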

View File

@ -1,124 +0,0 @@
import os
import platform
import sys
import traceback
from typing import Any
from typing import Dict
from typing import List
from typing import Optional
from ..outcomes import fail
from ..outcomes import TEST_OUTCOME
from .structures import Mark
from _pytest.nodes import Item
def compiled_eval(expr: str, d: Dict[str, object]) -> Any:
import _pytest._code
exprcode = _pytest._code.compile(expr, mode="eval")
return eval(exprcode, d)
class MarkEvaluator:
def __init__(self, item: Item, name: str) -> None:
self.item = item
self._marks = None # type: Optional[List[Mark]]
self._mark = None # type: Optional[Mark]
self._mark_name = name
def __bool__(self) -> bool:
# don't cache here to prevent staleness
return bool(self._get_marks())
def wasvalid(self) -> bool:
return not hasattr(self, "exc")
def _get_marks(self) -> List[Mark]:
return list(self.item.iter_markers(name=self._mark_name))
def invalidraise(self, exc) -> Optional[bool]:
raises = self.get("raises")
if not raises:
return None
return not isinstance(exc, raises)
def istrue(self) -> bool:
try:
return self._istrue()
except TEST_OUTCOME:
self.exc = sys.exc_info()
if isinstance(self.exc[1], SyntaxError):
# TODO: Investigate why SyntaxError.offset is Optional, and if it can be None here.
assert self.exc[1].offset is not None
msg = [" " * (self.exc[1].offset + 4) + "^"]
msg.append("SyntaxError: invalid syntax")
else:
msg = traceback.format_exception_only(*self.exc[:2])
fail(
"Error evaluating %r expression\n"
" %s\n"
"%s" % (self._mark_name, self.expr, "\n".join(msg)),
pytrace=False,
)
def _getglobals(self) -> Dict[str, object]:
d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config}
if hasattr(self.item, "obj"):
d.update(self.item.obj.__globals__) # type: ignore[attr-defined] # noqa: F821
return d
def _istrue(self) -> bool:
if hasattr(self, "result"):
result = getattr(self, "result") # type: bool
return result
self._marks = self._get_marks()
if self._marks:
self.result = False
for mark in self._marks:
self._mark = mark
if "condition" not in mark.kwargs:
args = mark.args
else:
args = (mark.kwargs["condition"],)
for expr in args:
self.expr = expr
if isinstance(expr, str):
d = self._getglobals()
result = compiled_eval(expr, d)
else:
if "reason" not in mark.kwargs:
# XXX better be checked at collection time
msg = (
"you need to specify reason=STRING "
"when using booleans as conditions."
)
fail(msg)
result = bool(expr)
if result:
self.result = True
self.reason = mark.kwargs.get("reason", None)
self.expr = expr
return self.result
if not args:
self.result = True
self.reason = mark.kwargs.get("reason", None)
return self.result
return False
def get(self, attr, default=None):
if self._mark is None:
return default
return self._mark.kwargs.get(attr, default)
def getexplanation(self):
expl = getattr(self, "reason", None) or self.get("reason", None)
if not expl:
if not hasattr(self, "expr"):
return ""
else:
return "condition: " + str(self.expr)
return expl

View File

@ -46,11 +46,19 @@ def get_empty_parameterset_mark(
) -> "MarkDecorator":
    from ..nodes import Collector
+    fs, lineno = getfslineno(func)
+    reason = "got empty parameter set %r, function %s at %s:%d" % (
+        argnames,
+        func.__name__,
+        fs,
+        lineno,
+    )
    requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION)
    if requested_mark in ("", None, "skip"):
-        mark = MARK_GEN.skip
+        mark = MARK_GEN.skip(reason=reason)
    elif requested_mark == "xfail":
-        mark = MARK_GEN.xfail(run=False)
+        mark = MARK_GEN.xfail(reason=reason, run=False)
    elif requested_mark == "fail_at_collect":
        f_name = func.__name__
        _, lineno = getfslineno(func)
@ -59,14 +67,7 @@
        )
    else:
        raise LookupError(requested_mark)
-    fs, lineno = getfslineno(func)
-    reason = "got empty parameter set %r, function %s at %s:%d" % (
-        argnames,
-        func.__name__,
-        fs,
-        lineno,
-    )
-    return mark(reason=reason)
+    return mark
class ParameterSet(
@ -379,6 +380,76 @@ def store_mark(obj, mark: Mark) -> None:
    obj.pytestmark = get_unpacked_marks(obj) + [mark]
# Typing for builtin pytest marks. This is cheating; it gives builtin marks
# special privilege, and breaks modularity. But practicality beats purity...
if TYPE_CHECKING:
from _pytest.fixtures import _Scope
class _SkipMarkDecorator(MarkDecorator):
@overload # type: ignore[override,misc]
def __call__(self, arg: _Markable) -> _Markable:
raise NotImplementedError()
@overload # noqa: F811
def __call__(self, reason: str = ...) -> "MarkDecorator": # noqa: F811
raise NotImplementedError()
class _SkipifMarkDecorator(MarkDecorator):
def __call__( # type: ignore[override]
self,
condition: Union[str, bool] = ...,
*conditions: Union[str, bool],
reason: str = ...
) -> MarkDecorator:
raise NotImplementedError()
class _XfailMarkDecorator(MarkDecorator):
@overload # type: ignore[override,misc]
def __call__(self, arg: _Markable) -> _Markable:
raise NotImplementedError()
@overload # noqa: F811
def __call__( # noqa: F811
self,
condition: Union[str, bool] = ...,
*conditions: Union[str, bool],
reason: str = ...,
run: bool = ...,
raises: Union[BaseException, Tuple[BaseException, ...]] = ...,
strict: bool = ...
) -> MarkDecorator:
raise NotImplementedError()
class _ParametrizeMarkDecorator(MarkDecorator):
def __call__( # type: ignore[override]
self,
argnames: Union[str, List[str], Tuple[str, ...]],
argvalues: Iterable[Union[ParameterSet, Sequence[object], object]],
*,
indirect: Union[bool, Sequence[str]] = ...,
ids: Optional[
Union[
Iterable[Union[None, str, float, int, bool]],
Callable[[object], Optional[object]],
]
] = ...,
scope: Optional[_Scope] = ...
) -> MarkDecorator:
raise NotImplementedError()
class _UsefixturesMarkDecorator(MarkDecorator):
def __call__( # type: ignore[override]
self, *fixtures: str
) -> MarkDecorator:
raise NotImplementedError()
class _FilterwarningsMarkDecorator(MarkDecorator):
def __call__( # type: ignore[override]
self, *filters: str
) -> MarkDecorator:
raise NotImplementedError()
class MarkGenerator:
    """Factory for :class:`MarkDecorator` objects - exposed as
    a ``pytest.mark`` singleton instance.
@ -397,6 +468,15 @@ class MarkGenerator:
    _config = None  # type: Optional[Config]
    _markers = set()  # type: Set[str]
+    # See TYPE_CHECKING above.
+    if TYPE_CHECKING:
+        skip = None  # type: _SkipMarkDecorator
+        skipif = None  # type: _SkipifMarkDecorator
+        xfail = None  # type: _XfailMarkDecorator
+        parametrize = None  # type: _ParametrizeMarkDecorator
+        usefixtures = None  # type: _UsefixturesMarkDecorator
+        filterwarnings = None  # type: _FilterwarningsMarkDecorator
    def __getattr__(self, name: str) -> MarkDecorator:
        if name[0] == "_":
            raise AttributeError("Marker name must NOT start with underscore")

View File

@ -73,7 +73,7 @@ def resolve(name: str) -> object:
            if expected == used:
                raise
            else:
-                raise ImportError("import error in {}: {}".format(used, ex))
+                raise ImportError("import error in {}: {}".format(used, ex)) from ex
        found = annotated_getattr(found, part, used)
    return found
@ -81,12 +81,12 @@ def resolve(name: str) -> object:
def annotated_getattr(obj: object, name: str, ann: str) -> object:
    try:
        obj = getattr(obj, name)
-    except AttributeError:
+    except AttributeError as e:
        raise AttributeError(
            "{!r} object at {} has no attribute {!r}".format(
                type(obj).__name__, ann, name
            )
-        )
+        ) from e
    return obj

View File

@ -276,7 +276,7 @@ class Node(metaclass=NodeMeta):
            marker_ = getattr(MARK_GEN, marker)
        else:
            raise ValueError("is not a string or pytest.mark.* Marker")
-        self.keywords[marker_.name] = marker
+        self.keywords[marker_.name] = marker_
        if append:
            self.own_markers.append(marker_.mark)
        else:
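The one-character fix above means the resolved ``MarkDecorator`` (rather than the raw string) is what ends up in ``item.keywords``. For illustration, both call forms that ``add_marker`` accepts, in a hypothetical ``conftest.py`` hook:

.. code-block:: python

    import pytest


    def pytest_collection_modifyitems(items):
        for item in items:
            if "integration" in item.nodeid:
                item.add_marker("slow")  # string form, resolved via pytest.mark
            else:
                item.add_marker(pytest.mark.fast)  # MarkDecorator form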

View File

@ -466,7 +466,7 @@ def import_path(
    """
    mode = ImportMode(mode)
-    path = Path(p)
+    path = Path(str(p))
    if not path.exists():
        raise ImportError(path)

View File

@ -1054,7 +1054,7 @@ class Testdir:
            args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp"))
        return args
-    def parseconfig(self, *args: Union[str, py.path.local]) -> Config:
+    def parseconfig(self, *args) -> Config:
        """Return a new pytest Config instance from given commandline args.
        This invokes the pytest bootstrapping code in _pytest.config to create
@ -1070,14 +1070,14 @@ class Testdir:
        import _pytest.config
-        config = _pytest.config._prepareconfig(args, self.plugins)  # type: Config
+        config = _pytest.config._prepareconfig(args, self.plugins)  # type: ignore[arg-type]
        # we don't know what the test will do with this half-setup config
        # object and thus we make sure it gets unconfigured properly in any
        # case (otherwise capturing could still be active, for example)
        self.request.addfinalizer(config._ensure_unconfigure)
        return config
-    def parseconfigure(self, *args):
+    def parseconfigure(self, *args) -> Config:
        """Return a new pytest configured Config instance.
        This returns a new :py:class:`_pytest.config.Config` instance like
@ -1318,7 +1318,7 @@ class Testdir:
        Returns a :py:class:`RunResult`.
        """
        __tracebackhide__ = True
-        p = make_numbered_dir(root=Path(self.tmpdir), prefix="runpytest-")
+        p = make_numbered_dir(root=Path(str(self.tmpdir)), prefix="runpytest-")
        args = ("--basetemp=%s" % p,) + args
        plugins = [x for x in self.plugins if isinstance(x, str)]
        if plugins:

View File

@ -551,8 +551,10 @@ class Module(nodes.File, PyCollector):
        importmode = self.config.getoption("--import-mode")
        try:
            mod = import_path(self.fspath, mode=importmode)
-        except SyntaxError:
-            raise self.CollectError(ExceptionInfo.from_current().getrepr(style="short"))
+        except SyntaxError as e:
+            raise self.CollectError(
+                ExceptionInfo.from_current().getrepr(style="short")
+            ) from e
        except ImportPathMismatchError as e:
            raise self.CollectError(
                "import file mismatch:\n"
@ -562,8 +564,8 @@ class Module(nodes.File, PyCollector):
                "  %s\n"
                "HINT: remove __pycache__ / .pyc files and/or use a "
                "unique basename for your test file modules" % e.args
-            )
-        except ImportError:
+            ) from e
+        except ImportError as e:
            exc_info = ExceptionInfo.from_current()
            if self.config.getoption("verbose") < 2:
                exc_info.traceback = exc_info.traceback.filter(filter_traceback)
@ -578,7 +580,7 @@ class Module(nodes.File, PyCollector):
                "Hint: make sure your test modules/packages have valid Python names.\n"
                "Traceback:\n"
                "{traceback}".format(fspath=self.fspath, traceback=formatted_tb)
-            )
+            ) from e
        except _pytest.runner.Skipped as e:
            if e.allow_module_level:
                raise
@ -587,7 +589,7 @@ class Module(nodes.File, PyCollector):
                "To decorate a test function, use the @pytest.mark.skip "
                "or @pytest.mark.skipif decorators instead, and to skip a "
                "module use `pytestmark = pytest.mark.{skip,skipif}."
-            )
+            ) from e
        self.config.pluginmanager.consider_module(mod)
        return mod
@ -836,8 +838,8 @@ class CallSpec2:
    def getparam(self, name: str) -> object:
        try:
            return self.params[name]
-        except KeyError:
-            raise ValueError(name)
+        except KeyError as e:
+            raise ValueError(name) from e
    @property
    def id(self) -> str:
@ -1074,8 +1076,8 @@ class Metafunc:
            except TypeError:
                try:
                    iter(ids)
-                except TypeError:
-                    raise TypeError("ids must be a callable or an iterable")
+                except TypeError as e:
+                    raise TypeError("ids must be a callable or an iterable") from e
            num_ids = len(parameters)
        # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849
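The collection error above points users at module-level skipping; for illustration, the two supported spellings are:

.. code-block:: python

    import pytest

    # Skip every test in this module with a mark:
    pytestmark = pytest.mark.skip(reason="requires the optional C extension")

    # ...or imperatively, which needs allow_module_level=True:
    # pytest.skip("requires the optional C extension", allow_module_level=True)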

View File

@ -1,24 +1,30 @@
""" support for skip/xfail functions and markers. """ """ support for skip/xfail functions and markers. """
import os
import platform
import sys
import traceback
from typing import Generator
from typing import Optional from typing import Optional
from typing import Tuple from typing import Tuple
import attr
import _pytest._code
from _pytest.compat import TYPE_CHECKING
from _pytest.config import Config from _pytest.config import Config
from _pytest.config import hookimpl from _pytest.config import hookimpl
from _pytest.config.argparsing import Parser from _pytest.config.argparsing import Parser
from _pytest.mark.evaluate import MarkEvaluator from _pytest.mark.structures import Mark
from _pytest.nodes import Item from _pytest.nodes import Item
from _pytest.outcomes import fail from _pytest.outcomes import fail
from _pytest.outcomes import skip from _pytest.outcomes import skip
from _pytest.outcomes import xfail from _pytest.outcomes import xfail
from _pytest.python import Function
from _pytest.reports import BaseReport from _pytest.reports import BaseReport
from _pytest.runner import CallInfo from _pytest.runner import CallInfo
from _pytest.store import StoreKey from _pytest.store import StoreKey
if TYPE_CHECKING:
skipped_by_mark_key = StoreKey[bool]() from typing import Type
evalxfail_key = StoreKey[MarkEvaluator]()
unexpectedsuccess_key = StoreKey[str]()
def pytest_addoption(parser: Parser) -> None: def pytest_addoption(parser: Parser) -> None:
@ -62,81 +68,200 @@ def pytest_configure(config: Config) -> None:
    )
    config.addinivalue_line(
        "markers",
-        "skipif(condition): skip the given test function if eval(condition) "
-        "results in a True value. Evaluation happens within the "
-        "module global context. Example: skipif('sys.platform == \"win32\"') "
-        "skips the test if we are on the win32 platform. see "
-        "https://docs.pytest.org/en/latest/skipping.html",
+        "skipif(condition, ..., *, reason=...): "
+        "skip the given test function if any of the conditions evaluate to True. "
+        "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. "
+        "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif",
    )
    config.addinivalue_line(
        "markers",
-        "xfail(condition, reason=None, run=True, raises=None, strict=False): "
-        "mark the test function as an expected failure if eval(condition) "
-        "has a True value. Optionally specify a reason for better reporting "
+        "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): "
+        "mark the test function as an expected failure if any of the conditions "
+        "evaluate to True. Optionally specify a reason for better reporting "
        "and run=False if you don't even want to execute the test function. "
        "If only specific exception(s) are expected, you can list them in "
        "raises, and if the test fails in other ways, it will be reported as "
-        "a true failure. See https://docs.pytest.org/en/latest/skipping.html",
+        "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail",
    )
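Illustrative uses of the signatures described by the updated marker help above (standard pytest usage, not part of this diff):

.. code-block:: python

    import sys

    import pytest


    @pytest.mark.skipif(sys.platform == "win32", reason="does not run on Windows")
    def test_posix_only():
        pass


    # Old-style string conditions are still supported and are eval()'d with
    # os, sys, platform and config available in the globals:
    @pytest.mark.skipif("sys.version_info < (3, 8)", reason="requires Python 3.8+")
    def test_new_syntax():
        pass


    @pytest.mark.xfail(raises=ZeroDivisionError, reason="known bug", strict=False)
    def test_division():
        1 / 0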
def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]:
"""Evaluate a single skipif/xfail condition.
If an old-style string condition is given, it is eval()'d, otherwise the
condition is bool()'d. If this fails, an appropriately formatted pytest.fail
is raised.
Returns (result, reason). The reason is only relevant if the result is True.
"""
# String condition.
if isinstance(condition, str):
globals_ = {
"os": os,
"sys": sys,
"platform": platform,
"config": item.config,
}
if hasattr(item, "obj"):
globals_.update(item.obj.__globals__) # type: ignore[attr-defined]
try:
condition_code = _pytest._code.compile(condition, mode="eval")
result = eval(condition_code, globals_)
except SyntaxError as exc:
msglines = [
"Error evaluating %r condition" % mark.name,
" " + condition,
" " + " " * (exc.offset or 0) + "^",
"SyntaxError: invalid syntax",
]
fail("\n".join(msglines), pytrace=False)
except Exception as exc:
msglines = [
"Error evaluating %r condition" % mark.name,
" " + condition,
*traceback.format_exception_only(type(exc), exc),
]
fail("\n".join(msglines), pytrace=False)
# Boolean condition.
else:
try:
result = bool(condition)
except Exception as exc:
msglines = [
"Error evaluating %r condition as a boolean" % mark.name,
*traceback.format_exception_only(type(exc), exc),
]
fail("\n".join(msglines), pytrace=False)
reason = mark.kwargs.get("reason", None)
if reason is None:
if isinstance(condition, str):
reason = "condition: " + condition
else:
# XXX better be checked at collection time
msg = (
"Error evaluating %r: " % mark.name
+ "you need to specify reason=STRING when using booleans as conditions."
)
fail(msg, pytrace=False)
return result, reason
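A quick illustration of the two condition styles handled by ``evaluate_condition`` (test names invented): string conditions are eval()'d with ``os``, ``sys``, ``platform``, ``config`` and the test module's globals available, while non-string conditions are bool()'d and must carry an explicit ``reason``:

.. code-block:: python

    import sys

    import pytest


    # Old-style string condition: the reason defaults to "condition: <text>".
    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_string_condition():
        pass


    # Boolean condition: reason is mandatory, otherwise the item fails with
    # "you need to specify reason=STRING when using booleans as conditions."
    @pytest.mark.skipif(sys.platform == "win32", reason="POSIX-only behaviour")
    def test_boolean_condition():
        pass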
@attr.s(slots=True, frozen=True)
class Skip:
"""The result of evaluate_skip_marks()."""
reason = attr.ib(type=str)
def evaluate_skip_marks(item: Item) -> Optional[Skip]:
"""Evaluate skip and skipif marks on item, returning Skip if triggered."""
for mark in item.iter_markers(name="skipif"):
if "condition" not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
return Skip(reason)
# If any of the conditions are true.
for condition in conditions:
result, reason = evaluate_condition(item, mark, condition)
if result:
return Skip(reason)
for mark in item.iter_markers(name="skip"):
if "reason" in mark.kwargs:
reason = mark.kwargs["reason"]
elif mark.args:
reason = mark.args[0]
else:
reason = "unconditional skip"
return Skip(reason)
return None
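A minimal sketch of how this helper is exercised, mirroring the tests further down in this commit (the test body itself is an assumption, not part of the diff):

.. code-block:: python

    from _pytest.skipping import evaluate_skip_marks


    def test_skip_mark_is_resolved(testdir):
        item = testdir.getitem(
            """
            import pytest
            @pytest.mark.skip(reason="broken on CI")
            def test_func():
                pass
            """
        )
        skipped = evaluate_skip_marks(item)
        assert skipped is not None
        assert skipped.reason == "broken on CI"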
@attr.s(slots=True, frozen=True)
class Xfail:
"""The result of evaluate_xfail_marks()."""
reason = attr.ib(type=str)
run = attr.ib(type=bool)
strict = attr.ib(type=bool)
raises = attr.ib(type=Optional[Tuple["Type[BaseException]", ...]])
def evaluate_xfail_marks(item: Item) -> Optional[Xfail]:
"""Evaluate xfail marks on item, returning Xfail if triggered."""
for mark in item.iter_markers(name="xfail"):
run = mark.kwargs.get("run", True)
strict = mark.kwargs.get("strict", item.config.getini("xfail_strict"))
raises = mark.kwargs.get("raises", None)
if "condition" not in mark.kwargs:
conditions = mark.args
else:
conditions = (mark.kwargs["condition"],)
# Unconditional.
if not conditions:
reason = mark.kwargs.get("reason", "")
return Xfail(reason, run, strict, raises)
# If any of the conditions are true.
for condition in conditions:
result, reason = evaluate_condition(item, mark, condition)
if result:
return Xfail(reason, run, strict, raises)
return None
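And the xfail counterpart, showing roughly how the mark's keyword arguments map onto the ``Xfail`` record (again a sketch in the style of the tests below, not part of the diff):

.. code-block:: python

    from _pytest.skipping import evaluate_xfail_marks


    def test_xfail_mark_is_resolved(testdir):
        item = testdir.getitem(
            """
            import pytest
            @pytest.mark.xfail(True, reason="known bad", run=False, strict=True, raises=ValueError)
            def test_func():
                pass
            """
        )
        xfailed = evaluate_xfail_marks(item)
        assert xfailed is not None
        assert (xfailed.reason, xfailed.run, xfailed.strict) == ("known bad", False, True)
        assert xfailed.raises is ValueError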
# Whether skipped due to skip or skipif marks.
skipped_by_mark_key = StoreKey[bool]()
# Saves the xfail mark evaluation. Can be refreshed during call if None.
xfailed_key = StoreKey[Optional[Xfail]]()
unexpectedsuccess_key = StoreKey[str]()
 @hookimpl(tryfirst=True)
 def pytest_runtest_setup(item: Item) -> None:
-    # Check if skip or skipif are specified as pytest marks
     item._store[skipped_by_mark_key] = False
-    eval_skipif = MarkEvaluator(item, "skipif")
-    if eval_skipif.istrue():
-        item._store[skipped_by_mark_key] = True
-        skip(eval_skipif.getexplanation())
 
-    for skip_info in item.iter_markers(name="skip"):
+    skipped = evaluate_skip_marks(item)
+    if skipped:
         item._store[skipped_by_mark_key] = True
-        if "reason" in skip_info.kwargs:
-            skip(skip_info.kwargs["reason"])
-        elif skip_info.args:
-            skip(skip_info.args[0])
-        else:
-            skip("unconditional skip")
+        skip(skipped.reason)
 
-    item._store[evalxfail_key] = MarkEvaluator(item, "xfail")
-    check_xfail_no_run(item)
+    if not item.config.option.runxfail:
+        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+        if xfailed and not xfailed.run:
+            xfail("[NOTRUN] " + xfailed.reason)
 
 
 @hookimpl(hookwrapper=True)
-def pytest_pyfunc_call(pyfuncitem: Function):
-    check_xfail_no_run(pyfuncitem)
-    outcome = yield
-    passed = outcome.excinfo is None
-    if passed:
-        check_strict_xfail(pyfuncitem)
-
-
-def check_xfail_no_run(item: Item) -> None:
-    """check xfail(run=False)"""
-    if not item.config.option.runxfail:
-        evalxfail = item._store[evalxfail_key]
-        if evalxfail.istrue():
-            if not evalxfail.get("run", True):
-                xfail("[NOTRUN] " + evalxfail.getexplanation())
-
-
-def check_strict_xfail(pyfuncitem: Function) -> None:
-    """check xfail(strict=True) for the given PASSING test"""
-    evalxfail = pyfuncitem._store[evalxfail_key]
-    if evalxfail.istrue():
-        strict_default = pyfuncitem.config.getini("xfail_strict")
-        is_strict_xfail = evalxfail.get("strict", strict_default)
-        if is_strict_xfail:
-            del pyfuncitem._store[evalxfail_key]
-            explanation = evalxfail.getexplanation()
-            fail("[XPASS(strict)] " + explanation, pytrace=False)
+def pytest_runtest_call(item: Item) -> Generator[None, None, None]:
+    xfailed = item._store.get(xfailed_key, None)
+    if xfailed is None:
+        item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item)
+
+    if not item.config.option.runxfail:
+        if xfailed and not xfailed.run:
+            xfail("[NOTRUN] " + xfailed.reason)
+
+    yield
 @hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
     outcome = yield
     rep = outcome.get_result()
-    evalxfail = item._store.get(evalxfail_key, None)
+    xfailed = item._store.get(xfailed_key, None)
     # unittest special case, see setting of unexpectedsuccess_key
     if unexpectedsuccess_key in item._store and rep.when == "call":
         reason = item._store[unexpectedsuccess_key]
@@ -145,30 +270,27 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
         else:
             rep.longrepr = "Unexpected success"
         rep.outcome = "failed"
     elif item.config.option.runxfail:
         pass  # don't interfere
     elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception):
         assert call.excinfo.value.msg is not None
         rep.wasxfail = "reason: " + call.excinfo.value.msg
         rep.outcome = "skipped"
-    elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue():
+    elif not rep.skipped and xfailed:
         if call.excinfo:
-            if evalxfail.invalidraise(call.excinfo.value):
+            raises = xfailed.raises
+            if raises is not None and not isinstance(call.excinfo.value, raises):
                 rep.outcome = "failed"
             else:
                 rep.outcome = "skipped"
-                rep.wasxfail = evalxfail.getexplanation()
+                rep.wasxfail = xfailed.reason
         elif call.when == "call":
-            strict_default = item.config.getini("xfail_strict")
-            is_strict_xfail = evalxfail.get("strict", strict_default)
-            explanation = evalxfail.getexplanation()
-            if is_strict_xfail:
+            if xfailed.strict:
                 rep.outcome = "failed"
-                rep.longrepr = "[XPASS(strict)] {}".format(explanation)
+                rep.longrepr = "[XPASS(strict)] " + xfailed.reason
             else:
                 rep.outcome = "passed"
-                rep.wasxfail = explanation
+                rep.wasxfail = xfailed.reason
     elif (
         item._store.get(skipped_by_mark_key, True)
         and rep.skipped
@@ -183,9 +305,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]):
         rep.longrepr = str(filename), line + 1, reason
 
 
-# called by terminalreporter progress reporting
 def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]:
     if hasattr(report, "wasxfail"):
         if report.skipped:
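The strict branch in ``pytest_runtest_makereport`` above is what turns an unexpected pass into a hard failure; roughly (test names invented):

.. code-block:: python

    import pytest


    @pytest.mark.xfail(strict=True, reason="not implemented yet")
    def test_unexpectedly_passing():
        # Reported as FAILED with longrepr "[XPASS(strict)] not implemented yet".
        assert True


    @pytest.mark.xfail(strict=False, reason="may pass")
    def test_loosely_xfailing():
        # Reported as XPASS: outcome "passed" with wasxfail set to the reason.
        assert True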


@@ -13,6 +13,7 @@ from .pathlib import LOCK_TIMEOUT
 from .pathlib import make_numbered_dir
 from .pathlib import make_numbered_dir_with_cleanup
 from .pathlib import Path
+from _pytest.config import Config
 from _pytest.fixtures import FixtureRequest
 from _pytest.monkeypatch import MonkeyPatch
@@ -135,7 +136,7 @@ def get_user() -> Optional[str]:
     return None
 
 
-def pytest_configure(config) -> None:
+def pytest_configure(config: Config) -> None:
     """Create a TempdirFactory and attach it to the config object.
 
     This is to comply with existing plugins which expect the handler to be


@@ -48,8 +48,8 @@ def _parse_filter(
             lineno = int(lineno_)
             if lineno < 0:
                 raise ValueError
-        except (ValueError, OverflowError):
-            raise warnings._OptionError("invalid lineno {!r}".format(lineno_))
+        except (ValueError, OverflowError) as e:
+            raise warnings._OptionError("invalid lineno {!r}".format(lineno_)) from e
     else:
         lineno = 0
     return (action, message, category, module, lineno)
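The ``raise ... from`` pattern applied in this and the surrounding hunks keeps the original exception attached as ``__cause__``; a standalone illustration with invented names:

.. code-block:: python

    class ConfigError(Exception):
        pass


    def parse_lineno(text: str) -> int:
        try:
            return int(text)
        except ValueError as e:
            # Chaining preserves the original ValueError in the traceback
            # ("The above exception was the direct cause of ...").
            raise ConfigError("invalid lineno {!r}".format(text)) from e


    try:
        parse_lineno("abc")
    except ConfigError as err:
        assert isinstance(err.__cause__, ValueError)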


@@ -585,11 +585,11 @@ class TestInvocationVariants:
         # Type ignored because `py.test` is not and will not be typed.
         assert pytest.main == py.test.cmdline.main  # type: ignore[attr-defined]
 
-    def test_invoke_with_invalid_type(self):
+    def test_invoke_with_invalid_type(self) -> None:
         with pytest.raises(
             TypeError, match="expected to be a list of strings, got: '-h'"
         ):
-            pytest.main("-h")
+            pytest.main("-h")  # type: ignore[arg-type]
 
     def test_invoke_with_path(self, tmpdir, capsys):
         retcode = pytest.main(tmpdir)


@@ -372,7 +372,7 @@ def test_excinfo_no_python_sourcecode(tmpdir):
     for item in excinfo.traceback:
         print(item)  # XXX: for some reason jinja.Template.render is printed in full
         item.source  # shouldn't fail
-        if item.path.basename == "test.txt":
+        if isinstance(item.path, py.path.local) and item.path.basename == "test.txt":
             assert str(item.source) == "{{ h()}}:"


@@ -11,6 +11,7 @@ import py.path
 import _pytest._code
 import pytest
 from _pytest.compat import importlib_metadata
+from _pytest.config import _get_plugin_specs_as_list
 from _pytest.config import _iter_rewritable_modules
 from _pytest.config import Config
 from _pytest.config import ConftestImportFailure
@@ -1119,21 +1120,17 @@ def test_load_initial_conftest_last_ordering(_config_for_test):
     assert [x.function.__module__ for x in values] == expected
 
 
-def test_get_plugin_specs_as_list():
-    from _pytest.config import _get_plugin_specs_as_list
-
-    def exp_match(val):
+def test_get_plugin_specs_as_list() -> None:
+    def exp_match(val: object) -> str:
         return (
-            "Plugin specs must be a ','-separated string"
-            " or a list/tuple of strings for plugin names. Given: {}".format(
-                re.escape(repr(val))
-            )
+            "Plugins may be specified as a sequence or a ','-separated string of plugin names. Got: %s"
+            % re.escape(repr(val))
         )
 
     with pytest.raises(pytest.UsageError, match=exp_match({"foo"})):
-        _get_plugin_specs_as_list({"foo"})
+        _get_plugin_specs_as_list({"foo"})  # type: ignore[arg-type]
     with pytest.raises(pytest.UsageError, match=exp_match({})):
-        _get_plugin_specs_as_list(dict())
+        _get_plugin_specs_as_list(dict())  # type: ignore[arg-type]
 
     assert _get_plugin_specs_as_list(None) == []
     assert _get_plugin_specs_as_list("") == []
@@ -1782,5 +1779,7 @@ def test_conftest_import_error_repr(tmpdir):
     ):
         try:
             raise RuntimeError("some error")
-        except Exception:
-            raise ConftestImportFailure(path, sys.exc_info())
+        except Exception as exc:
+            assert exc.__traceback__ is not None
+            exc_info = (type(exc), exc, exc.__traceback__)
+            raise ConftestImportFailure(path, exc_info) from exc


@@ -266,7 +266,7 @@ class TestPython:
             @pytest.fixture
             def arg(request):
-                raise ValueError()
+                raise ValueError("Error reason")
 
             def test_function(arg):
                 pass
         """
@@ -278,7 +278,7 @@ class TestPython:
         tnode = node.find_first_by_tag("testcase")
         tnode.assert_attr(classname="test_setup_error", name="test_function")
         fnode = tnode.find_first_by_tag("error")
-        fnode.assert_attr(message="test setup failure")
+        fnode.assert_attr(message='failed on setup with "ValueError: Error reason"')
         assert "ValueError" in fnode.toxml()
 
     @parametrize_families
@@ -290,7 +290,7 @@ class TestPython:
             @pytest.fixture
             def arg():
                 yield
-                raise ValueError()
+                raise ValueError('Error reason')
 
             def test_function(arg):
                 pass
         """
@@ -301,7 +301,7 @@ class TestPython:
         tnode = node.find_first_by_tag("testcase")
         tnode.assert_attr(classname="test_teardown_error", name="test_function")
         fnode = tnode.find_first_by_tag("error")
-        fnode.assert_attr(message="test teardown failure")
+        fnode.assert_attr(message='failed on teardown with "ValueError: Error reason"')
         assert "ValueError" in fnode.toxml()
 
     @parametrize_families
@@ -328,7 +328,9 @@ class TestPython:
         fnode = first.find_first_by_tag("failure")
         fnode.assert_attr(message="Exception: Call Exception")
         snode = second.find_first_by_tag("error")
-        snode.assert_attr(message="test teardown failure")
+        snode.assert_attr(
+            message='failed on teardown with "Exception: Teardown Exception"'
+        )
 
     @parametrize_families
     def test_skip_contains_name_reason(self, testdir, run_and_parse, xunit_family):
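Outside the test suite, the richer ``message`` attribute asserted above can be seen by inspecting a generated report directly; the report path here is an assumption for the example:

.. code-block:: python

    import xml.etree.ElementTree as ET

    # Assuming a prior run such as: pytest --junitxml=report.xml
    root = ET.parse("report.xml").getroot()
    for node in root.iter("error"):
        # Setup/teardown failures now read e.g.
        #   failed on setup with "ValueError: Error reason"
        # instead of the generic "test setup failure".
        print(node.get("message"))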


@@ -534,8 +534,8 @@ def test_outcomeexception_passes_except_Exception() -> None:
     with pytest.raises(outcomes.OutcomeException):
         try:
             raise outcomes.OutcomeException("test")
-        except Exception:
-            raise NotImplementedError()
+        except Exception as e:
+            raise NotImplementedError from e
 
 
 def test_pytest_exit() -> None:


@@ -2,68 +2,74 @@ import sys
 
 import pytest
 from _pytest.runner import runtestprotocol
-from _pytest.skipping import MarkEvaluator
+from _pytest.skipping import evaluate_skip_marks
+from _pytest.skipping import evaluate_xfail_marks
 from _pytest.skipping import pytest_runtest_setup
 
 
-class TestEvaluator:
+class TestEvaluation:
     def test_no_marker(self, testdir):
         item = testdir.getitem("def test_func(): pass")
-        evalskipif = MarkEvaluator(item, "skipif")
-        assert not evalskipif
-        assert not evalskipif.istrue()
+        skipped = evaluate_skip_marks(item)
+        assert not skipped
 
-    def test_marked_no_args(self, testdir):
+    def test_marked_xfail_no_args(self, testdir):
         item = testdir.getitem(
             """
             import pytest
-            @pytest.mark.xyz
+            @pytest.mark.xfail
             def test_func():
                 pass
         """
         )
-        ev = MarkEvaluator(item, "xyz")
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == ""
-        assert not ev.get("run", False)
+        xfailed = evaluate_xfail_marks(item)
+        assert xfailed
+        assert xfailed.reason == ""
+        assert xfailed.run
+
+    def test_marked_skipif_no_args(self, testdir):
+        item = testdir.getitem(
+            """
+            import pytest
+            @pytest.mark.skipif
+            def test_func():
+                pass
+        """
+        )
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == ""
     def test_marked_one_arg(self, testdir):
         item = testdir.getitem(
             """
             import pytest
-            @pytest.mark.xyz("hasattr(os, 'sep')")
+            @pytest.mark.skipif("hasattr(os, 'sep')")
             def test_func():
                 pass
         """
         )
-        ev = MarkEvaluator(item, "xyz")
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: hasattr(os, 'sep')"
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == "condition: hasattr(os, 'sep')"
 
     def test_marked_one_arg_with_reason(self, testdir):
         item = testdir.getitem(
             """
             import pytest
-            @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world")
+            @pytest.mark.skipif("hasattr(os, 'sep')", attr=2, reason="hello world")
             def test_func():
                 pass
         """
         )
-        ev = MarkEvaluator(item, "xyz")
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "hello world"
-        assert ev.get("attr") == 2
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == "hello world"
 
     def test_marked_one_arg_twice(self, testdir):
         lines = [
             """@pytest.mark.skipif("not hasattr(os, 'murks')")""",
-            """@pytest.mark.skipif("hasattr(os, 'murks')")""",
+            """@pytest.mark.skipif(condition="hasattr(os, 'murks')")""",
         ]
         for i in range(0, 2):
             item = testdir.getitem(
@@ -76,11 +82,9 @@ class TestEvaluator:
         """
                 % (lines[i], lines[(i + 1) % 2])
             )
-            ev = MarkEvaluator(item, "skipif")
-            assert ev
-            assert ev.istrue()
-            expl = ev.getexplanation()
-            assert expl == "condition: not hasattr(os, 'murks')"
+            skipped = evaluate_skip_marks(item)
+            assert skipped
+            assert skipped.reason == "condition: not hasattr(os, 'murks')"
 
     def test_marked_one_arg_twice2(self, testdir):
         item = testdir.getitem(
@@ -92,13 +96,11 @@ class TestEvaluator:
                 pass
         """
         )
-        ev = MarkEvaluator(item, "skipif")
-        assert ev
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: not hasattr(os, 'murks')"
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == "condition: not hasattr(os, 'murks')"
 
-    def test_marked_skip_with_not_string(self, testdir) -> None:
+    def test_marked_skipif_with_boolean_without_reason(self, testdir) -> None:
         item = testdir.getitem(
             """
             import pytest
@@ -107,14 +109,34 @@ class TestEvaluator:
                 pass
         """
         )
-        ev = MarkEvaluator(item, "skipif")
-        exc = pytest.raises(pytest.fail.Exception, ev.istrue)
-        assert exc.value.msg is not None
+        with pytest.raises(pytest.fail.Exception) as excinfo:
+            evaluate_skip_marks(item)
+        assert excinfo.value.msg is not None
         assert (
-            """Failed: you need to specify reason=STRING when using booleans as conditions."""
-            in exc.value.msg
+            """Error evaluating 'skipif': you need to specify reason=STRING when using booleans as conditions."""
+            in excinfo.value.msg
         )
def test_marked_skipif_with_invalid_boolean(self, testdir) -> None:
item = testdir.getitem(
"""
import pytest
class InvalidBool:
def __bool__(self):
raise TypeError("INVALID")
@pytest.mark.skipif(InvalidBool(), reason="xxx")
def test_func():
pass
"""
)
with pytest.raises(pytest.fail.Exception) as excinfo:
evaluate_skip_marks(item)
assert excinfo.value.msg is not None
assert "Error evaluating 'skipif' condition as a boolean" in excinfo.value.msg
assert "INVALID" in excinfo.value.msg
     def test_skipif_class(self, testdir):
         (item,) = testdir.getitems(
             """
@@ -126,10 +148,9 @@ class TestEvaluator:
         """
         )
         item.config._hackxyz = 3
-        ev = MarkEvaluator(item, "skipif")
-        assert ev.istrue()
-        expl = ev.getexplanation()
-        assert expl == "condition: config._hackxyz"
+        skipped = evaluate_skip_marks(item)
+        assert skipped
+        assert skipped.reason == "condition: config._hackxyz"
 
 
 class TestXFail:
@@ -895,10 +916,10 @@ def test_errors_in_xfail_skip_expressions(testdir) -> None:
     result.stdout.fnmatch_lines(
         [
             "*ERROR*test_nameerror*",
-            "*evaluating*skipif*expression*",
+            "*evaluating*skipif*condition*",
             "*asd*",
             "*ERROR*test_syntax*",
-            "*evaluating*xfail*expression*",
+            "*evaluating*xfail*condition*",
             " syntax error",
             markline,
             "SyntaxError: invalid syntax",
@@ -924,25 +945,12 @@ def test_xfail_skipif_with_globals(testdir):
     result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"])
-def test_direct_gives_error(testdir):
-    testdir.makepyfile(
-        """
-        import pytest
-        @pytest.mark.skipif(True)
-        def test_skip1():
-            pass
-    """
-    )
-    result = testdir.runpytest()
-    result.stdout.fnmatch_lines(["*1 error*"])
-
-
 def test_default_markers(testdir):
     result = testdir.runpytest("--markers")
     result.stdout.fnmatch_lines(
         [
-            "*skipif(*condition)*skip*",
-            "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*",
+            "*skipif(condition, ..., [*], reason=...)*skip*",
+            "*xfail(condition, ..., [*], reason=..., run=True, raises=None, strict=xfail_strict)*expected failure*",
         ]
     )
@@ -1137,7 +1145,9 @@ def test_mark_xfail_item(testdir):
         class MyItem(pytest.Item):
             nodeid = 'foo'
             def setup(self):
-                marker = pytest.mark.xfail(True, reason="Expected failure")
+                marker = pytest.mark.xfail("1 == 2", reason="Expected failure - false")
+                self.add_marker(marker)
+                marker = pytest.mark.xfail(True, reason="Expected failure - true")
                 self.add_marker(marker)
             def runtest(self):
                 assert False