From a1f841d5d26364b45de70f5a61a03adecc6b5462 Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Fri, 19 Jun 2020 13:33:53 +0300 Subject: [PATCH 01/11] skipping: use pytest_runtest_call instead of pytest_pyfunc_call `@pytest.mark.xfail` is meant to work with arbitrary items, and there is a test `test_mark_xfail_item` which verifies this. However, the code for some reason uses `pytest_pyfunc_call` for the call phase check, which only works for Function items. The test mentioned above only passed "accidentally" because the `pytest_runtest_makereport` hook also runs an `evalxfail.istrue()` which triggers an evaluation, but conceptually it shouldn't do that. Change to `pytest_runtest_call` to make the xfail checking properly generic. --- src/_pytest/skipping.py | 15 +++++++-------- 1 file changed, 7 insertions(+), 8 deletions(-) diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py index bbd4593fd..4e4b5a3c4 100644 --- a/src/_pytest/skipping.py +++ b/src/_pytest/skipping.py @@ -10,7 +10,6 @@ from _pytest.nodes import Item from _pytest.outcomes import fail from _pytest.outcomes import skip from _pytest.outcomes import xfail -from _pytest.python import Function from _pytest.reports import BaseReport from _pytest.runner import CallInfo from _pytest.store import StoreKey @@ -103,12 +102,12 @@ def pytest_runtest_setup(item: Item) -> None: @hookimpl(hookwrapper=True) -def pytest_pyfunc_call(pyfuncitem: Function): - check_xfail_no_run(pyfuncitem) +def pytest_runtest_call(item: Item): + check_xfail_no_run(item) outcome = yield passed = outcome.excinfo is None if passed: - check_strict_xfail(pyfuncitem) + check_strict_xfail(item) def check_xfail_no_run(item: Item) -> None: @@ -120,14 +119,14 @@ def check_xfail_no_run(item: Item) -> None: xfail("[NOTRUN] " + evalxfail.getexplanation()) -def check_strict_xfail(pyfuncitem: Function) -> None: +def check_strict_xfail(item: Item) -> None: """check xfail(strict=True) for the given PASSING test""" - evalxfail = 
pyfuncitem._store[evalxfail_key] + evalxfail = item._store[evalxfail_key] if evalxfail.istrue(): - strict_default = pyfuncitem.config.getini("xfail_strict") + strict_default = item.config.getini("xfail_strict") is_strict_xfail = evalxfail.get("strict", strict_default) if is_strict_xfail: - del pyfuncitem._store[evalxfail_key] + del item._store[evalxfail_key] explanation = evalxfail.getexplanation() fail("[XPASS(strict)] " + explanation, pytrace=False) From 6072c9950d76a20da5397547932557842b84e078 Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Fri, 19 Jun 2020 13:33:54 +0300 Subject: [PATCH 02/11] skipping: move MarkEvaluator from _pytest.mark.evaluate to _pytest.skipping This type was actually in `_pytest.skipping` previously, but was moved to `_pytest.mark.evaluate` in cf40c0743c565ed25bc14753e2350e010b39025a. I think the previous location was more appropriate, because the `MarkEvaluator` is not a generic mark facility, it is explicitly and exclusively used by the `skipif` and `xfail` marks to evaluate their particular set of arguments. So it is better to put it in the plugin code. Putting `skipping` related functionality into the core `_pytest.mark` module also causes some import cycles which we can avoid. 
--- src/_pytest/mark/evaluate.py | 124 --------------------------------- src/_pytest/skipping.py | 131 +++++++++++++++++++++++++++++++++-- 2 files changed, 125 insertions(+), 130 deletions(-) delete mode 100644 src/_pytest/mark/evaluate.py diff --git a/src/_pytest/mark/evaluate.py b/src/_pytest/mark/evaluate.py deleted file mode 100644 index eb9903a59..000000000 --- a/src/_pytest/mark/evaluate.py +++ /dev/null @@ -1,124 +0,0 @@ -import os -import platform -import sys -import traceback -from typing import Any -from typing import Dict -from typing import List -from typing import Optional - -from ..outcomes import fail -from ..outcomes import TEST_OUTCOME -from .structures import Mark -from _pytest.nodes import Item - - -def compiled_eval(expr: str, d: Dict[str, object]) -> Any: - import _pytest._code - - exprcode = _pytest._code.compile(expr, mode="eval") - return eval(exprcode, d) - - -class MarkEvaluator: - def __init__(self, item: Item, name: str) -> None: - self.item = item - self._marks = None # type: Optional[List[Mark]] - self._mark = None # type: Optional[Mark] - self._mark_name = name - - def __bool__(self) -> bool: - # don't cache here to prevent staleness - return bool(self._get_marks()) - - def wasvalid(self) -> bool: - return not hasattr(self, "exc") - - def _get_marks(self) -> List[Mark]: - return list(self.item.iter_markers(name=self._mark_name)) - - def invalidraise(self, exc) -> Optional[bool]: - raises = self.get("raises") - if not raises: - return None - return not isinstance(exc, raises) - - def istrue(self) -> bool: - try: - return self._istrue() - except TEST_OUTCOME: - self.exc = sys.exc_info() - if isinstance(self.exc[1], SyntaxError): - # TODO: Investigate why SyntaxError.offset is Optional, and if it can be None here. 
- assert self.exc[1].offset is not None - msg = [" " * (self.exc[1].offset + 4) + "^"] - msg.append("SyntaxError: invalid syntax") - else: - msg = traceback.format_exception_only(*self.exc[:2]) - fail( - "Error evaluating %r expression\n" - " %s\n" - "%s" % (self._mark_name, self.expr, "\n".join(msg)), - pytrace=False, - ) - - def _getglobals(self) -> Dict[str, object]: - d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} - if hasattr(self.item, "obj"): - d.update(self.item.obj.__globals__) # type: ignore[attr-defined] # noqa: F821 - return d - - def _istrue(self) -> bool: - if hasattr(self, "result"): - result = getattr(self, "result") # type: bool - return result - self._marks = self._get_marks() - - if self._marks: - self.result = False - for mark in self._marks: - self._mark = mark - if "condition" not in mark.kwargs: - args = mark.args - else: - args = (mark.kwargs["condition"],) - - for expr in args: - self.expr = expr - if isinstance(expr, str): - d = self._getglobals() - result = compiled_eval(expr, d) - else: - if "reason" not in mark.kwargs: - # XXX better be checked at collection time - msg = ( - "you need to specify reason=STRING " - "when using booleans as conditions." 
- ) - fail(msg) - result = bool(expr) - if result: - self.result = True - self.reason = mark.kwargs.get("reason", None) - self.expr = expr - return self.result - - if not args: - self.result = True - self.reason = mark.kwargs.get("reason", None) - return self.result - return False - - def get(self, attr, default=None): - if self._mark is None: - return default - return self._mark.kwargs.get(attr, default) - - def getexplanation(self): - expl = getattr(self, "reason", None) or self.get("reason", None) - if not expl: - if not hasattr(self, "expr"): - return "" - else: - return "condition: " + str(self.expr) - return expl diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py index 4e4b5a3c4..ee6b40daa 100644 --- a/src/_pytest/skipping.py +++ b/src/_pytest/skipping.py @@ -1,25 +1,28 @@ """ support for skip/xfail functions and markers. """ +import os +import platform +import sys +import traceback +from typing import Any +from typing import Dict +from typing import List from typing import Optional from typing import Tuple from _pytest.config import Config from _pytest.config import hookimpl from _pytest.config.argparsing import Parser -from _pytest.mark.evaluate import MarkEvaluator +from _pytest.mark.structures import Mark from _pytest.nodes import Item from _pytest.outcomes import fail from _pytest.outcomes import skip +from _pytest.outcomes import TEST_OUTCOME from _pytest.outcomes import xfail from _pytest.reports import BaseReport from _pytest.runner import CallInfo from _pytest.store import StoreKey -skipped_by_mark_key = StoreKey[bool]() -evalxfail_key = StoreKey[MarkEvaluator]() -unexpectedsuccess_key = StoreKey[str]() - - def pytest_addoption(parser: Parser) -> None: group = parser.getgroup("general") group.addoption( @@ -79,6 +82,122 @@ def pytest_configure(config: Config) -> None: ) +def compiled_eval(expr: str, d: Dict[str, object]) -> Any: + import _pytest._code + + exprcode = _pytest._code.compile(expr, mode="eval") + return eval(exprcode, d) + + 
+class MarkEvaluator: + def __init__(self, item: Item, name: str) -> None: + self.item = item + self._marks = None # type: Optional[List[Mark]] + self._mark = None # type: Optional[Mark] + self._mark_name = name + + def __bool__(self) -> bool: + # don't cache here to prevent staleness + return bool(self._get_marks()) + + def wasvalid(self) -> bool: + return not hasattr(self, "exc") + + def _get_marks(self) -> List[Mark]: + return list(self.item.iter_markers(name=self._mark_name)) + + def invalidraise(self, exc) -> Optional[bool]: + raises = self.get("raises") + if not raises: + return None + return not isinstance(exc, raises) + + def istrue(self) -> bool: + try: + return self._istrue() + except TEST_OUTCOME: + self.exc = sys.exc_info() + if isinstance(self.exc[1], SyntaxError): + # TODO: Investigate why SyntaxError.offset is Optional, and if it can be None here. + assert self.exc[1].offset is not None + msg = [" " * (self.exc[1].offset + 4) + "^"] + msg.append("SyntaxError: invalid syntax") + else: + msg = traceback.format_exception_only(*self.exc[:2]) + fail( + "Error evaluating %r expression\n" + " %s\n" + "%s" % (self._mark_name, self.expr, "\n".join(msg)), + pytrace=False, + ) + + def _getglobals(self) -> Dict[str, object]: + d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} + if hasattr(self.item, "obj"): + d.update(self.item.obj.__globals__) # type: ignore[attr-defined] # noqa: F821 + return d + + def _istrue(self) -> bool: + if hasattr(self, "result"): + result = getattr(self, "result") # type: bool + return result + self._marks = self._get_marks() + + if self._marks: + self.result = False + for mark in self._marks: + self._mark = mark + if "condition" not in mark.kwargs: + args = mark.args + else: + args = (mark.kwargs["condition"],) + + for expr in args: + self.expr = expr + if isinstance(expr, str): + d = self._getglobals() + result = compiled_eval(expr, d) + else: + if "reason" not in mark.kwargs: + # XXX better be checked at 
collection time + msg = ( + "you need to specify reason=STRING " + "when using booleans as conditions." + ) + fail(msg) + result = bool(expr) + if result: + self.result = True + self.reason = mark.kwargs.get("reason", None) + self.expr = expr + return self.result + + if not args: + self.result = True + self.reason = mark.kwargs.get("reason", None) + return self.result + return False + + def get(self, attr, default=None): + if self._mark is None: + return default + return self._mark.kwargs.get(attr, default) + + def getexplanation(self): + expl = getattr(self, "reason", None) or self.get("reason", None) + if not expl: + if not hasattr(self, "expr"): + return "" + else: + return "condition: " + str(self.expr) + return expl + + +skipped_by_mark_key = StoreKey[bool]() +evalxfail_key = StoreKey[MarkEvaluator]() +unexpectedsuccess_key = StoreKey[str]() + + @hookimpl(tryfirst=True) def pytest_runtest_setup(item: Item) -> None: # Check if skip or skipif are specified as pytest marks From dd446bee5eb2d3ab0976309803dc77821eeac93e Mon Sep 17 00:00:00 2001 From: Ram Rachum Date: Fri, 19 Jun 2020 12:53:44 +0300 Subject: [PATCH 03/11] Fix exception causes all over the codebase --- AUTHORS | 1 + changelog/7383.bugfix.rst | 1 + src/_pytest/_code/source.py | 2 +- src/_pytest/config/__init__.py | 8 ++++---- src/_pytest/config/argparsing.py | 4 ++-- src/_pytest/config/findpaths.py | 2 +- src/_pytest/debugging.py | 6 +++--- src/_pytest/fixtures.py | 4 ++-- src/_pytest/logging.py | 4 ++-- src/_pytest/monkeypatch.py | 6 +++--- src/_pytest/python.py | 22 ++++++++++++---------- src/_pytest/warnings.py | 4 ++-- testing/test_config.py | 4 ++-- testing/test_runner.py | 4 ++-- 14 files changed, 38 insertions(+), 34 deletions(-) create mode 100644 changelog/7383.bugfix.rst diff --git a/AUTHORS b/AUTHORS index 821a7d8f4..e40237682 100644 --- a/AUTHORS +++ b/AUTHORS @@ -233,6 +233,7 @@ Pulkit Goyal Punyashloka Biswal Quentin Pradet Ralf Schmitt +Ram Rachum Ralph Giles Ran Benita Raphael 
Castaneda diff --git a/changelog/7383.bugfix.rst b/changelog/7383.bugfix.rst new file mode 100644 index 000000000..d43106880 --- /dev/null +++ b/changelog/7383.bugfix.rst @@ -0,0 +1 @@ +Fixed exception causes all over the codebase, i.e. use `raise new_exception from old_exception` when wrapping an exception. diff --git a/src/_pytest/_code/source.py b/src/_pytest/_code/source.py index 3f732792f..2ccbaf657 100644 --- a/src/_pytest/_code/source.py +++ b/src/_pytest/_code/source.py @@ -215,7 +215,7 @@ class Source: newex.offset = ex.offset newex.lineno = ex.lineno newex.text = ex.text - raise newex + raise newex from ex else: if flag & ast.PyCF_ONLY_AST: assert isinstance(co, ast.AST) diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index 31b73a2c9..b4a5a70ad 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -1189,8 +1189,8 @@ class Config: def _getini(self, name: str) -> Any: try: description, type, default = self._parser._inidict[name] - except KeyError: - raise ValueError("unknown configuration value: {!r}".format(name)) + except KeyError as e: + raise ValueError("unknown configuration value: {!r}".format(name)) from e override_value = self._get_override_ini_value(name) if override_value is None: try: @@ -1286,14 +1286,14 @@ class Config: if val is None and skip: raise AttributeError(name) return val - except AttributeError: + except AttributeError as e: if default is not notset: return default if skip: import pytest pytest.skip("no {!r} option found".format(name)) - raise ValueError("no option named {!r}".format(name)) + raise ValueError("no option named {!r}".format(name)) from e def getvalue(self, name, path=None): """ (deprecated, use getoption()) """ diff --git a/src/_pytest/config/argparsing.py b/src/_pytest/config/argparsing.py index 985a3fd1c..084ce16e5 100644 --- a/src/_pytest/config/argparsing.py +++ b/src/_pytest/config/argparsing.py @@ -265,9 +265,9 @@ class Argument: else: try: self.dest = 
self._short_opts[0][1:] - except IndexError: + except IndexError as e: self.dest = "???" # Needed for the error repr. - raise ArgumentError("need a long or short option", self) + raise ArgumentError("need a long or short option", self) from e def names(self) -> List[str]: return self._short_opts + self._long_opts diff --git a/src/_pytest/config/findpaths.py b/src/_pytest/config/findpaths.py index ae8c5f47f..08a71122d 100644 --- a/src/_pytest/config/findpaths.py +++ b/src/_pytest/config/findpaths.py @@ -26,7 +26,7 @@ def _parse_ini_config(path: py.path.local) -> iniconfig.IniConfig: try: return iniconfig.IniConfig(path) except iniconfig.ParseError as exc: - raise UsageError(str(exc)) + raise UsageError(str(exc)) from exc def load_config_dict_from_file( diff --git a/src/_pytest/debugging.py b/src/_pytest/debugging.py index 0567927c0..63126cbe0 100644 --- a/src/_pytest/debugging.py +++ b/src/_pytest/debugging.py @@ -28,10 +28,10 @@ def _validate_usepdb_cls(value: str) -> Tuple[str, str]: """Validate syntax of --pdbcls option.""" try: modname, classname = value.split(":") - except ValueError: + except ValueError as e: raise argparse.ArgumentTypeError( "{!r} is not in the format 'modname:classname'".format(value) - ) + ) from e return (modname, classname) @@ -130,7 +130,7 @@ class pytestPDB: value = ":".join((modname, classname)) raise UsageError( "--pdbcls: could not import {!r}: {}".format(value, exc) - ) + ) from exc else: import pdb diff --git a/src/_pytest/fixtures.py b/src/_pytest/fixtures.py index 05f0ecb6a..4b2c6a774 100644 --- a/src/_pytest/fixtures.py +++ b/src/_pytest/fixtures.py @@ -938,13 +938,13 @@ def _eval_scope_callable( # Type ignored because there is no typing mechanism to specify # keyword arguments, currently. 
result = scope_callable(fixture_name=fixture_name, config=config) # type: ignore[call-arg] # noqa: F821 - except Exception: + except Exception as e: raise TypeError( "Error evaluating {} while defining fixture '{}'.\n" "Expected a function with the signature (*, fixture_name, config)".format( scope_callable, fixture_name ) - ) + ) from e if not isinstance(result, str): fail( "Expected {} to return a 'str' while defining fixture '{}', but it returned:\n" diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py index 8755e5611..a06dc1ab5 100644 --- a/src/_pytest/logging.py +++ b/src/_pytest/logging.py @@ -487,13 +487,13 @@ def get_log_level_for_setting(config: Config, *setting_names: str) -> Optional[i log_level = log_level.upper() try: return int(getattr(logging, log_level, log_level)) - except ValueError: + except ValueError as e: # Python logging does not recognise this as a logging level raise pytest.UsageError( "'{}' is not recognized as a logging level name for " "'{}'. Please consider passing the " "logging level num instead.".format(log_level, setting_name) - ) + ) from e # run after terminalreporter/capturemanager are configured diff --git a/src/_pytest/monkeypatch.py b/src/_pytest/monkeypatch.py index 09f1ac36e..2e5cca526 100644 --- a/src/_pytest/monkeypatch.py +++ b/src/_pytest/monkeypatch.py @@ -73,7 +73,7 @@ def resolve(name: str) -> object: if expected == used: raise else: - raise ImportError("import error in {}: {}".format(used, ex)) + raise ImportError("import error in {}: {}".format(used, ex)) from ex found = annotated_getattr(found, part, used) return found @@ -81,12 +81,12 @@ def resolve(name: str) -> object: def annotated_getattr(obj: object, name: str, ann: str) -> object: try: obj = getattr(obj, name) - except AttributeError: + except AttributeError as e: raise AttributeError( "{!r} object at {} has no attribute {!r}".format( type(obj).__name__, ann, name ) - ) + ) from e return obj diff --git a/src/_pytest/python.py b/src/_pytest/python.py 
index bf45b8830..f3c42f421 100644 --- a/src/_pytest/python.py +++ b/src/_pytest/python.py @@ -551,8 +551,10 @@ class Module(nodes.File, PyCollector): importmode = self.config.getoption("--import-mode") try: mod = import_path(self.fspath, mode=importmode) - except SyntaxError: - raise self.CollectError(ExceptionInfo.from_current().getrepr(style="short")) + except SyntaxError as e: + raise self.CollectError( + ExceptionInfo.from_current().getrepr(style="short") + ) from e except ImportPathMismatchError as e: raise self.CollectError( "import file mismatch:\n" @@ -562,8 +564,8 @@ class Module(nodes.File, PyCollector): " %s\n" "HINT: remove __pycache__ / .pyc files and/or use a " "unique basename for your test file modules" % e.args - ) - except ImportError: + ) from e + except ImportError as e: exc_info = ExceptionInfo.from_current() if self.config.getoption("verbose") < 2: exc_info.traceback = exc_info.traceback.filter(filter_traceback) @@ -578,7 +580,7 @@ class Module(nodes.File, PyCollector): "Hint: make sure your test modules/packages have valid Python names.\n" "Traceback:\n" "{traceback}".format(fspath=self.fspath, traceback=formatted_tb) - ) + ) from e except _pytest.runner.Skipped as e: if e.allow_module_level: raise @@ -587,7 +589,7 @@ class Module(nodes.File, PyCollector): "To decorate a test function, use the @pytest.mark.skip " "or @pytest.mark.skipif decorators instead, and to skip a " "module use `pytestmark = pytest.mark.{skip,skipif}." 
- ) + ) from e self.config.pluginmanager.consider_module(mod) return mod @@ -836,8 +838,8 @@ class CallSpec2: def getparam(self, name: str) -> object: try: return self.params[name] - except KeyError: - raise ValueError(name) + except KeyError as e: + raise ValueError(name) from e @property def id(self) -> str: @@ -1074,8 +1076,8 @@ class Metafunc: except TypeError: try: iter(ids) - except TypeError: - raise TypeError("ids must be a callable or an iterable") + except TypeError as e: + raise TypeError("ids must be a callable or an iterable") from e num_ids = len(parameters) # num_ids == 0 is a special case: https://github.com/pytest-dev/pytest/issues/1849 diff --git a/src/_pytest/warnings.py b/src/_pytest/warnings.py index 5cedba244..f92350b20 100644 --- a/src/_pytest/warnings.py +++ b/src/_pytest/warnings.py @@ -48,8 +48,8 @@ def _parse_filter( lineno = int(lineno_) if lineno < 0: raise ValueError - except (ValueError, OverflowError): - raise warnings._OptionError("invalid lineno {!r}".format(lineno_)) + except (ValueError, OverflowError) as e: + raise warnings._OptionError("invalid lineno {!r}".format(lineno_)) from e else: lineno = 0 return (action, message, category, module, lineno) diff --git a/testing/test_config.py b/testing/test_config.py index c9eea7a16..4e64a6928 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1778,5 +1778,5 @@ def test_conftest_import_error_repr(tmpdir): ): try: raise RuntimeError("some error") - except Exception: - raise ConftestImportFailure(path, sys.exc_info()) + except Exception as e: + raise ConftestImportFailure(path, sys.exc_info()) from e diff --git a/testing/test_runner.py b/testing/test_runner.py index 9c19ded0e..474ff4df8 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -534,8 +534,8 @@ def test_outcomeexception_passes_except_Exception() -> None: with pytest.raises(outcomes.OutcomeException): try: raise outcomes.OutcomeException("test") - except Exception: - raise NotImplementedError() + 
except Exception as e: + raise NotImplementedError from e def test_pytest_exit() -> None: From 3e6fe92b7ea3c120e8024a970bf37a7c6c137714 Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Fri, 19 Jun 2020 13:33:55 +0300 Subject: [PATCH 04/11] skipping: refactor skipif/xfail mark evaluation Previously, skipif/xfail marks were evaluated using a `MarkEvaluator` class. I found this class very difficult to understand. Instead of `MarkEvaluator`, rewrite using straight functions which are hopefully easier to follow. I tried to keep the semantics exactly as before, except improving a few error messages. --- src/_pytest/skipping.py | 344 ++++++++++++++++++++------------------- testing/test_skipping.py | 140 ++++++++-------- 2 files changed, 251 insertions(+), 233 deletions(-) diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py index ee6b40daa..894eda499 100644 --- a/src/_pytest/skipping.py +++ b/src/_pytest/skipping.py @@ -3,12 +3,13 @@ import os import platform import sys import traceback -from typing import Any -from typing import Dict -from typing import List from typing import Optional from typing import Tuple +import attr + +import _pytest._code +from _pytest.compat import TYPE_CHECKING from _pytest.config import Config from _pytest.config import hookimpl from _pytest.config.argparsing import Parser @@ -16,12 +17,14 @@ from _pytest.mark.structures import Mark from _pytest.nodes import Item from _pytest.outcomes import fail from _pytest.outcomes import skip -from _pytest.outcomes import TEST_OUTCOME from _pytest.outcomes import xfail from _pytest.reports import BaseReport from _pytest.runner import CallInfo from _pytest.store import StoreKey +if TYPE_CHECKING: + from typing import Type + def pytest_addoption(parser: Parser) -> None: group = parser.getgroup("general") @@ -64,17 +67,16 @@ def pytest_configure(config: Config) -> None: ) config.addinivalue_line( "markers", - "skipif(condition): skip the given test function if eval(condition) " - "results in a True 
value. Evaluation happens within the " - "module global context. Example: skipif('sys.platform == \"win32\"') " - "skips the test if we are on the win32 platform. see " - "https://docs.pytest.org/en/latest/skipping.html", + "skipif(condition, ..., *, reason=...): " + "skip the given test function if any of the conditions evaluate to True. " + "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " + "see https://docs.pytest.org/en/latest/skipping.html", ) config.addinivalue_line( "markers", - "xfail(condition, reason=None, run=True, raises=None, strict=False): " - "mark the test function as an expected failure if eval(condition) " - "has a True value. Optionally specify a reason for better reporting " + "xfail(condition, ..., *, reason=..., run=True, raises=None, strict=xfail_strict): " + "mark the test function as an expected failure if any of the conditions " + "evaluate to True. Optionally specify a reason for better reporting " "and run=False if you don't even want to execute the test function. " "If only specific exception(s) are expected, you can list them in " "raises, and if the test fails in other ways, it will be reported as " @@ -82,179 +84,191 @@ def pytest_configure(config: Config) -> None: ) -def compiled_eval(expr: str, d: Dict[str, object]) -> Any: - import _pytest._code +def evaluate_condition(item: Item, mark: Mark, condition: object) -> Tuple[bool, str]: + """Evaluate a single skipif/xfail condition. - exprcode = _pytest._code.compile(expr, mode="eval") - return eval(exprcode, d) + If an old-style string condition is given, it is eval()'d, otherwise the + condition is bool()'d. If this fails, an appropriately formatted pytest.fail + is raised. 
- -class MarkEvaluator: - def __init__(self, item: Item, name: str) -> None: - self.item = item - self._marks = None # type: Optional[List[Mark]] - self._mark = None # type: Optional[Mark] - self._mark_name = name - - def __bool__(self) -> bool: - # don't cache here to prevent staleness - return bool(self._get_marks()) - - def wasvalid(self) -> bool: - return not hasattr(self, "exc") - - def _get_marks(self) -> List[Mark]: - return list(self.item.iter_markers(name=self._mark_name)) - - def invalidraise(self, exc) -> Optional[bool]: - raises = self.get("raises") - if not raises: - return None - return not isinstance(exc, raises) - - def istrue(self) -> bool: + Returns (result, reason). The reason is only relevant if the result is True. + """ + # String condition. + if isinstance(condition, str): + globals_ = { + "os": os, + "sys": sys, + "platform": platform, + "config": item.config, + } + if hasattr(item, "obj"): + globals_.update(item.obj.__globals__) # type: ignore[attr-defined] try: - return self._istrue() - except TEST_OUTCOME: - self.exc = sys.exc_info() - if isinstance(self.exc[1], SyntaxError): - # TODO: Investigate why SyntaxError.offset is Optional, and if it can be None here. 
- assert self.exc[1].offset is not None - msg = [" " * (self.exc[1].offset + 4) + "^"] - msg.append("SyntaxError: invalid syntax") - else: - msg = traceback.format_exception_only(*self.exc[:2]) - fail( - "Error evaluating %r expression\n" - " %s\n" - "%s" % (self._mark_name, self.expr, "\n".join(msg)), - pytrace=False, + condition_code = _pytest._code.compile(condition, mode="eval") + result = eval(condition_code, globals_) + except SyntaxError as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + " " + " " * (exc.offset or 0) + "^", + "SyntaxError: invalid syntax", + ] + fail("\n".join(msglines), pytrace=False) + except Exception as exc: + msglines = [ + "Error evaluating %r condition" % mark.name, + " " + condition, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + # Boolean condition. + else: + try: + result = bool(condition) + except Exception as exc: + msglines = [ + "Error evaluating %r condition as a boolean" % mark.name, + *traceback.format_exception_only(type(exc), exc), + ] + fail("\n".join(msglines), pytrace=False) + + reason = mark.kwargs.get("reason", None) + if reason is None: + if isinstance(condition, str): + reason = "condition: " + condition + else: + # XXX better be checked at collection time + msg = ( + "Error evaluating %r: " % mark.name + + "you need to specify reason=STRING when using booleans as conditions." 
) + fail(msg, pytrace=False) - def _getglobals(self) -> Dict[str, object]: - d = {"os": os, "sys": sys, "platform": platform, "config": self.item.config} - if hasattr(self.item, "obj"): - d.update(self.item.obj.__globals__) # type: ignore[attr-defined] # noqa: F821 - return d - - def _istrue(self) -> bool: - if hasattr(self, "result"): - result = getattr(self, "result") # type: bool - return result - self._marks = self._get_marks() - - if self._marks: - self.result = False - for mark in self._marks: - self._mark = mark - if "condition" not in mark.kwargs: - args = mark.args - else: - args = (mark.kwargs["condition"],) - - for expr in args: - self.expr = expr - if isinstance(expr, str): - d = self._getglobals() - result = compiled_eval(expr, d) - else: - if "reason" not in mark.kwargs: - # XXX better be checked at collection time - msg = ( - "you need to specify reason=STRING " - "when using booleans as conditions." - ) - fail(msg) - result = bool(expr) - if result: - self.result = True - self.reason = mark.kwargs.get("reason", None) - self.expr = expr - return self.result - - if not args: - self.result = True - self.reason = mark.kwargs.get("reason", None) - return self.result - return False - - def get(self, attr, default=None): - if self._mark is None: - return default - return self._mark.kwargs.get(attr, default) - - def getexplanation(self): - expl = getattr(self, "reason", None) or self.get("reason", None) - if not expl: - if not hasattr(self, "expr"): - return "" - else: - return "condition: " + str(self.expr) - return expl + return result, reason +@attr.s(slots=True, frozen=True) +class Skip: + """The result of evaluate_skip_marks().""" + + reason = attr.ib(type=str) + + +def evaluate_skip_marks(item: Item) -> Optional[Skip]: + """Evaluate skip and skipif marks on item, returning Skip if triggered.""" + for mark in item.iter_markers(name="skipif"): + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = 
(mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Skip(reason) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Skip(reason) + + for mark in item.iter_markers(name="skip"): + if "reason" in mark.kwargs: + reason = mark.kwargs["reason"] + elif mark.args: + reason = mark.args[0] + else: + reason = "unconditional skip" + return Skip(reason) + + return None + + +@attr.s(slots=True, frozen=True) +class Xfail: + """The result of evaluate_xfail_marks().""" + + reason = attr.ib(type=str) + run = attr.ib(type=bool) + strict = attr.ib(type=bool) + raises = attr.ib(type=Optional[Tuple["Type[BaseException]", ...]]) + + +def evaluate_xfail_marks(item: Item) -> Optional[Xfail]: + """Evaluate xfail marks on item, returning Xfail if triggered.""" + for mark in item.iter_markers(name="xfail"): + run = mark.kwargs.get("run", True) + strict = mark.kwargs.get("strict", item.config.getini("xfail_strict")) + raises = mark.kwargs.get("raises", None) + if "condition" not in mark.kwargs: + conditions = mark.args + else: + conditions = (mark.kwargs["condition"],) + + # Unconditional. + if not conditions: + reason = mark.kwargs.get("reason", "") + return Xfail(reason, run, strict, raises) + + # If any of the conditions are true. + for condition in conditions: + result, reason = evaluate_condition(item, mark, condition) + if result: + return Xfail(reason, run, strict, raises) + + return None + + +# Whether skipped due to skip or skipif marks. skipped_by_mark_key = StoreKey[bool]() -evalxfail_key = StoreKey[MarkEvaluator]() +# Saves the xfail mark evaluation. Can be refreshed during call if None. 
+xfailed_key = StoreKey[Optional[Xfail]]() unexpectedsuccess_key = StoreKey[str]() @hookimpl(tryfirst=True) def pytest_runtest_setup(item: Item) -> None: - # Check if skip or skipif are specified as pytest marks item._store[skipped_by_mark_key] = False - eval_skipif = MarkEvaluator(item, "skipif") - if eval_skipif.istrue(): - item._store[skipped_by_mark_key] = True - skip(eval_skipif.getexplanation()) - for skip_info in item.iter_markers(name="skip"): + skipped = evaluate_skip_marks(item) + if skipped: item._store[skipped_by_mark_key] = True - if "reason" in skip_info.kwargs: - skip(skip_info.kwargs["reason"]) - elif skip_info.args: - skip(skip_info.args[0]) - else: - skip("unconditional skip") + skip(skipped.reason) - item._store[evalxfail_key] = MarkEvaluator(item, "xfail") - check_xfail_no_run(item) + if not item.config.option.runxfail: + item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) @hookimpl(hookwrapper=True) def pytest_runtest_call(item: Item): - check_xfail_no_run(item) + if not item.config.option.runxfail: + xfailed = item._store.get(xfailed_key, None) + if xfailed is None: + item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and not xfailed.run: + xfail("[NOTRUN] " + xfailed.reason) + outcome = yield passed = outcome.excinfo is None + if passed: - check_strict_xfail(item) - - -def check_xfail_no_run(item: Item) -> None: - """check xfail(run=False)""" - if not item.config.option.runxfail: - evalxfail = item._store[evalxfail_key] - if evalxfail.istrue(): - if not evalxfail.get("run", True): - xfail("[NOTRUN] " + evalxfail.getexplanation()) - - -def check_strict_xfail(item: Item) -> None: - """check xfail(strict=True) for the given PASSING test""" - evalxfail = item._store[evalxfail_key] - if evalxfail.istrue(): - strict_default = item.config.getini("xfail_strict") - is_strict_xfail = evalxfail.get("strict", strict_default) - if is_strict_xfail: 
- del item._store[evalxfail_key] - explanation = evalxfail.getexplanation() - fail("[XPASS(strict)] " + explanation, pytrace=False) + xfailed = item._store.get(xfailed_key, None) + if xfailed is None: + item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if xfailed and xfailed.strict: + del item._store[xfailed_key] + fail("[XPASS(strict)] " + xfailed.reason, pytrace=False) @hookimpl(hookwrapper=True) def pytest_runtest_makereport(item: Item, call: CallInfo[None]): outcome = yield rep = outcome.get_result() - evalxfail = item._store.get(evalxfail_key, None) + xfailed = item._store.get(xfailed_key, None) # unittest special case, see setting of unexpectedsuccess_key if unexpectedsuccess_key in item._store and rep.when == "call": reason = item._store[unexpectedsuccess_key] @@ -263,30 +277,27 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]): else: rep.longrepr = "Unexpected success" rep.outcome = "failed" - elif item.config.option.runxfail: pass # don't interfere elif call.excinfo and isinstance(call.excinfo.value, xfail.Exception): assert call.excinfo.value.msg is not None rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = "skipped" - elif evalxfail and not rep.skipped and evalxfail.wasvalid() and evalxfail.istrue(): + elif not rep.skipped and xfailed: if call.excinfo: - if evalxfail.invalidraise(call.excinfo.value): + raises = xfailed.raises + if raises is not None and not isinstance(call.excinfo.value, raises): rep.outcome = "failed" else: rep.outcome = "skipped" - rep.wasxfail = evalxfail.getexplanation() + rep.wasxfail = xfailed.reason elif call.when == "call": - strict_default = item.config.getini("xfail_strict") - is_strict_xfail = evalxfail.get("strict", strict_default) - explanation = evalxfail.getexplanation() - if is_strict_xfail: + if xfailed.strict: rep.outcome = "failed" - rep.longrepr = "[XPASS(strict)] {}".format(explanation) + rep.longrepr = "[XPASS(strict)] " + xfailed.reason else: rep.outcome = "passed" - 
rep.wasxfail = explanation + rep.wasxfail = xfailed.reason elif ( item._store.get(skipped_by_mark_key, True) and rep.skipped @@ -301,9 +312,6 @@ def pytest_runtest_makereport(item: Item, call: CallInfo[None]): rep.longrepr = str(filename), line + 1, reason -# called by terminalreporter progress reporting - - def pytest_report_teststatus(report: BaseReport) -> Optional[Tuple[str, str, str]]: if hasattr(report, "wasxfail"): if report.skipped: diff --git a/testing/test_skipping.py b/testing/test_skipping.py index a6f1a9c09..0b1c0b49b 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -2,68 +2,74 @@ import sys import pytest from _pytest.runner import runtestprotocol -from _pytest.skipping import MarkEvaluator +from _pytest.skipping import evaluate_skip_marks +from _pytest.skipping import evaluate_xfail_marks from _pytest.skipping import pytest_runtest_setup -class TestEvaluator: +class TestEvaluation: def test_no_marker(self, testdir): item = testdir.getitem("def test_func(): pass") - evalskipif = MarkEvaluator(item, "skipif") - assert not evalskipif - assert not evalskipif.istrue() + skipped = evaluate_skip_marks(item) + assert not skipped - def test_marked_no_args(self, testdir): + def test_marked_xfail_no_args(self, testdir): item = testdir.getitem( """ import pytest - @pytest.mark.xyz + @pytest.mark.xfail def test_func(): pass """ ) - ev = MarkEvaluator(item, "xyz") - assert ev - assert ev.istrue() - expl = ev.getexplanation() - assert expl == "" - assert not ev.get("run", False) + xfailed = evaluate_xfail_marks(item) + assert xfailed + assert xfailed.reason == "" + assert xfailed.run + + def test_marked_skipif_no_args(self, testdir): + item = testdir.getitem( + """ + import pytest + @pytest.mark.skipif + def test_func(): + pass + """ + ) + skipped = evaluate_skip_marks(item) + assert skipped + assert skipped.reason == "" def test_marked_one_arg(self, testdir): item = testdir.getitem( """ import pytest - @pytest.mark.xyz("hasattr(os, 'sep')") + 
@pytest.mark.skipif("hasattr(os, 'sep')") def test_func(): pass """ ) - ev = MarkEvaluator(item, "xyz") - assert ev - assert ev.istrue() - expl = ev.getexplanation() - assert expl == "condition: hasattr(os, 'sep')" + skipped = evaluate_skip_marks(item) + assert skipped + assert skipped.reason == "condition: hasattr(os, 'sep')" def test_marked_one_arg_with_reason(self, testdir): item = testdir.getitem( """ import pytest - @pytest.mark.xyz("hasattr(os, 'sep')", attr=2, reason="hello world") + @pytest.mark.skipif("hasattr(os, 'sep')", attr=2, reason="hello world") def test_func(): pass """ ) - ev = MarkEvaluator(item, "xyz") - assert ev - assert ev.istrue() - expl = ev.getexplanation() - assert expl == "hello world" - assert ev.get("attr") == 2 + skipped = evaluate_skip_marks(item) + assert skipped + assert skipped.reason == "hello world" def test_marked_one_arg_twice(self, testdir): lines = [ """@pytest.mark.skipif("not hasattr(os, 'murks')")""", - """@pytest.mark.skipif("hasattr(os, 'murks')")""", + """@pytest.mark.skipif(condition="hasattr(os, 'murks')")""", ] for i in range(0, 2): item = testdir.getitem( @@ -76,11 +82,9 @@ class TestEvaluator: """ % (lines[i], lines[(i + 1) % 2]) ) - ev = MarkEvaluator(item, "skipif") - assert ev - assert ev.istrue() - expl = ev.getexplanation() - assert expl == "condition: not hasattr(os, 'murks')" + skipped = evaluate_skip_marks(item) + assert skipped + assert skipped.reason == "condition: not hasattr(os, 'murks')" def test_marked_one_arg_twice2(self, testdir): item = testdir.getitem( @@ -92,13 +96,11 @@ class TestEvaluator: pass """ ) - ev = MarkEvaluator(item, "skipif") - assert ev - assert ev.istrue() - expl = ev.getexplanation() - assert expl == "condition: not hasattr(os, 'murks')" + skipped = evaluate_skip_marks(item) + assert skipped + assert skipped.reason == "condition: not hasattr(os, 'murks')" - def test_marked_skip_with_not_string(self, testdir) -> None: + def test_marked_skipif_with_boolean_without_reason(self, 
testdir) -> None: item = testdir.getitem( """ import pytest @@ -107,14 +109,34 @@ class TestEvaluator: pass """ ) - ev = MarkEvaluator(item, "skipif") - exc = pytest.raises(pytest.fail.Exception, ev.istrue) - assert exc.value.msg is not None + with pytest.raises(pytest.fail.Exception) as excinfo: + evaluate_skip_marks(item) + assert excinfo.value.msg is not None assert ( - """Failed: you need to specify reason=STRING when using booleans as conditions.""" - in exc.value.msg + """Error evaluating 'skipif': you need to specify reason=STRING when using booleans as conditions.""" + in excinfo.value.msg ) + def test_marked_skipif_with_invalid_boolean(self, testdir) -> None: + item = testdir.getitem( + """ + import pytest + + class InvalidBool: + def __bool__(self): + raise TypeError("INVALID") + + @pytest.mark.skipif(InvalidBool(), reason="xxx") + def test_func(): + pass + """ + ) + with pytest.raises(pytest.fail.Exception) as excinfo: + evaluate_skip_marks(item) + assert excinfo.value.msg is not None + assert "Error evaluating 'skipif' condition as a boolean" in excinfo.value.msg + assert "INVALID" in excinfo.value.msg + def test_skipif_class(self, testdir): (item,) = testdir.getitems( """ @@ -126,10 +148,9 @@ class TestEvaluator: """ ) item.config._hackxyz = 3 - ev = MarkEvaluator(item, "skipif") - assert ev.istrue() - expl = ev.getexplanation() - assert expl == "condition: config._hackxyz" + skipped = evaluate_skip_marks(item) + assert skipped + assert skipped.reason == "condition: config._hackxyz" class TestXFail: @@ -895,10 +916,10 @@ def test_errors_in_xfail_skip_expressions(testdir) -> None: result.stdout.fnmatch_lines( [ "*ERROR*test_nameerror*", - "*evaluating*skipif*expression*", + "*evaluating*skipif*condition*", "*asd*", "*ERROR*test_syntax*", - "*evaluating*xfail*expression*", + "*evaluating*xfail*condition*", " syntax error", markline, "SyntaxError: invalid syntax", @@ -924,25 +945,12 @@ def test_xfail_skipif_with_globals(testdir): 
result.stdout.fnmatch_lines(["*SKIP*x == 3*", "*XFAIL*test_boolean*", "*x == 3*"]) -def test_direct_gives_error(testdir): - testdir.makepyfile( - """ - import pytest - @pytest.mark.skipif(True) - def test_skip1(): - pass - """ - ) - result = testdir.runpytest() - result.stdout.fnmatch_lines(["*1 error*"]) - - def test_default_markers(testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines( [ - "*skipif(*condition)*skip*", - "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*", + "*skipif(condition, ..., [*], reason=...)*skip*", + "*xfail(condition, ..., [*], reason=..., run=True, raises=None, strict=xfail_strict)*expected failure*", ] ) @@ -1137,7 +1145,9 @@ def test_mark_xfail_item(testdir): class MyItem(pytest.Item): nodeid = 'foo' def setup(self): - marker = pytest.mark.xfail(True, reason="Expected failure") + marker = pytest.mark.xfail("1 == 2", reason="Expected failure - false") + self.add_marker(marker) + marker = pytest.mark.xfail(True, reason="Expected failure - true") self.add_marker(marker) def runtest(self): assert False From c9737ae914891027da5f0bd39494dd51a3b3f19f Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Sat, 20 Jun 2020 15:59:29 +0300 Subject: [PATCH 05/11] skipping: simplify xfail handling during call phase There is no need to do the XPASS check here, pytest_runtest_makereport already handled that (the current handling there is dead code). All the hook needs to do is refresh the xfail evaluation if needed, and check the NOTRUN condition again. 
--- src/_pytest/skipping.py | 21 +++++++-------------- 1 file changed, 7 insertions(+), 14 deletions(-) diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py index 894eda499..7fc43fce1 100644 --- a/src/_pytest/skipping.py +++ b/src/_pytest/skipping.py @@ -3,6 +3,7 @@ import os import platform import sys import traceback +from typing import Generator from typing import Optional from typing import Tuple @@ -244,24 +245,16 @@ def pytest_runtest_setup(item: Item) -> None: @hookimpl(hookwrapper=True) -def pytest_runtest_call(item: Item): +def pytest_runtest_call(item: Item) -> Generator[None, None, None]: + xfailed = item._store.get(xfailed_key, None) + if xfailed is None: + item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) + if not item.config.option.runxfail: - xfailed = item._store.get(xfailed_key, None) - if xfailed is None: - item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) if xfailed and not xfailed.run: xfail("[NOTRUN] " + xfailed.reason) - outcome = yield - passed = outcome.excinfo is None - - if passed: - xfailed = item._store.get(xfailed_key, None) - if xfailed is None: - item._store[xfailed_key] = xfailed = evaluate_xfail_marks(item) - if xfailed and xfailed.strict: - del item._store[xfailed_key] - fail("[XPASS(strict)] " + xfailed.reason, pytrace=False) + yield @hookimpl(hookwrapper=True) From 7d8d1b4440028660c81ca242968df89e8c6b896e Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Sun, 21 Jun 2020 20:14:45 +0300 Subject: [PATCH 06/11] skipping: better links in --markers output Suggested by Bruno. --- src/_pytest/skipping.py | 4 ++-- 1 file changed, 2 insertions(+), 2 deletions(-) diff --git a/src/_pytest/skipping.py b/src/_pytest/skipping.py index 7fc43fce1..7bd975e5a 100644 --- a/src/_pytest/skipping.py +++ b/src/_pytest/skipping.py @@ -71,7 +71,7 @@ def pytest_configure(config: Config) -> None: "skipif(condition, ..., *, reason=...): " "skip the given test function if any of the conditions evaluate to True. 
" "Example: skipif(sys.platform == 'win32') skips the test if we are on the win32 platform. " - "see https://docs.pytest.org/en/latest/skipping.html", + "See https://docs.pytest.org/en/stable/reference.html#pytest-mark-skipif", ) config.addinivalue_line( "markers", @@ -81,7 +81,7 @@ def pytest_configure(config: Config) -> None: "and run=False if you don't even want to execute the test function. " "If only specific exception(s) are expected, you can list them in " "raises, and if the test fails in other ways, it will be reported as " - "a true failure. See https://docs.pytest.org/en/latest/skipping.html", + "a true failure. See https://docs.pytest.org/en/stable/reference.html#pytest-mark-xfail", ) From b3fb5a2d47743a09c551555da22da27ce9e73f41 Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Tue, 16 Jun 2020 21:16:04 +0300 Subject: [PATCH 07/11] Type annotate pytest.mark.* builtin marks --- src/_pytest/mark/structures.py | 100 +++++++++++++++++++++++++++++---- 1 file changed, 90 insertions(+), 10 deletions(-) diff --git a/src/_pytest/mark/structures.py b/src/_pytest/mark/structures.py index 3d512816c..2d756bb42 100644 --- a/src/_pytest/mark/structures.py +++ b/src/_pytest/mark/structures.py @@ -46,11 +46,19 @@ def get_empty_parameterset_mark( ) -> "MarkDecorator": from ..nodes import Collector + fs, lineno = getfslineno(func) + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, + func.__name__, + fs, + lineno, + ) + requested_mark = config.getini(EMPTY_PARAMETERSET_OPTION) if requested_mark in ("", None, "skip"): - mark = MARK_GEN.skip + mark = MARK_GEN.skip(reason=reason) elif requested_mark == "xfail": - mark = MARK_GEN.xfail(run=False) + mark = MARK_GEN.xfail(reason=reason, run=False) elif requested_mark == "fail_at_collect": f_name = func.__name__ _, lineno = getfslineno(func) @@ -59,14 +67,7 @@ def get_empty_parameterset_mark( ) else: raise LookupError(requested_mark) - fs, lineno = getfslineno(func) - reason = "got empty parameter set 
%r, function %s at %s:%d" % ( - argnames, - func.__name__, - fs, - lineno, - ) - return mark(reason=reason) + return mark class ParameterSet( @@ -379,6 +380,76 @@ def store_mark(obj, mark: Mark) -> None: obj.pytestmark = get_unpacked_marks(obj) + [mark] +# Typing for builtin pytest marks. This is cheating; it gives builtin marks +# special privilege, and breaks modularity. But practicality beats purity... +if TYPE_CHECKING: + from _pytest.fixtures import _Scope + + class _SkipMarkDecorator(MarkDecorator): + @overload # type: ignore[override,misc] + def __call__(self, arg: _Markable) -> _Markable: + raise NotImplementedError() + + @overload # noqa: F811 + def __call__(self, reason: str = ...) -> "MarkDecorator": # noqa: F811 + raise NotImplementedError() + + class _SkipifMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + condition: Union[str, bool] = ..., + *conditions: Union[str, bool], + reason: str = ... + ) -> MarkDecorator: + raise NotImplementedError() + + class _XfailMarkDecorator(MarkDecorator): + @overload # type: ignore[override,misc] + def __call__(self, arg: _Markable) -> _Markable: + raise NotImplementedError() + + @overload # noqa: F811 + def __call__( # noqa: F811 + self, + condition: Union[str, bool] = ..., + *conditions: Union[str, bool], + reason: str = ..., + run: bool = ..., + raises: Union[BaseException, Tuple[BaseException, ...]] = ..., + strict: bool = ... + ) -> MarkDecorator: + raise NotImplementedError() + + class _ParametrizeMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, + argnames: Union[str, List[str], Tuple[str, ...]], + argvalues: Iterable[Union[ParameterSet, Sequence[object], object]], + *, + indirect: Union[bool, Sequence[str]] = ..., + ids: Optional[ + Union[ + Iterable[Union[None, str, float, int, bool]], + Callable[[object], Optional[object]], + ] + ] = ..., + scope: Optional[_Scope] = ... 
+ ) -> MarkDecorator: + raise NotImplementedError() + + class _UsefixturesMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, *fixtures: str + ) -> MarkDecorator: + raise NotImplementedError() + + class _FilterwarningsMarkDecorator(MarkDecorator): + def __call__( # type: ignore[override] + self, *filters: str + ) -> MarkDecorator: + raise NotImplementedError() + + class MarkGenerator: """Factory for :class:`MarkDecorator` objects - exposed as a ``pytest.mark`` singleton instance. @@ -397,6 +468,15 @@ class MarkGenerator: _config = None # type: Optional[Config] _markers = set() # type: Set[str] + # See TYPE_CHECKING above. + if TYPE_CHECKING: + skip = None # type: _SkipMarkDecorator + skipif = None # type: _SkipifMarkDecorator + xfail = None # type: _XfailMarkDecorator + parametrize = None # type: _ParametrizeMarkDecorator + usefixtures = None # type: _UsefixturesMarkDecorator + filterwarnings = None # type: _FilterwarningsMarkDecorator + def __getattr__(self, name: str) -> MarkDecorator: if name[0] == "_": raise AttributeError("Marker name must NOT start with underscore") From 4655b7998540d47e6f8dd783c82b37588719556d Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Sun, 21 Jun 2020 00:34:41 +0300 Subject: [PATCH 08/11] config: improve typing --- src/_pytest/_code/__init__.py | 2 + src/_pytest/_code/code.py | 2 +- src/_pytest/config/__init__.py | 185 ++++++++++++++++++++------------- src/_pytest/helpconfig.py | 6 +- src/_pytest/hookspec.py | 2 +- src/_pytest/logging.py | 12 ++- src/_pytest/pathlib.py | 2 +- src/_pytest/pytester.py | 8 +- src/_pytest/tmpdir.py | 3 +- testing/acceptance_test.py | 4 +- testing/code/test_excinfo.py | 2 +- testing/test_config.py | 6 +- 12 files changed, 143 insertions(+), 91 deletions(-) diff --git a/src/_pytest/_code/__init__.py b/src/_pytest/_code/__init__.py index 38019298c..76963c0eb 100644 --- a/src/_pytest/_code/__init__.py +++ b/src/_pytest/_code/__init__.py @@ -6,6 +6,7 @@ from .code import Frame from 
.code import getfslineno from .code import getrawcode from .code import Traceback +from .code import TracebackEntry from .source import compile_ as compile from .source import Source @@ -17,6 +18,7 @@ __all__ = [ "getfslineno", "getrawcode", "Traceback", + "TracebackEntry", "compile", "Source", ] diff --git a/src/_pytest/_code/code.py b/src/_pytest/_code/code.py index 65e5aa6d5..e548bceb7 100644 --- a/src/_pytest/_code/code.py +++ b/src/_pytest/_code/code.py @@ -213,7 +213,7 @@ class TracebackEntry: return source.getstatement(self.lineno) @property - def path(self): + def path(self) -> Union[py.path.local, str]: """ path to the source code """ return self.frame.code.path diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index b4a5a70ad..ac7afcd56 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -15,10 +15,13 @@ from typing import Any from typing import Callable from typing import Dict from typing import IO +from typing import Iterable +from typing import Iterator from typing import List from typing import Optional from typing import Sequence from typing import Set +from typing import TextIO from typing import Tuple from typing import Union @@ -42,6 +45,7 @@ from _pytest.compat import TYPE_CHECKING from _pytest.outcomes import fail from _pytest.outcomes import Skipped from _pytest.pathlib import import_path +from _pytest.pathlib import ImportMode from _pytest.pathlib import Path from _pytest.store import Store from _pytest.warning_types import PytestConfigWarning @@ -50,6 +54,7 @@ if TYPE_CHECKING: from typing import Type from _pytest._code.code import _TracebackStyle + from _pytest.terminal import TerminalReporter from .argparsing import Argument @@ -88,18 +93,24 @@ class ExitCode(enum.IntEnum): class ConftestImportFailure(Exception): - def __init__(self, path, excinfo): + def __init__( + self, + path: py.path.local, + excinfo: Tuple["Type[Exception]", Exception, TracebackType], + ) -> None: 
super().__init__(path, excinfo) self.path = path - self.excinfo = excinfo # type: Tuple[Type[Exception], Exception, TracebackType] + self.excinfo = excinfo - def __str__(self): + def __str__(self) -> str: return "{}: {} (from {})".format( self.excinfo[0].__name__, self.excinfo[1], self.path ) -def filter_traceback_for_conftest_import_failure(entry) -> bool: +def filter_traceback_for_conftest_import_failure( + entry: _pytest._code.TracebackEntry, +) -> bool: """filters tracebacks entries which point to pytest internals or importlib. Make a special case for importlib because we use it to import test modules and conftest files @@ -108,7 +119,10 @@ def filter_traceback_for_conftest_import_failure(entry) -> bool: return filter_traceback(entry) and "importlib" not in str(entry.path).split(os.sep) -def main(args=None, plugins=None) -> Union[int, ExitCode]: +def main( + args: Optional[List[str]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> Union[int, ExitCode]: """ return exit code, after performing an in-process test run. :arg args: list of command line arguments. @@ -177,7 +191,7 @@ class cmdline: # compatibility namespace main = staticmethod(main) -def filename_arg(path, optname): +def filename_arg(path: str, optname: str) -> str: """ Argparse type validator for filename arguments. :path: path of filename @@ -188,7 +202,7 @@ def filename_arg(path, optname): return path -def directory_arg(path, optname): +def directory_arg(path: str, optname: str) -> str: """Argparse type validator for directory arguments. 
:path: path of directory @@ -239,13 +253,16 @@ builtin_plugins = set(default_plugins) builtin_plugins.add("pytester") -def get_config(args=None, plugins=None): +def get_config( + args: Optional[List[str]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> "Config": # subsequent calls to main will create a fresh instance pluginmanager = PytestPluginManager() config = Config( pluginmanager, invocation_params=Config.InvocationParams( - args=args or (), plugins=plugins, dir=Path.cwd() + args=args or (), plugins=plugins, dir=Path.cwd(), ), ) @@ -255,10 +272,11 @@ def get_config(args=None, plugins=None): for spec in default_plugins: pluginmanager.import_plugin(spec) + return config -def get_plugin_manager(): +def get_plugin_manager() -> "PytestPluginManager": """ Obtain a new instance of the :py:class:`_pytest.config.PytestPluginManager`, with default plugins @@ -271,8 +289,9 @@ def get_plugin_manager(): def _prepareconfig( - args: Optional[Union[py.path.local, List[str]]] = None, plugins=None -): + args: Optional[Union[py.path.local, List[str]]] = None, + plugins: Optional[Sequence[Union[str, _PluggyPlugin]]] = None, +) -> "Config": if args is None: args = sys.argv[1:] elif isinstance(args, py.path.local): @@ -290,9 +309,10 @@ def _prepareconfig( pluginmanager.consider_pluginarg(plugin) else: pluginmanager.register(plugin) - return pluginmanager.hook.pytest_cmdline_parse( + config = pluginmanager.hook.pytest_cmdline_parse( pluginmanager=pluginmanager, args=args ) + return config except BaseException: config._ensure_unconfigure() raise @@ -313,13 +333,11 @@ class PytestPluginManager(PluginManager): super().__init__("pytest") # The objects are module objects, only used generically. - self._conftest_plugins = set() # type: Set[object] + self._conftest_plugins = set() # type: Set[types.ModuleType] - # state related to local conftest plugins - # Maps a py.path.local to a list of module objects. 
- self._dirpath2confmods = {} # type: Dict[Any, List[object]] - # Maps a py.path.local to a module object. - self._conftestpath2mod = {} # type: Dict[Any, object] + # State related to local conftest plugins. + self._dirpath2confmods = {} # type: Dict[py.path.local, List[types.ModuleType]] + self._conftestpath2mod = {} # type: Dict[Path, types.ModuleType] self._confcutdir = None # type: Optional[py.path.local] self._noconftest = False self._duplicatepaths = set() # type: Set[py.path.local] @@ -328,7 +346,7 @@ class PytestPluginManager(PluginManager): self.register(self) if os.environ.get("PYTEST_DEBUG"): err = sys.stderr # type: IO[str] - encoding = getattr(err, "encoding", "utf8") + encoding = getattr(err, "encoding", "utf8") # type: str try: err = open( os.dup(err.fileno()), mode=err.mode, buffering=1, encoding=encoding, @@ -343,7 +361,7 @@ class PytestPluginManager(PluginManager): # Used to know when we are importing conftests after the pytest_configure stage self._configured = False - def parse_hookimpl_opts(self, plugin, name): + def parse_hookimpl_opts(self, plugin: _PluggyPlugin, name: str): # pytest hooks are always prefixed with pytest_ # so we avoid accessing possibly non-readable attributes # (see issue #1073) @@ -372,7 +390,7 @@ class PytestPluginManager(PluginManager): opts.setdefault(name, hasattr(method, name) or name in known_marks) return opts - def parse_hookspec_opts(self, module_or_class, name): + def parse_hookspec_opts(self, module_or_class, name: str): opts = super().parse_hookspec_opts(module_or_class, name) if opts is None: method = getattr(module_or_class, name) @@ -389,7 +407,9 @@ class PytestPluginManager(PluginManager): } return opts - def register(self, plugin: _PluggyPlugin, name: Optional[str] = None): + def register( + self, plugin: _PluggyPlugin, name: Optional[str] = None + ) -> Optional[str]: if name in _pytest.deprecated.DEPRECATED_EXTERNAL_PLUGINS: warnings.warn( PytestConfigWarning( @@ -399,8 +419,8 @@ class 
PytestPluginManager(PluginManager): ) ) ) - return - ret = super().register(plugin, name) + return None + ret = super().register(plugin, name) # type: Optional[str] if ret: self.hook.pytest_plugin_registered.call_historic( kwargs=dict(plugin=plugin, manager=self) @@ -410,11 +430,12 @@ class PytestPluginManager(PluginManager): self.consider_module(plugin) return ret - def getplugin(self, name): + def getplugin(self, name: str): # support deprecated naming because plugins (xdist e.g.) use it - return self.get_plugin(name) + plugin = self.get_plugin(name) # type: Optional[_PluggyPlugin] + return plugin - def hasplugin(self, name): + def hasplugin(self, name: str) -> bool: """Return True if the plugin with the given name is registered.""" return bool(self.get_plugin(name)) @@ -436,7 +457,7 @@ class PytestPluginManager(PluginManager): # # internal API for local conftest plugin handling # - def _set_initial_conftests(self, namespace): + def _set_initial_conftests(self, namespace: argparse.Namespace) -> None: """ load initial conftest files given a preparsed "namespace". 
As conftest files may add their own command line options which have arguments ('--my-opt somepath') we might get some @@ -454,8 +475,8 @@ class PytestPluginManager(PluginManager): self._using_pyargs = namespace.pyargs testpaths = namespace.file_or_dir foundanchor = False - for path in testpaths: - path = str(path) + for testpath in testpaths: + path = str(testpath) # remove node-id syntax i = path.find("::") if i != -1: @@ -467,7 +488,9 @@ class PytestPluginManager(PluginManager): if not foundanchor: self._try_load_conftest(current, namespace.importmode) - def _try_load_conftest(self, anchor, importmode): + def _try_load_conftest( + self, anchor: py.path.local, importmode: Union[str, ImportMode] + ) -> None: self._getconftestmodules(anchor, importmode) # let's also consider test* subdirs if anchor.check(dir=1): @@ -476,7 +499,9 @@ class PytestPluginManager(PluginManager): self._getconftestmodules(x, importmode) @lru_cache(maxsize=128) - def _getconftestmodules(self, path, importmode): + def _getconftestmodules( + self, path: py.path.local, importmode: Union[str, ImportMode], + ) -> List[types.ModuleType]: if self._noconftest: return [] @@ -499,7 +524,9 @@ class PytestPluginManager(PluginManager): self._dirpath2confmods[directory] = clist return clist - def _rget_with_confmod(self, name, path, importmode): + def _rget_with_confmod( + self, name: str, path: py.path.local, importmode: Union[str, ImportMode], + ) -> Tuple[types.ModuleType, Any]: modules = self._getconftestmodules(path, importmode) for mod in reversed(modules): try: @@ -508,7 +535,9 @@ class PytestPluginManager(PluginManager): continue raise KeyError(name) - def _importconftest(self, conftestpath, importmode): + def _importconftest( + self, conftestpath: py.path.local, importmode: Union[str, ImportMode], + ) -> types.ModuleType: # Use a resolved Path object as key to avoid loading the same conftest twice # with build systems that create build directories containing # symlinks to actual files. 
@@ -526,7 +555,9 @@ class PytestPluginManager(PluginManager): try: mod = import_path(conftestpath, mode=importmode) except Exception as e: - raise ConftestImportFailure(conftestpath, sys.exc_info()) from e + assert e.__traceback__ is not None + exc_info = (type(e), e, e.__traceback__) + raise ConftestImportFailure(conftestpath, exc_info) from e self._check_non_top_pytest_plugins(mod, conftestpath) @@ -542,7 +573,9 @@ class PytestPluginManager(PluginManager): self.consider_conftest(mod) return mod - def _check_non_top_pytest_plugins(self, mod, conftestpath): + def _check_non_top_pytest_plugins( + self, mod: types.ModuleType, conftestpath: py.path.local, + ) -> None: if ( hasattr(mod, "pytest_plugins") and self._configured @@ -564,7 +597,9 @@ class PytestPluginManager(PluginManager): # # - def consider_preparse(self, args, *, exclude_only: bool = False) -> None: + def consider_preparse( + self, args: Sequence[str], *, exclude_only: bool = False + ) -> None: i = 0 n = len(args) while i < n: @@ -585,7 +620,7 @@ class PytestPluginManager(PluginManager): continue self.consider_pluginarg(parg) - def consider_pluginarg(self, arg) -> None: + def consider_pluginarg(self, arg: str) -> None: if arg.startswith("no:"): name = arg[3:] if name in essential_plugins: @@ -610,7 +645,7 @@ class PytestPluginManager(PluginManager): del self._name2plugin["pytest_" + name] self.import_plugin(arg, consider_entry_points=True) - def consider_conftest(self, conftestmodule) -> None: + def consider_conftest(self, conftestmodule: types.ModuleType) -> None: self.register(conftestmodule, name=conftestmodule.__file__) def consider_env(self) -> None: @@ -619,7 +654,7 @@ class PytestPluginManager(PluginManager): def consider_module(self, mod: types.ModuleType) -> None: self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) - def _import_plugin_specs(self, spec): + def _import_plugin_specs(self, spec) -> None: plugins = _get_plugin_specs_as_list(spec) for import_spec in plugins: 
self.import_plugin(import_spec) @@ -636,7 +671,6 @@ class PytestPluginManager(PluginManager): assert isinstance(modname, str), ( "module name as text required, got %r" % modname ) - modname = str(modname) if self.is_blocked(modname) or self.get_plugin(modname) is not None: return @@ -668,7 +702,7 @@ class PytestPluginManager(PluginManager): self.register(mod, modname) -def _get_plugin_specs_as_list(specs): +def _get_plugin_specs_as_list(specs) -> List[str]: """ Parses a list of "plugin specs" and returns a list of plugin names. @@ -688,7 +722,7 @@ def _get_plugin_specs_as_list(specs): return [] -def _ensure_removed_sysmodule(modname): +def _ensure_removed_sysmodule(modname: str) -> None: try: del sys.modules[modname] except KeyError: @@ -703,7 +737,7 @@ class Notset: notset = Notset() -def _iter_rewritable_modules(package_files): +def _iter_rewritable_modules(package_files: Iterable[str]) -> Iterator[str]: """ Given an iterable of file names in a source distribution, return the "names" that should be marked for assertion rewrite (for example the package "pytest_mock/__init__.py" should @@ -766,6 +800,10 @@ def _iter_rewritable_modules(package_files): yield from _iter_rewritable_modules(new_package_files) +def _args_converter(args: Iterable[str]) -> Tuple[str, ...]: + return tuple(args) + + class Config: """ Access to configuration values, pluginmanager and plugin hooks. @@ -793,9 +831,9 @@ class Config: Plugins accessing ``InvocationParams`` must be aware of that. 
""" - args = attr.ib(converter=tuple) + args = attr.ib(type=Tuple[str, ...], converter=_args_converter) """tuple of command-line arguments as passed to ``pytest.main()``.""" - plugins = attr.ib() + plugins = attr.ib(type=Optional[Sequence[Union[str, _PluggyPlugin]]]) """list of extra plugins, might be `None`.""" dir = attr.ib(type=Path) """directory where ``pytest.main()`` was invoked from.""" @@ -855,7 +893,7 @@ class Config: """Backward compatibility""" return py.path.local(str(self.invocation_params.dir)) - def add_cleanup(self, func) -> None: + def add_cleanup(self, func: Callable[[], None]) -> None: """ Add a function to be called when the config object gets out of use (usually coninciding with pytest_unconfigure).""" self._cleanup.append(func) @@ -876,12 +914,15 @@ class Config: fin = self._cleanup.pop() fin() - def get_terminal_writer(self): - return self.pluginmanager.get_plugin("terminalreporter")._tw + def get_terminal_writer(self) -> TerminalWriter: + terminalreporter = self.pluginmanager.get_plugin( + "terminalreporter" + ) # type: TerminalReporter + return terminalreporter._tw def pytest_cmdline_parse( self, pluginmanager: PytestPluginManager, args: List[str] - ) -> object: + ) -> "Config": try: self.parse(args) except UsageError: @@ -923,7 +964,7 @@ class Config: sys.stderr.write("INTERNALERROR> %s\n" % line) sys.stderr.flush() - def cwd_relative_nodeid(self, nodeid): + def cwd_relative_nodeid(self, nodeid: str) -> str: # nodeid's are relative to the rootpath, compute relative to cwd if self.invocation_dir != self.rootdir: fullpath = self.rootdir.join(nodeid) @@ -931,7 +972,7 @@ class Config: return nodeid @classmethod - def fromdictargs(cls, option_dict, args): + def fromdictargs(cls, option_dict, args) -> "Config": """ constructor usable for subprocesses. 
""" config = get_config(args) config.option.__dict__.update(option_dict) @@ -949,7 +990,7 @@ class Config: setattr(self.option, opt.dest, opt.default) @hookimpl(trylast=True) - def pytest_load_initial_conftests(self, early_config): + def pytest_load_initial_conftests(self, early_config: "Config") -> None: self.pluginmanager._set_initial_conftests(early_config.known_args_namespace) def _initini(self, args: Sequence[str]) -> None: @@ -1078,7 +1119,7 @@ class Config: raise self._validate_keys() - def _checkversion(self): + def _checkversion(self) -> None: import pytest minver = self.inicfg.get("minversion", None) @@ -1167,7 +1208,7 @@ class Config: except PrintHelp: pass - def addinivalue_line(self, name, line): + def addinivalue_line(self, name: str, line: str) -> None: """ add a line to an ini-file option. The option must have been declared but might not yet be set in which case the line becomes the the first line in its value. """ @@ -1186,7 +1227,7 @@ class Config: self._inicache[name] = val = self._getini(name) return val - def _getini(self, name: str) -> Any: + def _getini(self, name: str): try: description, type, default = self._parser._inidict[name] except KeyError as e: @@ -1231,12 +1272,14 @@ class Config: else: return value elif type == "bool": - return bool(_strtobool(str(value).strip())) + return _strtobool(str(value).strip()) else: assert type is None return value - def _getconftest_pathlist(self, name, path): + def _getconftest_pathlist( + self, name: str, path: py.path.local + ) -> Optional[List[py.path.local]]: try: mod, relroots = self.pluginmanager._rget_with_confmod( name, path, self.getoption("importmode") @@ -1244,7 +1287,7 @@ class Config: except KeyError: return None modpath = py.path.local(mod.__file__).dirpath() - values = [] + values = [] # type: List[py.path.local] for relroot in relroots: if not isinstance(relroot, py.path.local): relroot = relroot.replace("/", py.path.local.sep) @@ -1295,16 +1338,16 @@ class Config: pytest.skip("no {!r} 
option found".format(name)) raise ValueError("no option named {!r}".format(name)) from e - def getvalue(self, name, path=None): + def getvalue(self, name: str, path=None): """ (deprecated, use getoption()) """ return self.getoption(name) - def getvalueorskip(self, name, path=None): + def getvalueorskip(self, name: str, path=None): """ (deprecated, use getoption(skip=True)) """ return self.getoption(name, skip=True) -def _assertion_supported(): +def _assertion_supported() -> bool: try: assert False except AssertionError: @@ -1313,7 +1356,7 @@ def _assertion_supported(): return False -def _warn_about_missing_assertion(mode): +def _warn_about_missing_assertion(mode) -> None: if not _assertion_supported(): if mode == "plain": sys.stderr.write( @@ -1331,12 +1374,14 @@ def _warn_about_missing_assertion(mode): ) -def create_terminal_writer(config: Config, *args, **kwargs) -> TerminalWriter: +def create_terminal_writer( + config: Config, file: Optional[TextIO] = None +) -> TerminalWriter: """Create a TerminalWriter instance configured according to the options in the config object. Every code which requires a TerminalWriter object and has access to a config object should use this function. """ - tw = TerminalWriter(*args, **kwargs) + tw = TerminalWriter(file=file) if config.option.color == "yes": tw.hasmarkup = True if config.option.color == "no": @@ -1344,8 +1389,8 @@ def create_terminal_writer(config: Config, *args, **kwargs) -> TerminalWriter: return tw -def _strtobool(val): - """Convert a string representation of truth to true (1) or false (0). +def _strtobool(val: str) -> bool: + """Convert a string representation of truth to True or False. True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values are 'n', 'no', 'f', 'false', 'off', and '0'. 
Raises ValueError if @@ -1355,8 +1400,8 @@ def _strtobool(val): """ val = val.lower() if val in ("y", "yes", "t", "true", "on", "1"): - return 1 + return True elif val in ("n", "no", "f", "false", "off", "0"): - return 0 + return False else: raise ValueError("invalid truth value {!r}".format(val)) diff --git a/src/_pytest/helpconfig.py b/src/_pytest/helpconfig.py index 06e0954cf..24952852b 100644 --- a/src/_pytest/helpconfig.py +++ b/src/_pytest/helpconfig.py @@ -96,7 +96,7 @@ def pytest_addoption(parser: Parser) -> None: @pytest.hookimpl(hookwrapper=True) def pytest_cmdline_parse(): outcome = yield - config = outcome.get_result() + config = outcome.get_result() # type: Config if config.option.debug: path = os.path.abspath("pytestdebug.log") debugfile = open(path, "w") @@ -124,7 +124,7 @@ def pytest_cmdline_parse(): config.add_cleanup(unset_tracing) -def showversion(config): +def showversion(config: Config) -> None: if config.option.version > 1: sys.stderr.write( "This is pytest version {}, imported from {}\n".format( @@ -224,7 +224,7 @@ def showhelp(config: Config) -> None: conftest_options = [("pytest_plugins", "list of plugin names to load")] -def getpluginversioninfo(config): +def getpluginversioninfo(config: Config) -> List[str]: lines = [] plugininfo = config.pluginmanager.list_plugin_distinfo() if plugininfo: diff --git a/src/_pytest/hookspec.py b/src/_pytest/hookspec.py index eba6f5ba9..c05b60791 100644 --- a/src/_pytest/hookspec.py +++ b/src/_pytest/hookspec.py @@ -143,7 +143,7 @@ def pytest_configure(config: "Config") -> None: @hookspec(firstresult=True) def pytest_cmdline_parse( pluginmanager: "PytestPluginManager", args: List[str] -) -> Optional[object]: +) -> Optional["Config"]: """return initialized config object, parsing the specified args. 
Stops at first non-None result, see :ref:`firstresult` diff --git a/src/_pytest/logging.py b/src/_pytest/logging.py index a06dc1ab5..57aa14f27 100644 --- a/src/_pytest/logging.py +++ b/src/_pytest/logging.py @@ -141,9 +141,14 @@ class PercentStyleMultiline(logging.PercentStyle): if auto_indent_option is None: return 0 - elif type(auto_indent_option) is int: + elif isinstance(auto_indent_option, bool): + if auto_indent_option: + return -1 + else: + return 0 + elif isinstance(auto_indent_option, int): return int(auto_indent_option) - elif type(auto_indent_option) is str: + elif isinstance(auto_indent_option, str): try: return int(auto_indent_option) except ValueError: @@ -153,9 +158,6 @@ class PercentStyleMultiline(logging.PercentStyle): return -1 except ValueError: return 0 - elif type(auto_indent_option) is bool: - if auto_indent_option: - return -1 return 0 diff --git a/src/_pytest/pathlib.py b/src/_pytest/pathlib.py index 66ae9a51d..dd7443f07 100644 --- a/src/_pytest/pathlib.py +++ b/src/_pytest/pathlib.py @@ -466,7 +466,7 @@ def import_path( """ mode = ImportMode(mode) - path = Path(p) + path = Path(str(p)) if not path.exists(): raise ImportError(path) diff --git a/src/_pytest/pytester.py b/src/_pytest/pytester.py index cf3dbd201..fd4c10577 100644 --- a/src/_pytest/pytester.py +++ b/src/_pytest/pytester.py @@ -1054,7 +1054,7 @@ class Testdir: args.append("--basetemp=%s" % self.tmpdir.dirpath("basetemp")) return args - def parseconfig(self, *args: Union[str, py.path.local]) -> Config: + def parseconfig(self, *args) -> Config: """Return a new pytest Config instance from given commandline args. 
This invokes the pytest bootstrapping code in _pytest.config to create @@ -1070,14 +1070,14 @@ class Testdir: import _pytest.config - config = _pytest.config._prepareconfig(args, self.plugins) # type: Config + config = _pytest.config._prepareconfig(args, self.plugins) # type: ignore[arg-type] # we don't know what the test will do with this half-setup config # object and thus we make sure it gets unconfigured properly in any # case (otherwise capturing could still be active, for example) self.request.addfinalizer(config._ensure_unconfigure) return config - def parseconfigure(self, *args): + def parseconfigure(self, *args) -> Config: """Return a new pytest configured Config instance. This returns a new :py:class:`_pytest.config.Config` instance like @@ -1318,7 +1318,7 @@ class Testdir: Returns a :py:class:`RunResult`. """ __tracebackhide__ = True - p = make_numbered_dir(root=Path(self.tmpdir), prefix="runpytest-") + p = make_numbered_dir(root=Path(str(self.tmpdir)), prefix="runpytest-") args = ("--basetemp=%s" % p,) + args plugins = [x for x in self.plugins if isinstance(x, str)] if plugins: diff --git a/src/_pytest/tmpdir.py b/src/_pytest/tmpdir.py index 199c7c937..f6d1799ad 100644 --- a/src/_pytest/tmpdir.py +++ b/src/_pytest/tmpdir.py @@ -13,6 +13,7 @@ from .pathlib import LOCK_TIMEOUT from .pathlib import make_numbered_dir from .pathlib import make_numbered_dir_with_cleanup from .pathlib import Path +from _pytest.config import Config from _pytest.fixtures import FixtureRequest from _pytest.monkeypatch import MonkeyPatch @@ -135,7 +136,7 @@ def get_user() -> Optional[str]: return None -def pytest_configure(config) -> None: +def pytest_configure(config: Config) -> None: """Create a TempdirFactory and attach it to the config object. 
This is to comply with existing plugins which expect the handler to be diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index d8f7a501a..66c2bf0bf 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -585,11 +585,11 @@ class TestInvocationVariants: # Type ignored because `py.test` is not and will not be typed. assert pytest.main == py.test.cmdline.main # type: ignore[attr-defined] - def test_invoke_with_invalid_type(self): + def test_invoke_with_invalid_type(self) -> None: with pytest.raises( TypeError, match="expected to be a list of strings, got: '-h'" ): - pytest.main("-h") + pytest.main("-h") # type: ignore[arg-type] def test_invoke_with_path(self, tmpdir, capsys): retcode = pytest.main(tmpdir) diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py index 0ff00bcaa..75c937612 100644 --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -372,7 +372,7 @@ def test_excinfo_no_python_sourcecode(tmpdir): for item in excinfo.traceback: print(item) # XXX: for some reason jinja.Template.render is printed in full item.source # shouldn't fail - if item.path.basename == "test.txt": + if isinstance(item.path, py.path.local) and item.path.basename == "test.txt": assert str(item.source) == "{{ h()}}:" diff --git a/testing/test_config.py b/testing/test_config.py index 4e64a6928..c1e4471b9 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1778,5 +1778,7 @@ def test_conftest_import_error_repr(tmpdir): ): try: raise RuntimeError("some error") - except Exception as e: - raise ConftestImportFailure(path, sys.exc_info()) from e + except Exception as exc: + assert exc.__traceback__ is not None + exc_info = (type(exc), exc, exc.__traceback__) + raise ConftestImportFailure(path, exc_info) from exc From 04a6d378234e3c72055c7e90084b1a2d36d3f89d Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Mon, 22 Jun 2020 15:07:50 +0300 Subject: [PATCH 09/11] nodes: fix string possibly stored in 
Node.keywords instead of MarkDecorator This mistake was introduced in 7259c453d6c1dba6727cd328e6db5635ccf5821c. --- src/_pytest/nodes.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/src/_pytest/nodes.py b/src/_pytest/nodes.py index 4c7aa1bcd..24e466586 100644 --- a/src/_pytest/nodes.py +++ b/src/_pytest/nodes.py @@ -276,7 +276,7 @@ class Node(metaclass=NodeMeta): marker_ = getattr(MARK_GEN, marker) else: raise ValueError("is not a string or pytest.mark.* Marker") - self.keywords[marker_.name] = marker + self.keywords[marker_.name] = marker_ if append: self.own_markers.append(marker_.mark) else: From 8994e1e3a17bd625e0c258d0a402062542908fe3 Mon Sep 17 00:00:00 2001 From: Ran Benita Date: Tue, 23 Jun 2020 11:38:21 +0300 Subject: [PATCH 10/11] config: make _get_plugin_specs_as_list a little clearer and more general --- src/_pytest/config/__init__.py | 41 +++++++++++++++++++--------------- testing/test_config.py | 17 ++++++-------- 2 files changed, 30 insertions(+), 28 deletions(-) diff --git a/src/_pytest/config/__init__.py b/src/_pytest/config/__init__.py index ac7afcd56..717743e79 100644 --- a/src/_pytest/config/__init__.py +++ b/src/_pytest/config/__init__.py @@ -1,5 +1,6 @@ """ command line options, ini-file and conftest.py processing. 
""" import argparse +import collections.abc import contextlib import copy import enum @@ -654,7 +655,9 @@ class PytestPluginManager(PluginManager): def consider_module(self, mod: types.ModuleType) -> None: self._import_plugin_specs(getattr(mod, "pytest_plugins", [])) - def _import_plugin_specs(self, spec) -> None: + def _import_plugin_specs( + self, spec: Union[None, types.ModuleType, str, Sequence[str]] + ) -> None: plugins = _get_plugin_specs_as_list(spec) for import_spec in plugins: self.import_plugin(import_spec) @@ -702,24 +705,26 @@ class PytestPluginManager(PluginManager): self.register(mod, modname) -def _get_plugin_specs_as_list(specs) -> List[str]: - """ - Parses a list of "plugin specs" and returns a list of plugin names. - - Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in - which case it is returned as a list. Specs can also be `None` in which case an - empty list is returned. - """ - if specs is not None and not isinstance(specs, types.ModuleType): - if isinstance(specs, str): - specs = specs.split(",") if specs else [] - if not isinstance(specs, (list, tuple)): - raise UsageError( - "Plugin specs must be a ','-separated string or a " - "list/tuple of strings for plugin names. Given: %r" % specs - ) +def _get_plugin_specs_as_list( + specs: Union[None, types.ModuleType, str, Sequence[str]] +) -> List[str]: + """Parse a plugins specification into a list of plugin names.""" + # None means empty. + if specs is None: + return [] + # Workaround for #3899 - a submodule which happens to be called "pytest_plugins". + if isinstance(specs, types.ModuleType): + return [] + # Comma-separated list. + if isinstance(specs, str): + return specs.split(",") if specs else [] + # Direct specification. + if isinstance(specs, collections.abc.Sequence): return list(specs) - return [] + raise UsageError( + "Plugins may be specified as a sequence or a ','-separated string of plugin names. 
Got: %r" + % specs + ) def _ensure_removed_sysmodule(modname: str) -> None: diff --git a/testing/test_config.py b/testing/test_config.py index c1e4471b9..bc0da93a5 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -11,6 +11,7 @@ import py.path import _pytest._code import pytest from _pytest.compat import importlib_metadata +from _pytest.config import _get_plugin_specs_as_list from _pytest.config import _iter_rewritable_modules from _pytest.config import Config from _pytest.config import ConftestImportFailure @@ -1115,21 +1116,17 @@ def test_load_initial_conftest_last_ordering(_config_for_test): assert [x.function.__module__ for x in values] == expected -def test_get_plugin_specs_as_list(): - from _pytest.config import _get_plugin_specs_as_list - - def exp_match(val): +def test_get_plugin_specs_as_list() -> None: + def exp_match(val: object) -> str: return ( - "Plugin specs must be a ','-separated string" - " or a list/tuple of strings for plugin names. Given: {}".format( - re.escape(repr(val)) - ) + "Plugins may be specified as a sequence or a ','-separated string of plugin names. 
Got: %s" + % re.escape(repr(val)) ) with pytest.raises(pytest.UsageError, match=exp_match({"foo"})): - _get_plugin_specs_as_list({"foo"}) + _get_plugin_specs_as_list({"foo"}) # type: ignore[arg-type] with pytest.raises(pytest.UsageError, match=exp_match({})): - _get_plugin_specs_as_list(dict()) + _get_plugin_specs_as_list(dict()) # type: ignore[arg-type] assert _get_plugin_specs_as_list(None) == [] assert _get_plugin_specs_as_list("") == [] From 617bf8be5b0d5fa59dfb72a27c66f4f5f54f7e26 Mon Sep 17 00:00:00 2001 From: David Diaz Barquero Date: Tue, 23 Jun 2020 10:03:46 -0600 Subject: [PATCH 11/11] Add details to error message for junit (#7390) Co-authored-by: Bruno Oliveira --- changelog/7385.improvement.rst | 13 +++++++++++++ src/_pytest/junitxml.py | 12 +++++++++--- testing/test_junitxml.py | 12 +++++++----- 3 files changed, 29 insertions(+), 8 deletions(-) create mode 100644 changelog/7385.improvement.rst diff --git a/changelog/7385.improvement.rst b/changelog/7385.improvement.rst new file mode 100644 index 000000000..c02fee5da --- /dev/null +++ b/changelog/7385.improvement.rst @@ -0,0 +1,13 @@ +``--junitxml`` now includes the exception cause in the ``message`` XML attribute for failures during setup and teardown. + +Previously: + +.. code-block:: xml + + + +Now: + +.. 
code-block:: xml + + diff --git a/src/_pytest/junitxml.py b/src/_pytest/junitxml.py index 86e8fcf38..4df7535de 100644 --- a/src/_pytest/junitxml.py +++ b/src/_pytest/junitxml.py @@ -236,10 +236,16 @@ class _NodeReporter: self._add_simple(Junit.skipped, "collection skipped", report.longrepr) def append_error(self, report: TestReport) -> None: - if report.when == "teardown": - msg = "test teardown failure" + assert report.longrepr is not None + if getattr(report.longrepr, "reprcrash", None) is not None: + reason = report.longrepr.reprcrash.message else: - msg = "test setup failure" + reason = str(report.longrepr) + + if report.when == "teardown": + msg = 'failed on teardown with "{}"'.format(reason) + else: + msg = 'failed on setup with "{}"'.format(reason) self._add_simple(Junit.error, msg, report.longrepr) def append_skipped(self, report: TestReport) -> None: diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index f8a6a295f..5e5826b23 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -266,7 +266,7 @@ class TestPython: @pytest.fixture def arg(request): - raise ValueError() + raise ValueError("Error reason") def test_function(arg): pass """ @@ -278,7 +278,7 @@ class TestPython: tnode = node.find_first_by_tag("testcase") tnode.assert_attr(classname="test_setup_error", name="test_function") fnode = tnode.find_first_by_tag("error") - fnode.assert_attr(message="test setup failure") + fnode.assert_attr(message='failed on setup with "ValueError: Error reason"') assert "ValueError" in fnode.toxml() @parametrize_families @@ -290,7 +290,7 @@ class TestPython: @pytest.fixture def arg(): yield - raise ValueError() + raise ValueError('Error reason') def test_function(arg): pass """ @@ -301,7 +301,7 @@ class TestPython: tnode = node.find_first_by_tag("testcase") tnode.assert_attr(classname="test_teardown_error", name="test_function") fnode = tnode.find_first_by_tag("error") - fnode.assert_attr(message="test teardown failure") + 
fnode.assert_attr(message='failed on teardown with "ValueError: Error reason"') assert "ValueError" in fnode.toxml() @parametrize_families @@ -328,7 +328,9 @@ class TestPython: fnode = first.find_first_by_tag("failure") fnode.assert_attr(message="Exception: Call Exception") snode = second.find_first_by_tag("error") - snode.assert_attr(message="test teardown failure") + snode.assert_attr( + message='failed on teardown with "Exception: Teardown Exception"' + ) @parametrize_families def test_skip_contains_name_reason(self, testdir, run_and_parse, xunit_family):