Merge remote-tracking branch 'upstream/master' into mm

Conflicts:
	src/_pytest/_code/code.py
	src/_pytest/main.py
	testing/python/metafunc.py
	testing/test_parseopt.py
	testing/test_pytester.py

commit 93b74d28d2

@@ -140,18 +140,18 @@ jobs:
         run: "tox -e ${{ matrix.tox_env }}"

       - name: Prepare coverage token
-        if: success() && !matrix.skip_coverage && ( github.repository == 'pytest-dev/pytest' || github.event_name == 'pull_request' )
+        if: (!matrix.skip_coverage && ( github.repository == 'pytest-dev/pytest' || github.event_name == 'pull_request' ))
         run: |
           python scripts/append_codecov_token.py

       - name: Combine coverage
-        if: success() && !matrix.skip_coverage
+        if: (!matrix.skip_coverage)
         run: |
           python -m coverage combine
           python -m coverage xml

       - name: Codecov upload
-        if: success() && !matrix.skip_coverage
+        if: (!matrix.skip_coverage)
        uses: codecov/codecov-action@v1
        with:
          token: ${{ secrets.codecov }}

AUTHORS

@@ -55,7 +55,6 @@ Charles Cloud
 Charles Machalow
 Charnjit SiNGH (CCSJ)
 Chris Lamb
-Chris NeJame
 Christian Boelsen
 Christian Fetzer
 Christian Neumüller

@@ -1 +0,0 @@
-Captured output during teardown is shown with ``-rP``.

@@ -1,2 +0,0 @@
-Fix a ``pytest-xdist`` crash when dealing with exceptions raised in subprocesses created by the
-``multiprocessing`` module.

@@ -1 +0,0 @@
-Optimized automatic renaming of test parameter IDs.

@@ -1,3 +0,0 @@
-:class:`FixtureDef <_pytest.fixtures.FixtureDef>` objects now properly register their finalizers with autouse and
-parameterized fixtures that execute before them in the fixture stack so they are torn
-down at the right times, and in the right order.

@@ -1 +0,0 @@
-Fix parsing of outcomes containing multiple errors with ``testdir`` results (regression in 5.3.0).

@@ -6,6 +6,8 @@ Release announcements
    :maxdepth: 2

+   release-5.3.4
+   release-5.3.3
    release-5.3.2
    release-5.3.1
    release-5.3.0

@@ -0,0 +1,30 @@
+pytest-5.3.3
+=======================================
+
+pytest 5.3.3 has just been released to PyPI.
+
+This is a bug-fix release, being a drop-in replacement. To upgrade::
+
+  pip install --upgrade pytest
+
+The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
+
+Thanks to all who contributed to this release, among them:
+
+* Adam Johnson
+* Alexandre Mulatinho
+* Anthony Sottile
+* Bruno Oliveira
+* Chris NeJame
+* Daniel Hahler
+* Hugo van Kemenade
+* Marcelo Duarte Trevisani
+* PaulC
+* Ran Benita
+* Ryan Barner
+* Seth Junot
+* marc
+
+
+Happy testing,
+The pytest Development Team

@@ -0,0 +1,20 @@
+pytest-5.3.4
+=======================================
+
+pytest 5.3.4 has just been released to PyPI.
+
+This is a bug-fix release, being a drop-in replacement. To upgrade::
+
+  pip install --upgrade pytest
+
+The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
+
+Thanks to all who contributed to this release, among them:
+
+* Bruno Oliveira
+* Daniel Hahler
+* Ran Benita
+
+
+Happy testing,
+The pytest Development Team

@@ -28,6 +28,44 @@ with advance notice in the **Deprecations** section of releases.

 .. towncrier release notes start

+pytest 5.3.4 (2020-01-20)
+=========================
+
+Bug Fixes
+---------
+
+- `#6496 <https://github.com/pytest-dev/pytest/issues/6496>`_: Revert `#6436 <https://github.com/pytest-dev/pytest/issues/6436>`__: unfortunately this change has caused a number of regressions in many suites,
+  so the team decided to revert this change and make a new release while we continue to look for a solution.
+
+
+pytest 5.3.3 (2020-01-16)
+=========================
+
+Bug Fixes
+---------
+
+- `#2780 <https://github.com/pytest-dev/pytest/issues/2780>`_: Captured output during teardown is shown with ``-rP``.
+
+
+- `#5971 <https://github.com/pytest-dev/pytest/issues/5971>`_: Fix a ``pytest-xdist`` crash when dealing with exceptions raised in subprocesses created by the
+  ``multiprocessing`` module.
+
+
+- `#6436 <https://github.com/pytest-dev/pytest/issues/6436>`_: :class:`FixtureDef <_pytest.fixtures.FixtureDef>` objects now properly register their finalizers with autouse and
+  parameterized fixtures that execute before them in the fixture stack so they are torn
+  down at the right times, and in the right order.
+
+
+- `#6532 <https://github.com/pytest-dev/pytest/issues/6532>`_: Fix parsing of outcomes containing multiple errors with ``testdir`` results (regression in 5.3.0).
+
+
+
+Trivial/Internal Changes
+------------------------
+
+- `#6350 <https://github.com/pytest-dev/pytest/issues/6350>`_: Optimized automatic renaming of test parameter IDs.
+
+
 pytest 5.3.2 (2019-12-13)
 =========================

@@ -4842,7 +4880,7 @@ time or change existing behaviors in order to make them less surprising/more use
 * Updated docstrings with a more uniform style.

 * Add stderr write for ``pytest.exit(msg)`` during startup. Previously the message was never shown.
-  Thanks `@BeyondEvil`_ for reporting `#1210`_. Thanks to `@jgsonesen`_ and
+  Thanks `@BeyondEvil`_ for reporting `#1210`_. Thanks to @jgsonesen and
   `@tomviner`_ for the PR.

 * No longer display the incorrect test deselection reason (`#1372`_).

@@ -4974,7 +5012,6 @@ time or change existing behaviors in order to make them less surprising/more use
 .. _@gprasad84: https://github.com/gprasad84
 .. _@graingert: https://github.com/graingert
 .. _@hartym: https://github.com/hartym
-.. _@jgsonesen: https://github.com/jgsonesen
 .. _@kalekundert: https://github.com/kalekundert
 .. _@kvas-it: https://github.com/kvas-it
 .. _@marscher: https://github.com/marscher

@@ -20,8 +20,6 @@ which were registered by installed plugins.
 Initialization: determining rootdir and inifile
 -----------------------------------------------
-
-

 pytest determines a ``rootdir`` for each test run which depends on
 the command line arguments (specified test files, paths) and on
 the existence of *ini-files*. The determined ``rootdir`` and *ini-file* are

@@ -30,17 +28,17 @@ printed as part of the pytest header during startup.
 Here's a summary what ``pytest`` uses ``rootdir`` for:

 * Construct *nodeids* during collection; each test is assigned
-  a unique *nodeid* which is rooted at the ``rootdir`` and takes in account full path,
-  class name, function name and parametrization (if any).
+  a unique *nodeid* which is rooted at the ``rootdir`` and takes into account
+  the full path, class name, function name and parametrization (if any).

 * Is used by plugins as a stable location to store project/test run specific information;
   for example, the internal :ref:`cache <cache>` plugin creates a ``.pytest_cache`` subdirectory
   in ``rootdir`` to store its cross-test run state.

-Important to emphasize that ``rootdir`` is **NOT** used to modify ``sys.path``/``PYTHONPATH`` or
+``rootdir`` is **NOT** used to modify ``sys.path``/``PYTHONPATH`` or
 influence how modules are imported. See :ref:`pythonpath` for more details.

-``--rootdir=path`` command-line option can be used to force a specific directory.
+The ``--rootdir=path`` command-line option can be used to force a specific directory.
 The directory passed may contain environment variables when it is used in conjunction
 with ``addopts`` in a ``pytest.ini`` file.

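Because ``rootdir`` is the stable base that node IDs and the cache plugin hang off of, plugin or conftest code can use it the same way. A minimal sketch, assuming a hypothetical ``data/`` directory under the project root:

# conftest.py -- a sketch of resolving files against the stable rootdir.
# The "data" directory and file name are hypothetical.
import pytest


@pytest.fixture
def datadir(request):
    # request.config.rootdir is a py.path.local in pytest 5.x
    return request.config.rootdir.join("data")


def test_uses_datadir(datadir):
    assert datadir.join("sample.txt").check()
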
@@ -443,7 +443,7 @@ Now we can profile which test functions execute the slowest:
    ========================= slowest 3 test durations =========================
    0.30s call test_some_are_slow.py::test_funcslow2
    0.20s call test_some_are_slow.py::test_funcslow1
-   0.10s call test_some_are_slow.py::test_funcfast
+   0.11s call test_some_are_slow.py::test_funcfast
    ============================ 3 passed in 0.12s =============================

 incremental testing - test steps

@@ -100,7 +100,7 @@ def pre_release(version, *, skip_check_links):
     print()
     print(f"{Fore.CYAN}[generate.pre_release] {Fore.GREEN}All done!")
     print()
-    print(f"Please push your branch and open a PR.")
+    print("Please push your branch and open a PR.")


 def changelog(version, write_out=False):

@@ -41,7 +41,7 @@ if TYPE_CHECKING:

     from _pytest._code import Source

-    _TracebackStyle = Literal["long", "short", "no", "native"]
+    _TracebackStyle = Literal["long", "short", "line", "no", "native"]


 class Code:

@@ -67,9 +67,10 @@ class Code:
         return not self == other

     @property
-    def path(self):
-        """ return a path object pointing to source code (note that it
-        might not point to an actually existing file). """
+    def path(self) -> Union[py.path.local, str]:
+        """ return a path object pointing to source code (or a str in case
+        of OSError / non-existing file).
+        """
         try:
             p = py.path.local(self.raw.co_filename)
             # maybe don't try this checking

@@ -335,7 +336,7 @@ class Traceback(List[TracebackEntry]):
                 (path is None or codepath == path)
                 and (
                     excludepath is None
-                    or not hasattr(codepath, "relto")
+                    or not isinstance(codepath, py.path.local)
                     or not codepath.relto(excludepath)
                 )
                 and (lineno is None or x.lineno == lineno)

@@ -919,7 +920,7 @@ class TerminalRepr:
     def __repr__(self) -> str:
         return "<{} instance at {:0x}>".format(self.__class__, id(self))

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         raise NotImplementedError()


@@ -930,7 +931,7 @@ class ExceptionRepr(TerminalRepr):
     def addsection(self, name: str, content: str, sep: str = "-") -> None:
         self.sections.append((name, content, sep))

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         for name, content, sep in self.sections:
             tw.sep(sep, name)
             tw.line(content)

@@ -950,7 +951,7 @@ class ExceptionChainRepr(ExceptionRepr):
         self.reprtraceback = chain[-1][0]
         self.reprcrash = chain[-1][1]

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         for element in self.chain:
             element[0].toterminal(tw)
             if element[2] is not None:

@@ -967,7 +968,7 @@ class ReprExceptionInfo(ExceptionRepr):
         self.reprtraceback = reprtraceback
         self.reprcrash = reprcrash

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         self.reprtraceback.toterminal(tw)
         super().toterminal(tw)


@@ -985,7 +986,7 @@ class ReprTraceback(TerminalRepr):
         self.extraline = extraline
         self.style = style

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         # the entries might have different styles
         for i, entry in enumerate(self.reprentries):
             if entry.style == "long":

@@ -1017,7 +1018,7 @@ class ReprEntryNative(TerminalRepr):
     def __init__(self, tblines: Sequence[str]) -> None:
         self.lines = tblines

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         tw.write("".join(self.lines))


@@ -1036,7 +1037,7 @@ class ReprEntry(TerminalRepr):
         self.reprfileloc = filelocrepr
         self.style = style

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         if self.style == "short":
             assert self.reprfileloc is not None
             self.reprfileloc.toterminal(tw)

@@ -1071,7 +1072,7 @@ class ReprFileLocation(TerminalRepr):
         self.lineno = lineno
         self.message = message

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         # filename and lineno output for each entry,
         # using an output format that most editors understand
         msg = self.message

@@ -1086,7 +1087,7 @@ class ReprLocals(TerminalRepr):
     def __init__(self, lines: Sequence[str]) -> None:
         self.lines = lines

-    def toterminal(self, tw, indent="") -> None:
+    def toterminal(self, tw: py.io.TerminalWriter, indent="") -> None:
         for line in self.lines:
             tw.line(indent + line)


@@ -1095,7 +1096,7 @@ class ReprFuncArgs(TerminalRepr):
     def __init__(self, args: Sequence[Tuple[str, object]]) -> None:
         self.args = args

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         if self.args:
             linesofar = ""
             for name, value in self.args:

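The annotations above commit ``toterminal`` to the ``py.io.TerminalWriter`` interface. A minimal sketch of a custom ``TerminalRepr`` written against it (the subclass is hypothetical; ``sep``/``line`` are the same calls the classes above use):

import py
from _pytest._code.code import TerminalRepr


class BannerRepr(TerminalRepr):  # hypothetical example subclass
    def __init__(self, text: str) -> None:
        self.text = text

    def toterminal(self, tw: py.io.TerminalWriter) -> None:
        tw.sep("=", "banner")  # separator line with a title
        tw.line(self.text)


# TerminalRepr.__str__ renders through toterminal, so this prints the banner:
print(str(BannerRepr("hello")))
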
@@ -5,8 +5,8 @@ import sys
 import textwrap
 import tokenize
 import warnings
-from ast import PyCF_ONLY_AST as _AST_FLAG
 from bisect import bisect_right
+from types import CodeType
 from types import FrameType
 from typing import Iterator
 from typing import List

@@ -18,6 +18,10 @@ from typing import Union
 import py

+from _pytest.compat import overload
 from _pytest.compat import TYPE_CHECKING
+
+if TYPE_CHECKING:
+    from typing_extensions import Literal


 class Source:

@@ -121,7 +125,7 @@ class Source:
         start, end = self.getstatementrange(lineno)
         return self[start:end]

-    def getstatementrange(self, lineno: int):
+    def getstatementrange(self, lineno: int) -> Tuple[int, int]:
         """ return (start, end) tuple which spans the minimal
         statement region which containing the given lineno.
         """

@@ -154,14 +158,36 @@ class Source:
     def __str__(self) -> str:
         return "\n".join(self.lines)

+    @overload
     def compile(
         self,
-        filename=None,
-        mode="exec",
+        filename: Optional[str] = ...,
+        mode: str = ...,
+        flag: "Literal[0]" = ...,
+        dont_inherit: int = ...,
+        _genframe: Optional[FrameType] = ...,
+    ) -> CodeType:
+        raise NotImplementedError()
+
+    @overload  # noqa: F811
+    def compile(  # noqa: F811
+        self,
+        filename: Optional[str] = ...,
+        mode: str = ...,
+        flag: int = ...,
+        dont_inherit: int = ...,
+        _genframe: Optional[FrameType] = ...,
+    ) -> Union[CodeType, ast.AST]:
+        raise NotImplementedError()
+
+    def compile(  # noqa: F811
+        self,
+        filename: Optional[str] = None,
+        mode: str = "exec",
         flag: int = 0,
         dont_inherit: int = 0,
         _genframe: Optional[FrameType] = None,
-    ):
+    ) -> Union[CodeType, ast.AST]:
         """ return compiled code object. if filename is None
         invent an artificial filename which displays
         the source/line position of the caller frame.

@@ -191,8 +217,10 @@ class Source:
                 newex.text = ex.text
                 raise newex
         else:
-            if flag & _AST_FLAG:
+            if flag & ast.PyCF_ONLY_AST:
+                assert isinstance(co, ast.AST)
                 return co
+            assert isinstance(co, CodeType)
             lines = [(x + "\n") for x in self.lines]
             # Type ignored because linecache.cache is private.
             linecache.cache[filename] = (1, None, lines, filename)  # type: ignore

@@ -204,7 +232,35 @@ class Source:
 #


-def compile_(source, filename=None, mode="exec", flags: int = 0, dont_inherit: int = 0):
+@overload
+def compile_(
+    source: Union[str, bytes, ast.mod, ast.AST],
+    filename: Optional[str] = ...,
+    mode: str = ...,
+    flags: "Literal[0]" = ...,
+    dont_inherit: int = ...,
+) -> CodeType:
+    raise NotImplementedError()
+
+
+@overload  # noqa: F811
+def compile_(  # noqa: F811
+    source: Union[str, bytes, ast.mod, ast.AST],
+    filename: Optional[str] = ...,
+    mode: str = ...,
+    flags: int = ...,
+    dont_inherit: int = ...,
+) -> Union[CodeType, ast.AST]:
+    raise NotImplementedError()
+
+
+def compile_(  # noqa: F811
+    source: Union[str, bytes, ast.mod, ast.AST],
+    filename: Optional[str] = None,
+    mode: str = "exec",
+    flags: int = 0,
+    dont_inherit: int = 0,
+) -> Union[CodeType, ast.AST]:
     """ compile the given source to a raw code object,
     and maintain an internal cache which allows later
     retrieval of the source code for the code object

@@ -212,14 +268,16 @@ def compile_(source, filename=None, mode="exec", flags: int = 0, dont_inherit: i
     """
     if isinstance(source, ast.AST):
         # XXX should Source support having AST?
-        return compile(source, filename, mode, flags, dont_inherit)
+        assert filename is not None
+        co = compile(source, filename, mode, flags, dont_inherit)
+        assert isinstance(co, (CodeType, ast.AST))
+        return co
     _genframe = sys._getframe(1)  # the caller
     s = Source(source)
-    co = s.compile(filename, mode, flags, _genframe=_genframe)
-    return co
+    return s.compile(filename, mode, flags, _genframe=_genframe)


-def getfslineno(obj):
+def getfslineno(obj) -> Tuple[Union[str, py.path.local], int]:
     """ Return source location (path, lineno) for the given object.
     If the source cannot be determined return ("", -1).

@@ -316,7 +374,7 @@ def getstatementrange_ast(
     # don't produce duplicate warnings when compiling source to find ast
     with warnings.catch_warnings():
         warnings.simplefilter("ignore")
-        astnode = compile(content, "source", "exec", _AST_FLAG)
+        astnode = ast.parse(content, "source", "exec")

     start, end = get_statement_startend2(lineno, astnode)
     # we need to correct the end:

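The paired ``@overload`` declarations let a type checker key the return type off the ``flag``/``flags`` value: with the ``Literal[0]`` default the result is pinned to a code object, while any other flag may yield an AST. The same pattern in isolation, a sketch assuming Python 3.8+ for ``typing.Literal``:

import ast
from types import CodeType
from typing import Literal, Union, overload  # typing_extensions on older Pythons


@overload
def compile2(source: str, flags: Literal[0] = ...) -> CodeType: ...


@overload
def compile2(source: str, flags: int = ...) -> Union[CodeType, ast.AST]: ...


def compile2(source: str, flags: int = 0) -> Union[CodeType, ast.AST]:
    # with flags=ast.PyCF_ONLY_AST the builtin compile() returns an AST node
    return compile(source, "<demo>", "exec", flags)


code_obj = compile2("x = 1")                   # checker infers CodeType
either = compile2("x = 1", ast.PyCF_ONLY_AST)  # CodeType | ast.AST
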
@@ -98,8 +98,10 @@ def getlocation(function, curdir=None) -> str:
     function = get_real_func(function)
     fn = py.path.local(inspect.getfile(function))
     lineno = function.__code__.co_firstlineno
-    if curdir is not None and fn.relto(curdir):
-        fn = fn.relto(curdir)
+    if curdir is not None:
+        relfn = fn.relto(curdir)
+        if relfn:
+            return "%s:%d" % (relfn, lineno + 1)
     return "%s:%d" % (fn, lineno + 1)

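The rewrite hinges on ``py.path.local.relto`` returning an empty string when the path is not under ``curdir``, so the result doubles as the truth test and is only computed once. For illustration:

import py

fn = py.path.local("/project/tests/test_x.py")
print(fn.relto("/project"))    # "tests/test_x.py"  (truthy -> use relative form)
print(fn.relto("/elsewhere"))  # ""                 (falsy  -> fall back to fn)
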
@@ -121,7 +121,9 @@ def determine_setup(
         sections = ["tool:pytest", "pytest"] if is_cfg_file else ["pytest"]
         for section in sections:
             try:
-                inicfg = iniconfig[section]
+                inicfg = iniconfig[
+                    section
+                ]  # type: Optional[py.iniconfig._SectionWrapper]
                 if is_cfg_file and section == "pytest" and config is not None:
                     fail(
                         CFG_PYTEST_SECTION.format(filename=str(inifile)), pytrace=False

@@ -13,6 +13,8 @@ from typing import Sequence
 from typing import Tuple
 from typing import Union

+import py
+
 import pytest
 from _pytest import outcomes
 from _pytest._code.code import ExceptionInfo

@@ -137,7 +139,7 @@ class ReprFailDoctest(TerminalRepr):
     ):
         self.reprlocation_lines = reprlocation_lines

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         for reprlocation, lines in self.reprlocation_lines:
             for line in lines:
                 tw.line(line)

@@ -425,7 +425,7 @@ class FixtureRequest:
         return self._pyfuncitem.getparent(_pytest.python.Module).obj

     @scopeproperty()
-    def fspath(self):
+    def fspath(self) -> py.path.local:
         """ the file system path of the test module which collected this test. """
         return self._pyfuncitem.fspath

@@ -749,7 +749,7 @@ class FixtureLookupErrorRepr(TerminalRepr):
         self.firstlineno = firstlineno
         self.argname = argname

-    def toterminal(self, tw) -> None:
+    def toterminal(self, tw: py.io.TerminalWriter) -> None:
         # tw.line("FixtureLookupError: %s" %(self.argname), red=True)
         for tbline in self.tblines:
             tw.line(tbline.rstrip())

@@ -881,7 +881,9 @@ class FixtureDef:
         self._finalizers = []

     def execute(self, request):
-        for argname in self._dependee_fixture_argnames(request):
+        # get required arguments and register our own finish()
+        # with their finalization
+        for argname in self.argnames:
             fixturedef = request._get_active_fixturedef(argname)
             if argname != "request":
                 fixturedef.addfinalizer(functools.partial(self.finish, request=request))

@@ -904,61 +906,6 @@ class FixtureDef:
         hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
         return hook.pytest_fixture_setup(fixturedef=self, request=request)

-    def _dependee_fixture_argnames(self, request):
-        """A list of argnames for fixtures that this fixture depends on.
-
-        Given a request, this looks at the currently known list of fixture argnames, and
-        attempts to determine what slice of the list contains fixtures that it can know
-        should execute before it. This information is necessary so that this fixture can
-        know what fixtures to register its finalizer with to make sure that if they
-        would be torn down, they would tear down this fixture before themselves. It's
-        crucial for fixtures to be torn down in the inverse order that they were set up
-        in so that they don't try to clean up something that another fixture is still
-        depending on.
-
-        When autouse fixtures are involved, it can be tricky to figure out when fixtures
-        should be torn down. To solve this, this method leverages the ``fixturenames``
-        list provided by the ``request`` object, as this list is at least somewhat
-        sorted (in terms of the order fixtures are set up in) by the time this method is
-        reached. It's sorted enough that the starting point of fixtures that depend on
-        this one can be found using the ``self._parent_request`` stack.
-
-        If a request in the ``self._parent_request`` stack has a ``:class:FixtureDef``
-        associated with it, then that fixture is dependent on this one, so any fixture
-        names that appear in the list of fixture argnames that come after it can also be
-        ruled out. The argnames of all fixtures associated with a request in the
-        ``self._parent_request`` stack are found, and the lowest index argname is
-        considered the earliest point in the list of fixture argnames where everything
-        from that point onward can be considered to execute after this fixture.
-        Everything before this point can be considered fixtures that this fixture
-        depends on, and so this fixture should register its finalizer with all of them
-        to ensure that if any of them are to be torn down, they will tear this fixture
-        down first.
-
-        This is the first part of the list of fixture argnames that is returned. The last
-        part of the list is everything in ``self.argnames`` as those are explicit
-        dependees of this fixture, so this fixture should definitely register its
-        finalizer with them.
-        """
-        all_fix_names = request.fixturenames
-        try:
-            current_fix_index = all_fix_names.index(self.argname)
-        except ValueError:
-            current_fix_index = len(request.fixturenames)
-        parent_fixture_indexes = set()
-
-        parent_request = request._parent_request
-        while hasattr(parent_request, "_parent_request"):
-            if hasattr(parent_request, "_fixturedef"):
-                parent_fix_name = parent_request._fixturedef.argname
-                if parent_fix_name in all_fix_names:
-                    parent_fixture_indexes.add(all_fix_names.index(parent_fix_name))
-            parent_request = parent_request._parent_request
-
-        stack_slice_index = min([current_fix_index, *parent_fixture_indexes])
-        active_fixture_argnames = all_fix_names[:stack_slice_index]
-        return {*active_fixture_argnames, *self.argnames}
-
     def cache_key(self, request):
         return request.param_index if not hasattr(request, "param") else request.param

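The removed helper (and the revert in #6496) revolve around one invariant: a fixture set up after a parameterized fixture must be torn down before that fixture is recreated for its next parameter. A minimal reproduction of the situation, with hypothetical fixture names:

import pytest


@pytest.fixture(params=["p1", "p2"])
def param_fix(request):
    yield request.param
    print("teardown param_fix[%s]" % request.param)


@pytest.fixture
def dependent(param_fix):
    yield
    # should run before param_fix's teardown whenever the param rolls over
    print("teardown dependent (used %s)" % param_fix)


def test_order(dependent):
    pass

Running this with ``pytest -q -s`` shows whether ``dependent`` is finalized before each ``param_fix`` teardown.
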
@@ -378,7 +378,9 @@ class _bestrelpath_cache(dict):
 class Session(nodes.FSCollector):
     Interrupted = Interrupted
     Failed = Failed
+    # Set on the session by runner.pytest_sessionstart.
     _setupstate = None  # type: SetupState
+    # Set on the session by fixtures.pytest_sessionstart.
     _fixturemanager = None  # type: FixtureManager

     def __init__(self, config):

@@ -90,7 +90,7 @@ class Node(metaclass=NodeMeta):

     def __init__(
         self,
-        name,
+        name: str,
         parent: Optional["Node"] = None,
         config: Optional[Config] = None,
         session: Optional["Session"] = None,

@@ -476,7 +476,7 @@ class Item(Node):
         if content:
             self._report_sections.append((when, key, content))

-    def reportinfo(self) -> Tuple[str, Optional[int], str]:
+    def reportinfo(self) -> Tuple[Union[py.path.local, str], Optional[int], str]:
         return self.fspath, None, ""

     @cached_property

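``reportinfo`` is what the terminal writer asks an item for its location; the widened first element admits both ``py.path.local`` and plain strings. A sketch of a custom item overriding it (hypothetical collector code):

import pytest


class YamlItem(pytest.Item):  # hypothetical item from a custom collector
    def runtest(self):
        pass

    def reportinfo(self):
        # (fspath-or-str, lineno, description) per the annotation above
        return self.fspath, 0, "usecase: {}".format(self.name)
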
@@ -39,6 +39,8 @@ from _pytest.reports import TestReport
 if TYPE_CHECKING:
     from typing import Type

+    import pexpect
+

 IGNORE_PAM = [  # filenames added when obtaining details about the current user
     "/var/lib/sss/mc/passwd"

@@ -1235,7 +1237,9 @@ class Testdir:
         args = self._getpytestargs() + args
         return self.run(*args, timeout=timeout)

-    def spawn_pytest(self, string, expect_timeout=10.0):
+    def spawn_pytest(
+        self, string: str, expect_timeout: float = 10.0
+    ) -> "pexpect.spawn":
         """Run pytest using pexpect.

         This makes sure to use the right pytest and sets up the temporary

@@ -1249,7 +1253,7 @@ class Testdir:
         cmd = "{} --basetemp={} {}".format(invoke, basetemp, string)
         return self.spawn(cmd, expect_timeout=expect_timeout)

-    def spawn(self, cmd, expect_timeout=10.0):
+    def spawn(self, cmd: str, expect_timeout: float = 10.0) -> "pexpect.spawn":
         """Run a command using pexpect.

         The pexpect child is returned.

@@ -57,7 +57,7 @@ def deprecated_call(func=None, *args, **kwargs):

 @overload
 def warns(
-    expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
+    expected_warning: Optional[Union["Type[Warning]", Tuple["Type[Warning]", ...]]],
     *,
     match: "Optional[Union[str, Pattern]]" = ...
 ) -> "WarningsChecker":

@@ -66,7 +66,7 @@ def warns(

 @overload  # noqa: F811
 def warns(  # noqa: F811
-    expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
+    expected_warning: Optional[Union["Type[Warning]", Tuple["Type[Warning]", ...]]],
     func: Callable,
     *args: Any,
     match: Optional[Union[str, "Pattern"]] = ...,

@@ -76,7 +76,7 @@ def warns(  # noqa: F811


 def warns(  # noqa: F811
-    expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
+    expected_warning: Optional[Union["Type[Warning]", Tuple["Type[Warning]", ...]]],
     *args: Any,
     match: Optional[Union[str, "Pattern"]] = None,
     **kwargs: Any

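``Optional`` here matches longstanding runtime behavior: in pytest 5.x, ``pytest.warns(None)`` records whatever warnings occur without asserting a particular one. For example:

import warnings

import pytest


def test_collects_any_warnings():
    with pytest.warns(None) as record:  # no specific warning class required
        warnings.warn("caution", UserWarning)
    assert len(record) == 1
    assert str(record[0].message) == "caution"
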
@@ -1,5 +1,6 @@
 from io import StringIO
 from pprint import pprint
+from typing import Any
 from typing import List
 from typing import Optional
 from typing import Tuple

@@ -17,6 +18,7 @@ from _pytest._code.code import ReprFuncArgs
 from _pytest._code.code import ReprLocals
 from _pytest._code.code import ReprTraceback
 from _pytest._code.code import TerminalRepr
+from _pytest.compat import TYPE_CHECKING
 from _pytest.nodes import Node
 from _pytest.outcomes import skip
 from _pytest.pathlib import Path

@@ -41,9 +43,14 @@ class BaseReport:
     sections = []  # type: List[Tuple[str, str]]
     nodeid = None  # type: str

-    def __init__(self, **kw):
+    def __init__(self, **kw: Any) -> None:
         self.__dict__.update(kw)

+    if TYPE_CHECKING:
+        # Can have arbitrary fields given to __init__().
+        def __getattr__(self, key: str) -> Any:
+            raise NotImplementedError()
+
     def toterminal(self, out) -> None:
         if hasattr(self, "node"):
             out.line(getslaveinfoline(self.node))  # type: ignore

@@ -114,7 +121,7 @@ class BaseReport:
     skipped = property(lambda x: x.outcome == "skipped")

     @property
-    def fspath(self):
+    def fspath(self) -> str:
         return self.nodeid.split("::")[0]

     @property

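Defining ``__getattr__`` only under ``TYPE_CHECKING`` is a stub for the checker: mypy then accepts arbitrary attributes on report objects, while runtime attribute lookup stays strict because the method never actually exists. The pattern in isolation:

from typing import TYPE_CHECKING, Any


class Record:
    def __init__(self, **kw: Any) -> None:
        self.__dict__.update(kw)

    if TYPE_CHECKING:
        # Seen by the type checker only: any attribute access is Any.
        def __getattr__(self, key: str) -> Any: ...


r = Record(outcome="passed")
print(r.outcome)  # fine for mypy and at runtime
# r.missing would still raise AttributeError at runtime -- the stub never runs
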
@@ -15,7 +15,9 @@ from .reports import CollectErrorRepr
 from .reports import CollectReport
 from .reports import TestReport
 from _pytest._code.code import ExceptionInfo
+from _pytest._code.code import ExceptionRepr
 from _pytest.compat import TYPE_CHECKING
+from _pytest.nodes import Collector
 from _pytest.nodes import Node
 from _pytest.outcomes import Exit
 from _pytest.outcomes import Skipped

@@ -251,7 +253,7 @@ def pytest_runtest_makereport(item, call):
     return TestReport.from_item_and_call(item, call)


-def pytest_make_collect_report(collector) -> CollectReport:
+def pytest_make_collect_report(collector: Collector) -> CollectReport:
     call = CallInfo.from_call(lambda: list(collector.collect()), "collect")
     longrepr = None
     if not call.excinfo:

@@ -264,7 +266,10 @@ def pytest_make_collect_report(collector) -> CollectReport:
             skip_exceptions.append(unittest.SkipTest)  # type: ignore
         if call.excinfo.errisinstance(tuple(skip_exceptions)):
             outcome = "skipped"
-            r = collector._repr_failure_py(call.excinfo, "line").reprcrash
+            r_ = collector._repr_failure_py(call.excinfo, "line")
+            assert isinstance(r_, ExceptionRepr), r_
+            r = r_.reprcrash
+            assert r
             longrepr = (str(r.path), r.lineno, r.message)
         else:
             outcome = "failed"

@@ -606,7 +606,7 @@ class TestInvocationVariants:
     def test_equivalence_pytest_pytest(self):
         assert pytest.main == py.test.cmdline.main

-    def test_invoke_with_invalid_type(self, capsys):
+    def test_invoke_with_invalid_type(self):
         with pytest.raises(
             TypeError, match="expected to be a list or tuple of strings, got: '-h'"
         ):

@@ -617,7 +617,7 @@ class TestInvocationVariants:
         assert retcode == ExitCode.NO_TESTS_COLLECTED
         out, err = capsys.readouterr()

-    def test_invoke_plugin_api(self, testdir, capsys):
+    def test_invoke_plugin_api(self, capsys):
         class MyPlugin:
             def pytest_addoption(self, parser):
                 parser.addoption("--myopt")

@@ -21,8 +21,6 @@ except ImportError:
 else:
     invalidate_import_caches = getattr(importlib, "invalidate_caches", None)

-pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))
-

 @pytest.fixture
 def limited_recursion_depth():

@@ -857,7 +855,7 @@ raise ValueError()
     from _pytest._code.code import TerminalRepr

     class MyRepr(TerminalRepr):
-        def toterminal(self, tw) -> None:
+        def toterminal(self, tw: py.io.TerminalWriter) -> None:
             tw.line("я")

     x = str(MyRepr())

@@ -4,10 +4,13 @@
 import ast
 import inspect
 import sys
+from types import CodeType
 from typing import Any
 from typing import Dict
 from typing import Optional

+import py
+
 import _pytest._code
 import pytest
 from _pytest._code import Source

@@ -147,6 +150,10 @@ class TestAccesses:
         assert len(x.lines) == 2
         assert str(x) == "def f(x):\n    pass"

+    def test_getrange_step_not_supported(self) -> None:
+        with pytest.raises(IndexError, match=r"step"):
+            self.source[::2]
+
     def test_getline(self) -> None:
         x = self.source[0]
         assert x == "def f(x):"

@@ -449,6 +456,14 @@ def test_idem_compile_and_getsource() -> None:
     assert src == expected


+def test_compile_ast() -> None:
+    # We don't necessarily want to support this.
+    # This test was added just for coverage.
+    stmt = ast.parse("def x(): pass")
+    co = _pytest._code.compile(stmt, filename="foo.py")
+    assert isinstance(co, CodeType)
+
+
 def test_findsource_fallback() -> None:
     from _pytest._code.source import findsource

@@ -488,6 +503,7 @@ def test_getfslineno() -> None:

     fspath, lineno = getfslineno(f)

+    assert isinstance(fspath, py.path.local)
     assert fspath.basename == "test_source.py"
     assert lineno == f.__code__.co_firstlineno - 1  # see findsource

@@ -17,7 +17,7 @@ if sys.gettrace():


 @pytest.hookimpl(hookwrapper=True, tryfirst=True)
-def pytest_collection_modifyitems(config, items):
+def pytest_collection_modifyitems(items):
     """Prefer faster tests.

     Use a hookwrapper to do this in the beginning, so e.g. --ff still works

@@ -626,7 +626,7 @@ def test_log_cli_ini_level(testdir):
     "cli_args",
     ["", "--log-level=WARNING", "--log-file-level=WARNING", "--log-cli-level=WARNING"],
 )
-def test_log_cli_auto_enable(testdir, request, cli_args):
+def test_log_cli_auto_enable(testdir, cli_args):
     """Check that live logs are enabled if --log-level or --log-cli-level is passed on the CLI.
     It should not be auto enabled if the same configs are set on the INI file.
     """

@@ -286,7 +286,7 @@ class TestFunction:

         return pytest.Function.from_parent(config=config, parent=session, **kwargs)

-    def test_function_equality(self, testdir, tmpdir):
+    def test_function_equality(self, testdir):
         def func1():
             pass

@@ -492,7 +492,7 @@ class TestFunction:
         )
         assert "foo" in keywords[1] and "bar" in keywords[1] and "baz" in keywords[1]

-    def test_function_equality_with_callspec(self, testdir, tmpdir):
+    def test_function_equality_with_callspec(self, testdir):
         items = testdir.getitems(
             """
             import pytest

@@ -509,11 +509,11 @@ class TestFunction:
         config = item.config

         class MyPlugin1:
-            def pytest_pyfunc_call(self, pyfuncitem):
+            def pytest_pyfunc_call(self):
                 raise ValueError

         class MyPlugin2:
-            def pytest_pyfunc_call(self, pyfuncitem):
+            def pytest_pyfunc_call(self):
                 return True

         config.pluginmanager.register(MyPlugin1())

@@ -1015,7 +1015,7 @@ class TestTracebackCutting:


 class TestReportInfo:
-    def test_itemreport_reportinfo(self, testdir, linecomp):
+    def test_itemreport_reportinfo(self, testdir):
         testdir.makeconftest(
             """
             import pytest

@@ -1716,138 +1716,6 @@ class TestAutouseDiscovery:
         reprec.assertoutcome(passed=3)


-class TestMultiLevelAutouseAndParameterization:
-    def test_setup_and_teardown_order(self, testdir):
-        """Tests that parameterized fixtures effect subsequent fixtures. (#6436)
-
-        If a fixture uses a parameterized fixture, or, for any other reason, is executed
-        after the parameterized fixture in the fixture stack, then it should be affected
-        by the parameterization, and as a result, should be torn down before the
-        parameterized fixture, every time the parameterized fixture is torn down. This
-        should be the case even if autouse is involved and/or the linear order of
-        fixture execution isn't deterministic. In other words, before any fixture can be
-        torn down, every fixture that was executed after it must also be torn down.
-        """
-        testdir.makepyfile(
-            test_auto="""
-            import pytest
-            def f(param):
-                return param
-            @pytest.fixture(scope="session", autouse=True)
-            def s_fix(request):
-                yield
-            @pytest.fixture(scope="package", params=["p1", "p2"], ids=f, autouse=True)
-            def p_fix(request):
-                yield
-            @pytest.fixture(scope="module", params=["m1", "m2"], ids=f, autouse=True)
-            def m_fix(request):
-                yield
-            @pytest.fixture(scope="class", autouse=True)
-            def another_c_fix(m_fix):
-                yield
-            @pytest.fixture(scope="class")
-            def c_fix():
-                yield
-            @pytest.fixture(scope="function", params=["f1", "f2"], ids=f, autouse=True)
-            def f_fix(request):
-                yield
-            class TestFixtures:
-                def test_a(self, c_fix):
-                    pass
-                def test_b(self, c_fix):
-                    pass
-            """
-        )
-        result = testdir.runpytest("--setup-plan")
-        test_fixtures_used = (
-            "(fixtures used: another_c_fix, c_fix, f_fix, m_fix, p_fix, request, s_fix)"
-        )
-        result.stdout.fnmatch_lines(
-            """
-            SETUP S s_fix
-            SETUP P p_fix[p1]
-            SETUP M m_fix[m1]
-            SETUP C another_c_fix (fixtures used: m_fix)
-            SETUP C c_fix
-            SETUP F f_fix[f1]
-            test_auto.py::TestFixtures::test_a[p1-m1-f1] {0}
-            TEARDOWN F f_fix[f1]
-            SETUP F f_fix[f2]
-            test_auto.py::TestFixtures::test_a[p1-m1-f2] {0}
-            TEARDOWN F f_fix[f2]
-            SETUP F f_fix[f1]
-            test_auto.py::TestFixtures::test_b[p1-m1-f1] {0}
-            TEARDOWN F f_fix[f1]
-            SETUP F f_fix[f2]
-            test_auto.py::TestFixtures::test_b[p1-m1-f2] {0}
-            TEARDOWN F f_fix[f2]
-            TEARDOWN C c_fix
-            TEARDOWN C another_c_fix
-            TEARDOWN M m_fix[m1]
-            SETUP M m_fix[m2]
-            SETUP C another_c_fix (fixtures used: m_fix)
-            SETUP C c_fix
-            SETUP F f_fix[f1]
-            test_auto.py::TestFixtures::test_a[p1-m2-f1] {0}
-            TEARDOWN F f_fix[f1]
-            SETUP F f_fix[f2]
-            test_auto.py::TestFixtures::test_a[p1-m2-f2] {0}
-            TEARDOWN F f_fix[f2]
-            SETUP F f_fix[f1]
-            test_auto.py::TestFixtures::test_b[p1-m2-f1] {0}
-            TEARDOWN F f_fix[f1]
-            SETUP F f_fix[f2]
-            test_auto.py::TestFixtures::test_b[p1-m2-f2] {0}
-            TEARDOWN F f_fix[f2]
-            TEARDOWN C c_fix
-            TEARDOWN C another_c_fix
-            TEARDOWN M m_fix[m2]
-            TEARDOWN P p_fix[p1]
-            SETUP P p_fix[p2]
-            SETUP M m_fix[m1]
-            SETUP C another_c_fix (fixtures used: m_fix)
-            SETUP C c_fix
-            SETUP F f_fix[f1]
-            test_auto.py::TestFixtures::test_a[p2-m1-f1] {0}
-            TEARDOWN F f_fix[f1]
-            SETUP F f_fix[f2]
-            test_auto.py::TestFixtures::test_a[p2-m1-f2] {0}
-            TEARDOWN F f_fix[f2]
-            SETUP F f_fix[f1]
-            test_auto.py::TestFixtures::test_b[p2-m1-f1] {0}
-            TEARDOWN F f_fix[f1]
-            SETUP F f_fix[f2]
-            test_auto.py::TestFixtures::test_b[p2-m1-f2] {0}
-            TEARDOWN F f_fix[f2]
-            TEARDOWN C c_fix
-            TEARDOWN C another_c_fix
-            TEARDOWN M m_fix[m1]
-            SETUP M m_fix[m2]
-            SETUP C another_c_fix (fixtures used: m_fix)
-            SETUP C c_fix
-            SETUP F f_fix[f1]
-            test_auto.py::TestFixtures::test_a[p2-m2-f1] {0}
-            TEARDOWN F f_fix[f1]
-            SETUP F f_fix[f2]
-            test_auto.py::TestFixtures::test_a[p2-m2-f2] {0}
-            TEARDOWN F f_fix[f2]
-            SETUP F f_fix[f1]
-            test_auto.py::TestFixtures::test_b[p2-m2-f1] {0}
-            TEARDOWN F f_fix[f1]
-            SETUP F f_fix[f2]
-            test_auto.py::TestFixtures::test_b[p2-m2-f2] {0}
-            TEARDOWN F f_fix[f2]
-            TEARDOWN C c_fix
-            TEARDOWN C another_c_fix
-            TEARDOWN M m_fix[m2]
-            TEARDOWN P p_fix[p2]
-            TEARDOWN S s_fix
-            """.format(
-                test_fixtures_used
-            )
-        )
-
-
 class TestAutouseManagement:
     def test_autouse_conftest_mid_directory(self, testdir):
         pkgdir = testdir.mkpydir("xyz123")

@@ -4238,7 +4106,7 @@ def test_fixture_named_request(testdir):
     )


-def test_fixture_duplicated_arguments(testdir):
+def test_fixture_duplicated_arguments():
     """Raise error if there are positional and keyword arguments for the same parameter (#1682)."""
     with pytest.raises(TypeError) as excinfo:

@@ -4253,7 +4121,7 @@ def test_fixture_duplicated_arguments(testdir):
     )


-def test_fixture_with_positionals(testdir):
+def test_fixture_with_positionals():
     """Raise warning, but the positionals should still works (#1682)."""
     from _pytest.deprecated import FIXTURE_POSITIONAL_ARGUMENTS

@@ -33,7 +33,7 @@ class TestMetafunc:
         definition = DefinitionMock._create(func)
         return python.Metafunc(definition, fixtureinfo, config)

-    def test_no_funcargs(self, testdir):
+    def test_no_funcargs(self):
         def function():
             pass

@@ -96,7 +96,7 @@ class TestMetafunc:
         ):
             metafunc.parametrize("x", [1, 2, 3], ids=gen())

-    def test_parametrize_bad_scope(self, testdir):
+    def test_parametrize_bad_scope(self):
         def func(x):
             pass

@@ -188,7 +188,7 @@ class TestMetafunc:
         ids = [x.id for x in metafunc._calls]
         assert ids == ["basic", "advanced"]

-    def test_parametrize_with_wrong_number_of_ids(self, testdir):
+    def test_parametrize_with_wrong_number_of_ids(self):
         def func(x, y):
             pass

@@ -712,7 +712,7 @@ class TestMetafunc:
         result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines(["*test_simple*a-b*", "*1 passed*"])

-    def test_parametrize_indirect_list_error(self, testdir):
+    def test_parametrize_indirect_list_error(self):
         """#714"""

         def func(x, y):

@@ -1299,7 +1299,7 @@ def test_AssertionError_message(testdir):
     )


-def test_diff_newline_at_end(monkeypatch, testdir):
+def test_diff_newline_at_end(testdir):
     testdir.makepyfile(
         r"""
         def test_diff():

@@ -1354,7 +1354,7 @@ def test_assert_indirect_tuple_no_warning(testdir):
     assert "WR1" not in output


-def test_assert_with_unicode(monkeypatch, testdir):
+def test_assert_with_unicode(testdir):
     testdir.makepyfile(
         """\
         def test_unicode():

@@ -268,9 +268,11 @@ class TestLastFailed:
                 "*1 failed*2 passed*",
             ]
         )
+        testdir.tmpdir.join(".pytest_cache").mkdir(".git")
         result = testdir.runpytest(str(p), "--lf", "--cache-clear")
         result.stdout.fnmatch_lines(["*1 failed*2 passed*"])
         assert testdir.tmpdir.join(".pytest_cache", "README.md").isfile()
+        assert testdir.tmpdir.join(".pytest_cache", ".git").isdir()

         # Run this again to make sure clear-cache is robust
         if os.path.isdir(".pytest_cache"):

@@ -960,7 +960,7 @@ class TestFDCapture:
         cap.done()
         assert s == "hello\n"

-    def test_stdin(self, tmpfile):
+    def test_stdin(self):
         cap = capture.FDCapture(0)
         cap.start()
         x = os.read(0, 100).strip()

@@ -981,7 +981,7 @@ class TestFDCapture:
         stmp = stmp_file.read()
         assert stmp == data2

-    def test_simple_resume_suspend(self, tmpfile):
+    def test_simple_resume_suspend(self):
         with saved_fd(1):
             cap = capture.FDCapture(1)
             cap.start()

@@ -243,7 +243,7 @@ class TestCollectPluginHookRelay:
         wascalled = []

         class Plugin:
-            def pytest_collect_file(self, path, parent):
+            def pytest_collect_file(self, path):
                 if not path.basename.startswith("."):
                     # Ignore hidden files, e.g. .testmondata.
                     wascalled.append(path)

@@ -257,7 +257,7 @@ class TestCollectPluginHookRelay:
         wascalled = []

         class Plugin:
-            def pytest_collect_directory(self, path, parent):
+            def pytest_collect_directory(self, path):
                 wascalled.append(path.basename)

         testdir.mkdir("hello")

@@ -1210,7 +1210,7 @@ def test_collect_symlink_out_of_tree(testdir):
     assert result.ret == 0


-def test_collectignore_via_conftest(testdir, monkeypatch):
+def test_collectignore_via_conftest(testdir):
     """collect_ignore in parent conftest skips importing child (issue #4592)."""
     tests = testdir.mkpydir("tests")
     tests.ensure("conftest.py").write("collect_ignore = ['ignore_me']")

@@ -32,7 +32,7 @@ class TestParseIni:
                 )
             )
         )
-        rootdir, inifile, cfg = getcfg([sub])
+        _, _, cfg = getcfg([sub])
         assert cfg["name"] == "value"
         config = testdir.parseconfigure(sub)
         assert config.inicfg["name"] == "value"

@@ -441,8 +441,6 @@ class TestConfigAPI:

 class TestConfigFromdictargs:
     def test_basic_behavior(self, _sys_snapshot):
-        from _pytest.config import Config
-
         option_dict = {"verbose": 444, "foo": "bar", "capture": "no"}
         args = ["a", "b"]

@@ -456,8 +454,6 @@ class TestConfigFromdictargs:

     def test_invocation_params_args(self, _sys_snapshot):
         """Show that fromdictargs can handle args in their "orig" format"""
-        from _pytest.config import Config
-
         option_dict = {}
         args = ["-vvvv", "-s", "a", "b"]

@@ -477,8 +473,6 @@ class TestConfigFromdictargs:
             )
         )

-        from _pytest.config import Config
-
         inifile = "../../foo/bar.ini"
         option_dict = {"inifilename": inifile, "capture": "no"}

@@ -771,23 +765,23 @@ def test_notify_exception(testdir, capfd):
     with pytest.raises(ValueError) as excinfo:
         raise ValueError(1)
     config.notify_exception(excinfo, config.option)
-    out, err = capfd.readouterr()
+    _, err = capfd.readouterr()
     assert "ValueError" in err

     class A:
-        def pytest_internalerror(self, excrepr):
+        def pytest_internalerror(self):
             return True

     config.pluginmanager.register(A())
     config.notify_exception(excinfo, config.option)
-    out, err = capfd.readouterr()
+    _, err = capfd.readouterr()
     assert not err

     config = testdir.parseconfig("-p", "no:terminal")
     with pytest.raises(ValueError) as excinfo:
         raise ValueError(1)
     config.notify_exception(excinfo, config.option)
-    out, err = capfd.readouterr()
+    _, err = capfd.readouterr()
     assert "ValueError" in err

@@ -797,7 +791,7 @@ def test_no_terminal_discovery_error(testdir):
     assert result.ret == ExitCode.INTERRUPTED


-def test_load_initial_conftest_last_ordering(testdir, _config_for_test):
+def test_load_initial_conftest_last_ordering(_config_for_test):
     pm = _config_for_test.pluginmanager

     class My:

@@ -866,21 +860,21 @@ class TestRootdir:
         a = tmpdir.mkdir("a")
         b = a.mkdir("b")
         for args in ([tmpdir], [a], [b]):
-            rootdir, inifile, inicfg = determine_setup(None, args)
+            rootdir, parsed_inifile, _ = determine_setup(None, args)
             assert rootdir == tmpdir
-            assert inifile == inifile
-        rootdir, inifile, inicfg = determine_setup(None, [b, a])
+            assert parsed_inifile == inifile
+        rootdir, parsed_inifile, _ = determine_setup(None, [b, a])
         assert rootdir == tmpdir
-        assert inifile == inifile
+        assert parsed_inifile == inifile

     @pytest.mark.parametrize("name", "setup.cfg tox.ini".split())
     def test_pytestini_overrides_empty_other(self, tmpdir, name) -> None:
         inifile = tmpdir.ensure("pytest.ini")
         a = tmpdir.mkdir("a")
         a.ensure(name)
-        rootdir, inifile, inicfg = determine_setup(None, [a])
+        rootdir, parsed_inifile, _ = determine_setup(None, [a])
         assert rootdir == tmpdir
-        assert inifile == inifile
+        assert parsed_inifile == inifile

     def test_setuppy_fallback(self, tmpdir) -> None:
         a = tmpdir.mkdir("a")

@@ -900,7 +894,7 @@ class TestRootdir:

     def test_with_specific_inifile(self, tmpdir) -> None:
         inifile = tmpdir.ensure("pytest.ini")
-        rootdir, inifile, inicfg = determine_setup(inifile, [tmpdir])
+        rootdir, _, _ = determine_setup(inifile, [tmpdir])
         assert rootdir == tmpdir


@@ -1043,7 +1037,7 @@ class TestOverrideIniArgs:
         monkeypatch.chdir(str(tmpdir))
         a = tmpdir.mkdir("a")
         b = tmpdir.mkdir("b")
-        rootdir, inifile, inicfg = determine_setup(None, [a, b])
+        rootdir, inifile, _ = determine_setup(None, [a, b])
         assert rootdir == tmpdir
         assert inifile is None

@@ -1051,14 +1045,14 @@ class TestOverrideIniArgs:
         a = tmpdir.mkdir("a")
         b = tmpdir.mkdir("b")
         inifile = a.ensure("pytest.ini")
-        rootdir, parsed_inifile, inicfg = determine_setup(None, [a, b])
+        rootdir, parsed_inifile, _ = determine_setup(None, [a, b])
         assert rootdir == a
         assert inifile == parsed_inifile

     @pytest.mark.parametrize("dirs", ([], ["does-not-exist"], ["a/does-not-exist"]))
     def test_with_non_dir_arg(self, dirs, tmpdir) -> None:
         with tmpdir.ensure(dir=True).as_cwd():
-            rootdir, inifile, inicfg = determine_setup(None, dirs)
+            rootdir, inifile, _ = determine_setup(None, dirs)
             assert rootdir == tmpdir
             assert inifile is None

@@ -1066,7 +1060,7 @@ class TestOverrideIniArgs:
         a = tmpdir.mkdir("a")
         a.ensure("exist")
         with tmpdir.as_cwd():
-            rootdir, inifile, inicfg = determine_setup(None, ["a/exist"])
+            rootdir, inifile, _ = determine_setup(None, ["a/exist"])
             assert rootdir == tmpdir
             assert inifile is None

@@ -1111,7 +1105,7 @@ class TestOverrideIniArgs:
         config._preparse(["-o", "cache_dir=/cache", "/some/test/path"])
         assert config._override_ini == ["cache_dir=/cache"]

-    def test_multiple_override_ini_options(self, testdir, request):
+    def test_multiple_override_ini_options(self, testdir):
         """Ensure a file path following a '-o' option does not generate an error (#3103)"""
         testdir.makepyfile(
             **{

@@ -1201,7 +1195,7 @@ def test_help_and_version_after_argument_error(testdir):
     assert result.ret == ExitCode.USAGE_ERROR


-def test_help_formatter_uses_py_get_terminal_width(testdir, monkeypatch):
+def test_help_formatter_uses_py_get_terminal_width(monkeypatch):
     from _pytest.config.argparsing import DropShorterLongHelpFormatter

     monkeypatch.setenv("COLUMNS", "90")

@@ -357,7 +357,7 @@ def test_conftest_import_order(testdir, monkeypatch):
     assert conftest._getconftestmodules(sub) == [ct1, ct2]


-def test_fixture_dependency(testdir, monkeypatch):
+def test_fixture_dependency(testdir):
     ct1 = testdir.makeconftest("")
     ct1 = testdir.makepyfile("__init__.py")
     ct1.write("")

@@ -463,7 +463,7 @@ class TestPDB:
         child.read()
         self.flush(child)

-    def test_pdb_interaction_doctest(self, testdir, monkeypatch):
+    def test_pdb_interaction_doctest(self, testdir):
         p1 = testdir.makepyfile(
             """
             def function_1():

@@ -489,7 +489,7 @@ class TestPDB:
         assert "1 failed" in rest
         self.flush(child)

-    def test_doctest_set_trace_quit(self, testdir, monkeypatch):
+    def test_doctest_set_trace_quit(self, testdir):
         p1 = testdir.makepyfile(
             """
             def function_1():

@@ -833,7 +833,7 @@ class TestPDB:
         ]
     )

-    def test_pdb_validate_usepdb_cls(self, testdir):
+    def test_pdb_validate_usepdb_cls(self):
         assert _validate_usepdb_cls("os.path:dirname.__name__") == (
             "os.path",
             "dirname.__name__",

@@ -82,7 +82,7 @@ def test_timeout(testdir, enabled):


 @pytest.mark.parametrize("hook_name", ["pytest_enter_pdb", "pytest_exception_interact"])
-def test_cancel_timeout_on_hook(monkeypatch, pytestconfig, hook_name):
+def test_cancel_timeout_on_hook(monkeypatch, hook_name):
     """Make sure that we are cancelling any scheduled traceback dumping due
     to timeout before entering pdb (pytest-dev/pytest-faulthandler#12) or any other interactive
     exception (pytest-dev/pytest-faulthandler#14).

@@ -57,7 +57,7 @@ def test_traceconfig(testdir):
     result.stdout.fnmatch_lines(["*using*pytest*py*", "*active plugins*"])


-def test_debug(testdir, monkeypatch):
+def test_debug(testdir):
     result = testdir.runpytest_subprocess("--debug")
     assert result.ret == ExitCode.NO_TESTS_COLLECTED
     p = testdir.tmpdir.join("pytestdebug.log")

@@ -41,7 +41,7 @@ class TestMark:
         mark._some_name


-def test_marked_class_run_twice(testdir, request):
+def test_marked_class_run_twice(testdir):
     """Test fails file is run twice that contains marked class.
     See issue#683.
     """

@@ -375,3 +375,17 @@ def test_skip_test_with_unicode(testdir):
     )
     result = testdir.runpytest()
     result.stdout.fnmatch_lines(["* 1 skipped *"])
+
+
+def test_issue_6517(testdir):
+    testdir.makepyfile(
+        """
+        from nose.tools import raises
+
+        @raises(RuntimeError)
+        def test_fail_without_tcp():
+            raise RuntimeError
+        """
+    )
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines(["* 1 passed *"])

@@ -251,14 +251,14 @@ class TestParser:
         assert args.func_arg is False
         assert args.file_or_dir == ["abcd"]

-    def test_drop_short_help0(self, parser: parseopt.Parser, capsys) -> None:
+    def test_drop_short_help0(self, parser: parseopt.Parser) -> None:
         parser.addoption("--func-args", "--doit", help="foo", action="store_true")
         parser.parse([])
         help = parser.optparser.format_help()
         assert "--func-args, --doit foo" in help

     # testing would be more helpful with all help generated
-    def test_drop_short_help1(self, parser: parseopt.Parser, capsys) -> None:
+    def test_drop_short_help1(self, parser: parseopt.Parser) -> None:
         group = parser.getgroup("general")
         group.addoption("--doit", "--func-args", action="store_true", help="foo")
         group._addoption(

@@ -71,7 +71,7 @@ class TestPytestPluginInteractions:
         values = []

         class A:
-            def pytest_configure(self, config):
+            def pytest_configure(self):
                 values.append(self)

         config.pluginmanager.register(A())

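Most of these test changes simply drop hook and fixture parameters that were never used; pluggy calls a hookimpl with only the arguments it declares, so trimming them is safe. A standalone sketch of that behavior using pluggy's public API:

import pluggy

hookspec = pluggy.HookspecMarker("demo")
hookimpl = pluggy.HookimplMarker("demo")


class Spec:
    @hookspec
    def myhook(self, config, items):
        pass


class Impl:
    @hookimpl
    def myhook(self, items):  # declares only the argument it needs
        return len(items)


pm = pluggy.PluginManager("demo")
pm.add_hookspecs(Spec)
pm.register(Impl())
print(pm.hook.myhook(config=None, items=[1, 2, 3]))  # -> [3]
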
@@ -2,6 +2,7 @@ import os
 import subprocess
 import sys
 import time
+from typing import List
 
 import py.path
 
@@ -9,6 +10,7 @@ import _pytest.pytester as pytester
 import pytest
 from _pytest.config import PytestPluginManager
 from _pytest.main import ExitCode
+from _pytest.outcomes import Failed
 from _pytest.pytester import CwdSnapshot
 from _pytest.pytester import HookRecorder
 from _pytest.pytester import LineMatcher
@@ -16,7 +18,7 @@ from _pytest.pytester import SysModulesSnapshot
 from _pytest.pytester import SysPathsSnapshot
 
 
-def test_make_hook_recorder(testdir):
+def test_make_hook_recorder(testdir) -> None:
     item = testdir.getitem("def test_func(): pass")
     recorder = testdir.make_hook_recorder(item.config.pluginmanager)
     assert not recorder.getfailures()
@@ -36,23 +38,23 @@ def test_make_hook_recorder(testdir):
     failures = recorder.getfailures()
     assert failures == [rep]
 
-    class rep:
+    class rep2:
         excinfo = None
         passed = False
         failed = False
         skipped = True
         when = "call"
 
-    rep.passed = False
-    rep.skipped = True
-    recorder.hook.pytest_runtest_logreport(report=rep)
+    rep2.passed = False
+    rep2.skipped = True
+    recorder.hook.pytest_runtest_logreport(report=rep2)
 
     modcol = testdir.getmodulecol("")
-    rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
-    rep.passed = False
-    rep.failed = True
-    rep.skipped = False
-    recorder.hook.pytest_collectreport(report=rep)
+    rep3 = modcol.config.hook.pytest_make_collect_report(collector=modcol)
+    rep3.passed = False
+    rep3.failed = True
+    rep3.skipped = False
+    recorder.hook.pytest_collectreport(report=rep3)
 
     passed, skipped, failed = recorder.listoutcomes()
     assert not passed and skipped and failed
@@ -65,17 +67,17 @@ def test_make_hook_recorder(testdir):
 
     recorder.unregister()
     recorder.clear()
-    recorder.hook.pytest_runtest_logreport(report=rep)
+    recorder.hook.pytest_runtest_logreport(report=rep3)
     pytest.raises(ValueError, recorder.getfailures)
 
 
-def test_parseconfig(testdir):
+def test_parseconfig(testdir) -> None:
     config1 = testdir.parseconfig()
     config2 = testdir.parseconfig()
     assert config2 is not config1
 
 
-def test_testdir_runs_with_plugin(testdir):
+def test_testdir_runs_with_plugin(testdir) -> None:
     testdir.makepyfile(
         """
         pytest_plugins = "pytester"
@@ -87,7 +89,7 @@ def test_testdir_runs_with_plugin(testdir):
     result.assert_outcomes(passed=1)
 
 
-def test_runresult_assertion_on_xfail(testdir):
+def test_runresult_assertion_on_xfail(testdir) -> None:
     testdir.makepyfile(
         """
         import pytest
@@ -104,7 +106,7 @@ def test_runresult_assertion_on_xfail(testdir):
     assert result.ret == 0
 
 
-def test_runresult_assertion_on_xpassed(testdir):
+def test_runresult_assertion_on_xpassed(testdir) -> None:
     testdir.makepyfile(
         """
         import pytest
@@ -121,7 +123,7 @@ def test_runresult_assertion_on_xpassed(testdir):
     assert result.ret == 0
 
 
-def test_xpassed_with_strict_is_considered_a_failure(testdir):
+def test_xpassed_with_strict_is_considered_a_failure(testdir) -> None:
     testdir.makepyfile(
         """
         import pytest
@@ -154,13 +156,13 @@ def make_holder():
     def pytest_xyz_noarg():
         "x"
 
-    apimod.pytest_xyz = pytest_xyz
-    apimod.pytest_xyz_noarg = pytest_xyz_noarg
+    apimod.pytest_xyz = pytest_xyz  # type: ignore
+    apimod.pytest_xyz_noarg = pytest_xyz_noarg  # type: ignore
     return apiclass, apimod
 
 
 @pytest.mark.parametrize("holder", make_holder())
-def test_hookrecorder_basic(holder):
+def test_hookrecorder_basic(holder) -> None:
     pm = PytestPluginManager()
     pm.add_hookspecs(holder)
     rec = HookRecorder(pm)
@@ -168,17 +170,17 @@ def test_hookrecorder_basic(holder):
     call = rec.popcall("pytest_xyz")
     assert call.arg == 123
     assert call._name == "pytest_xyz"
-    pytest.raises(pytest.fail.Exception, rec.popcall, "abc")
+    pytest.raises(Failed, rec.popcall, "abc")
     pm.hook.pytest_xyz_noarg()
     call = rec.popcall("pytest_xyz_noarg")
     assert call._name == "pytest_xyz_noarg"
 
 
-def test_makepyfile_unicode(testdir):
+def test_makepyfile_unicode(testdir) -> None:
     testdir.makepyfile(chr(0xFFFD))
 
 
-def test_makepyfile_utf8(testdir):
+def test_makepyfile_utf8(testdir) -> None:
     """Ensure makepyfile accepts utf-8 bytes as input (#2738)"""
     utf8_contents = """
     def setup_function(function):
@@ -189,7 +191,7 @@ def test_makepyfile_utf8(testdir):
 
 
 class TestInlineRunModulesCleanup:
-    def test_inline_run_test_module_not_cleaned_up(self, testdir):
+    def test_inline_run_test_module_not_cleaned_up(self, testdir) -> None:
         test_mod = testdir.makepyfile("def test_foo(): assert True")
         result = testdir.inline_run(str(test_mod))
         assert result.ret == ExitCode.OK
@@ -200,9 +202,9 @@ class TestInlineRunModulesCleanup:
 
     def spy_factory(self):
         class SysModulesSnapshotSpy:
-            instances = []
+            instances = []  # type: List[SysModulesSnapshotSpy]
 
-            def __init__(self, preserve=None):
+            def __init__(self, preserve=None) -> None:
                 SysModulesSnapshotSpy.instances.append(self)
                 self._spy_restore_count = 0
                 self._spy_preserve = preserve
@@ -216,7 +218,7 @@ class TestInlineRunModulesCleanup:
 
     def test_inline_run_taking_and_restoring_a_sys_modules_snapshot(
         self, testdir, monkeypatch
-    ):
+    ) -> None:
         spy_factory = self.spy_factory()
         monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory)
         testdir.syspathinsert()
@@ -237,7 +239,7 @@ class TestInlineRunModulesCleanup:
 
     def test_inline_run_sys_modules_snapshot_restore_preserving_modules(
         self, testdir, monkeypatch
-    ):
+    ) -> None:
         spy_factory = self.spy_factory()
         monkeypatch.setattr(pytester, "SysModulesSnapshot", spy_factory)
         test_mod = testdir.makepyfile("def test_foo(): pass")
@@ -248,7 +250,7 @@ class TestInlineRunModulesCleanup:
         assert spy._spy_preserve("zope.interface")
         assert spy._spy_preserve("zopelicious")
 
-    def test_external_test_module_imports_not_cleaned_up(self, testdir):
+    def test_external_test_module_imports_not_cleaned_up(self, testdir) -> None:
         testdir.syspathinsert()
         testdir.makepyfile(imported="data = 'you son of a silly person'")
         import imported
@@ -263,7 +265,7 @@ class TestInlineRunModulesCleanup:
         assert imported.data == 42
 
 
-def test_assert_outcomes_after_pytest_error(testdir):
+def test_assert_outcomes_after_pytest_error(testdir) -> None:
     testdir.makepyfile("def test_foo(): assert True")
 
     result = testdir.runpytest("--unexpected-argument")
@@ -271,7 +273,7 @@ def test_assert_outcomes_after_pytest_error(testdir):
     result.assert_outcomes(passed=0)
 
 
-def test_cwd_snapshot(tmpdir):
+def test_cwd_snapshot(tmpdir) -> None:
     foo = tmpdir.ensure("foo", dir=1)
     bar = tmpdir.ensure("bar", dir=1)
     foo.chdir()
@@ -285,16 +287,16 @@ def test_cwd_snapshot(tmpdir):
 class TestSysModulesSnapshot:
     key = "my-test-module"
 
-    def test_remove_added(self):
+    def test_remove_added(self) -> None:
         original = dict(sys.modules)
         assert self.key not in sys.modules
         snapshot = SysModulesSnapshot()
-        sys.modules[self.key] = "something"
+        sys.modules[self.key] = "something"  # type: ignore
         assert self.key in sys.modules
         snapshot.restore()
         assert sys.modules == original
 
-    def test_add_removed(self, monkeypatch):
+    def test_add_removed(self, monkeypatch) -> None:
         assert self.key not in sys.modules
         monkeypatch.setitem(sys.modules, self.key, "something")
         assert self.key in sys.modules
@@ -305,17 +307,17 @@ class TestSysModulesSnapshot:
         snapshot.restore()
         assert sys.modules == original
 
-    def test_restore_reloaded(self, monkeypatch):
+    def test_restore_reloaded(self, monkeypatch) -> None:
         assert self.key not in sys.modules
         monkeypatch.setitem(sys.modules, self.key, "something")
         assert self.key in sys.modules
         original = dict(sys.modules)
         snapshot = SysModulesSnapshot()
-        sys.modules[self.key] = "something else"
+        sys.modules[self.key] = "something else"  # type: ignore
         snapshot.restore()
         assert sys.modules == original
 
-    def test_preserve_modules(self, monkeypatch):
+    def test_preserve_modules(self, monkeypatch) -> None:
         key = [self.key + str(i) for i in range(3)]
         assert not any(k in sys.modules for k in key)
         for i, k in enumerate(key):
@@ -326,17 +328,17 @@ class TestSysModulesSnapshot:
             return name in (key[0], key[1], "some-other-key")
 
         snapshot = SysModulesSnapshot(preserve=preserve)
-        sys.modules[key[0]] = original[key[0]] = "something else0"
-        sys.modules[key[1]] = original[key[1]] = "something else1"
-        sys.modules[key[2]] = "something else2"
+        sys.modules[key[0]] = original[key[0]] = "something else0"  # type: ignore
+        sys.modules[key[1]] = original[key[1]] = "something else1"  # type: ignore
+        sys.modules[key[2]] = "something else2"  # type: ignore
         snapshot.restore()
         assert sys.modules == original
 
-    def test_preserve_container(self, monkeypatch):
+    def test_preserve_container(self, monkeypatch) -> None:
         original = dict(sys.modules)
         assert self.key not in original
         replacement = dict(sys.modules)
-        replacement[self.key] = "life of brian"
+        replacement[self.key] = "life of brian"  # type: ignore
         snapshot = SysModulesSnapshot()
         monkeypatch.setattr(sys, "modules", replacement)
         snapshot.restore()
@@ -349,10 +351,10 @@ class TestSysPathsSnapshot:
     other_path = {"path": "meta_path", "meta_path": "path"}
 
     @staticmethod
-    def path(n):
+    def path(n: int) -> str:
         return "my-dirty-little-secret-" + str(n)
 
-    def test_restore(self, monkeypatch, path_type):
+    def test_restore(self, monkeypatch, path_type) -> None:
         other_path_type = self.other_path[path_type]
         for i in range(10):
             assert self.path(i) not in getattr(sys, path_type)
@@ -375,12 +377,12 @@ class TestSysPathsSnapshot:
         assert getattr(sys, path_type) == original
         assert getattr(sys, other_path_type) == original_other
 
-    def test_preserve_container(self, monkeypatch, path_type):
+    def test_preserve_container(self, monkeypatch, path_type) -> None:
         other_path_type = self.other_path[path_type]
         original_data = list(getattr(sys, path_type))
         original_other = getattr(sys, other_path_type)
         original_other_data = list(original_other)
-        new = []
+        new = []  # type: List[object]
         snapshot = SysPathsSnapshot()
         monkeypatch.setattr(sys, path_type, new)
         snapshot.restore()
@@ -390,7 +392,7 @@ class TestSysPathsSnapshot:
         assert getattr(sys, other_path_type) == original_other_data
 
 
-def test_testdir_subprocess(testdir):
+def test_testdir_subprocess(testdir) -> None:
     testfile = testdir.makepyfile("def test_one(): pass")
     assert testdir.runpytest_subprocess(testfile).ret == 0
 
@@ -416,17 +418,17 @@ def test_testdir_subprocess_via_runpytest_arg(testdir) -> None:
     assert result.ret == 0
 
 
-def test_unicode_args(testdir):
+def test_unicode_args(testdir) -> None:
     result = testdir.runpytest("-k", "💩")
     assert result.ret == ExitCode.NO_TESTS_COLLECTED
 
 
-def test_testdir_run_no_timeout(testdir):
+def test_testdir_run_no_timeout(testdir) -> None:
     testfile = testdir.makepyfile("def test_no_timeout(): pass")
     assert testdir.runpytest_subprocess(testfile).ret == ExitCode.OK
 
 
-def test_testdir_run_with_timeout(testdir):
+def test_testdir_run_with_timeout(testdir) -> None:
     testfile = testdir.makepyfile("def test_no_timeout(): pass")
 
     timeout = 120
@@ -440,7 +442,7 @@ def test_testdir_run_with_timeout(testdir):
     assert duration < timeout
 
 
-def test_testdir_run_timeout_expires(testdir):
+def test_testdir_run_timeout_expires(testdir) -> None:
     testfile = testdir.makepyfile(
         """
         import time
@@ -452,7 +454,7 @@ def test_testdir_run_timeout_expires(testdir):
         testdir.runpytest_subprocess(testfile, timeout=1)
 
 
-def test_linematcher_with_nonlist():
+def test_linematcher_with_nonlist() -> None:
     """Test LineMatcher with regard to passing in a set (accidentally)."""
     lm = LineMatcher([])
 
@@ -467,10 +469,11 @@ def test_linematcher_with_nonlist():
     assert lm._getlines(set()) == set()
 
 
-def test_linematcher_match_failure():
+def test_linematcher_match_failure() -> None:
     lm = LineMatcher(["foo", "foo", "bar"])
-    with pytest.raises(pytest.fail.Exception) as e:
+    with pytest.raises(Failed) as e:
         lm.fnmatch_lines(["foo", "f*", "baz"])
+    assert e.value.msg is not None
     assert e.value.msg.splitlines() == [
         "exact match: 'foo'",
         "fnmatch: 'f*'",
@@ -481,8 +484,9 @@ def test_linematcher_match_failure():
     ]
 
     lm = LineMatcher(["foo", "foo", "bar"])
-    with pytest.raises(pytest.fail.Exception) as e:
+    with pytest.raises(Failed) as e:
         lm.re_match_lines(["foo", "^f.*", "baz"])
+    assert e.value.msg is not None
     assert e.value.msg.splitlines() == [
         "exact match: 'foo'",
         "re.match: '^f.*'",
@@ -494,7 +498,7 @@ def test_linematcher_match_failure():
 
 
 @pytest.mark.parametrize("function", ["no_fnmatch_line", "no_re_match_line"])
-def test_no_matching(function):
+def test_no_matching(function) -> None:
     if function == "no_fnmatch_line":
         good_pattern = "*.py OK*"
         bad_pattern = "*X.py OK*"
@@ -515,7 +519,7 @@ def test_no_matching(function):
 
     # check the function twice to ensure we don't accumulate the internal buffer
     for i in range(2):
-        with pytest.raises(pytest.fail.Exception) as e:
+        with pytest.raises(Failed) as e:
             func = getattr(lm, function)
             func(good_pattern)
         obtained = str(e.value).splitlines()
@@ -542,15 +546,15 @@ def test_no_matching(function):
     func(bad_pattern)  # bad pattern does not match any line: passes
 
 
-def test_no_matching_after_match():
+def test_no_matching_after_match() -> None:
     lm = LineMatcher(["1", "2", "3"])
     lm.fnmatch_lines(["1", "3"])
-    with pytest.raises(pytest.fail.Exception) as e:
+    with pytest.raises(Failed) as e:
         lm.no_fnmatch_line("*")
     assert str(e.value).splitlines() == ["fnmatch: '*'", " with: '1'"]
 
 
-def test_pytester_addopts_before_testdir(request, monkeypatch):
+def test_pytester_addopts_before_testdir(request, monkeypatch) -> None:
     orig = os.environ.get("PYTEST_ADDOPTS", None)
     monkeypatch.setenv("PYTEST_ADDOPTS", "--orig-unused")
     testdir = request.getfixturevalue("testdir")
@@ -561,7 +565,7 @@ def test_pytester_addopts_before_testdir(request, monkeypatch):
     assert os.environ.get("PYTEST_ADDOPTS") == orig
 
 
-def test_run_stdin(testdir):
+def test_run_stdin(testdir) -> None:
    with pytest.raises(testdir.TimeoutExpired):
        testdir.run(
            sys.executable,
@@ -591,7 +595,7 @@ def test_run_stdin(testdir):
     assert result.ret == 0
 
 
-def test_popen_stdin_pipe(testdir):
+def test_popen_stdin_pipe(testdir) -> None:
     proc = testdir.popen(
         [sys.executable, "-c", "import sys; print(sys.stdin.read())"],
         stdout=subprocess.PIPE,
@@ -605,7 +609,7 @@ def test_popen_stdin_pipe(testdir):
     assert proc.returncode == 0
 
 
-def test_popen_stdin_bytes(testdir):
+def test_popen_stdin_bytes(testdir) -> None:
     proc = testdir.popen(
         [sys.executable, "-c", "import sys; print(sys.stdin.read())"],
         stdout=subprocess.PIPE,
@@ -618,7 +622,7 @@ def test_popen_stdin_bytes(testdir):
     assert proc.returncode == 0
 
 
-def test_popen_default_stdin_stderr_and_stdin_None(testdir):
+def test_popen_default_stdin_stderr_and_stdin_None(testdir) -> None:
     # stdout, stderr default to pipes,
     # stdin can be None to not close the pipe, avoiding
     # "ValueError: flush of closed file" with `communicate()`.
@@ -637,7 +641,7 @@ def test_popen_default_stdin_stderr_and_stdin_None(testdir):
     assert proc.returncode == 0
 
 
-def test_spawn_uses_tmphome(testdir):
+def test_spawn_uses_tmphome(testdir) -> None:
     tmphome = str(testdir.tmpdir)
     assert os.environ.get("HOME") == tmphome
 
@@ -659,7 +663,7 @@ def test_spawn_uses_tmphome(testdir):
     assert child.wait() == 0, out.decode("utf8")
 
 
-def test_run_result_repr():
+def test_run_result_repr() -> None:
     outlines = ["some", "normal", "output"]
     errlines = ["some", "nasty", "errors", "happened"]

@@ -1,17 +1,19 @@
 import re
 import warnings
+from typing import Optional
 
 import pytest
+from _pytest.outcomes import Failed
 from _pytest.recwarn import WarningsRecorder
 
 
-def test_recwarn_stacklevel(recwarn):
+def test_recwarn_stacklevel(recwarn: WarningsRecorder) -> None:
     warnings.warn("hello")
     warn = recwarn.pop()
     assert warn.filename == __file__
 
 
-def test_recwarn_functional(testdir):
+def test_recwarn_functional(testdir) -> None:
     testdir.makepyfile(
         """
         import warnings
@@ -26,7 +28,7 @@ def test_recwarn_functional(testdir):
 
 
 class TestWarningsRecorderChecker:
-    def test_recording(self):
+    def test_recording(self) -> None:
         rec = WarningsRecorder()
         with rec:
             assert not rec.list
@@ -42,23 +44,23 @@ class TestWarningsRecorderChecker:
             assert values is rec.list
             pytest.raises(AssertionError, rec.pop)
 
-    def test_warn_stacklevel(self):
+    def test_warn_stacklevel(self) -> None:
         """#4243"""
         rec = WarningsRecorder()
         with rec:
             warnings.warn("test", DeprecationWarning, 2)
 
-    def test_typechecking(self):
+    def test_typechecking(self) -> None:
         from _pytest.recwarn import WarningsChecker
 
         with pytest.raises(TypeError):
-            WarningsChecker(5)
+            WarningsChecker(5)  # type: ignore
         with pytest.raises(TypeError):
-            WarningsChecker(("hi", RuntimeWarning))
+            WarningsChecker(("hi", RuntimeWarning))  # type: ignore
         with pytest.raises(TypeError):
-            WarningsChecker([DeprecationWarning, RuntimeWarning])
+            WarningsChecker([DeprecationWarning, RuntimeWarning])  # type: ignore
 
-    def test_invalid_enter_exit(self):
+    def test_invalid_enter_exit(self) -> None:
         # wrap this test in WarningsRecorder to ensure warning state gets reset
         with WarningsRecorder():
             with pytest.raises(RuntimeError):
@@ -75,50 +77,52 @@ class TestWarningsRecorderChecker:
 class TestDeprecatedCall:
     """test pytest.deprecated_call()"""
 
-    def dep(self, i, j=None):
+    def dep(self, i: int, j: Optional[int] = None) -> int:
         if i == 0:
             warnings.warn("is deprecated", DeprecationWarning, stacklevel=1)
         return 42
 
-    def dep_explicit(self, i):
+    def dep_explicit(self, i: int) -> None:
         if i == 0:
             warnings.warn_explicit(
                 "dep_explicit", category=DeprecationWarning, filename="hello", lineno=3
             )
 
-    def test_deprecated_call_raises(self):
-        with pytest.raises(pytest.fail.Exception, match="No warnings of type"):
+    def test_deprecated_call_raises(self) -> None:
+        with pytest.raises(Failed, match="No warnings of type"):
             pytest.deprecated_call(self.dep, 3, 5)
 
-    def test_deprecated_call(self):
+    def test_deprecated_call(self) -> None:
         pytest.deprecated_call(self.dep, 0, 5)
 
-    def test_deprecated_call_ret(self):
+    def test_deprecated_call_ret(self) -> None:
         ret = pytest.deprecated_call(self.dep, 0)
         assert ret == 42
 
-    def test_deprecated_call_preserves(self):
-        onceregistry = warnings.onceregistry.copy()
-        filters = warnings.filters[:]
+    def test_deprecated_call_preserves(self) -> None:
+        # Type ignored because `onceregistry` and `filters` are not
+        # documented API.
+        onceregistry = warnings.onceregistry.copy()  # type: ignore
+        filters = warnings.filters[:]  # type: ignore
         warn = warnings.warn
         warn_explicit = warnings.warn_explicit
         self.test_deprecated_call_raises()
         self.test_deprecated_call()
-        assert onceregistry == warnings.onceregistry
-        assert filters == warnings.filters
+        assert onceregistry == warnings.onceregistry  # type: ignore
+        assert filters == warnings.filters  # type: ignore
         assert warn is warnings.warn
         assert warn_explicit is warnings.warn_explicit
 
-    def test_deprecated_explicit_call_raises(self):
-        with pytest.raises(pytest.fail.Exception):
+    def test_deprecated_explicit_call_raises(self) -> None:
+        with pytest.raises(Failed):
             pytest.deprecated_call(self.dep_explicit, 3)
 
-    def test_deprecated_explicit_call(self):
+    def test_deprecated_explicit_call(self) -> None:
         pytest.deprecated_call(self.dep_explicit, 0)
         pytest.deprecated_call(self.dep_explicit, 0)
 
     @pytest.mark.parametrize("mode", ["context_manager", "call"])
-    def test_deprecated_call_no_warning(self, mode):
+    def test_deprecated_call_no_warning(self, mode) -> None:
         """Ensure deprecated_call() raises the expected failure when its block/function does
         not raise a deprecation warning.
         """
@@ -127,7 +131,7 @@ class TestDeprecatedCall:
             pass
 
         msg = "No warnings of type (.*DeprecationWarning.*, .*PendingDeprecationWarning.*)"
-        with pytest.raises(pytest.fail.Exception, match=msg):
+        with pytest.raises(Failed, match=msg):
             if mode == "call":
                 pytest.deprecated_call(f)
             else:
@@ -140,7 +144,7 @@ class TestDeprecatedCall:
     @pytest.mark.parametrize("mode", ["context_manager", "call"])
     @pytest.mark.parametrize("call_f_first", [True, False])
     @pytest.mark.filterwarnings("ignore")
-    def test_deprecated_call_modes(self, warning_type, mode, call_f_first):
+    def test_deprecated_call_modes(self, warning_type, mode, call_f_first) -> None:
         """Ensure deprecated_call() captures a deprecation warning as expected inside its
         block/function.
         """
@@ -159,7 +163,7 @@ class TestDeprecatedCall:
         assert f() == 10
 
     @pytest.mark.parametrize("mode", ["context_manager", "call"])
-    def test_deprecated_call_exception_is_raised(self, mode):
+    def test_deprecated_call_exception_is_raised(self, mode) -> None:
         """If the block of the code being tested by deprecated_call() raises an exception,
         it must raise the exception undisturbed.
         """
@@ -174,7 +178,7 @@ class TestDeprecatedCall:
             with pytest.deprecated_call():
                 f()
 
-    def test_deprecated_call_specificity(self):
+    def test_deprecated_call_specificity(self) -> None:
         other_warnings = [
             Warning,
             UserWarning,
@@ -189,40 +193,40 @@ class TestDeprecatedCall:
             def f():
                 warnings.warn(warning("hi"))
 
-            with pytest.raises(pytest.fail.Exception):
+            with pytest.raises(Failed):
                 pytest.deprecated_call(f)
-            with pytest.raises(pytest.fail.Exception):
+            with pytest.raises(Failed):
                 with pytest.deprecated_call():
                     f()
 
-    def test_deprecated_call_supports_match(self):
+    def test_deprecated_call_supports_match(self) -> None:
         with pytest.deprecated_call(match=r"must be \d+$"):
             warnings.warn("value must be 42", DeprecationWarning)
 
-        with pytest.raises(pytest.fail.Exception):
+        with pytest.raises(Failed):
             with pytest.deprecated_call(match=r"must be \d+$"):
                 warnings.warn("this is not here", DeprecationWarning)
 
 
 class TestWarns:
-    def test_check_callable(self):
+    def test_check_callable(self) -> None:
         source = "warnings.warn('w1', RuntimeWarning)"
         with pytest.raises(TypeError, match=r".* must be callable"):
-            pytest.warns(RuntimeWarning, source)
+            pytest.warns(RuntimeWarning, source)  # type: ignore
 
-    def test_several_messages(self):
+    def test_several_messages(self) -> None:
         # different messages, b/c Python suppresses multiple identical warnings
         pytest.warns(RuntimeWarning, lambda: warnings.warn("w1", RuntimeWarning))
-        with pytest.raises(pytest.fail.Exception):
+        with pytest.raises(Failed):
             pytest.warns(UserWarning, lambda: warnings.warn("w2", RuntimeWarning))
         pytest.warns(RuntimeWarning, lambda: warnings.warn("w3", RuntimeWarning))
 
-    def test_function(self):
+    def test_function(self) -> None:
         pytest.warns(
             SyntaxWarning, lambda msg: warnings.warn(msg, SyntaxWarning), "syntax"
         )
 
-    def test_warning_tuple(self):
+    def test_warning_tuple(self) -> None:
         pytest.warns(
             (RuntimeWarning, SyntaxWarning), lambda: warnings.warn("w1", RuntimeWarning)
         )
@@ -230,21 +234,21 @@ class TestWarns:
             (RuntimeWarning, SyntaxWarning), lambda: warnings.warn("w2", SyntaxWarning)
         )
         pytest.raises(
-            pytest.fail.Exception,
+            Failed,
             lambda: pytest.warns(
                 (RuntimeWarning, SyntaxWarning),
                 lambda: warnings.warn("w3", UserWarning),
             ),
         )
 
-    def test_as_contextmanager(self):
+    def test_as_contextmanager(self) -> None:
         with pytest.warns(RuntimeWarning):
             warnings.warn("runtime", RuntimeWarning)
 
         with pytest.warns(UserWarning):
             warnings.warn("user", UserWarning)
 
-        with pytest.raises(pytest.fail.Exception) as excinfo:
+        with pytest.raises(Failed) as excinfo:
             with pytest.warns(RuntimeWarning):
                 warnings.warn("user", UserWarning)
         excinfo.match(
@@ -252,7 +256,7 @@ class TestWarns:
             r"The list of emitted warnings is: \[UserWarning\('user',?\)\]."
         )
 
-        with pytest.raises(pytest.fail.Exception) as excinfo:
+        with pytest.raises(Failed) as excinfo:
             with pytest.warns(UserWarning):
                 warnings.warn("runtime", RuntimeWarning)
         excinfo.match(
@@ -260,7 +264,7 @@ class TestWarns:
             r"The list of emitted warnings is: \[RuntimeWarning\('runtime',?\)\]."
         )
 
-        with pytest.raises(pytest.fail.Exception) as excinfo:
+        with pytest.raises(Failed) as excinfo:
             with pytest.warns(UserWarning):
                 pass
         excinfo.match(
@@ -269,7 +273,7 @@ class TestWarns:
         )
 
         warning_classes = (UserWarning, FutureWarning)
-        with pytest.raises(pytest.fail.Exception) as excinfo:
+        with pytest.raises(Failed) as excinfo:
             with pytest.warns(warning_classes) as warninfo:
                 warnings.warn("runtime", RuntimeWarning)
                 warnings.warn("import", ImportWarning)
@@ -286,14 +290,14 @@ class TestWarns:
             )
         )
 
-    def test_record(self):
+    def test_record(self) -> None:
         with pytest.warns(UserWarning) as record:
             warnings.warn("user", UserWarning)
 
         assert len(record) == 1
         assert str(record[0].message) == "user"
 
-    def test_record_only(self):
+    def test_record_only(self) -> None:
         with pytest.warns(None) as record:
             warnings.warn("user", UserWarning)
             warnings.warn("runtime", RuntimeWarning)
@@ -302,7 +306,7 @@ class TestWarns:
         assert str(record[0].message) == "user"
         assert str(record[1].message) == "runtime"
 
-    def test_record_by_subclass(self):
+    def test_record_by_subclass(self) -> None:
         with pytest.warns(Warning) as record:
             warnings.warn("user", UserWarning)
             warnings.warn("runtime", RuntimeWarning)
@@ -325,7 +329,7 @@ class TestWarns:
         assert str(record[0].message) == "user"
         assert str(record[1].message) == "runtime"
 
-    def test_double_test(self, testdir):
+    def test_double_test(self, testdir) -> None:
         """If a test is run again, the warning should still be raised"""
         testdir.makepyfile(
             """
@@ -341,32 +345,32 @@ class TestWarns:
         result = testdir.runpytest()
         result.stdout.fnmatch_lines(["*2 passed in*"])
 
-    def test_match_regex(self):
+    def test_match_regex(self) -> None:
         with pytest.warns(UserWarning, match=r"must be \d+$"):
             warnings.warn("value must be 42", UserWarning)
 
-        with pytest.raises(pytest.fail.Exception):
+        with pytest.raises(Failed):
             with pytest.warns(UserWarning, match=r"must be \d+$"):
                 warnings.warn("this is not here", UserWarning)
 
-        with pytest.raises(pytest.fail.Exception):
+        with pytest.raises(Failed):
             with pytest.warns(FutureWarning, match=r"must be \d+$"):
                 warnings.warn("value must be 42", UserWarning)
 
-    def test_one_from_multiple_warns(self):
+    def test_one_from_multiple_warns(self) -> None:
         with pytest.warns(UserWarning, match=r"aaa"):
             warnings.warn("cccccccccc", UserWarning)
             warnings.warn("bbbbbbbbbb", UserWarning)
             warnings.warn("aaaaaaaaaa", UserWarning)
 
-    def test_none_of_multiple_warns(self):
-        with pytest.raises(pytest.fail.Exception):
+    def test_none_of_multiple_warns(self) -> None:
+        with pytest.raises(Failed):
             with pytest.warns(UserWarning, match=r"aaa"):
                 warnings.warn("bbbbbbbbbb", UserWarning)
                 warnings.warn("cccccccccc", UserWarning)
 
     @pytest.mark.filterwarnings("ignore")
-    def test_can_capture_previously_warned(self):
+    def test_can_capture_previously_warned(self) -> None:
         def f():
             warnings.warn(UserWarning("ohai"))
             return 10
@@ -375,8 +379,8 @@ class TestWarns:
         assert pytest.warns(UserWarning, f) == 10
         assert pytest.warns(UserWarning, f) == 10
 
-    def test_warns_context_manager_with_kwargs(self):
+    def test_warns_context_manager_with_kwargs(self) -> None:
         with pytest.raises(TypeError) as excinfo:
-            with pytest.warns(UserWarning, foo="bar"):
+            with pytest.warns(UserWarning, foo="bar"):  # type: ignore
                 pass
         assert "Unexpected keyword arguments" in str(excinfo.value)

@@ -2,6 +2,9 @@ import inspect
 import os
 import sys
 import types
+from typing import Dict
+from typing import List
+from typing import Tuple
 
 import py
 
@@ -11,11 +14,17 @@ from _pytest import main
 from _pytest import outcomes
 from _pytest import reports
 from _pytest import runner
+from _pytest.outcomes import Exit
+from _pytest.outcomes import Failed
+from _pytest.outcomes import OutcomeException
+from _pytest.outcomes import Skipped
+
+if False:  # TYPE_CHECKING
+    from typing import Type
 
 
 class TestSetupState:
-    def test_setup(self, testdir):
+    def test_setup(self, testdir) -> None:
         ss = runner.SetupState()
         item = testdir.getitem("def test_func(): pass")
         values = [1]
@@ -25,14 +34,14 @@ class TestSetupState:
         ss._pop_and_teardown()
         assert not values
 
-    def test_teardown_exact_stack_empty(self, testdir):
+    def test_teardown_exact_stack_empty(self, testdir) -> None:
         item = testdir.getitem("def test_func(): pass")
         ss = runner.SetupState()
         ss.teardown_exact(item, None)
         ss.teardown_exact(item, None)
         ss.teardown_exact(item, None)
 
-    def test_setup_fails_and_failure_is_cached(self, testdir):
+    def test_setup_fails_and_failure_is_cached(self, testdir) -> None:
         item = testdir.getitem(
             """
             def setup_module(mod):
@@ -44,7 +53,7 @@ class TestSetupState:
         pytest.raises(ValueError, lambda: ss.prepare(item))
         pytest.raises(ValueError, lambda: ss.prepare(item))
 
-    def test_teardown_multiple_one_fails(self, testdir):
+    def test_teardown_multiple_one_fails(self, testdir) -> None:
         r = []
 
         def fin1():
@@ -66,7 +75,7 @@ class TestSetupState:
         assert err.value.args == ("oops",)
         assert r == ["fin3", "fin1"]
 
-    def test_teardown_multiple_fail(self, testdir):
+    def test_teardown_multiple_fail(self, testdir) -> None:
         # Ensure the first exception is the one which is re-raised.
         # Ideally both would be reported however.
         def fin1():
@@ -83,7 +92,7 @@ class TestSetupState:
         ss._callfinalizers(item)
         assert err.value.args == ("oops2",)
 
-    def test_teardown_multiple_scopes_one_fails(self, testdir):
+    def test_teardown_multiple_scopes_one_fails(self, testdir) -> None:
         module_teardown = []
 
         def fin_func():
@@ -103,7 +112,7 @@ class TestSetupState:
 
 
 class BaseFunctionalTests:
-    def test_passfunction(self, testdir):
+    def test_passfunction(self, testdir) -> None:
         reports = testdir.runitem(
             """
             def test_func():
@@ -116,7 +125,7 @@ class BaseFunctionalTests:
         assert rep.outcome == "passed"
         assert not rep.longrepr
 
-    def test_failfunction(self, testdir):
+    def test_failfunction(self, testdir) -> None:
         reports = testdir.runitem(
             """
             def test_func():
@@ -131,7 +140,7 @@ class BaseFunctionalTests:
         assert rep.outcome == "failed"
         # assert isinstance(rep.longrepr, ReprExceptionInfo)
 
-    def test_skipfunction(self, testdir):
+    def test_skipfunction(self, testdir) -> None:
         reports = testdir.runitem(
             """
             import pytest
@@ -151,7 +160,7 @@ class BaseFunctionalTests:
         # assert rep.skipped.location.path
         # assert not rep.skipped.failurerepr
 
-    def test_skip_in_setup_function(self, testdir):
+    def test_skip_in_setup_function(self, testdir) -> None:
         reports = testdir.runitem(
             """
             import pytest
@@ -172,7 +181,7 @@ class BaseFunctionalTests:
         assert len(reports) == 2
         assert reports[1].passed  # teardown
 
-    def test_failure_in_setup_function(self, testdir):
+    def test_failure_in_setup_function(self, testdir) -> None:
         reports = testdir.runitem(
             """
             import pytest
@@ -189,7 +198,7 @@ class BaseFunctionalTests:
         assert rep.when == "setup"
         assert len(reports) == 2
 
-    def test_failure_in_teardown_function(self, testdir):
+    def test_failure_in_teardown_function(self, testdir) -> None:
         reports = testdir.runitem(
             """
             import pytest
@@ -209,7 +218,7 @@ class BaseFunctionalTests:
         # assert rep.longrepr.reprcrash.lineno == 3
         # assert rep.longrepr.reprtraceback.reprentries
 
-    def test_custom_failure_repr(self, testdir):
+    def test_custom_failure_repr(self, testdir) -> None:
         testdir.makepyfile(
             conftest="""
             import pytest
@@ -234,7 +243,7 @@ class BaseFunctionalTests:
         # assert rep.failed.where.path.basename == "test_func.py"
         # assert rep.failed.failurerepr == "hello"
 
-    def test_teardown_final_returncode(self, testdir):
+    def test_teardown_final_returncode(self, testdir) -> None:
         rec = testdir.inline_runsource(
             """
             def test_func():
@@ -245,7 +254,7 @@ class BaseFunctionalTests:
         )
         assert rec.ret == 1
 
-    def test_logstart_logfinish_hooks(self, testdir):
+    def test_logstart_logfinish_hooks(self, testdir) -> None:
         rec = testdir.inline_runsource(
             """
             import pytest
@@ -262,7 +271,7 @@ class BaseFunctionalTests:
         assert rep.nodeid == "test_logstart_logfinish_hooks.py::test_func"
         assert rep.location == ("test_logstart_logfinish_hooks.py", 1, "test_func")
 
-    def test_exact_teardown_issue90(self, testdir):
+    def test_exact_teardown_issue90(self, testdir) -> None:
         rec = testdir.inline_runsource(
             """
             import pytest
@@ -302,7 +311,7 @@ class BaseFunctionalTests:
         assert reps[5].nodeid.endswith("test_func")
         assert reps[5].failed
 
-    def test_exact_teardown_issue1206(self, testdir):
+    def test_exact_teardown_issue1206(self, testdir) -> None:
         """issue shadowing error with wrong number of arguments on teardown_method."""
         rec = testdir.inline_runsource(
             """
@@ -338,7 +347,7 @@ class BaseFunctionalTests:
             "TypeError: teardown_method() takes exactly 4 arguments (2 given)",
         )
 
-    def test_failure_in_setup_function_ignores_custom_repr(self, testdir):
+    def test_failure_in_setup_function_ignores_custom_repr(self, testdir) -> None:
         testdir.makepyfile(
             conftest="""
             import pytest
@@ -366,7 +375,7 @@ class BaseFunctionalTests:
         # assert rep.outcome.where.path.basename == "test_func.py"
         # assert instanace(rep.failed.failurerepr, PythonFailureRepr)
 
-    def test_systemexit_does_not_bail_out(self, testdir):
+    def test_systemexit_does_not_bail_out(self, testdir) -> None:
         try:
             reports = testdir.runitem(
                 """
@@ -380,7 +389,7 @@ class BaseFunctionalTests:
         assert rep.failed
         assert rep.when == "call"
 
-    def test_exit_propagates(self, testdir):
+    def test_exit_propagates(self, testdir) -> None:
         try:
             testdir.runitem(
                 """
@@ -389,7 +398,7 @@ class BaseFunctionalTests:
                     raise pytest.exit.Exception()
                 """
             )
-        except pytest.exit.Exception:
+        except Exit:
             pass
         else:
             pytest.fail("did not raise")
@@ -402,7 +411,7 @@ class TestExecutionNonForked(BaseFunctionalTests):
 
         return f
 
-    def test_keyboardinterrupt_propagates(self, testdir):
+    def test_keyboardinterrupt_propagates(self, testdir) -> None:
         try:
             testdir.runitem(
                 """
@@ -424,7 +433,7 @@ class TestExecutionForked(BaseFunctionalTests):
         boxed = pytest.importorskip("xdist.boxed")
         return boxed.forked_run_report
 
-    def test_suicide(self, testdir):
+    def test_suicide(self, testdir) -> None:
         reports = testdir.runitem(
             """
             def test_func():
@@ -438,7 +447,7 @@ class TestExecutionForked(BaseFunctionalTests):
 
 
 class TestSessionReports:
-    def test_collect_result(self, testdir):
+    def test_collect_result(self, testdir) -> None:
         col = testdir.getmodulecol(
             """
             def test_func1():
@@ -461,20 +470,24 @@ class TestSessionReports:
         assert res[1].name == "TestClass"
 
 
-reporttypes = [reports.BaseReport, reports.TestReport, reports.CollectReport]
+reporttypes = [
+    reports.BaseReport,
+    reports.TestReport,
+    reports.CollectReport,
+]  # type: List[Type[reports.BaseReport]]
 
 
 @pytest.mark.parametrize(
     "reporttype", reporttypes, ids=[x.__name__ for x in reporttypes]
 )
-def test_report_extra_parameters(reporttype):
+def test_report_extra_parameters(reporttype: "Type[reports.BaseReport]") -> None:
     args = list(inspect.signature(reporttype.__init__).parameters.keys())[1:]
-    basekw = dict.fromkeys(args, [])
+    basekw = dict.fromkeys(args, [])  # type: Dict[str, List[object]]
     report = reporttype(newthing=1, **basekw)
     assert report.newthing == 1
 
 
-def test_callinfo():
+def test_callinfo() -> None:
     ci = runner.CallInfo.from_call(lambda: 0, "123")
     assert ci.when == "123"
     assert ci.result == 0
@@ -503,7 +516,7 @@ def test_callinfo():
 
 
 @pytest.mark.xfail
-def test_runtest_in_module_ordering(testdir):
+def test_runtest_in_module_ordering(testdir) -> None:
     p1 = testdir.makepyfile(
         """
         import pytest
@@ -534,12 +547,12 @@ def test_runtest_in_module_ordering(testdir):
     result.stdout.fnmatch_lines(["*2 passed*"])
 
 
-def test_outcomeexception_exceptionattributes():
+def test_outcomeexception_exceptionattributes() -> None:
    outcome = outcomes.OutcomeException("test")
    assert outcome.args[0] == outcome.msg
 
 
-def test_outcomeexception_passes_except_Exception():
+def test_outcomeexception_passes_except_Exception() -> None:
     with pytest.raises(outcomes.OutcomeException):
         try:
             raise outcomes.OutcomeException("test")
@@ -547,20 +560,22 @@ def test_outcomeexception_passes_except_Exception():
             pass
 
 
-def test_pytest_exit():
-    with pytest.raises(pytest.exit.Exception) as excinfo:
+def test_pytest_exit() -> None:
+    assert Exit == pytest.exit.Exception  # type: ignore
+    with pytest.raises(Exit) as excinfo:
         pytest.exit("hello")
-    assert excinfo.errisinstance(pytest.exit.Exception)
+    assert excinfo.errisinstance(Exit)
 
 
-def test_pytest_fail():
-    with pytest.raises(pytest.fail.Exception) as excinfo:
+def test_pytest_fail() -> None:
+    assert Failed == pytest.fail.Exception  # type: ignore
+    with pytest.raises(Failed) as excinfo:
         pytest.fail("hello")
     s = excinfo.exconly(tryshort=True)
     assert s.startswith("Failed")
 
 
-def test_pytest_exit_msg(testdir):
+def test_pytest_exit_msg(testdir) -> None:
     testdir.makeconftest(
         """
         import pytest
@@ -583,7 +598,7 @@ def _strip_resource_warnings(lines):
     ]
 
 
-def test_pytest_exit_returncode(testdir):
+def test_pytest_exit_returncode(testdir) -> None:
     testdir.makepyfile(
         """\
         import pytest
@@ -614,7 +629,7 @@ def test_pytest_exit_returncode(testdir):
     assert result.ret == 98
 
 
-def test_pytest_fail_notrace_runtest(testdir):
+def test_pytest_fail_notrace_runtest(testdir) -> None:
     """Test pytest.fail(..., pytrace=False) does not show tracebacks during test run."""
     testdir.makepyfile(
         """
@@ -630,7 +645,7 @@ def test_pytest_fail_notrace_runtest(testdir):
     result.stdout.no_fnmatch_line("*def teardown_function*")
 
 
-def test_pytest_fail_notrace_collection(testdir):
+def test_pytest_fail_notrace_collection(testdir) -> None:
     """Test pytest.fail(..., pytrace=False) does not show tracebacks during collection."""
     testdir.makepyfile(
         """
@@ -645,7 +660,7 @@ def test_pytest_fail_notrace_collection(testdir):
     result.stdout.no_fnmatch_line("*def some_internal_function()*")
 
 
-def test_pytest_fail_notrace_non_ascii(testdir):
+def test_pytest_fail_notrace_non_ascii(testdir) -> None:
     """Fix pytest.fail with pytrace=False with non-ascii characters (#1178).
 
     This tests with native and unicode strings containing non-ascii chars.
@@ -663,7 +678,7 @@ def test_pytest_fail_notrace_non_ascii(testdir):
     result.stdout.no_fnmatch_line("*def test_hello*")
 
 
-def test_pytest_no_tests_collected_exit_status(testdir):
+def test_pytest_no_tests_collected_exit_status(testdir) -> None:
     result = testdir.runpytest()
     result.stdout.fnmatch_lines(["*collected 0 items*"])
     assert result.ret == main.ExitCode.NO_TESTS_COLLECTED
@@ -685,16 +700,17 @@ def test_pytest_no_tests_collected_exit_status(testdir):
     assert result.ret == main.ExitCode.NO_TESTS_COLLECTED
 
 
-def test_exception_printing_skip():
+def test_exception_printing_skip() -> None:
+    assert Skipped == pytest.skip.Exception  # type: ignore
     try:
         pytest.skip("hello")
-    except pytest.skip.Exception:
+    except Skipped:
         excinfo = _pytest._code.ExceptionInfo.from_current()
         s = excinfo.exconly(tryshort=True)
         assert s.startswith("Skipped")
 
 
-def test_importorskip(monkeypatch):
+def test_importorskip(monkeypatch) -> None:
     importorskip = pytest.importorskip
 
     def f():
@@ -705,45 +721,49 @@ def test_importorskip(monkeypatch):
         assert sysmod is sys
         # path = pytest.importorskip("os.path")
         # assert path == os.path
-        excinfo = pytest.raises(pytest.skip.Exception, f)
-        path = py.path.local(excinfo.getrepr().reprcrash.path)
+        excinfo = pytest.raises(Skipped, f)
+        assert excinfo is not None
+        excrepr = excinfo.getrepr()
+        assert excrepr is not None
+        assert excrepr.reprcrash is not None
+        path = py.path.local(excrepr.reprcrash.path)
         # check that importorskip reports the actual call
         # in this test the test_runner.py file
         assert path.purebasename == "test_runner"
         pytest.raises(SyntaxError, pytest.importorskip, "x y z")
         pytest.raises(SyntaxError, pytest.importorskip, "x=y")
         mod = types.ModuleType("hello123")
-        mod.__version__ = "1.3"
+        mod.__version__ = "1.3"  # type: ignore
         monkeypatch.setitem(sys.modules, "hello123", mod)
-        with pytest.raises(pytest.skip.Exception):
+        with pytest.raises(Skipped):
            pytest.importorskip("hello123", minversion="1.3.1")
         mod2 = pytest.importorskip("hello123", minversion="1.3")
         assert mod2 == mod
-    except pytest.skip.Exception:
+    except Skipped:
         print(_pytest._code.ExceptionInfo.from_current())
         pytest.fail("spurious skip")
 
 
-def test_importorskip_imports_last_module_part():
+def test_importorskip_imports_last_module_part() -> None:
     ospath = pytest.importorskip("os.path")
     assert os.path == ospath
 
 
-def test_importorskip_dev_module(monkeypatch):
+def test_importorskip_dev_module(monkeypatch) -> None:
     try:
         mod = types.ModuleType("mockmodule")
-        mod.__version__ = "0.13.0.dev-43290"
+        mod.__version__ = "0.13.0.dev-43290"  # type: ignore
         monkeypatch.setitem(sys.modules, "mockmodule", mod)
         mod2 = pytest.importorskip("mockmodule", minversion="0.12.0")
         assert mod2 == mod
-        with pytest.raises(pytest.skip.Exception):
+        with pytest.raises(Skipped):
             pytest.importorskip("mockmodule1", minversion="0.14.0")
-    except pytest.skip.Exception:
+    except Skipped:
         print(_pytest._code.ExceptionInfo.from_current())
         pytest.fail("spurious skip")
 
 
-def test_importorskip_module_level(testdir):
+def test_importorskip_module_level(testdir) -> None:
     """importorskip must be able to skip entire modules when used at module level"""
     testdir.makepyfile(
         """
@@ -758,7 +778,7 @@ def test_importorskip_module_level(testdir):
     result.stdout.fnmatch_lines(["*collected 0 items / 1 skipped*"])
 
 
-def test_importorskip_custom_reason(testdir):
+def test_importorskip_custom_reason(testdir) -> None:
     """make sure custom reasons are used"""
     testdir.makepyfile(
         """
@@ -774,7 +794,7 @@ def test_importorskip_custom_reason(testdir):
     result.stdout.fnmatch_lines(["*collected 0 items / 1 skipped*"])
 
 
-def test_pytest_cmdline_main(testdir):
+def test_pytest_cmdline_main(testdir) -> None:
     p = testdir.makepyfile(
         """
         import pytest
@@ -792,7 +812,7 @@ def test_pytest_cmdline_main(testdir):
     assert ret == 0
 
 
-def test_unicode_in_longrepr(testdir):
+def test_unicode_in_longrepr(testdir) -> None:
     testdir.makeconftest(
         """\
         import pytest
@@ -815,7 +835,7 @@ def test_unicode_in_longrepr(testdir):
     assert "UnicodeEncodeError" not in result.stderr.str()
 
 
-def test_failure_in_setup(testdir):
+def test_failure_in_setup(testdir) -> None:
     testdir.makepyfile(
         """
         def setup_module():
@@ -828,7 +848,7 @@ def test_failure_in_setup(testdir):
     result.stdout.no_fnmatch_line("*def setup_module*")
 
 
-def test_makereport_getsource(testdir):
+def test_makereport_getsource(testdir) -> None:
     testdir.makepyfile(
         """
         def test_foo():
@@ -841,17 +861,17 @@ def test_makereport_getsource(testdir):
     result.stdout.fnmatch_lines(["*else: assert False*"])
 
 
-def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
+def test_makereport_getsource_dynamic_code(testdir, monkeypatch) -> None:
     """Test that exception in dynamically generated code doesn't break getting the source line."""
     import inspect
 
     original_findsource = inspect.findsource
 
-    def findsource(obj, *args, **kwargs):
+    def findsource(obj):
         # Can be triggered by dynamically created functions
         if obj.__name__ == "foo":
             raise IndexError()
-        return original_findsource(obj, *args, **kwargs)
+        return original_findsource(obj)
 
     monkeypatch.setattr(inspect, "findsource", findsource)
 
@@ -872,7 +892,7 @@ def test_makereport_getsource_dynamic_code(testdir, monkeypatch):
     result.stdout.fnmatch_lines(["*test_fix*", "*fixture*'missing'*not found*"])
 
 
-def test_store_except_info_on_error():
+def test_store_except_info_on_error() -> None:
     """ Test that upon test failure, the exception info is stored on
     sys.last_traceback and friends.
     """
@@ -891,6 +911,7 @@ def test_store_except_info_on_error():
         pass
     # Check that exception info is stored on sys
     assert sys.last_type is IndexError
+    assert isinstance(sys.last_value, IndexError)
     assert sys.last_value.args[0] == "TEST"
     assert sys.last_traceback
 
@@ -902,8 +923,8 @@ def test_store_except_info_on_error():
     assert not hasattr(sys, "last_traceback")
 
 
-def test_current_test_env_var(testdir, monkeypatch):
-    pytest_current_test_vars = []
+def test_current_test_env_var(testdir, monkeypatch) -> None:
+    pytest_current_test_vars = []  # type: List[Tuple[str, str]]
     monkeypatch.setattr(
         sys, "pytest_current_test_vars", pytest_current_test_vars, raising=False
     )
@@ -942,7 +963,7 @@ class TestReportContents:
     def getrunner(self):
         return lambda item: runner.runtestprotocol(item, log=False)
 
-    def test_longreprtext_pass(self, testdir):
+    def test_longreprtext_pass(self, testdir) -> None:
         reports = testdir.runitem(
             """
             def test_func():
@@ -952,7 +973,7 @@ class TestReportContents:
         rep = reports[1]
         assert rep.longreprtext == ""
 
-    def test_longreprtext_failure(self, testdir):
+    def test_longreprtext_failure(self, testdir) -> None:
         reports = testdir.runitem(
             """
             def test_func():
@@ -963,7 +984,7 @@ class TestReportContents:
         rep = reports[1]
         assert "assert 1 == 4" in rep.longreprtext
 
-    def test_captured_text(self, testdir):
+    def test_captured_text(self, testdir) -> None:
         reports = testdir.runitem(
             """
             import pytest
@@ -993,7 +1014,7 @@ class TestReportContents:
         assert call.capstderr == "setup: stderr\ncall: stderr\n"
         assert teardown.capstderr == "setup: stderr\ncall: stderr\nteardown: stderr\n"
 
-    def test_no_captured_text(self, testdir):
+    def test_no_captured_text(self, testdir) -> None:
         reports = testdir.runitem(
             """
             def test_func():
@@ -1005,10 +1026,10 @@ class TestReportContents:
         assert rep.capstderr == ""
 
 
-def test_outcome_exception_bad_msg():
+def test_outcome_exception_bad_msg() -> None:
     """Check that OutcomeExceptions validate their input to prevent confusing errors (#5578)"""
 
-    def func():
+    def func() -> None:
         pass
 
     expected = (
@@ -1016,5 +1037,5 @@ def test_outcome_exception_bad_msg():
        "Perhaps you meant to use a mark?"
     )
     with pytest.raises(TypeError) as excinfo:
-        OutcomeException(func)
+        OutcomeException(func)  # type: ignore
     assert str(excinfo.value) == expected

@@ -167,11 +167,13 @@ def test_stop_on_collection_errors(broken_testdir, broken_first):
     result.stdout.fnmatch_lines("*error during collection*")
 
 
-def test_xfail_handling(testdir):
+def test_xfail_handling(testdir, monkeypatch):
     """Ensure normal xfail is ignored, and strict xfail interrupts the session in sw mode
 
     (#5547)
     """
+    monkeypatch.setattr("sys.dont_write_bytecode", True)
+
     contents = """
        import pytest
        def test_a(): pass
@@ -205,10 +207,6 @@ def test_xfail_handling(testdir):
         ]
     )
 
-    # because we are writing to the same file, mtime might not be affected enough to
-    # invalidate the cache, making this next run flaky
-    if testdir.tmpdir.join("__pycache__").exists():
-        testdir.tmpdir.join("__pycache__").remove()
     testdir.makepyfile(contents.format(assert_value="0", strict="True"))
     result = testdir.runpytest("--sw", "-v")
     result.stdout.fnmatch_lines(

@ -3,6 +3,7 @@ terminal reporting of the full testing process.
|
|||
"""
|
||||
import collections
|
||||
import os
|
||||
import re
|
||||
import sys
|
||||
import textwrap
|
||||
from io import StringIO
|
||||
|
@ -21,10 +22,15 @@ from _pytest.terminal import getreportopt
|
|||
from _pytest.terminal import TerminalReporter
|
||||
|
||||
DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"])
|
||||
RED = r"\x1b\[31m"
|
||||
GREEN = r"\x1b\[32m"
|
||||
YELLOW = r"\x1b\[33m"
|
||||
RESET = r"\x1b\[0m"
|
||||
|
||||
COLORS = {
|
||||
"red": "\x1b[31m",
|
||||
"green": "\x1b[32m",
|
||||
"yellow": "\x1b[33m",
|
||||
"bold": "\x1b[1m",
|
||||
"reset": "\x1b[0m",
|
||||
}
|
||||
RE_COLORS = {k: re.escape(v) for k, v in COLORS.items()}
|
||||
|
||||
|
||||
class Option:
|
||||
|
@ -623,7 +629,7 @@ class TestTerminalFunctional:
|
|||
if request.config.pluginmanager.list_plugin_distinfo():
|
||||
result.stdout.fnmatch_lines(["plugins: *"])
|
||||
|
||||
def test_header(self, testdir, request):
|
||||
def test_header(self, testdir):
|
||||
testdir.tmpdir.join("tests").ensure_dir()
|
||||
testdir.tmpdir.join("gui").ensure_dir()
|
||||
|
||||
|
@ -709,7 +715,7 @@ class TestTerminalFunctional:
|
|||
"""
|
||||
)
|
||||
|
||||
def test_verbose_reporting(self, verbose_testfile, testdir, pytestconfig):
|
||||
def test_verbose_reporting(self, verbose_testfile, testdir):
|
||||
result = testdir.runpytest(
|
||||
verbose_testfile, "-v", "-Walways::pytest.PytestWarning"
|
||||
)
|
||||
|
@@ -879,10 +885,70 @@ def test_pass_output_reporting(testdir):
 
 
 def test_color_yes(testdir):
-    testdir.makepyfile("def test_this(): assert 1")
-    result = testdir.runpytest("--color=yes")
-    assert "test session starts" in result.stdout.str()
-    assert "\x1b[1m" in result.stdout.str()
+    p1 = testdir.makepyfile(
+        """
+        def fail():
+            assert 0
+
+        def test_this():
+            fail()
+        """
+    )
+    result = testdir.runpytest("--color=yes", str(p1))
+    if sys.version_info < (3, 6):
+        # py36 required for ordered markup
+        output = result.stdout.str()
+        assert "test session starts" in output
+        assert "\x1b[1m" in output
+        return
+    result.stdout.fnmatch_lines(
+        [
+            line.format(**COLORS).replace("[", "[[]")
+            for line in [
+                "{bold}=*= test session starts =*={reset}",
+                "collected 1 item",
+                "",
+                "test_color_yes.py {red}F{reset}{red} * [100%]{reset}",
+                "",
+                "=*= FAILURES =*=",
+                "{red}{bold}_*_ test_this _*_{reset}",
+                "",
+                "{bold}    def test_this():{reset}",
+                "{bold}>       fail(){reset}",
+                "",
+                "{bold}{red}test_color_yes.py{reset}:5: ",
+                "_ _ * _ _*",
+                "",
+                "{bold}    def fail():{reset}",
+                "{bold}>       assert 0{reset}",
+                "{bold}{red}E       assert 0{reset}",
+                "",
+                "{bold}{red}test_color_yes.py{reset}:2: AssertionError",
+                "{red}=*= {red}{bold}1 failed{reset}{red} in *s{reset}{red} =*={reset}",
+            ]
+        ]
+    )
+    result = testdir.runpytest("--color=yes", "--tb=short", str(p1))
+    result.stdout.fnmatch_lines(
+        [
+            line.format(**COLORS).replace("[", "[[]")
+            for line in [
+                "{bold}=*= test session starts =*={reset}",
+                "collected 1 item",
+                "",
+                "test_color_yes.py {red}F{reset}{red} * [100%]{reset}",
+                "",
+                "=*= FAILURES =*=",
+                "{red}{bold}_*_ test_this _*_{reset}",
+                "{bold}{red}test_color_yes.py{reset}:5: in test_this",
+                "{bold}    fail(){reset}",
+                "{bold}{red}test_color_yes.py{reset}:2: in fail",
+                "{bold}    assert 0{reset}",
+                "{bold}{red}E   assert 0{reset}",
+                "{red}=*= {red}{bold}1 failed{reset}{red} in *s{reset}{red} =*={reset}",
+            ]
+        ]
+    )
 
 
 def test_color_no(testdir):
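The `.replace("[", "[[]")` step is there because `fnmatch_lines` patterns treat `[...]` as a character class, so literal brackets in the expected output (such as `[100%]`) must be escaped. A sketch of the mechanics (names illustrative, not from the diff):

    from fnmatch import fnmatch

    line = "test_color_yes.py F  [100%]"
    # "[[]" matches a literal "[" in fnmatch syntax.
    pattern = "test_color_yes.py F * [100%]".replace("[", "[[]")
    assert fnmatch(line, pattern)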
@@ -994,7 +1060,7 @@ def test_tbstyle_short(testdir):
     assert "assert x" in s
 
 
-def test_traceconfig(testdir, monkeypatch):
+def test_traceconfig(testdir):
     result = testdir.runpytest("--traceconfig")
     result.stdout.fnmatch_lines(["*active plugins*"])
     assert result.ret == ExitCode.NO_TESTS_COLLECTED
@@ -1599,18 +1665,15 @@ class TestProgressOutputStyle:
                 def test_foobar(i): raise ValueError()
             """,
         )
-        output = testdir.runpytest()
-        output.stdout.re_match_lines(
+        result = testdir.runpytest()
+        result.stdout.re_match_lines(
             [
-                r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 50%\]{reset}".format(
-                    green=GREEN, reset=RESET
-                ),
-                r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 75%\]{reset}".format(
-                    green=GREEN, reset=RESET, yellow=YELLOW
-                ),
-                r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}".format(
-                    reset=RESET, red=RED
-                ),
+                line.format(**RE_COLORS)
+                for line in [
+                    r"test_bar.py ({green}\.{reset}){{10}}{green} \s+ \[ 50%\]{reset}",
+                    r"test_foo.py ({green}\.{reset}){{5}}{yellow} \s+ \[ 75%\]{reset}",
+                    r"test_foobar.py ({red}F{reset}){{5}}{red} \s+ \[100%\]{reset}",
+                ]
             ]
         )
tox.ini (25 lines changed)
@@ -20,11 +20,12 @@ envlist =
 [testenv]
 commands =
     {env:_PYTEST_TOX_COVERAGE_RUN:} pytest {posargs:{env:_PYTEST_TOX_DEFAULT_POSARGS:}}
+    doctesting: {env:_PYTEST_TOX_COVERAGE_RUN:} pytest --doctest-modules --pyargs _pytest
     coverage: coverage combine
     coverage: coverage report -m
 passenv = USER USERNAME COVERAGE_* TRAVIS PYTEST_ADDOPTS TERM
 setenv =
-    _PYTEST_TOX_DEFAULT_POSARGS={env:_PYTEST_TOX_POSARGS_LSOF:} {env:_PYTEST_TOX_POSARGS_XDIST:}
+    _PYTEST_TOX_DEFAULT_POSARGS={env:_PYTEST_TOX_POSARGS_DOCTESTING:} {env:_PYTEST_TOX_POSARGS_LSOF:} {env:_PYTEST_TOX_POSARGS_XDIST:}
 
 # Configuration to run with coverage similar to CI, e.g.
 # "tox -e py37-coverage".
@@ -33,6 +34,8 @@ setenv =
     coverage: COVERAGE_FILE={toxinidir}/.coverage
     coverage: COVERAGE_PROCESS_START={toxinidir}/.coveragerc
 
+    doctesting: _PYTEST_TOX_POSARGS_DOCTESTING=doc/en
+
     nobyte: PYTHONDONTWRITEBYTECODE=1
 
     lsof: _PYTEST_TOX_POSARGS_LSOF=--lsof
@@ -40,6 +43,7 @@ setenv =
     xdist: _PYTEST_TOX_POSARGS_XDIST=-n auto
 extras = testing
 deps =
+    doctesting: PyYAML
     oldattrs: attrs==17.4.0
     oldattrs: hypothesis<=4.38.1
     numpy: numpy
@@ -59,6 +63,15 @@ commands = pre-commit run --all-files --show-diff-on-failure {posargs:}
 extras = checkqa-mypy, testing
 commands = mypy {posargs:src testing}
 
+[testenv:mypy-diff]
+extras = checkqa-mypy, testing
+deps =
+    lxml
+    diff-cover
+commands =
+    -mypy --cobertura-xml-report {envtmpdir} {posargs:src testing}
+    diff-cover --fail-under=100 --compare-branch={env:DIFF_BRANCH:origin/{env:GITHUB_BASE_REF:master}} {envtmpdir}/cobertura.xml
+
 [testenv:docs]
 basepython = python3
 usedevelop = True
@@ -81,16 +94,6 @@ deps = -r{toxinidir}/doc/en/requirements.txt
 commands =
     sphinx-build -W -q --keep-going -b linkcheck . _build
 
-[testenv:doctesting]
-basepython = python3
-skipsdist = True
-deps =
-    {[testenv]deps}
-    PyYAML
-commands =
-    {env:_PYTEST_TOX_COVERAGE_RUN:} pytest doc/en
-    {env:_PYTEST_TOX_COVERAGE_RUN:} pytest --doctest-modules --pyargs _pytest
-
 [testenv:regen]
 changedir = doc/en
 skipsdist = True
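With the standalone `[testenv:doctesting]` section folded into the main `[testenv]` as a `doctesting:` factor, the doc tests now share the common commands/setenv/deps plumbing. Selecting the factor (for example `tox -e py37-doctesting`; the exact env names depend on the envlist, which lies outside these hunks) expands `_PYTEST_TOX_POSARGS_DOCTESTING` to `doc/en` and enables the `--doctest-modules` run added above.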