Merge remote-tracking branch 'upstream/features' into blueyed/pdb-doctest-bdbquit

Bruno Oliveira 2019-10-22 19:43:35 -03:00
commit f4734213e5
171 changed files with 4566 additions and 1619 deletions

View File

@@ -16,3 +16,11 @@ source = src/
     */lib/python*/site-packages/
     */pypy*/site-packages/
     *\Lib\site-packages\
+
+[report]
+skip_covered = True
+show_missing = True
+exclude_lines =
+    \#\s*pragma: no cover
+    ^\s*raise NotImplementedError\b
+    ^\s*return NotImplemented\b
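As an illustration, the new ``exclude_lines`` patterns drop lines like these from coverage reports (``Widget`` is a hypothetical stand-in class, not part of the change):

.. code-block:: python

    class Widget:
        def __eq__(self, other):
            if not isinstance(other, Widget):
                return NotImplemented  # matched by ^\s*return NotImplemented\b
            return True

        def resize(self):
            raise NotImplementedError  # matched by ^\s*raise NotImplementedError\b

        def debug_dump(self):  # pragma: no cover (matched by \#\s*pragma: no cover)
            print("widget")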

View File

@@ -6,7 +6,7 @@ Here is a quick checklist that should be present in PRs.
 -->
 - [ ] Target the `master` branch for bug fixes, documentation updates and trivial changes.
-- [ ] Target the `features` branch for new features and removals/deprecations.
+- [ ] Target the `features` branch for new features, improvements, and removals/deprecations.
 - [ ] Include documentation when adding new features.
 - [ ] Include new tests or update existing tests when applicable.

View File

@@ -1,17 +1,15 @@
 exclude: doc/en/example/py2py3/test_py2.py
 repos:
--   repo: https://github.com/python/black
+-   repo: https://github.com/psf/black
     rev: 19.3b0
     hooks:
     -   id: black
         args: [--safe, --quiet]
-        language_version: python3
 -   repo: https://github.com/asottile/blacken-docs
     rev: v1.0.0
     hooks:
     -   id: blacken-docs
         additional_dependencies: [black==19.3b0]
-        language_version: python3
 -   repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v2.2.3
     hooks:
@@ -28,7 +26,7 @@ repos:
     hooks:
     -   id: flake8
         language_version: python3
-        additional_dependencies: [flake8-typing-imports]
+        additional_dependencies: [flake8-typing-imports==1.3.0]
 -   repo: https://github.com/asottile/reorder_python_imports
     rev: v1.4.0
     hooks:
@@ -44,15 +42,10 @@ repos:
     hooks:
     -   id: rst-backticks
 -   repo: https://github.com/pre-commit/mirrors-mypy
-    rev: v0.711
+    rev: v0.720
     hooks:
     -   id: mypy
-        name: mypy (src)
-        files: ^src/
-        args: []
-    -   id: mypy
-        name: mypy (testing)
-        files: ^testing/
+        files: ^(src/|testing/)
         args: []
 -   repo: local
     hooks:
@@ -66,7 +59,7 @@ repos:
         name: changelog filenames
         language: fail
         entry: 'changelog files must be named ####.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst'
-        exclude: changelog/(\d+\.(feature|bugfix|doc|deprecation|removal|vendor|trivial).rst|README.rst|_template.rst)
+        exclude: changelog/(\d+\.(feature|improvement|bugfix|doc|deprecation|removal|vendor|trivial).rst|README.rst|_template.rst)
         files: ^changelog/
     -   id: py-deprecated
         name: py library is deprecated

View File

@@ -13,6 +13,10 @@ env:
   global:
     - PYTEST_ADDOPTS=-vv

+# setuptools-scm needs all tags in order to obtain a proper version
+git:
+  depth: false
+
 install:
   - python -m pip install --upgrade --pre tox
@@ -31,7 +35,9 @@ jobs:
       - test $(python -c 'import sys; print("%d%d" % sys.version_info[0:2])') = 37

     # Full run of latest supported version, without xdist.
-    - env: TOXENV=py37
+    # Coverage for:
+    # - test_sys_breakpoint_interception (via pexpect).
+    - env: TOXENV=py37-pexpect PYTEST_COVERAGE=1
       python: '3.7'

     # Coverage tracking is slow with pypy, skip it.
@@ -45,13 +51,11 @@
     #   - pytester's LsofFdLeakChecker
     #   - TestArgComplete (linux only)
     #   - numpy
+    #   - old attrs
     # Empty PYTEST_ADDOPTS to run this non-verbose.
-    - env: TOXENV=py37-lsof-numpy-twisted-xdist PYTEST_COVERAGE=1 PYTEST_ADDOPTS=
+    - env: TOXENV=py37-lsof-oldattrs-numpy-twisted-xdist PYTEST_COVERAGE=1 PYTEST_ADDOPTS=

     # Specialized factors for py37.
-    # Coverage for:
-    # - test_sys_breakpoint_interception (via pexpect).
-    - env: TOXENV=py37-pexpect PYTEST_COVERAGE=1
     - env: TOXENV=py37-pluggymaster-xdist
     - env: TOXENV=py37-freeze
@@ -68,8 +72,17 @@
     - stage: deploy
       python: '3.6'
-      install: pip install -U setuptools setuptools_scm
+      install: pip install -U setuptools setuptools_scm tox
       script: skip
+      # token to upload github release notes: GH_RELEASE_NOTES_TOKEN
+      env:
+        - secure: "OjOeL7/0JUDkV00SsTs732e8vQjHynpbG9FKTNtZZJ+1Zn4Cib+hAlwmlBnvVukML0X60YpcfjnC4quDOIGLPsh5zeXnvJmYtAIIUNQXjWz8NhcGYrhyzuP1rqV22U68RTCdmOq3lMYU/W2acwHP7T49PwJtOiUM5kF120UAQ0Zi5EmkqkIvH8oM5mO9Dlver+/U7Htpz9rhKrHBXQNCMZI6yj2aUyukqB2PN2fjAlDbCF//+FmvYw9NjT4GeFOSkTCf4ER9yfqs7yglRfwiLtOCZ2qKQhWZNsSJDB89rxIRXWavJUjJKeY2EW2/NkomYJDpqJLIF4JeFRw/HhA47CYPeo6BJqyyNV+0CovL1frpWfi9UQw2cMbgFUkUIUk3F6DD59PHNIOX2R/HX56dQsw7WKl3QuHlCOkICXYg8F7Ta684IoKjeTX03/6QNOkURfDBwfGszY0FpbxrjCSWKom6RyZdyidnESaxv9RzjcIRZVh1rp8KMrwS1OrwRSdG0zjlsPr49hWMenN/8fKgcHTV4/r1Tj6mip0dorSRCrgUNIeRBKgmui6FS8642ab5JNKOxMteVPVR2sFuhjOQ0Jy+PmvceYY9ZMWc3+/B/KVh0dZ3hwvLGZep/vxDS2PwCA5/xw31714vT5LxidKo8yECjBynMU/wUTTS695D3NY="
+      addons:
+        apt:
+          packages:
+            # required by publish_gh_release_notes
+            - pandoc
+      after_deploy: tox -e publish_gh_release_notes
       deploy:
         provider: pypi
         user: nicoddemus

View File

@@ -23,6 +23,7 @@ Andras Tim
 Andrea Cimatoribus
 Andreas Zeidler
 Andrey Paramonov
+Andrzej Klajnert
 Andrzej Ostrowski
 Andy Freeland
 Anthon van der Neut
@@ -55,6 +56,7 @@ Charnjit SiNGH (CCSJ)
 Chris Lamb
 Christian Boelsen
 Christian Fetzer
+Christian Neumüller
 Christian Theunert
 Christian Tismer
 Christopher Gilling
@@ -96,6 +98,7 @@ Feng Ma
 Florian Bruhin
 Floris Bruynooghe
 Gabriel Reis
+Gene Wood
 George Kussumoto
 Georgy Dyuldin
 Graham Horler
@@ -173,6 +176,7 @@ mbyt
 Michael Aquilina
 Michael Birtwell
 Michael Droettboom
+Michael Goerz
 Michael Seifert
 Michal Wajszczuk
 Mihai Capotă
@@ -209,6 +213,7 @@ Raphael Castaneda
 Raphael Pierzina
 Raquel Alegre
 Ravi Chandra
+Robert Holt
 Roberto Polli
 Roland Puntaier
 Romain Dorgueil
@@ -239,6 +244,7 @@ Tareq Alayan
 Ted Xiao
 Thomas Grainger
 Thomas Hisch
+Tim Hoffmann
 Tim Strazny
 Tom Dalton
 Tom Viner
@@ -258,7 +264,9 @@ Wil Cooley
 William Lee
 Wim Glenn
 Wouter van Ackooy
+Xixi Zhao
 Xuan Luong
 Xuecong Liao
+Yoav Caspi
 Zac Hatfield-Dodds
 Zoltán Máté

View File

@@ -1,6 +1,6 @@
-=================
-Changelog history
-=================
+=========
+Changelog
+=========
 Versions follow `Semantic Versioning <https://semver.org/>`_ (``<major>.<minor>.<patch>``).
@@ -18,6 +18,287 @@ with advance notice in the **Deprecations** section of releases.
.. towncrier release notes start
pytest 5.2.1 (2019-10-06)
=========================
Bug Fixes
---------
- `#5902 <https://github.com/pytest-dev/pytest/issues/5902>`_: Fix warnings about deprecated ``cmp`` attribute in ``attrs>=19.2``.
pytest 4.6.6 (2019-10-11)
=========================
Bug Fixes
---------
- `#5523 <https://github.com/pytest-dev/pytest/issues/5523>`_: Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+.
- `#5537 <https://github.com/pytest-dev/pytest/issues/5537>`_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
standard library on Python 3.8+.
- `#5806 <https://github.com/pytest-dev/pytest/issues/5806>`_: Fix the "lexer" used when uploading to bpaste.net from ``--pastebin``: it is now "text".
- `#5902 <https://github.com/pytest-dev/pytest/issues/5902>`_: Fix warnings about deprecated ``cmp`` attribute in ``attrs>=19.2``.
Trivial/Internal Changes
------------------------
- `#5801 <https://github.com/pytest-dev/pytest/issues/5801>`_: Fixes python version checks (detected by ``flake8-2020``) in case python4 becomes a thing.
pytest 5.2.0 (2019-09-28)
=========================
Deprecations
------------
- `#1682 <https://github.com/pytest-dev/pytest/issues/1682>`_: Passing arguments to pytest.fixture() as positional arguments is deprecated - pass them
as a keyword argument instead.
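A minimal sketch of the migration (the fixture itself is hypothetical):

.. code-block:: python

    import pytest

    # deprecated: ``scope`` passed positionally
    @pytest.fixture("module")
    def db_positional():
        ...

    # preferred: ``scope`` passed as a keyword argument
    @pytest.fixture(scope="module")
    def db():
        ...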
Features
--------
- `#1682 <https://github.com/pytest-dev/pytest/issues/1682>`_: The ``scope`` parameter of ``@pytest.fixture`` can now be a callable that receives
the fixture name and the ``config`` object as keyword-only parameters.
See `the docs <https://docs.pytest.org/en/latest/fixture.html#dynamic-scope>`__ for more information.
- `#5764 <https://github.com/pytest-dev/pytest/issues/5764>`_: New behavior of the ``--pastebin`` option: failures to connect to the pastebin server are reported, without failing the pytest run
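A sketch of the dynamic ``scope`` callable from `#1682` above; the ``--keep-containers`` option is hypothetical and would need to be registered via ``pytest_addoption``:

.. code-block:: python

    import pytest

    def determine_scope(fixture_name, config):
        # widen the scope when the (hypothetical) --keep-containers flag is set
        if config.getoption("--keep-containers", None):
            return "session"
        return "function"

    @pytest.fixture(scope=determine_scope)
    def docker_container():
        yield "container"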
Bug Fixes
---------
- `#5806 <https://github.com/pytest-dev/pytest/issues/5806>`_: Fix the "lexer" used when uploading to bpaste.net from ``--pastebin``: it is now "text".
- `#5884 <https://github.com/pytest-dev/pytest/issues/5884>`_: Fix ``--setup-only`` and ``--setup-show`` for custom pytest items.
Trivial/Internal Changes
------------------------
- `#5056 <https://github.com/pytest-dev/pytest/issues/5056>`_: The HelpFormatter uses ``py.io.get_terminal_width`` for better width detection.
pytest 5.1.3 (2019-09-18)
=========================
Bug Fixes
---------
- `#5807 <https://github.com/pytest-dev/pytest/issues/5807>`_: Fix pypy3.6 (nightly) on windows.
- `#5811 <https://github.com/pytest-dev/pytest/issues/5811>`_: Handle ``--fulltrace`` correctly with ``pytest.raises``.
- `#5819 <https://github.com/pytest-dev/pytest/issues/5819>`_: Windows: Fix regression with conftest whose qualified name contains uppercase
characters (introduced by #5792).
pytest 5.1.2 (2019-08-30)
=========================
Bug Fixes
---------
- `#2270 <https://github.com/pytest-dev/pytest/issues/2270>`_: Fixed ``self`` reference in function-scoped fixtures defined plugin classes: previously ``self``
would be a reference to a *test* class, not the *plugin* class.
- `#570 <https://github.com/pytest-dev/pytest/issues/570>`_: Fixed long standing issue where fixture scope was not respected when indirect fixtures were used during
parametrization.
- `#5782 <https://github.com/pytest-dev/pytest/issues/5782>`_: Fix decoding error when printing an error response from ``--pastebin``.
- `#5786 <https://github.com/pytest-dev/pytest/issues/5786>`_: Chained exceptions in test and collection reports are now correctly serialized, allowing plugins like
``pytest-xdist`` to display them properly.
- `#5792 <https://github.com/pytest-dev/pytest/issues/5792>`_: Windows: Fix error that occurs in certain circumstances when loading
``conftest.py`` from a working directory that has casing other than the one stored
in the filesystem (e.g., ``c:\test`` instead of ``C:\test``).
pytest 5.1.1 (2019-08-20)
=========================
Bug Fixes
---------
- `#5751 <https://github.com/pytest-dev/pytest/issues/5751>`_: Fixed ``TypeError`` when importing pytest on Python 3.5.0 and 3.5.1.
pytest 5.1.0 (2019-08-15)
=========================
Removals
--------
- `#5180 <https://github.com/pytest-dev/pytest/issues/5180>`_: As per our policy, the following features have been deprecated in the 4.X series and are now
removed:
* ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead.
* ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument.
* ``message`` parameter of ``pytest.raises``.
* ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only
syntax. This might change the exception message from previous versions, but they still raise
``TypeError`` on unknown keyword arguments as before.
* ``pytest.config`` global variable.
* ``tmpdir_factory.ensuretemp`` method.
* ``pytest_logwarning`` hook.
* ``RemovedInPytest4Warning`` warning type.
* ``request`` is now a reserved name for fixtures.
For more information consult
`Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs.
- `#5565 <https://github.com/pytest-dev/pytest/issues/5565>`_: Removed unused support code for `unittest2 <https://pypi.org/project/unittest2/>`__.
The ``unittest2`` backport module is no longer
necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem
to be used: after removing it, all tests still pass unchanged.
Although our policy is to introduce a deprecation period before removing any features or support
for third party libraries, because this code is apparently not used
at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to
remove it in this release.
If you experience a regression because of this, please
`file an issue <https://github.com/pytest-dev/pytest/issues/new>`__.
- `#5615 <https://github.com/pytest-dev/pytest/issues/5615>`_: ``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument.
This was supported for Python 2 where it was tempting to use ``"message"``
instead of ``u"message"``.
Python 3 code is unlikely to pass ``bytes`` to these functions. If you do,
please decode it to an ``str`` beforehand.
Features
--------
- `#5564 <https://github.com/pytest-dev/pytest/issues/5564>`_: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.
- `#5576 <https://github.com/pytest-dev/pytest/issues/5576>`_: New `NUMBER <https://docs.pytest.org/en/latest/doctest.html#using-doctest-options>`__
option for doctests to ignore irrelevant differences in floating-point numbers.
Inspired by Sébastien Boisgérault's `numtest <https://github.com/boisgera/numtest>`__
extension for doctest.
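With ``NUMBER`` enabled, a float in the expected output only needs to match to the precision actually written; a small sketch, collected with ``--doctest-modules``:

.. code-block:: python

    def sqrt_two():
        """
        >>> 2 ** 0.5  # doctest: +NUMBER
        1.414
        """
        return 2 ** 0.5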
Improvements
------------
- `#5471 <https://github.com/pytest-dev/pytest/issues/5471>`_: JUnit XML now includes a timestamp and hostname in the testsuite tag.
- `#5707 <https://github.com/pytest-dev/pytest/issues/5707>`_: Time taken to run the test suite now includes a human-readable representation when it takes over
60 seconds, for example::
===== 2 failed in 102.70s (0:01:42) =====
Bug Fixes
---------
- `#4344 <https://github.com/pytest-dev/pytest/issues/4344>`_: Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.
- `#5115 <https://github.com/pytest-dev/pytest/issues/5115>`_: Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest.
- `#5477 <https://github.com/pytest-dev/pytest/issues/5477>`_: The XML file produced by ``--junitxml`` now correctly contains a ``<testsuites>`` root element.
- `#5524 <https://github.com/pytest-dev/pytest/issues/5524>`_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.
- `#5537 <https://github.com/pytest-dev/pytest/issues/5537>`_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
standard library on Python 3.8+.
- `#5578 <https://github.com/pytest-dev/pytest/issues/5578>`_: Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc)
so they provide better error messages when users meant to use marks (for example ``@pytest.xfail``
instead of ``@pytest.mark.xfail``).
- `#5606 <https://github.com/pytest-dev/pytest/issues/5606>`_: Fixed internal error when test functions were patched with objects that cannot be compared
for truth values against others, like ``numpy`` arrays.
- `#5634 <https://github.com/pytest-dev/pytest/issues/5634>`_: ``pytest.exit`` is now correctly handled in ``unittest`` cases.
This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly.
- `#5650 <https://github.com/pytest-dev/pytest/issues/5650>`_: Improved output when parsing an ini configuration file fails.
- `#5701 <https://github.com/pytest-dev/pytest/issues/5701>`_: Fix collection of ``staticmethod`` objects defined with ``functools.partial``.
- `#5734 <https://github.com/pytest-dev/pytest/issues/5734>`_: Skip async generator test functions, and update the warning message to refer to ``async def`` functions.
Improved Documentation
----------------------
- `#5669 <https://github.com/pytest-dev/pytest/issues/5669>`_: Add docstring for ``Testdir.copy_example``.
Trivial/Internal Changes
------------------------
- `#5095 <https://github.com/pytest-dev/pytest/issues/5095>`_: XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite
to avoid future regressions.
- `#5516 <https://github.com/pytest-dev/pytest/issues/5516>`_: Cache node splitting function which can improve collection performance in very large test suites.
- `#5603 <https://github.com/pytest-dev/pytest/issues/5603>`_: Simplified internal ``SafeRepr`` class and removed some dead code.
- `#5664 <https://github.com/pytest-dev/pytest/issues/5664>`_: When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``,
the ``test_xfail_handling`` test no longer fails.
- `#5684 <https://github.com/pytest-dev/pytest/issues/5684>`_: Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.).
pytest 5.0.1 (2019-07-04)
=========================
@@ -90,6 +371,24 @@ Removals
- `#5412 <https://github.com/pytest-dev/pytest/issues/5412>`_: ``ExceptionInfo`` objects (returned by ``pytest.raises``) now have the same ``str`` representation as ``repr``, which
  avoids some confusion when users use ``print(e)`` to inspect the object.
This means code like:
.. code-block:: python

    with pytest.raises(SomeException) as e:
        ...
    assert "some message" in str(e)
Needs to be changed to:
.. code-block:: python

    with pytest.raises(SomeException) as e:
        ...
    assert "some message" in str(e.value)
Deprecations
@@ -225,6 +524,27 @@ Improved Documentation
- `#5416 <https://github.com/pytest-dev/pytest/issues/5416>`_: Fix PytestUnknownMarkWarning in run/skip example.
pytest 4.6.5 (2019-08-05)
=========================
Bug Fixes
---------
- `#4344 <https://github.com/pytest-dev/pytest/issues/4344>`_: Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.
- `#5478 <https://github.com/pytest-dev/pytest/issues/5478>`_: Fix encode error when using unicode strings in exceptions with ``pytest.raises``.
- `#5524 <https://github.com/pytest-dev/pytest/issues/5524>`_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.
- `#5547 <https://github.com/pytest-dev/pytest/issues/5547>`_: ``--step-wise`` now handles ``xfail(strict=True)`` markers properly.
- `#5650 <https://github.com/pytest-dev/pytest/issues/5650>`_: Improved output when parsing an ini configuration file fails.
pytest 4.6.4 (2019-06-28)
=========================
@@ -2173,10 +2493,10 @@ Features
   design. This introduces new ``Node.iter_markers(name)`` and
   ``Node.get_closest_marker(name)`` APIs. Users are **strongly encouraged** to
   read the `reasons for the revamp in the docs
-  <https://docs.pytest.org/en/latest/mark.html#marker-revamp-and-iteration>`_,
+  <https://docs.pytest.org/en/latest/historical-notes.html#marker-revamp-and-iteration>`_,
   or jump over to details about `updating existing code to use the new APIs
-  <https://docs.pytest.org/en/latest/mark.html#updating-code>`_. (`#3317
-  <https://github.com/pytest-dev/pytest/issues/3317>`_)
+  <https://docs.pytest.org/en/latest/historical-notes.html#updating-code>`_.
+  (`#3317 <https://github.com/pytest-dev/pytest/issues/3317>`_)

 - Now when ``@pytest.fixture`` is applied more than once to the same function a
   ``ValueError`` is raised. This buggy behavior would cause surprising problems
@@ -2582,10 +2902,10 @@ Features
   <https://github.com/pytest-dev/pytest/issues/3038>`_)

 - New `pytest_runtest_logfinish
-  <https://docs.pytest.org/en/latest/writing_plugins.html#_pytest.hookspec.pytest_runtest_logfinish>`_
+  <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_runtest_logfinish>`_
   hook which is called when a test item has finished executing, analogous to
   `pytest_runtest_logstart
-  <https://docs.pytest.org/en/latest/writing_plugins.html#_pytest.hookspec.pytest_runtest_start>`_.
+  <https://docs.pytest.org/en/latest/reference.html#_pytest.hookspec.pytest_runtest_logstart>`_.
   (`#3101 <https://github.com/pytest-dev/pytest/issues/3101>`_)

 - Improve performance when collecting tests using many fixtures. (`#3107
@@ -3575,7 +3895,7 @@ Bug Fixes
   Thanks `@sirex`_ for the report and `@nicoddemus`_ for the PR.
 * Replace ``raise StopIteration`` usages in the code by simple ``returns`` to finish generators, in accordance to `PEP-479`_ (`#2160`_).
-  Thanks `@tgoodlet`_ for the report and `@nicoddemus`_ for the PR.
+  Thanks to `@nicoddemus`_ for the PR.
 * Fix internal errors when an unprintable ``AssertionError`` is raised inside a test.
   Thanks `@omerhadari`_ for the PR.
@@ -3706,7 +4026,7 @@ Bug Fixes
 .. _@syre: https://github.com/syre
 .. _@adler-j: https://github.com/adler-j
-.. _@d-b-w: https://bitbucket.org/d-b-w/
+.. _@d-b-w: https://github.com/d-b-w
 .. _@DuncanBetts: https://github.com/DuncanBetts
 .. _@dupuy: https://bitbucket.org/dupuy/
 .. _@kerrick-lyft: https://github.com/kerrick-lyft
@@ -3766,7 +4086,7 @@ Bug Fixes
 .. _@adborden: https://github.com/adborden
 .. _@cwitty: https://github.com/cwitty
-.. _@d_b_w: https://github.com/d_b_w
+.. _@d_b_w: https://github.com/d-b-w
 .. _@gdyuldin: https://github.com/gdyuldin
 .. _@matclab: https://github.com/matclab
 .. _@MSeifert04: https://github.com/MSeifert04
@@ -3801,7 +4121,7 @@ Bug Fixes
   Thanks `@axil`_ for the PR.
 * Explain a bad scope value passed to ``@fixture`` declarations or
-  a ``MetaFunc.parametrize()`` call. Thanks `@tgoodlet`_ for the PR.
+  a ``MetaFunc.parametrize()`` call.
 * This version includes ``pluggy-0.4.0``, which correctly handles
   ``VersionConflict`` errors in plugins (`#704`_).
@@ -3811,7 +4131,6 @@ Bug Fixes
 .. _@philpep: https://github.com/philpep
 .. _@raquel-ucl: https://github.com/raquel-ucl
 .. _@axil: https://github.com/axil
-.. _@tgoodlet: https://github.com/tgoodlet
 .. _@vlad-dragos: https://github.com/vlad-dragos
 .. _#1853: https://github.com/pytest-dev/pytest/issues/1853
@@ -4157,7 +4476,7 @@ time or change existing behaviors in order to make them less surprising/more use
 * Updated docstrings with a more uniform style.
 * Add stderr write for ``pytest.exit(msg)`` during startup. Previously the message was never shown.
-  Thanks `@BeyondEvil`_ for reporting `#1210`_. Thanks to `@JonathonSonesen`_ and
+  Thanks `@BeyondEvil`_ for reporting `#1210`_. Thanks to `@jgsonesen`_ and
   `@tomviner`_ for the PR.
 * No longer display the incorrect test deselection reason (`#1372`_).
@@ -4205,7 +4524,7 @@ time or change existing behaviors in order to make them less surprising/more use
   Thanks to `@Stranger6667`_ for the PR.
 * Fixed the total tests tally in junit xml output (`#1798`_).
-  Thanks to `@cryporchild`_ for the PR.
+  Thanks to `@cboelsen`_ for the PR.
 * Fixed off-by-one error with lines from ``request.node.warn``.
   Thanks to `@blueyed`_ for the PR.
@@ -4278,7 +4597,7 @@ time or change existing behaviors in order to make them less surprising/more use
 .. _@BeyondEvil: https://github.com/BeyondEvil
 .. _@blueyed: https://github.com/blueyed
 .. _@ceridwen: https://github.com/ceridwen
-.. _@cryporchild: https://github.com/cryporchild
+.. _@cboelsen: https://github.com/cboelsen
 .. _@csaftoiu: https://github.com/csaftoiu
 .. _@d6e: https://github.com/d6e
 .. _@davehunt: https://github.com/davehunt
@@ -4289,7 +4608,7 @@ time or change existing behaviors in order to make them less surprising/more use
 .. _@gprasad84: https://github.com/gprasad84
 .. _@graingert: https://github.com/graingert
 .. _@hartym: https://github.com/hartym
-.. _@JonathonSonesen: https://github.com/JonathonSonesen
+.. _@jgsonesen: https://github.com/jgsonesen
 .. _@kalekundert: https://github.com/kalekundert
 .. _@kvas-it: https://github.com/kvas-it
 .. _@marscher: https://github.com/marscher
@@ -4426,7 +4745,7 @@ time or change existing behaviors in order to make them less surprising/more use
 **Changes**
-* **Important**: `py.code <https://pylib.readthedocs.io/en/latest/code.html>`_ has been
+* **Important**: `py.code <https://pylib.readthedocs.io/en/stable/code.html>`_ has been
   merged into the ``pytest`` repository as ``pytest._code``. This decision
   was made because ``py.code`` had very few uses outside ``pytest`` and the
   fact that it was in a different repository made it difficult to fix bugs on

CODE_OF_CONDUCT.md Normal file
View File

@@ -0,0 +1,84 @@
# Contributor Covenant Code of Conduct
## Our Pledge
In the interest of fostering an open and welcoming environment, we as
contributors and maintainers pledge to making participation in our project and
our community a harassment-free experience for everyone, regardless of age, body
size, disability, ethnicity, sex characteristics, gender identity and expression,
level of experience, education, socio-economic status, nationality, personal
appearance, race, religion, or sexual identity and orientation.
## Our Standards
Examples of behavior that contributes to creating a positive environment
include:
* Using welcoming and inclusive language
* Being respectful of differing viewpoints and experiences
* Gracefully accepting constructive criticism
* Focusing on what is best for the community
* Showing empathy towards other community members
Examples of unacceptable behavior by participants include:
* The use of sexualized language or imagery and unwelcome sexual attention or
advances
* Trolling, insulting/derogatory comments, and personal or political attacks
* Public or private harassment
* Publishing others' private information, such as a physical or electronic
address, without explicit permission
* Other conduct which could reasonably be considered inappropriate in a
professional setting
## Our Responsibilities
Project maintainers are responsible for clarifying the standards of acceptable
behavior and are expected to take appropriate and fair corrective action in
response to any instances of unacceptable behavior.
Project maintainers have the right and responsibility to remove, edit, or
reject comments, commits, code, wiki edits, issues, and other contributions
that are not aligned to this Code of Conduct, or to ban temporarily or
permanently any contributor for other behaviors that they deem inappropriate,
threatening, offensive, or harmful.
## Scope
This Code of Conduct applies both within project spaces and in public spaces
when an individual is representing the project or its community. Examples of
representing a project or community include using an official project e-mail
address, posting via an official social media account, or acting as an appointed
representative at an online or offline event. Representation of a project may be
further defined and clarified by project maintainers.
## Enforcement
Instances of abusive, harassing, or otherwise unacceptable behavior may be
reported by contacting the project team at coc@pytest.org. All
complaints will be reviewed and investigated and will result in a response that
is deemed necessary and appropriate to the circumstances. The project team is
obligated to maintain confidentiality with regard to the reporter of an incident.
Further details of specific enforcement policies may be posted separately.
Project maintainers who do not follow or enforce the Code of Conduct in good
faith may face temporary or permanent repercussions as determined by other
members of the project's leadership.
The coc@pytest.org address is routed to the following people who can also be
contacted individually:
- Brianna Laugher ([@pfctdayelise](https://github.com/pfctdayelise)): brianna@laugher.id.au
- Bruno Oliveira ([@nicoddemus](https://github.com/nicoddemus)): nicoddemus@gmail.com
- Florian Bruhin ([@the-compiler](https://github.com/the-compiler)): pytest@the-compiler.org
- Ronny Pfannschmidt ([@RonnyPfannschmidt](https://github.com/RonnyPfannschmidt)): ich@ronnypfannschmidt.de
## Attribution
This Code of Conduct is adapted from the [Contributor Covenant][homepage], version 1.4,
available at https://www.contributor-covenant.org/version/1/4/code-of-conduct.html
[homepage]: https://www.contributor-covenant.org
For answers to common questions about this code of conduct, see
https://www.contributor-covenant.org/faq

View File

@@ -5,8 +5,9 @@ Contribution getting started
 Contributions are highly welcomed and appreciated. Every little help counts,
 so do not hesitate!

-.. contents:: Contribution links
+.. contents::
    :depth: 2
+   :backlinks: none

 .. _submitfeedback:
@@ -166,7 +167,7 @@ Short version
 #. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed.
 #. Target ``master`` for bugfixes and doc changes.
 #. Target ``features`` for new features or functionality changes.
-#. Follow **PEP-8** for naming and `black <https://github.com/python/black>`_ for formatting.
+#. Follow **PEP-8** for naming and `black <https://github.com/psf/black>`_ for formatting.
 #. Tests are run using ``tox``::

     tox -e linting,py37

View File

@@ -9,7 +9,7 @@ What is it
 ==========
 Open Collective is an online funding platform for open and transparent communities.
-It provide tools to raise money and share your finances in full transparency.
+It provides tools to raise money and share your finances in full transparency.
 It is the platform of choice for individuals and companies that want to make one-time or
 monthly donations directly to the project.
@@ -19,7 +19,7 @@ Funds
 The OpenCollective funds donated to pytest will be used to fund overall maintenance,
 local sprints, merchandising (stickers to distribute in conferences for example), and future
-gatherings of pytest developers (Sprints).
+gatherings of pytest developers (sprints).
 `Core contributors`_ which are contributing on a continuous basis are free to submit invoices
 to bill maintenance hours using the platform. How much each contributor should request is still an

View File

@@ -26,7 +26,7 @@
     :target: https://dev.azure.com/pytest-dev/pytest

 .. image:: https://img.shields.io/badge/code%20style-black-000000.svg
-    :target: https://github.com/python/black
+    :target: https://github.com/psf/black

 .. image:: https://www.codetriage.com/pytest-dev/pytest/badges/users.svg
     :target: https://www.codetriage.com/pytest-dev/pytest
@@ -111,13 +111,13 @@ Consult the `Changelog <https://docs.pytest.org/en/latest/changelog.html>`__ pag
 Support pytest
 --------------

-You can support pytest by obtaining a `Tideflift subscription`_.
+You can support pytest by obtaining a `Tidelift subscription`_.

 Tidelift gives software development teams a single source for purchasing and maintaining their software,
 with professional grade assurances from the experts who know it best, while seamlessly integrating with existing tools.

-.. _`Tideflift subscription`: https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=readme
+.. _`Tidelift subscription`: https://tidelift.com/subscription/pkg/pypi-pytest?utm_source=pypi-pytest&utm_medium=referral&utm_campaign=readme

 Security

View File

@@ -1,26 +0,0 @@
As per our policy, the following features have been deprecated in the 4.X series and are now
removed:
* ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead.
* ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument.
* ``message`` parameter of ``pytest.raises``.
* ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only
syntax. This might change the exception message from previous versions, but they still raise
``TypeError`` on unknown keyword arguments as before.
* ``pytest.config`` global variable.
* ``tmpdir_factory.ensuretemp`` method.
* ``pytest_logwarning`` hook.
* ``RemovedInPytest4Warning`` warning type.
* ``request`` is now a reserved name for fixtures.
For more information consult
`Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs.

View File

@@ -1 +0,0 @@
The XML file produced by ``--junitxml`` now correctly contains a ``<testsuites>`` root element.

View File

@@ -1 +0,0 @@
Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+.

View File

@@ -1,2 +0,0 @@
Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
standard library on Python 3.8+.

View File

@@ -1 +0,0 @@
New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.

View File

@@ -1,13 +0,0 @@
Removed unused support code for `unittest2 <https://pypi.org/project/unittest2/>`__.
The ``unittest2`` backport module is no longer
necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem
to be used: after removing it, all tests still pass unchanged.
Although our policy is to introduce a deprecation period before removing any features or support
for third party libraries, because this code is apparently not used
at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to
remove it in this release.
If you experience a regression because of this, please
`file an issue <https://github.com/pytest-dev/pytest/issues/new>`__.

View File

@@ -1,4 +0,0 @@
New `NUMBER <https://docs.pytest.org/en/latest/doctest.html#using-doctest-options>`__
option for doctests to ignore irrelevant differences in floating-point numbers.
Inspired by Sébastien Boisgérault's `numtest <https://github.com/boisgera/numtest>`__
extension for doctest.

View File

@@ -1 +0,0 @@
Simplified internal ``SafeRepr`` class and removed some dead code.

View File

@@ -0,0 +1 @@
Fix crash with ``KeyboardInterrupt`` during ``--setup-show``.

View File

@@ -0,0 +1,19 @@
``pytester`` learned two new functions, `no_fnmatch_line <https://docs.pytest.org/en/latest/reference.html#_pytest.pytester.LineMatcher.no_fnmatch_line>`_ and
`no_re_match_line <https://docs.pytest.org/en/latest/reference.html#_pytest.pytester.LineMatcher.no_re_match_line>`_.
The functions are used to ensure the captured text *does not* match the given
pattern.
The previous idiom was to use ``re.match``:
.. code-block:: python

    assert re.match(pat, result.stdout.str()) is None
Or the ``in`` operator:
.. code-block:: python

    assert text in result.stdout.str()
But the new functions produce better output on failure.
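A minimal sketch of the new idiom (assumes ``pytest_plugins = "pytester"`` so that the ``testdir`` fixture is available):

.. code-block:: python

    def test_quiet_run(testdir):
        testdir.makepyfile("def test_ok(): pass")
        result = testdir.runpytest()
        # both assert that the captured output does *not* match,
        # and print the nearby lines if it unexpectedly does
        result.stdout.no_fnmatch_line("*FAILED*")
        result.stdout.no_re_match_line(r".*internal error.*")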

View File

@@ -0,0 +1,34 @@
Improve verbose diff output with sequences.
Before:
.. code-block::
E AssertionError: assert ['version', '...version_info'] == ['version', '...version', ...]
E Right contains 3 more items, first extra item: ' '
E Full diff:
E - ['version', 'version_info', 'sys.version', 'sys.version_info']
E + ['version',
E + 'version_info',
E + 'sys.version',
E + 'sys.version_info',
E + ' ',
E + 'sys.version',
E + 'sys.version_info']
After:
.. code-block::
E AssertionError: assert ['version', '...version_info'] == ['version', '...version', ...]
E Right contains 3 more items, first extra item: ' '
E Full diff:
E [
E 'version',
E 'version_info',
E 'sys.version',
E 'sys.version_info',
E + ' ',
E + 'sys.version',
E + 'sys.version_info',
E ]

View File

@@ -0,0 +1 @@
Fixed issue when parametrizing fixtures with numpy arrays (and possibly other sequence-like types).

View File

@@ -0,0 +1,2 @@
``Config.InvocationParams.args`` is now always a ``tuple`` to better convey that it should be
immutable and avoid accidental modifications.

View File

@@ -12,6 +12,7 @@ Each file should be named like ``<ISSUE>.<TYPE>.rst``, where
 ``<ISSUE>`` is an issue number, and ``<TYPE>`` is one of:

 * ``feature``: new user facing features, like new command-line options and new behavior.
+* ``improvement``: improvement of existing functionality, usually without requiring user intervention (for example, new fields being written in ``--junitxml``, improved colors in terminal, etc).
 * ``bugfix``: fixes a reported bug.
 * ``doc``: documentation improvement, like rewording an entire session or adding missing docs.
 * ``deprecation``: feature deprecation.

codecov.yml Normal file
View File

@@ -0,0 +1,7 @@
coverage:
status:
project: true
patch: true
changes: true
comment: off

doc/5934.feature.rst Normal file
View File

@@ -0,0 +1 @@
``repr`` of ``ExceptionInfo`` objects has been improved to honor the ``__repr__`` method of the underlying exception.
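A small sketch of the effect (``MyError`` is a hypothetical exception):

.. code-block:: python

    import pytest

    class MyError(Exception):
        def __repr__(self):
            return "MyError(code=42)"

    def test_repr():
        with pytest.raises(MyError) as excinfo:
            raise MyError()
        # the custom __repr__ now shows up in repr(excinfo)
        assert "MyError(code=42)" in repr(excinfo)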

View File

@@ -16,7 +16,7 @@ REGENDOC_ARGS := \
     --normalize "/[ \t]+\n/\n/" \
     --normalize "~\$$REGENDOC_TMPDIR~/home/sweet/project~" \
     --normalize "~/path/to/example~/home/sweet/project~" \
-    --normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \
+    --normalize "/in \d.\d\ds/in 0.12s/" \
     --normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \
     --normalize "@pytest-(\d+)\\.[^ ,]+@pytest-\1.x.y@" \
     --normalize "@(This is pytest version )(\d+)\\.[^ ,]+@\1\2.x.y@" \

View File

@@ -4,7 +4,7 @@
 <li><a href="{{ pathto('index') }}">Home</a></li>
 <li><a href="{{ pathto('getting-started') }}">Install</a></li>
 <li><a href="{{ pathto('contents') }}">Contents</a></li>
-<li><a href="{{ pathto('reference') }}">Reference</a></li>
+<li><a href="{{ pathto('reference') }}">API Reference</a></li>
 <li><a href="{{ pathto('example/index') }}">Examples</a></li>
 <li><a href="{{ pathto('customize') }}">Customize</a></li>
 <li><a href="{{ pathto('changelog') }}">Changelog</a></li>

View File

@@ -16,7 +16,7 @@
 {%- block footer %}
 <div class="footer">
     &copy; Copyright {{ copyright }}.
-    Created using <a href="http://sphinx.pocoo.org/">Sphinx</a>.
+    Created using <a href="https://www.sphinx-doc.org/">Sphinx</a> {{ sphinx_version }}.
 </div>
 {% if pagename == 'index' %}
 </div>

View File

@@ -0,0 +1,15 @@
{#
basic/searchbox.html with heading removed.
#}
{%- if pagename != "search" and builder != "singlehtml" %}
<div id="searchbox" style="display: none" role="search">
<div class="searchformwrapper">
<form class="search" action="{{ pathto('search') }}" method="get">
<input type="text" name="q" aria-labelledby="searchlabel"
placeholder="Search"/>
<input type="submit" value="{{ _('Go') }}" />
</form>
</div>
</div>
<script type="text/javascript">$('#searchbox').show(0);</script>
{%- endif %}

View File

@@ -8,11 +8,12 @@
 {% set page_width = '1020px' %}
 {% set sidebar_width = '220px' %}
-/* orange of logo is #d67c29 but we use black for links for now */
-{% set link_color = '#000' %}
-{% set link_hover_color = '#000' %}
+/* muted version of green logo color #C9D22A */
+{% set link_color = '#606413' %}
+/* blue logo color */
+{% set link_hover_color = '#009de0' %}
 {% set base_font = 'sans-serif' %}
-{% set header_font = 'serif' %}
+{% set header_font = 'sans-serif' %}

 @import url("basic.css");
@@ -20,7 +21,7 @@
 body {
     font-family: {{ base_font }};
-    font-size: 17px;
+    font-size: 16px;
     background-color: white;
     color: #000;
     margin: 0;
@@ -78,13 +79,13 @@ div.related {
 }

 div.sphinxsidebar a {
-    color: #444;
     text-decoration: none;
-    border-bottom: 1px dotted #999;
+    border-bottom: none;
 }

 div.sphinxsidebar a:hover {
-    border-bottom: 1px solid #999;
+    color: {{ link_hover_color }};
+    border-bottom: 1px solid {{ link_hover_color }};
 }

 div.sphinxsidebar {
@@ -106,14 +107,14 @@ div.sphinxsidebar h3,
 div.sphinxsidebar h4 {
     font-family: {{ header_font }};
     color: #444;
-    font-size: 24px;
+    font-size: 21px;
     font-weight: normal;
-    margin: 0 0 5px 0;
+    margin: 16px 0 0 0;
     padding: 0;
 }

 div.sphinxsidebar h4 {
-    font-size: 20px;
+    font-size: 18px;
 }

 div.sphinxsidebar h3 a {
@@ -205,10 +206,22 @@ div.body p, div.body dd, div.body li {
     line-height: 1.4em;
 }

+ul.simple li {
+    margin-bottom: 0.5em;
+}
+
+div.topic ul.simple li {
+    margin-bottom: 0;
+}
+
+div.topic li > p:first-child {
+    margin-top: 0;
+    margin-bottom: 0;
+}
+
 div.admonition {
     background: #fafafa;
-    margin: 20px -30px;
-    padding: 10px 30px;
+    padding: 10px 20px;
     border-top: 1px solid #ccc;
     border-bottom: 1px solid #ccc;
 }
@@ -217,11 +230,6 @@ div.admonition tt.xref, div.admonition a tt {
     border-bottom: 1px solid #fafafa;
 }

-dd div.admonition {
-    margin-left: -60px;
-    padding-left: 60px;
-}
-
 div.admonition p.admonition-title {
     font-family: {{ header_font }};
     font-weight: normal;
@@ -231,7 +239,7 @@ div.admonition p.admonition-title {
     line-height: 1;
 }

-div.admonition p.last {
+div.admonition :last-child {
     margin-bottom: 0;
 }
@@ -243,7 +251,7 @@ dt:target, .highlight {
     background: #FAF3E8;
 }

-div.note {
+div.note, div.warning {
     background-color: #eee;
     border: 1px solid #ccc;
 }
@@ -257,6 +265,11 @@ div.topic {
     background-color: #eee;
 }

+div.topic a {
+    text-decoration: none;
+    border-bottom: none;
+}
+
 p.admonition-title {
     display: inline;
 }
@@ -358,21 +371,10 @@ ul, ol {
 pre {
     background: #eee;
-    padding: 7px 30px;
-    margin: 15px -30px;
+    padding: 7px 12px;
     line-height: 1.3em;
 }

-dl pre, blockquote pre, li pre {
-    margin-left: -60px;
-    padding-left: 60px;
-}
-
-dl dl pre {
-    margin-left: -90px;
-    padding-left: 90px;
-}
-
 tt {
     background-color: #ecf0f3;
     color: #222;
@@ -393,6 +395,20 @@ a.reference:hover {
     border-bottom: 1px solid {{ link_hover_color }};
 }

+li.toctree-l1 a.reference,
+li.toctree-l2 a.reference,
+li.toctree-l3 a.reference,
+li.toctree-l4 a.reference {
+    border-bottom: none;
+}
+
+li.toctree-l1 a.reference:hover,
+li.toctree-l2 a.reference:hover,
+li.toctree-l3 a.reference:hover,
+li.toctree-l4 a.reference:hover {
+    border-bottom: 1px solid {{ link_hover_color }};
+}
+
 a.footnote-reference {
     text-decoration: none;
     font-size: 0.7em;
@@ -408,6 +424,56 @@ a:hover tt {
    background: #EEE;
}
#reference div.section h2 {
/* separate code elements in the reference section */
border-top: 2px solid #ccc;
padding-top: 0.5em;
}
#reference div.section h3 {
/* separate code elements in the reference section */
border-top: 1px solid #ccc;
padding-top: 0.5em;
}
dl.class, dl.function {
margin-top: 1em;
margin-bottom: 1em;
}
dl.class > dd {
border-left: 3px solid #ccc;
margin-left: 0px;
padding-left: 30px;
}
dl.field-list {
flex-direction: column;
}
dl.field-list dd {
padding-left: 4em;
border-left: 3px solid #ccc;
margin-bottom: 0.5em;
}
dl.field-list dd > ul {
list-style: none;
padding-left: 0px;
}
dl.field-list dd > ul > li li :first-child {
text-indent: 0;
}
dl.field-list dd > ul > li :first-child {
text-indent: -2em;
padding-left: 0px;
}
dl.field-list dd > p:first-child {
text-indent: -2em;
}
@media screen and (max-width: 870px) {

View File

@@ -24,11 +24,9 @@ The ideal pytest helper
 - feels confident in using pytest (e.g. has explored command line options, knows how to write parametrized tests, has an idea about conftest contents)
 - does not need to be an expert in every aspect!

-`Pytest helpers, sign up here`_! (preferably in February, hard deadline 22 March)
-.. _`Pytest helpers, sign up here`: http://goo.gl/forms/nxqAhqWt1P
+Pytest helpers, sign up here! (preferably in February, hard deadline 22 March)

 The ideal partner project
 -----------------------------------------
@@ -40,11 +38,9 @@ The ideal partner project
 - has the support of the core development team, in trying out pytest adoption
 - has no tests... or 100% test coverage... or somewhere in between!

-`Partner projects, sign up here`_! (by 22 March)
-.. _`Partner projects, sign up here`: http://goo.gl/forms/ZGyqlHiwk3
+Partner projects, sign up here! (by 22 March)

 What does it mean to "adopt pytest"?
 -----------------------------------------
@@ -68,11 +64,11 @@ Progressive success might look like:
 It may be after the month is up, the partner project decides that pytest is not right for it. That's okay - hopefully the pytest team will also learn something about its weaknesses or deficiencies.

 .. _`nose and unittest`: faq.html#how-does-pytest-relate-to-nose-and-unittest
-.. _assert: asserts.html
+.. _assert: assert.html
 .. _pycmd: https://bitbucket.org/hpk42/pycmd/overview
 .. _`setUp/tearDown methods`: xunit_setup.html
 .. _fixtures: fixture.html
-.. _markers: markers.html
+.. _markers: mark.html
 .. _distributed: xdist.html

View File

@@ -6,8 +6,15 @@ Release announcements
    :maxdepth: 2

+   release-5.2.1
+   release-5.2.0
+   release-5.1.3
+   release-5.1.2
+   release-5.1.1
+   release-5.1.0
    release-5.0.1
    release-5.0.0
+   release-4.6.5
    release-4.6.4
    release-4.6.3
    release-4.6.2

View File

@@ -12,7 +12,7 @@ courtesy of Benjamin Peterson. You can now safely use ``assert``
 statements in test modules without having to worry about side effects
 or python optimization ("-OO") options. This is achieved by rewriting
 assert statements in test modules upon import, using a PEP302 hook.
-See http://pytest.org/assert.html#advanced-assertion-introspection for
+See https://docs.pytest.org/en/latest/assert.html for
 detailed information. The work has been partly sponsored by my company,
 merlinux GmbH.

View File

@@ -75,7 +75,7 @@ The py.test Development Team
 **Changes**
-* **Important**: `py.code <https://pylib.readthedocs.io/en/latest/code.html>`_ has been
+* **Important**: `py.code <https://pylib.readthedocs.io/en/stable/code.html>`_ has been
   merged into the ``pytest`` repository as ``pytest._code``. This decision
   was made because ``py.code`` had very few uses outside ``pytest`` and the
   fact that it was in a different repository made it difficult to fix bugs on
@@ -88,7 +88,7 @@ The py.test Development Team
 **experimental**, so you definitely should not import it explicitly!
 Please note that the original ``py.code`` is still available in
-`pylib <https://pylib.readthedocs.io>`_.
+`pylib <https://pylib.readthedocs.io/en/stable/>`_.
 * ``pytest_enter_pdb`` now optionally receives the pytest config object.
   Thanks `@nicoddemus`_ for the PR.

View File

@@ -66,8 +66,8 @@ The py.test Development Team
 .. _#510: https://github.com/pytest-dev/pytest/issues/510
 .. _#1506: https://github.com/pytest-dev/pytest/pull/1506
-.. _#1496: https://github.com/pytest-dev/pytest/issue/1496
-.. _#1524: https://github.com/pytest-dev/pytest/issue/1524
+.. _#1496: https://github.com/pytest-dev/pytest/issues/1496
+.. _#1524: https://github.com/pytest-dev/pytest/pull/1524
 .. _@astraw38: https://github.com/astraw38
 .. _@hackebrot: https://github.com/hackebrot

View File

@@ -0,0 +1,21 @@
pytest-4.6.5
=======================================
pytest 4.6.5 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
Thanks to all who contributed to this release, among them:
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Thomas Grainger
Happy testing,
The pytest Development Team

View File

@@ -0,0 +1,56 @@
pytest-5.1.0
=======================================
The pytest team is proud to announce the 5.1.0 release!
pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.
This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:
https://docs.pytest.org/en/latest/changelog.html
For complete documentation, please visit:
https://docs.pytest.org/en/latest/
As usual, you can upgrade from pypi via:
pip install -U pytest
Thanks to all who contributed to this release, among them:
* Albert Tugushev
* Alexey Zankevich
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* David Röthlisberger
* Florian Bruhin
* Ilya Stepin
* Jon Dufresne
* Kaiqi
* Max R
* Miro Hrončok
* Oliver Bestwalter
* Ran Benita
* Ronny Pfannschmidt
* Samuel Searles-Bryant
* Semen Zhydenko
* Steffen Schroeder
* Thomas Grainger
* Tim Hoffmann
* William Woodall
* Wojtek Erbetowski
* Xixi Zhao
* Yash Todi
* boris
* dmitry.dygalo
* helloocc
* martbln
* mei-li
Happy testing,
The pytest Development Team

View File

@@ -0,0 +1,24 @@
pytest-5.1.1
=======================================
pytest 5.1.1 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
Thanks to all who contributed to this release, among them:
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* Florian Bruhin
* Hugo van Kemenade
* Ran Benita
* Ronny Pfannschmidt
Happy testing,
The pytest Development Team

View File

@@ -0,0 +1,23 @@
pytest-5.1.2
=======================================
pytest 5.1.2 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
Thanks to all who contributed to this release, among them:
* Andrzej Klajnert
* Anthony Sottile
* Bruno Oliveira
* Christian Neumüller
* Robert Holt
* linchiwei123
Happy testing,
The pytest Development Team

View File

@@ -0,0 +1,23 @@
pytest-5.1.3
=======================================
pytest 5.1.3 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
Thanks to all who contributed to this release, among them:
* Anthony Sottile
* Bruno Oliveira
* Christian Neumüller
* Daniel Hahler
* Gene Wood
* Hugo
Happy testing,
The pytest Development Team

View File

@@ -0,0 +1,35 @@
pytest-5.2.0
=======================================
The pytest team is proud to announce the 5.2.0 release!
pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.
This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:
https://docs.pytest.org/en/latest/changelog.html
For complete documentation, please visit:
https://docs.pytest.org/en/latest/
As usual, you can upgrade from pypi via:
pip install -U pytest
Thanks to all who contributed to this release, among them:
* Andrzej Klajnert
* Anthony Sottile
* Bruno Oliveira
* Daniel Hahler
* James Cooke
* Michael Goerz
* Ran Benita
* Tomáš Chvátal
Happy testing,
The pytest Development Team

View File

@@ -0,0 +1,23 @@
pytest-5.2.1
=======================================
pytest 5.2.1 has just been released to PyPI.
This is a bug-fix release, being a drop-in replacement. To upgrade::
pip install --upgrade pytest
The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
Thanks to all who contributed to this release, among them:
* Anthony Sottile
* Bruno Oliveira
* Florian Bruhin
* Hynek Schlawack
* Kevin J. Foley
* tadashigaki
Happy testing,
The pytest Development Team

View File

@@ -47,7 +47,7 @@ you will see the return value of the function call:
E + where 3 = f()

test_assert1.py:6: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.12s =============================

``pytest`` has support for showing the values of the most common subexpressions
including calls, attributes, comparisons, and binary and unary
@@ -208,7 +208,7 @@ if you run this module:
E Use -v to get the full diff

test_assert2.py:6: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.12s =============================

Special comparisons are done for a number of cases:
@@ -238,14 +238,17 @@ file which provides an alternative explanation for ``Foo`` objects:

    def pytest_assertrepr_compare(op, left, right):
        if isinstance(left, Foo) and isinstance(right, Foo) and op == "==":
-           return ["Comparing Foo instances:", "   vals: %s != %s" % (left.val, right.val)]
+           return [
+               "Comparing Foo instances:",
+               "   vals: {} != {}".format(left.val, right.val),
+           ]

now, given this test module:

.. code-block:: python

    # content of test_foocompare.py
-   class Foo(object):
+   class Foo:
        def __init__(self, val):
            self.val = val
@@ -276,7 +279,7 @@ the conftest file:
E vals: 1 != 2

test_foocompare.py:12: AssertionError
-1 failed in 0.12 seconds
+1 failed in 0.12s
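For context, a minimal test module that would exercise the hook above might look like this (a sketch; the ``__eq__`` implementation is an assumption for illustration, not part of the diff):

.. code-block:: python

    # content of test_foocompare.py -- hypothetical companion to the conftest hook
    class Foo:
        def __init__(self, val):
            self.val = val

        def __eq__(self, other):
            # compare by value so that a failing ``==`` assertion triggers the hook
            return self.val == other.val


    def test_compare():
        assert Foo(1) == Foo(2)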
.. _assert-details:
.. _`assert introspection`:

View File

@@ -160,9 +160,12 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
   in python < 3.6 this is a pathlib2.Path

-no tests ran in 0.12 seconds
+no tests ran in 0.12s

-You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like::
+You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:
+
+.. code-block:: python

    import pytest

    help(pytest)

View File

@@ -33,15 +33,18 @@ Other plugins may access the `config.cache`_ object to set/get

Rerunning only failures or failures first
-----------------------------------------------

-First, let's create 50 test invocation of which only 2 fail::
+First, let's create 50 test invocations of which only 2 fail:
+
+.. code-block:: python

    # content of test_50.py
    import pytest


    @pytest.mark.parametrize("i", range(50))
    def test_num(i):
        if i in (17, 25):
            pytest.fail("bad luck")

If you run this for the first time you will see two failures:
@@ -57,10 +60,10 @@ If you run this for the first time you will see two failures:
    @pytest.mark.parametrize("i", range(50))
    def test_num(i):
        if i in (17, 25):
>           pytest.fail("bad luck")
E           Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
_______________________________ test_num[25] _______________________________

i = 25
@@ -68,11 +71,11 @@ If you run this for the first time you will see two failures:
    @pytest.mark.parametrize("i", range(50))
    def test_num(i):
        if i in (17, 25):
>           pytest.fail("bad luck")
E           Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
-2 failed, 48 passed in 0.12 seconds
+2 failed, 48 passed in 0.12s

If you then run it with ``--lf``:
@@ -96,10 +99,10 @@ If you then run it with ``--lf``:
    @pytest.mark.parametrize("i", range(50))
    def test_num(i):
        if i in (17, 25):
>           pytest.fail("bad luck")
E           Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
_______________________________ test_num[25] _______________________________

i = 25
@@ -107,11 +110,11 @@ If you then run it with ``--lf``:
    @pytest.mark.parametrize("i", range(50))
    def test_num(i):
        if i in (17, 25):
>           pytest.fail("bad luck")
E           Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
-================= 2 failed, 48 deselected in 0.12 seconds ==================
+===================== 2 failed, 48 deselected in 0.12s =====================

You have run only the two failing tests from the last run, while the 48 passing
tests have not been run ("deselected").
@@ -140,10 +143,10 @@ of ``FF`` and dots):
    @pytest.mark.parametrize("i", range(50))
    def test_num(i):
        if i in (17, 25):
>           pytest.fail("bad luck")
E           Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
_______________________________ test_num[25] _______________________________

i = 25
@@ -151,11 +154,11 @@ of ``FF`` and dots):
    @pytest.mark.parametrize("i", range(50))
    def test_num(i):
        if i in (17, 25):
>           pytest.fail("bad luck")
E           Failed: bad luck

-test_50.py:6: Failed
+test_50.py:7: Failed
-=================== 2 failed, 48 passed in 0.12 seconds ====================
+======================= 2 failed, 48 passed in 0.12s =======================

.. _`config.cache`:
@@ -183,15 +186,19 @@ The new config.cache object

Plugins or conftest.py support code can get a cached value using the
pytest ``config`` object. Here is a basic example plugin which
implements a :ref:`fixture` which re-uses previously created state
-across pytest invocations::
+across pytest invocations:
+
+.. code-block:: python

    # content of test_caching.py
    import pytest
    import time


    def expensive_computation():
        print("running expensive computation...")


    @pytest.fixture
    def mydata(request):
        val = request.config.cache.get("example/value", None)
@@ -201,6 +208,7 @@ across pytest invocations::
            request.config.cache.set("example/value", val)
        return val


    def test_function(mydata):
        assert mydata == 23
@@ -219,10 +227,10 @@ If you run this command for the first time, you can see the print statement:
>       assert mydata == 23
E       assert 42 == 23

-test_caching.py:17: AssertionError
+test_caching.py:20: AssertionError
-------------------------- Captured stdout setup ---------------------------
running expensive computation...
-1 failed in 0.12 seconds
+1 failed in 0.12s

If you run it a second time, the value will be retrieved from
the cache and nothing will be printed:
@@ -240,8 +248,8 @@ the cache and nothing will be printed:
>       assert mydata == 23
E       assert 42 == 23

-test_caching.py:17: AssertionError
+test_caching.py:20: AssertionError
-1 failed in 0.12 seconds
+1 failed in 0.12s

See the :ref:`cache-api` for more details.
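As a minimal sketch of the same API used outside a fixture (the hook body and cache key here are illustrative assumptions, not part of the diff), a ``conftest.py`` can read and write the cache directly through the ``config`` object:

.. code-block:: python

    # content of conftest.py -- hypothetical run counter built on config.cache
    def pytest_configure(config):
        # get() returns the stored value, or the given default on a cache miss
        runs = config.cache.get("example/run-count", 0)
        config.cache.set("example/run-count", runs + 1)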
@@ -275,7 +283,7 @@ You can always peek at the content of the cache using the
example/value contains:
  42

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

``--cache-show`` takes an optional argument to specify a glob pattern for
filtering:
@@ -292,7 +300,7 @@ filtering:
example/value contains:
  42

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

Clearing Cache content
----------------------

View File

@@ -49,16 +49,21 @@ Using print statements for debugging
---------------------------------------------------

One primary benefit of the default capturing of stdout/stderr output
-is that you can use print statements for debugging::
+is that you can use print statements for debugging:
+
+.. code-block:: python

    # content of test_module.py


    def setup_function(function):
-       print("setting up %s" % function)
+       print("setting up", function)


    def test_func1():
        assert True


    def test_func2():
        assert False
@@ -83,10 +88,10 @@ of the failing function and hide the other one:
>       assert False
E       assert False

-test_module.py:9: AssertionError
+test_module.py:12: AssertionError
-------------------------- Captured stdout setup ---------------------------
setting up <function test_func2 at 0xdeadbeef>
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.12s ========================

Accessing captured output from a test function
---------------------------------------------------
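The body of that section is elided from the diff; it documents the ``capsys`` fixture. A minimal sketch of that kind of usage (the test itself is hypothetical; ``capsys`` and ``readouterr()`` are standard pytest API):

.. code-block:: python

    def test_output(capsys):
        print("hello")
        # readouterr() snapshots the stdout/stderr captured so far
        captured = capsys.readouterr()
        assert captured.out == "hello\n"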

View File

@@ -15,7 +15,6 @@
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
-import datetime

import os
import sys
@@ -63,8 +62,7 @@ master_doc = "contents"
# General information about the project.
project = "pytest"
-year = datetime.datetime.utcnow().year
-copyright = "2015–2019 , holger krekel and pytest-dev team"
+copyright = "2015–2019, holger krekel and pytest-dev team"

# The language for content autogenerated by Sphinx. Refer to documentation
@@ -167,18 +165,18 @@ html_favicon = "img/pytest1favi.ico"
html_sidebars = {
    "index": [
+       "slim_searchbox.html",
        "sidebarintro.html",
        "globaltoc.html",
        "links.html",
        "sourcelink.html",
-       "searchbox.html",
    ],
    "**": [
+       "slim_searchbox.html",
        "globaltoc.html",
        "relations.html",
        "links.html",
        "sourcelink.html",
-       "searchbox.html",
    ],
}

View File

@@ -107,8 +107,8 @@ check for ini-files as follows:

    # first look for pytest.ini files
    path/pytest.ini
-   path/setup.cfg  # must also contain [tool:pytest] section to match
    path/tox.ini    # must also contain [pytest] section to match
+   path/setup.cfg  # must also contain [tool:pytest] section to match
    pytest.ini
    ...             # all the way down to the root
@@ -134,10 +134,13 @@ progress output, you can write it into a configuration file:

.. code-block:: ini

    # content of pytest.ini or tox.ini
-   # setup.cfg files should use [tool:pytest] section instead
    [pytest]
    addopts = -ra -q

+   # content of setup.cfg
+   [tool:pytest]
+   addopts = -ra -q

Alternatively, you can set a ``PYTEST_ADDOPTS`` environment variable to add command
line options while the environment is in use:

View File

@@ -459,7 +459,9 @@ Internal classes accessed through ``Node``

.. versionremoved:: 4.0

Access of ``Module``, ``Function``, ``Class``, ``Instance``, ``File`` and ``Item`` through ``Node`` instances now issue
-this warning::
+this warning:
+
+.. code-block:: text

    usage of Function.Module is deprecated, please use pytest.Module instead

View File

@@ -36,7 +36,7 @@ then you can just invoke ``pytest`` directly:

test_example.txt . [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.12s =============================

By default, pytest will collect ``test*.txt`` files looking for doctest directives, but you
can pass additional globs using the ``--doctest-glob`` option (multi-allowed).
@@ -66,7 +66,7 @@ and functions, including from test modules:

mymodule.py . [ 50%]
test_example.txt . [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.12s =============================

You can make these changes permanent in your project by
putting them into a pytest.ini file like this:
@@ -156,6 +156,8 @@ pytest also introduces new options:

a string! This means that it may not be appropriate to enable globally in
``doctest_optionflags`` in your configuration file.

+.. versionadded:: 5.1

Continue on failure
-------------------
@@ -218,15 +220,21 @@ namespace in which your doctests run. It is intended to be used within

your own fixtures to provide the tests that use them with context.
``doctest_namespace`` is a standard ``dict`` object into which you
-place the objects you want to appear in the doctest namespace::
+place the objects you want to appear in the doctest namespace:
+
+.. code-block:: python

    # content of conftest.py
    import numpy


    @pytest.fixture(autouse=True)
    def add_np(doctest_namespace):
-       doctest_namespace['np'] = numpy
+       doctest_namespace["np"] = numpy

-which can then be used in your doctests directly::
+which can then be used in your doctests directly:
+
+.. code-block:: python

    # content of numpy.py
    def arange():
@@ -246,7 +254,9 @@ Skipping tests dynamically

.. versionadded:: 4.4

-You can use ``pytest.skip`` to dynamically skip doctests. For example::
+You can use ``pytest.skip`` to dynamically skip doctests. For example:
+
+.. code-block:: text

    >>> import sys, pytest
    >>> if sys.platform.startswith('win'):

View File

@@ -177,7 +177,7 @@ class TestRaises:

    def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
        items = [1, 2, 3]
-       print("items is %r" % items)
+       print("items is {!r}".format(items))
        a, b = items.pop()

    def test_some_error(self):

View File

@@ -18,7 +18,7 @@ example: specifying and selecting acceptance tests

        return AcceptFixture(request)


-   class AcceptFixture(object):
+   class AcceptFixture:
        def __init__(self, request):
            if not request.config.getoption("acceptance"):
                pytest.skip("specify -A to run acceptance tests")
@@ -65,7 +65,7 @@ extend the `accept example`_ by putting this in our test module:

        return arg


-   class TestSpecialAcceptance(object):
+   class TestSpecialAcceptance:
        def test_sometest(self, accept):
            assert accept.tmpdir.join("special").check()

View File

@@ -1,7 +1,7 @@
import pytest


-@pytest.fixture("session")
+@pytest.fixture(scope="session")
def setup(request):
    setup = CostlySetup()
    yield setup
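For reference, the keyword form used above is the general pattern for a session-scoped fixture with teardown (a minimal sketch; the placeholder stands in for ``CostlySetup``, which is defined elsewhere in that example):

.. code-block:: python

    import pytest


    @pytest.fixture(scope="session")
    def setup():
        resource = object()  # stand-in for CostlySetup()
        yield resource
        # code after the yield runs once, at the end of the session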

View File

@@ -0,0 +1,38 @@
import pytest
# fixtures documentation order example
order = []
@pytest.fixture(scope="session")
def s1():
order.append("s1")
@pytest.fixture(scope="module")
def m1():
order.append("m1")
@pytest.fixture
def f1(f3):
order.append("f1")
@pytest.fixture
def f3():
order.append("f3")
@pytest.fixture(autouse=True)
def a1():
order.append("a1")
@pytest.fixture
def f2():
order.append("f2")
def test_order(f1, m1, f2, s1):
assert order == ["s1", "m1", "a1", "f3", "f1", "f2"]

View File

@@ -33,7 +33,7 @@ You can "mark" a test function with custom metadata like this:
        pass


-   class TestClass(object):
+   class TestClass:
        def test_method(self):
            pass
@@ -52,7 +52,7 @@ You can then restrict a test run to only run tests marked with ``webtest``:
test_server.py::test_send_http PASSED [100%]

-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.12s ======================

Or the inverse, running all tests except the webtest ones:
@@ -69,7 +69,7 @@ Or the inverse, running all tests except the webtest ones:
test_server.py::test_another PASSED [ 66%]
test_server.py::TestClass::test_method PASSED [100%]

-================== 3 passed, 1 deselected in 0.12 seconds ==================
+===================== 3 passed, 1 deselected in 0.12s ======================

Selecting tests based on their node ID
--------------------------------------
@@ -89,7 +89,7 @@ tests based on their module, class, method, or function name:
test_server.py::TestClass::test_method PASSED [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.12s =============================

You can also select on the class:
@@ -104,7 +104,7 @@ You can also select on the class:
test_server.py::TestClass::test_method PASSED [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.12s =============================

Or select multiple nodes:
@@ -120,7 +120,7 @@ Or select multiple nodes:
test_server.py::TestClass::test_method PASSED [ 50%]
test_server.py::test_send_http PASSED [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.12s =============================

.. _node-id:
@@ -159,7 +159,7 @@ select tests based on their names:
test_server.py::test_send_http PASSED [100%]

-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.12s ======================

And you can also run all tests except the ones that match the keyword:
@@ -176,7 +176,7 @@ And you can also run all tests except the ones that match the keyword:
test_server.py::test_another PASSED [ 66%]
test_server.py::TestClass::test_method PASSED [100%]

-================== 3 passed, 1 deselected in 0.12 seconds ==================
+===================== 3 passed, 1 deselected in 0.12s ======================

Or to select "http" and "quick" tests:
@@ -192,7 +192,7 @@ Or to select "http" and "quick" tests:
test_server.py::test_send_http PASSED [ 50%]
test_server.py::test_something_quick PASSED [100%]

-================== 2 passed, 2 deselected in 0.12 seconds ==================
+===================== 2 passed, 2 deselected in 0.12s ======================

.. note::
@@ -278,7 +278,7 @@ its test methods:

    @pytest.mark.webtest
-   class TestClass(object):
+   class TestClass:
        def test_startup(self):
            pass
@@ -295,7 +295,7 @@ Due to legacy reasons, it is possible to set the ``pytestmark`` attribute on a T

    import pytest


-   class TestClass(object):
+   class TestClass:
        pytestmark = pytest.mark.webtest

or if you need to use multiple markers you can use a list:
@@ -305,7 +305,7 @@ or if you need to use multiple markers you can use a list:

    import pytest


-   class TestClass(object):
+   class TestClass:
        pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]

You can also set a module level marker:
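The module-level example itself is elided from the diff; as a sketch (``pytestmark`` at module scope is standard pytest behavior, and the marker name follows the examples above):

.. code-block:: python

    import pytest

    # applies the ``webtest`` marker to every test collected from this module
    pytestmark = pytest.mark.webtest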
@@ -336,7 +336,7 @@ apply a marker to an individual test instance:

    @pytest.mark.foo
    @pytest.mark.parametrize(
-       ("n", "expected"), [(1, 2), pytest.param((1, 3), marks=pytest.mark.bar), (2, 3)]
+       ("n", "expected"), [(1, 2), pytest.param(1, 3, marks=pytest.mark.bar), (2, 3)]
    )
    def test_increment(n, expected):
        assert n + 1 == expected
@@ -384,7 +384,7 @@ specifies via named environments:

        envnames = [mark.args[0] for mark in item.iter_markers(name="env")]
        if envnames:
            if item.config.getoption("-E") not in envnames:
-               pytest.skip("test requires env in %r" % envnames)
+               pytest.skip("test requires env in {!r}".format(envnames))

A test file using this local plugin:
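The accompanying test file is elided from the diff; a sketch of what such a file could look like (the ``env`` marker name matches the plugin above, the test body is hypothetical):

.. code-block:: python

    # content of test_someenv.py -- hypothetical example
    import pytest


    @pytest.mark.env("stage1")
    def test_basic_db_operation():
        pass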
@@ -413,7 +413,7 @@ the test needs:

test_someenv.py s [100%]

-======================== 1 skipped in 0.12 seconds =========================
+============================ 1 skipped in 0.12s ============================

and here is one that specifies exactly the environment needed:
@@ -428,7 +428,7 @@ and here is one that specifies exactly the environment needed:

test_someenv.py . [100%]

-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.12s =============================

The ``--markers`` option always gives you a list of available markers:
@@ -499,7 +499,7 @@ The output is as follows:

$ pytest -q -s
Mark(name='my_marker', args=(<function hello_world at 0xdeadbeef>,), kwargs={})
.
-1 passed in 0.12 seconds
+1 passed in 0.12s

We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``.
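As a sketch of that distinction (the names here are illustrative): calling a custom marker with a callable would be misread as decorating that callable, so ``with_args`` is the way to pass one as a marker argument:

.. code-block:: python

    import pytest


    def hello_world():
        pass


    # extends the marker's argument set with the callable instead of decorating it
    my_marker = pytest.mark.my_marker.with_args(hello_world, x=1)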
@@ -523,7 +523,7 @@ code you can read over all such settings. Example:

    @pytest.mark.glob("class", x=2)
-   class TestClass(object):
+   class TestClass:
        @pytest.mark.glob("function", x=3)
        def test_something(self):
            pass
@@ -539,7 +539,7 @@ test function. From a conftest file we can read it like this:

    def pytest_runtest_setup(item):
        for mark in item.iter_markers(name="glob"):
-           print("glob args=%s kwargs=%s" % (mark.args, mark.kwargs))
+           print("glob args={} kwargs={}".format(mark.args, mark.kwargs))
            sys.stdout.flush()

Let's run this without capturing output and see what we get:
@@ -551,7 +551,7 @@ Let's run this without capturing output and see what we get:

glob args=('class',) kwargs={'x': 2}
glob args=('module',) kwargs={'x': 1}
.
-1 passed in 0.12 seconds
+1 passed in 0.12s

marking platform specific tests with pytest
--------------------------------------------------------------
@@ -578,7 +578,7 @@ for your particular platform, you could use the following plugin:

    supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers())
    plat = sys.platform
    if supported_platforms and plat not in supported_platforms:
-       pytest.skip("cannot run on platform %s" % (plat))
+       pytest.skip("cannot run on platform {}".format(plat))

then tests will be skipped if they were specified for a different platform.
Let's write a little test file to show what this looks like:
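The test file itself is elided from the diff; a sketch consistent with the output below, on a Linux run (the marker names follow the plugin's platform set, the test bodies are hypothetical):

.. code-block:: python

    # content of test_plat.py -- hypothetical example
    import pytest


    @pytest.mark.darwin
    def test_if_apple_is_evil():
        pass


    @pytest.mark.linux
    def test_if_linux_works():
        pass


    @pytest.mark.win32
    def test_if_win32_crashes():
        pass


    def test_runs_everywhere():
        pass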
@@ -623,7 +623,7 @@ then you will see two tests skipped and two executed tests as expected:

========================= short test summary info ==========================
SKIPPED [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux

-=================== 2 passed, 2 skipped in 0.12 seconds ====================
+======================= 2 passed, 2 skipped in 0.12s =======================

Note that if you specify a platform via the marker-command line option like this:
@@ -638,7 +638,7 @@ Note that if you specify a platform via the marker-command line option like this

test_plat.py . [100%]

-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.12s ======================

then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests.
@@ -711,7 +711,7 @@ We can now use the ``-m option`` to select one set:

test_module.py:8: in test_interface_complex
assert 0
E assert 0
-================== 2 failed, 2 deselected in 0.12 seconds ==================
+===================== 2 failed, 2 deselected in 0.12s ======================

or to select both "event" and "interface" tests:
@@ -739,4 +739,4 @@ or to select both "event" and "interface" tests:

test_module.py:12: in test_event_simple
assert 0
E assert 0
-================== 3 failed, 1 deselected in 0.12 seconds ==================
+===================== 3 failed, 1 deselected in 0.12s ======================

View File

@@ -69,4 +69,4 @@ class Python:

    @pytest.mark.parametrize("obj", [42, {}, {1: 3}])
    def test_basic_objects(python1, python2, obj):
        python1.dumps(obj)
-       python2.load_and_is_true("obj == %s" % obj)
+       python2.load_and_is_true("obj == {}".format(obj))

View File

@@ -41,7 +41,7 @@ now execute the test specification:

usecase execution failed
   spec failed: 'some': 'other'
   no further details known at this point.
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.12s ========================

.. regendoc:wipe
@@ -77,7 +77,7 @@ consulted when reporting in ``verbose`` mode:

usecase execution failed
   spec failed: 'some': 'other'
   no further details known at this point.
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.12s ========================

.. regendoc:wipe
@@ -97,4 +97,4 @@ interesting to just look at the collection tree:

<YamlItem hello>
<YamlItem ok>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

View File

@@ -33,13 +33,13 @@ class YamlItem(pytest.Item):
            return "\n".join(
                [
                    "usecase execution failed",
-                   "   spec failed: %r: %r" % excinfo.value.args[1:3],
+                   "   spec failed: {1!r}: {2!r}".format(*excinfo.value.args),
                    "   no further details known at this point.",
                ]
            )

    def reportinfo(self):
-       return self.fspath, 0, "usecase: %s" % self.name
+       return self.fspath, 0, "usecase: {}".format(self.name)


class YamlException(Exception):

View File

@@ -19,24 +19,30 @@ Generating parameters combinations, depending on command line

Let's say we want to execute a test with different computation
parameters and the parameter range shall be determined by a command
-line argument. Let's first write a simple (do-nothing) computation test::
+line argument. Let's first write a simple (do-nothing) computation test:
+
+.. code-block:: python

    # content of test_compute.py


    def test_compute(param1):
        assert param1 < 4

-Now we add a test configuration like this::
+Now we add a test configuration like this:
+
+.. code-block:: python

    # content of conftest.py


    def pytest_addoption(parser):
-       parser.addoption("--all", action="store_true",
-           help="run all combinations")
+       parser.addoption("--all", action="store_true", help="run all combinations")


    def pytest_generate_tests(metafunc):
-       if 'param1' in metafunc.fixturenames:
-           if metafunc.config.getoption('all'):
+       if "param1" in metafunc.fixturenames:
+           if metafunc.config.getoption("all"):
                end = 5
            else:
                end = 2
@@ -48,7 +54,7 @@ This means that we only run 2 tests if we do not pass ``--all``:

$ pytest -q test_compute.py
.. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.12s

We run only two computations, so we see two dots.
let's run the full monty:
@@ -66,8 +72,8 @@ let's run the full monty:

>       assert param1 < 4
E       assert 4 < 4

-test_compute.py:3: AssertionError
+test_compute.py:4: AssertionError
-1 failed, 4 passed in 0.12 seconds
+1 failed, 4 passed in 0.12s

As expected when running the full range of ``param1`` values
we'll get an error on the last one.
@@ -83,7 +89,9 @@ Running pytest with ``--collect-only`` will show the generated IDs.

Numbers, strings, booleans and None will have their usual string representation
used in the test ID. For other objects, pytest will make a string based on
-the argument name::
+the argument name:
+
+.. code-block:: python

    # content of test_time.py
@@ -112,7 +120,7 @@ the argument name::

    def idfn(val):
        if isinstance(val, (datetime,)):
            # note this wouldn't show any hours/minutes/seconds
-           return val.strftime('%Y%m%d')
+           return val.strftime("%Y%m%d")


    @pytest.mark.parametrize("a,b,expected", testdata, ids=idfn)
@@ -120,12 +128,18 @@ the argument name::
        diff = a - b
        assert diff == expected


-   @pytest.mark.parametrize("a,b,expected", [
-       pytest.param(datetime(2001, 12, 12), datetime(2001, 12, 11),
-                    timedelta(1), id='forward'),
-       pytest.param(datetime(2001, 12, 11), datetime(2001, 12, 12),
-                    timedelta(-1), id='backward'),
-   ])
+   @pytest.mark.parametrize(
+       "a,b,expected",
+       [
+           pytest.param(
+               datetime(2001, 12, 12), datetime(2001, 12, 11), timedelta(1), id="forward"
+           ),
+           pytest.param(
+               datetime(2001, 12, 11), datetime(2001, 12, 12), timedelta(-1), id="backward"
+           ),
+       ],
+   )
    def test_timedistance_v3(a, b, expected):
        diff = a - b
        assert diff == expected
@@ -158,7 +172,7 @@ objects, they are still using the default pytest representation:

<Function test_timedistance_v3[forward]>
<Function test_timedistance_v3[backward]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
together with the actual data, instead of listing them separately.
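For comparison, string IDs can also be supplied as a plain list alongside the values (a minimal sketch, not taken from the diff; ``ids`` is standard ``parametrize`` API):

.. code-block:: python

    import pytest


    @pytest.mark.parametrize("a,b", [(1, 2), (3, 4)], ids=["small", "large"])
    def test_smaller(a, b):
        assert a < b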
@@ -171,10 +185,13 @@ A quick port of "testscenarios"

Here is a quick port to run tests configured with `test scenarios`_,
an add-on from Robert Collins for the standard unittest framework. We
only have to work a bit to construct the correct arguments for pytest's
-:py:func:`Metafunc.parametrize`::
+:py:func:`Metafunc.parametrize`:
+
+.. code-block:: python

    # content of test_scenarios.py


    def pytest_generate_tests(metafunc):
        idlist = []
        argvalues = []
@@ -182,13 +199,15 @@ only have to work a bit to construct the correct arguments for pytest's
            idlist.append(scenario[0])
            items = scenario[1].items()
            argnames = [x[0] for x in items]
-           argvalues.append(([x[1] for x in items]))
+           argvalues.append([x[1] for x in items])
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")

-   scenario1 = ('basic', {'attribute': 'value'})
-   scenario2 = ('advanced', {'attribute': 'value2'})

-   class TestSampleWithScenarios(object):
+   scenario1 = ("basic", {"attribute": "value"})
+   scenario2 = ("advanced", {"attribute": "value2"})


+   class TestSampleWithScenarios:
        scenarios = [scenario1, scenario2]

        def test_demo1(self, attribute):
@@ -210,7 +229,7 @@ this is a fully self-contained example which you can run with:

test_scenarios.py .... [100%]

-========================= 4 passed in 0.12 seconds =========================
+============================ 4 passed in 0.12s =============================

If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:
@@ -229,7 +248,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia

<Function test_demo1[advanced]>
<Function test_demo2[advanced]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

Note that we told ``metafunc.parametrize()`` that your scenario values
should be considered class-scoped. With pytest-2.3 this leads to a
@@ -243,12 +262,16 @@ Deferring the setup of parametrized resources

The parametrization of test functions happens at collection
time. It is a good idea to set up expensive resources like DB
connections or subprocesses only when the actual test is run.
-Here is a simple example how you can achieve that, first
-the actual test requiring a ``db`` object::
+Here is a simple example of how you can achieve that. This test
+requires a ``db`` object fixture:
+
+.. code-block:: python

    # content of test_backends.py

    import pytest


    def test_db_initialized(db):
        # a dummy test
        if db.__class__.__name__ == "DB2":
@@ -256,20 +279,27 @@ the actual test requiring a ``db`` object::

We can now add a test configuration that generates two invocations of
the ``test_db_initialized`` function and also implements a factory that
-creates a database object for the actual test invocations::
+creates a database object for the actual test invocations:
+
+.. code-block:: python

    # content of conftest.py

    import pytest

-   def pytest_generate_tests(metafunc):
-       if 'db' in metafunc.fixturenames:
-           metafunc.parametrize("db", ['d1', 'd2'], indirect=True)

-   class DB1(object):
+   def pytest_generate_tests(metafunc):
+       if "db" in metafunc.fixturenames:
+           metafunc.parametrize("db", ["d1", "d2"], indirect=True)


+   class DB1:
        "one database object"

-   class DB2(object):

+   class DB2:
        "alternative database object"


    @pytest.fixture
    def db(request):
        if request.param == "d1":
@@ -293,7 +323,7 @@ Let's first see how it looks like at collection time:

<Function test_db_initialized[d1]>
<Function test_db_initialized[d2]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

And then when we run the test:
@@ -312,8 +342,8 @@ And then when we run the test:

>       pytest.fail("deliberately failing for demo purposes")
E       Failed: deliberately failing for demo purposes

-test_backends.py:6: Failed
+test_backends.py:8: Failed
-1 failed, 1 passed in 0.12 seconds
+1 failed, 1 passed in 0.12s

The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase.
@@ -327,23 +357,29 @@ parameter on particular arguments. It can be done by passing list or tuple of

arguments' names to ``indirect``. In the example below there is a function ``test_indirect`` which uses
two fixtures: ``x`` and ``y``. Here we give to indirect the list, which contains the name of the
fixture ``x``. The indirect parameter will be applied to this argument only, and the value ``a``
-will be passed to respective fixture function::
+will be passed to the respective fixture function:
+
+.. code-block:: python

    # content of test_indirect_list.py

    import pytest

-   @pytest.fixture(scope='function')
+
+   @pytest.fixture(scope="function")
    def x(request):
        return request.param * 3

-   @pytest.fixture(scope='function')
+
+   @pytest.fixture(scope="function")
    def y(request):
        return request.param * 2

-   @pytest.mark.parametrize('x, y', [('a', 'b')], indirect=['x'])
-   def test_indirect(x,y):
-       assert x == 'aaa'
-       assert y == 'b'
+
+   @pytest.mark.parametrize("x, y", [("a", "b")], indirect=["x"])
+   def test_indirect(x, y):
+       assert x == "aaa"
+       assert y == "b"

The result of this test will be successful:
@@ -358,7 +394,7 @@ The result of this test will be successful:

<Module test_indirect_list.py>
<Function test_indirect[a-b]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.12s ===========================

.. regendoc:wipe
@@ -370,23 +406,28 @@ Parametrizing test methods through per-class configuration

Here is an example ``pytest_generate_tests`` function implementing a
parametrization scheme similar to Michael Foord's `unittest
-parametrizer`_ but in a lot less code::
+parametrizer`_ but in a lot less code:
+
+.. code-block:: python

    # content of ./test_parametrize.py
    import pytest


    def pytest_generate_tests(metafunc):
        # called once per each test function
        funcarglist = metafunc.cls.params[metafunc.function.__name__]
        argnames = sorted(funcarglist[0])
-       metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
-           for funcargs in funcarglist])
+       metafunc.parametrize(
+           argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]
+       )


-   class TestClass(object):
+   class TestClass:
        # a map specifying multiple argument sets for a test method
        params = {
-           'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
-           'test_zerodivision': [dict(a=1, b=0), ],
+           "test_equals": [dict(a=1, b=2), dict(a=3, b=3)],
+           "test_zerodivision": [dict(a=1, b=0)],
        }

        def test_equals(self, a, b):
@@ -412,8 +453,8 @@ argument sets to use for each test function. Let's run it:

>       assert a == b
E       assert 1 == 2

-test_parametrize.py:18: AssertionError
+test_parametrize.py:21: AssertionError
-1 failed, 2 passed in 0.12 seconds
+1 failed, 2 passed in 0.12s
Indirect parametrization with multiple fixtures
--------------------------------------------------------------
@@ -434,11 +475,11 @@ Running it results in some skips if we don't have all the python interpreters in

.. code-block:: pytest

. $ pytest -rs -q multipython.py
-ssssssssssss...ssssssssssss [100%]
+ssssssssssssssssssssssss... [100%]
========================= short test summary info ==========================
SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found
-SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.7' not found
+SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.6' not found
-3 passed, 24 skipped in 0.12 seconds
+3 passed, 24 skipped in 0.12s

Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------
@@ -447,36 +488,47 @@ If you want to compare the outcomes of several implementations of a given

API, you can write test functions that receive the already imported implementations
and get skipped in case the implementation is not importable/available. Let's
say we have a "base" implementation and the other (possibly optimized ones)
-need to provide similar results::
+need to provide similar results:
+
+.. code-block:: python

    # content of conftest.py

    import pytest


    @pytest.fixture(scope="session")
    def basemod(request):
        return pytest.importorskip("base")


    @pytest.fixture(scope="session", params=["opt1", "opt2"])
    def optmod(request):
        return pytest.importorskip(request.param)

-And then a base implementation of a simple function::
+And then a base implementation of a simple function:
+
+.. code-block:: python

    # content of base.py


    def func1():
        return 1

-And an optimized version::
+And an optimized version:
+
+.. code-block:: python

    # content of opt1.py


    def func1():
        return 1.0001

-And finally a little test module::
+And finally a little test module:
+
+.. code-block:: python

    # content of test_module.py


    def test_func1(basemod, optmod):
        assert round(basemod.func1(), 3) == round(optmod.func1(), 3)
@@ -495,8 +547,8 @@ If you run this with reporting for skips enabled:

test_module.py .s [100%]
========================= short test summary info ==========================
-SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2': No module named 'opt2'
+SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:13: could not import 'opt2': No module named 'opt2'
-=================== 1 passed, 1 skipped in 0.12 seconds ====================
+======================= 1 passed, 1 skipped in 0.12s =======================

You'll see that we don't have an ``opt2`` module and thus the second test run
of our ``test_func1`` was skipped. A few notes:
@@ -552,13 +604,13 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker:

platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR
-collecting ... collected 17 items / 14 deselected / 3 selected
+collecting ... collected 18 items / 15 deselected / 3 selected

test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%]
test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%]
test_pytest_param_example.py::test_eval[basic_6*9] XFAIL [100%]

-============ 2 passed, 14 deselected, 1 xfailed in 0.12 seconds ============
+=============== 2 passed, 15 deselected, 1 xfailed in 0.12s ================

As the result:
@@ -579,22 +631,28 @@ Use :func:`pytest.raises` with the

in which some tests raise exceptions and others do not.

It is helpful to define a no-op context manager ``does_not_raise`` to serve
-as a complement to ``raises``. For example::
+as a complement to ``raises``. For example:
+
+.. code-block:: python

    from contextlib import contextmanager

    import pytest


    @contextmanager
    def does_not_raise():
        yield


-   @pytest.mark.parametrize('example_input,expectation', [
-       (3, does_not_raise()),
-       (2, does_not_raise()),
-       (1, does_not_raise()),
-       (0, pytest.raises(ZeroDivisionError)),
-   ])
+   @pytest.mark.parametrize(
+       "example_input,expectation",
+       [
+           (3, does_not_raise()),
+           (2, does_not_raise()),
+           (1, does_not_raise()),
+           (0, pytest.raises(ZeroDivisionError)),
+       ],
+   )
    def test_division(example_input, expectation):
        """Test how much I know division."""
        with expectation:
@@ -604,14 +662,20 @@ In the example above, the first three test cases should run unexceptionally,

while the fourth should raise ``ZeroDivisionError``.

If you're only supporting Python 3.7+, you can simply use ``nullcontext``
-to define ``does_not_raise``::
+to define ``does_not_raise``:
+
+.. code-block:: python

    from contextlib import nullcontext as does_not_raise

-Or, if you're supporting Python 3.3+ you can use::
+Or, if you're supporting Python 3.3+ you can use:
+
+.. code-block:: python

    from contextlib import ExitStack as does_not_raise

-Or, if desired, you can ``pip install contextlib2`` and use::
+Or, if desired, you can ``pip install contextlib2`` and use:
+
+.. code-block:: python

-   from contextlib2 import ExitStack as does_not_raise
+   from contextlib2 import nullcontext as does_not_raise
@@ -31,7 +31,7 @@ you will see that ``pytest`` only collects test-modules, which do not match the

.. code-block:: pytest

    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-5.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 5 items
@@ -131,12 +131,15 @@ Here is an example:

This would make ``pytest`` look for tests in files that match the ``check_*
.py`` glob-pattern, ``Check`` prefixes in classes, and functions and methods
that match ``*_check``. For example, if we have:

.. code-block:: python

    # content of check_myapp.py
    class CheckMyApp:
        def simple_check(self):
            pass

        def complex_check(self):
            pass

@@ -155,7 +158,7 @@ The test collection would look like this:

        <Function simple_check>
        <Function complex_check>

    ========================== no tests ran in 0.12s ===========================

You can check for multiple glob patterns by adding a space between the patterns:
@@ -218,7 +221,7 @@ You can always peek at the collection tree without running tests like this:

        <Function test_method>
        <Function test_anothermethod>

    ========================== no tests ran in 0.12s ===========================

.. _customizing-test-collection:
@@ -238,7 +241,9 @@ You can easily instruct ``pytest`` to discover tests from every Python file:

However, many projects will have a ``setup.py`` which they don't want to be
imported. Moreover, there may be files only importable by a specific python
version. For such cases you can dynamically define files to be ignored by
listing them in a ``conftest.py`` file:

.. code-block:: python

    # content of conftest.py
    import sys

@@ -247,7 +252,9 @@ listing them in a ``conftest.py`` file:

    if sys.version_info[0] > 2:
        collect_ignore.append("pkg/module_py2.py")

and then if you have a module file like this:

.. code-block:: python

    # content of pkg/module_py2.py
    def test_only_on_python2():

@@ -256,10 +263,12 @@ and then if you have a module file like this:

        except Exception, e:
            pass

and a ``setup.py`` dummy file like this:

.. code-block:: python

    # content of setup.py
    0 / 0  # will raise exception if imported

If you run with a Python 2 interpreter then you will find the one test and will
leave out the ``setup.py`` file:

@@ -288,14 +297,16 @@ file will be left out:

    rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
    collected 0 items

    ========================== no tests ran in 0.12s ===========================

It's also possible to ignore files based on Unix shell-style wildcards by adding
patterns to ``collect_ignore_glob``.

The following example ``conftest.py`` ignores the file ``setup.py`` and in
addition all files that end with ``*_py2.py`` when executed with a Python 3
interpreter:

.. code-block:: python

    # content of conftest.py
    import sys
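    # The hunk ends here; a plausible completion matching the prose above
    # (the exact lines are outside this diff).
    collect_ignore = ["setup.py"]
    if sys.version_info[0] > 2:
        collect_ignore_glob = ["*_py2.py"]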
@@ -119,7 +119,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    a = "1" * 100 + "a" + "2" * 100
    b = "1" * 100 + "b" + "2" * 100
    > assert a == b
    E AssertionError: assert '111111111111...2222222222222' == '111111111111...2222222222222'
    E Skipping 90 identical leading characters in diff, use -v to show
    E Skipping 91 identical trailing characters in diff, use -v to show
    E - 1111111111a222222222

@@ -136,7 +136,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    a = "1\n" * 100 + "a" + "2\n" * 100
    b = "1\n" * 100 + "b" + "2\n" * 100
    > assert a == b
    E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n...n2\n2\n2\n2\n'
    E Skipping 190 identical leading characters in diff, use -v to show
    E Skipping 191 identical trailing characters in diff, use -v to show
    E 1

@@ -235,7 +235,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    def test_not_in_text_multiline(self):
        text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail"
    > assert "foo" not in text
    E AssertionError: assert 'foo' not in 'some multil...nand a\ntail'
    E 'foo' is contained here:
    E some multiline
    E text

@@ -267,7 +267,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    def test_not_in_text_single_long(self):
        text = "head " * 50 + "foo " + "tail " * 20
    > assert "foo" not in text
    E AssertionError: assert 'foo' not in 'head head h...l tail tail '
    E 'foo' is contained here:
    E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
    E ? +++

@@ -280,7 +280,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    def test_not_in_text_single_long_term(self):
        text = "head " * 50 + "f" * 70 + "tail " * 20
    > assert "f" * 70 not in text
    E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head h...l tail tail '
    E 'ffffffffffffffffff...fffffffffffffffffff' is contained here:
    E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
    E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

@@ -301,7 +301,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    left = Foo(1, "b")
    right = Foo(1, "c")
    > assert left == right
    E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialis...oo(a=1, b='c')
    E Omitting 1 identical items, use -vv to show
    E Differing attributes:
    E b: 'b' != 'c'

@@ -434,9 +434,9 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
        items = [1, 2, 3]
        print("items is {!r}".format(items))
    > a, b = items.pop()
    E TypeError: cannot unpack non-iterable int object

    failure_demo.py:181: TypeError
    --------------------------- Captured stdout call ---------------------------

@@ -516,7 +516,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    def test_z2_type_error(self):
        items = 3
    > a, b = items
    E TypeError: cannot unpack non-iterable int object

    failure_demo.py:222: TypeError
    ______________________ TestMoreErrors.test_startswith ______________________

@@ -650,4 +650,4 @@ Here is a nice run of several failures and how ``pytest`` presents things:

    E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a

    failure_demo.py:282: AssertionError
    ============================ 44 failed in 0.12s ============================
@@ -65,7 +65,7 @@ Let's run this without supplying our new option:

    test_sample.py:6: AssertionError
    --------------------------- Captured stdout call ---------------------------
    first
    1 failed in 0.12s

And now with supplying a command line option:

@@ -89,7 +89,7 @@ And now with supplying a command line option:

    test_sample.py:6: AssertionError
    --------------------------- Captured stdout call ---------------------------
    second
    1 failed in 0.12s

You can see that the command line option arrived in our test. This
completes the basic pattern. However, one often rather wants to process
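The conftest the next hunk refers to lies outside this diff; a minimal sketch
of the pattern being described, assuming an option named ``--cmdopt``:

.. code-block:: python

    # content of conftest.py - a sketch, not part of this diff
    import pytest


    def pytest_addoption(parser):
        parser.addoption("--cmdopt", action="store", default="type1")


    @pytest.fixture
    def cmdopt(request):
        # hand the option value to tests through a fixture
        return request.config.getoption("--cmdopt")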
@@ -132,7 +132,7 @@ directory with the above conftest.py:

    rootdir: $REGENDOC_TMPDIR
    collected 0 items

    ========================== no tests ran in 0.12s ===========================

.. _`excontrolskip`:

@@ -201,7 +201,7 @@ and when running it will see a skipped "slow" test:

    ========================= short test summary info ==========================
    SKIPPED [1] test_module.py:8: need --runslow option to run
    ======================= 1 passed, 1 skipped in 0.12s =======================

Or run it including the ``slow`` marked test:

@@ -216,7 +216,7 @@ Or run it including the ``slow`` marked test:

    test_module.py .. [100%]

    ============================ 2 passed in 0.12s =============================
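The hunks above show only the resulting output; a minimal sketch of the
skipping mechanism they exercise (the actual conftest is outside this diff):

.. code-block:: python

    # content of conftest.py - a sketch of the --runslow pattern
    import pytest


    def pytest_addoption(parser):
        parser.addoption(
            "--runslow", action="store_true", default=False, help="run slow tests"
        )


    def pytest_collection_modifyitems(config, items):
        if config.getoption("--runslow"):
            # --runslow given on the command line: do not skip slow tests
            return
        skip_slow = pytest.mark.skip(reason="need --runslow option to run")
        for item in items:
            if "slow" in item.keywords:
                item.add_marker(skip_slow)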
Writing well integrated assertion helpers
-----------------------------------------
@@ -238,7 +238,7 @@ Example:

    def checkconfig(x):
        __tracebackhide__ = True
        if not hasattr(x, "config"):
            pytest.fail("not configured: {}".format(x))


    def test_something():

@@ -261,7 +261,7 @@ Let's run our little function:

    E Failed: not configured: 42

    test_checkconfig.py:11: Failed
    1 failed in 0.12s

If you only want to hide certain exceptions, you can set ``__tracebackhide__``
to a callable which gets the ``ExceptionInfo`` object. You can for example use

@@ -280,7 +280,7 @@ this to make sure unexpected exception types aren't hidden:

    def checkconfig(x):
        __tracebackhide__ = operator.methodcaller("errisinstance", ConfigException)
        if not hasattr(x, "config"):
            raise ConfigException("not configured: {}".format(x))


    def test_something():
@@ -358,7 +358,7 @@ which will add the string to the test header accordingly:

    rootdir: $REGENDOC_TMPDIR
    collected 0 items

    ========================== no tests ran in 0.12s ===========================

.. regendoc:wipe
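The hook producing the header shown above sits outside this diff; a minimal
sketch of what such a hook looks like (the returned string is illustrative):

.. code-block:: python

    # content of conftest.py - a sketch, not part of this diff
    def pytest_report_header(config):
        return "project deps: mylib-1.1"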
@@ -388,7 +388,7 @@ which will add info only when run with "--v":

    rootdir: $REGENDOC_TMPDIR
    collecting ... collected 0 items

    ========================== no tests ran in 0.12s ===========================

and nothing when run plainly:

@@ -401,7 +401,7 @@ and nothing when run plainly:

    rootdir: $REGENDOC_TMPDIR
    collected 0 items

    ========================== no tests ran in 0.12s ===========================

profiling test duration
-----------------------

@@ -447,7 +447,7 @@ Now we can profile which test functions execute the slowest:

    0.30s call test_some_are_slow.py::test_funcslow2
    0.20s call test_some_are_slow.py::test_funcslow1
    0.10s call test_some_are_slow.py::test_funcfast
    ============================ 3 passed in 0.12s =============================

incremental testing - test steps
--------------------------------
@@ -478,7 +478,7 @@ an ``incremental`` marker which is to be used on classes:

    if "incremental" in item.keywords:
        previousfailed = getattr(item.parent, "_previousfailed", None)
        if previousfailed is not None:
            pytest.xfail("previous test failed ({})".format(previousfailed.name))

These two hook implementations work together to abort incremental-marked
tests in a class. The hunk above shows only the second hook; a sketch of the
companion hook follows, and then a test module example:
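.. code-block:: python

    # content of conftest.py - a sketch of the companion
    # pytest_runtest_makereport hook: it records a failing test on its parent
    # class so the pytest_runtest_setup hook above can xfail the later tests
    def pytest_runtest_makereport(item, call):
        if "incremental" in item.keywords:
            if call.excinfo is not None:
                parent = item.parent
                parent._previousfailed = item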
@@ -491,7 +491,7 @@ tests in a class. Here is a test module example:

    @pytest.mark.incremental
    class TestUserHandling:
        def test_login(self):
            pass

@@ -531,7 +531,7 @@ If we run this:

    ========================= short test summary info ==========================
    XFAIL test_step.py::TestUserHandling::test_deletion
    reason: previous test failed (test_modification)
    ================== 1 failed, 2 passed, 1 xfailed in 0.12s ==================

We'll see that ``test_deletion`` was not executed because ``test_modification``
failed. It is reported as an "expected failure".
@@ -556,7 +556,7 @@ Here is an example for making a ``db`` fixture available in a directory:

    import pytest


    class DB:
        pass

@@ -644,7 +644,7 @@ We can run this:

    E assert 0

    a/test_db2.py:2: AssertionError
    ============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12s ==============

The two test modules in the ``a`` directory see the same ``db`` fixture instance
while the one test in the sister-directory ``b`` doesn't see it. We could of course
@@ -684,7 +684,7 @@ case we just write some information out to a ``failures`` file:

    with open("failures", mode) as f:
        # let's also access a fixture for the fun of it
        if "tmpdir" in item.fixturenames:
            extra = " ({})".format(item.funcargs["tmpdir"])
        else:
            extra = ""
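For context, a sketch of the full hook this hunk excerpts (the hookwrapper
framing and the final write sit outside the hunk):

.. code-block:: python

    # content of conftest.py - a sketch; only the middle lines appear above
    import os

    import pytest


    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        rep = outcome.get_result()
        if rep.when == "call" and rep.failed:
            mode = "a" if os.path.exists("failures") else "w"
            with open("failures", mode) as f:
                # let's also access a fixture for the fun of it
                if "tmpdir" in item.fixturenames:
                    extra = " ({})".format(item.funcargs["tmpdir"])
                else:
                    extra = ""
                f.write(rep.nodeid + extra + "\n")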
@@ -733,7 +733,7 @@ and run them:

    E assert 0

    test_module.py:6: AssertionError
    ============================ 2 failed in 0.12s =============================

you will have a "failures" file which contains the failing test ids:

@@ -848,7 +848,7 @@ and run it:

    E assert 0

    test_module.py:19: AssertionError
    ======================== 2 failed, 1 error in 0.12s ========================

You'll see that the fixture finalizers could use the precise reporting
information.
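A sketch of the kind of fixture finalizer this refers to, assuming a
makereport hook that stores each phase report on the item via
``setattr(item, "rep_" + rep.when, rep)``:

.. code-block:: python

    # a sketch - relies on the makereport hook described in the lead-in
    import pytest


    @pytest.fixture
    def something(request):
        yield
        # request.node is an "item" because we use the default "function" scope
        if request.node.rep_setup.failed:
            print("setting up a test failed!", request.node.nodeid)
        elif request.node.rep_setup.passed:
            if request.node.rep_call.failed:
                print("executing test failed", request.node.nodeid)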
@@ -5,30 +5,36 @@ A session-scoped fixture effectively has access to all

collected test items. Here is an example of a fixture
function which walks all collected tests and looks
if their test class defines a ``callme`` method and
calls it:

.. code-block:: python

    # content of conftest.py

    import pytest


    @pytest.fixture(scope="session", autouse=True)
    def callattr_ahead_of_alltests(request):
        print("callattr_ahead_of_alltests called")
        seen = {None}
        session = request.node
        for item in session.items:
            cls = item.getparent(pytest.Class)
            if cls not in seen:
                if hasattr(cls.obj, "callme"):
                    cls.obj.callme()
                seen.add(cls)

test classes may now define a ``callme`` method which
will be called ahead of running any tests:

.. code-block:: python

    # content of test_module.py


    class TestHello:
        @classmethod
        def callme(cls):
            print("callme called!")

@@ -39,16 +45,20 @@ will be called ahead of running any tests:

        def test_method2(self):
            print("test_method1 called")


    class TestOther:
        @classmethod
        def callme(cls):
            print("callme other called")

        def test_other(self):
            print("test other")


    # works with unittest as well ...
    import unittest


    class SomeTest(unittest.TestCase):
        @classmethod
        def callme(self):

@@ -71,4 +81,4 @@ If you run this without output capturing:

    .test other
    .test_unit1 method called
    .
    4 passed in 0.12s
@@ -15,7 +15,9 @@ Running an existing test suite with pytest

Say you want to contribute to an existing repository somewhere.
After pulling the code into your development space using some
flavor of version control and (optionally) setting up a virtualenv
you will want to run:

.. code-block:: bash

    cd <repository>
    pip install -e .  # Environment dependent alternatives include
@@ -49,20 +49,25 @@ argument. For each argument name, a fixture function with that name provides

the fixture object. Fixture functions are registered by marking them with
:py:func:`@pytest.fixture <_pytest.python.fixture>`. Let's look at a simple
self-contained test module containing a fixture and a test function
using it:

.. code-block:: python

    # content of ./test_smtpsimple.py
    import pytest


    @pytest.fixture
    def smtp_connection():
        import smtplib

        return smtplib.SMTP("smtp.gmail.com", 587, timeout=5)


    def test_ehlo(smtp_connection):
        response, msg = smtp_connection.ehlo()
        assert response == 250
        assert 0  # for demo purposes

Here, the ``test_ehlo`` needs the ``smtp_connection`` fixture value. pytest
will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>`

@@ -87,11 +92,11 @@ marked ``smtp_connection`` fixture function. Running the test looks like this:

    def test_ehlo(smtp_connection):
        response, msg = smtp_connection.ehlo()
        assert response == 250
    > assert 0 # for demo purposes
    E assert 0

    test_smtpsimple.py:14: AssertionError
    ============================ 1 failed in 0.12s =============================

In the failure traceback we see that the test function was called with a
``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture
@@ -180,12 +185,15 @@ Possible values for ``scope`` are: ``function``, ``class``, ``module``, ``packag

The next example puts the fixture function into a separate ``conftest.py`` file
so that tests from multiple test modules in the directory can
access the fixture function:

.. code-block:: python

    # content of conftest.py
    import pytest
    import smtplib


    @pytest.fixture(scope="module")
    def smtp_connection():
@@ -193,16 +201,20 @@ access the fixture function:

The name of the fixture again is ``smtp_connection`` and you can access its
result by listing the name ``smtp_connection`` as an input parameter in any
test or fixture function (in or below the directory where ``conftest.py`` is
located):

.. code-block:: python

    # content of test_module.py


    def test_ehlo(smtp_connection):
        response, msg = smtp_connection.ehlo()
        assert response == 250
        assert b"smtp.gmail.com" in msg
        assert 0  # for demo purposes


    def test_noop(smtp_connection):
        response, msg = smtp_connection.noop()
        assert response == 250
@@ -234,7 +246,7 @@ inspect what is going on and can now run the tests:

    > assert 0 # for demo purposes
    E assert 0

    test_module.py:7: AssertionError
    ________________________________ test_noop _________________________________

    smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -245,8 +257,8 @@ inspect what is going on and can now run the tests:

    > assert 0 # for demo purposes
    E assert 0

    test_module.py:13: AssertionError
    ============================ 2 failed in 0.12s =============================

You see the two ``assert 0`` failing and more importantly you can also see
that the same (module-scoped) ``smtp_connection`` object was passed into the
@@ -289,51 +301,59 @@ are finalized when the last test of a *package* finishes.

Use this new feature sparingly and please make sure to report any issues you find.

.. _dynamic scope:

Dynamic scope
^^^^^^^^^^^^^

.. versionadded:: 5.2

In some cases, you might want to change the scope of the fixture without changing the code.
To do that, pass a callable to ``scope``. The callable must return a string with a valid scope
and will be executed only once - during the fixture definition. It will be called with two
keyword arguments - ``fixture_name`` as a string and ``config`` with a configuration object.

This can be especially useful when dealing with fixtures that need time for setup, like spawning
a docker container. You can use the command-line argument to control the scope of the spawned
containers for different environments. See the example below.

.. code-block:: python

    def determine_scope(fixture_name, config):
        if config.getoption("--keep-containers"):
            return "session"
        return "function"


    @pytest.fixture(scope=determine_scope)
    def docker_container():
        yield spawn_container()
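For the example above to work, the ``--keep-containers`` option has to be
registered as well; a minimal sketch (the option name comes from the example,
everything else is an assumption):

.. code-block:: python

    # content of conftest.py - a sketch, not part of this diff
    def pytest_addoption(parser):
        parser.addoption(
            "--keep-containers",
            action="store_true",
            default=False,
            help="keep one container for the whole session",
        )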
Order: Higher-scoped fixtures are instantiated first
----------------------------------------------------

Within a function request for fixtures, fixtures of higher scopes (such as ``session``) are instantiated before
lower-scoped fixtures (such as ``function`` or ``class``). The relative order of fixtures of the same scope follows
the declared order in the test function and honours dependencies between fixtures. Autouse fixtures will be
instantiated before explicitly used fixtures.

Consider the code below (reproduced as a sketch after the list that follows):

.. literalinclude:: example/fixtures/test_fixtures_order.py

The fixtures requested by ``test_order`` will be instantiated in the following order:
1. ``s1``: is the highest-scoped fixture (``session``).
2. ``m1``: is the second highest-scoped fixture (``module``).
3. ``a1``: is a ``function``-scoped ``autouse`` fixture: it will be instantiated before other fixtures
   within the same scope.
4. ``f3``: is a ``function``-scoped fixture, required by ``f1``: it needs to be instantiated at this point
   because it is a dependency of ``f1``.
5. ``f1``: is the first ``function``-scoped fixture in ``test_order`` parameter list.
6. ``f2``: is the last ``function``-scoped fixture in ``test_order`` parameter list.
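The file pulled in by the ``literalinclude`` above is not part of this diff; a
sketch consistent with the ordering just described:

.. code-block:: python

    # content of test_fixtures_order.py - a sketch of the included example
    import pytest


    @pytest.fixture(scope="session")
    def s1():
        pass


    @pytest.fixture(scope="module")
    def m1():
        pass


    @pytest.fixture
    def f1(f3):
        pass


    @pytest.fixture
    def f3():
        pass


    @pytest.fixture(autouse=True)
    def a1():
        pass


    @pytest.fixture
    def f2():
        pass


    def test_order(f1, m1, f2, s1):
        pass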
.. _`finalization`:

@@ -371,7 +391,7 @@ Let's execute it:

    $ pytest -s -q --tb=no
    FFteardown smtp

    2 failed in 0.12s

We see that the ``smtp_connection`` instance is finalized after the two
tests finished execution. Note that if we decorated our fixture

@@ -400,6 +420,34 @@ The ``smtp_connection`` connection will be closed after the test finished

execution because the ``smtp_connection`` object automatically closes when
the ``with`` statement ends.
When using the ``contextlib.ExitStack`` context manager, finalizers will always be called
regardless of whether the fixture *setup* code raises an exception. This is handy to properly
close all resources created by a fixture even if one of them fails to be created/acquired:

.. code-block:: python

    # content of test_yield3.py

    import contextlib

    import pytest


    @contextlib.contextmanager
    def connect(port):
        ...  # create connection
        yield
        ...  # close connection


    @pytest.fixture
    def equipments():
        with contextlib.ExitStack() as stack:
            yield [stack.enter_context(connect(port)) for port in ("C1", "C3", "C28")]

In the example above, if ``"C28"`` fails with an exception, ``"C1"`` and ``"C3"`` will still
be properly closed.
Note that if an exception happens during the *setup* code (before the ``yield`` keyword), the
*teardown* code (after the ``yield``) will not be called.

@@ -428,27 +476,39 @@ Here's the ``smtp_connection`` fixture changed to use ``addfinalizer`` for clean

        return smtp_connection  # provide the fixture value
Here's the ``equipments`` fixture changed to use ``addfinalizer`` for cleanup:

.. code-block:: python

    # content of test_yield3.py

    import contextlib
    import functools

    import pytest


    @contextlib.contextmanager
    def connect(port):
        ...  # create connection
        yield
        ...  # close connection


    @pytest.fixture
    def equipments(request):
        r = []
        for port in ("C1", "C3", "C28"):
            cm = connect(port)
            equip = cm.__enter__()
            request.addfinalizer(functools.partial(cm.__exit__, None, None, None))
            r.append(equip)
        return r

Both ``yield`` and ``addfinalizer`` methods work similarly by calling their code after the test
ends. Of course, if an exception happens before the finalize function is registered then it
will not be executed.
.. _`request-context`:

@@ -459,18 +519,21 @@ Fixtures can introspect the requesting test context

Fixture functions can accept the :py:class:`request <FixtureRequest>` object
to introspect the "requesting" test function, class or module context.
Further extending the previous ``smtp_connection`` fixture example, let's
read an optional server URL from the test module which uses our fixture:

.. code-block:: python

    # content of conftest.py
    import pytest
    import smtplib


    @pytest.fixture(scope="module")
    def smtp_connection(request):
        server = getattr(request.module, "smtpserver", "smtp.gmail.com")
        smtp_connection = smtplib.SMTP(server, 587, timeout=5)
        yield smtp_connection
        print("finalizing {} ({})".format(smtp_connection, server))
        smtp_connection.close()

We use the ``request.module`` attribute to optionally obtain an
@@ -482,15 +545,18 @@ again, nothing much has changed:

    $ pytest -s -q --tb=no
    FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)

    2 failed in 0.12s

Let's quickly create another test module that actually sets the
server URL in its module namespace:

.. code-block:: python

    # content of test_anothersmtp.py

    smtpserver = "mail.python.org"  # will be read by smtp fixture


    def test_showhelo(smtp_connection):
        assert 0, smtp_connection.helo()
@@ -502,7 +568,7 @@ Running it:

    F [100%]
    ================================= FAILURES =================================
    ______________________________ test_showhelo _______________________________
    test_anothersmtp.py:6: in test_showhelo
        assert 0, smtp_connection.helo()
    E AssertionError: (250, b'mail.python.org')
    E assert 0
@@ -522,16 +588,14 @@ of a fixture is needed multiple times in a single test. Instead of returning

data directly, the fixture instead returns a function which generates the data.
This function can then be called multiple times in the test.

Factories can have parameters as needed:

.. code-block:: python

    @pytest.fixture
    def make_customer_record():
        def _make_customer_record(name):
            return {"name": name, "orders": []}

        return _make_customer_record
@@ -541,7 +605,9 @@ Factories can have parameters as needed:

        customer_2 = make_customer_record("Mike")
        customer_3 = make_customer_record("Meredith")

If the data created by the factory requires managing, the fixture can take care of that:

.. code-block:: python

    @pytest.fixture
    def make_customer_record():
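        # (the hunk cuts off here; a plausible continuation of the managed
        # factory described above - created records are tracked and cleaned up)
        created_records = []

        def _make_customer_record(name):
            record = {"name": name, "orders": []}
            created_records.append(record)
            return record

        yield _make_customer_record

        for record in created_records:
            record.clear()  # stand-in cleanup for whatever the records hold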
@@ -580,18 +646,20 @@ configured in multiple ways.

Extending the previous example, we can flag the fixture to create two
``smtp_connection`` fixture instances which will cause all tests using the fixture
to run twice. The fixture function gets access to each parameter
through the special :py:class:`request <FixtureRequest>` object:

.. code-block:: python

    # content of conftest.py
    import pytest
    import smtplib


    @pytest.fixture(scope="module", params=["smtp.gmail.com", "mail.python.org"])
    def smtp_connection(request):
        smtp_connection = smtplib.SMTP(request.param, 587, timeout=5)
        yield smtp_connection
        print("finalizing {}".format(smtp_connection))
        smtp_connection.close()
The main change is the declaration of ``params`` with

@@ -616,7 +684,7 @@ So let's just do another run:

    > assert 0 # for demo purposes
    E assert 0

    test_module.py:7: AssertionError
    ________________________ test_noop[smtp.gmail.com] _________________________

    smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -627,7 +695,7 @@ So let's just do another run:

    > assert 0 # for demo purposes
    E assert 0

    test_module.py:13: AssertionError
    ________________________ test_ehlo[mail.python.org] ________________________

    smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -638,7 +706,7 @@ So let's just do another run:

    > assert b"smtp.gmail.com" in msg
    E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING'

    test_module.py:6: AssertionError
    -------------------------- Captured stdout setup ---------------------------
    finalizing <smtplib.SMTP object at 0xdeadbeef>
    ________________________ test_noop[mail.python.org] ________________________

@@ -651,10 +719,10 @@ So let's just do another run:

    > assert 0 # for demo purposes
    E assert 0

    test_module.py:13: AssertionError
    ------------------------- Captured stdout teardown -------------------------
    finalizing <smtplib.SMTP object at 0xdeadbeef>
    4 failed in 0.12s

We see that our two test functions each ran twice, against the different
``smtp_connection`` instances. Note also, that with the ``mail.python.org``
@@ -672,28 +740,35 @@ Numbers, strings, booleans and None will have their usual string

representation used in the test ID. For other objects, pytest will
make a string based on the argument name. It is possible to customise
the string used in a test ID for a certain fixture value by using the
``ids`` keyword argument:

.. code-block:: python

    # content of test_ids.py
    import pytest


    @pytest.fixture(params=[0, 1], ids=["spam", "ham"])
    def a(request):
        return request.param


    def test_a(a):
        pass


    def idfn(fixture_value):
        if fixture_value == 0:
            return "eggs"
        else:
            return None


    @pytest.fixture(params=[0, 1], ids=idfn)
    def b(request):
        return request.param


    def test_b(b):
        pass
@@ -726,7 +801,7 @@ Running the above tests results in the following test IDs being used:

        <Function test_ehlo[mail.python.org]>
        <Function test_noop[mail.python.org]>

    ========================== no tests ran in 0.12s ===========================

.. _`fixture-parametrize-marks`:
@@ -736,14 +811,19 @@ Using marks with parametrized fixtures

:func:`pytest.param` can be used to apply marks in values sets of parametrized fixtures in the same way
that they can be used with :ref:`@pytest.mark.parametrize <@pytest.mark.parametrize>`.

Example:

.. code-block:: python

    # content of test_fixture_marks.py
    import pytest


    @pytest.fixture(params=[0, 1, pytest.param(2, marks=pytest.mark.skip)])
    def data_set(request):
        return request.param


    def test_data(data_set):
        pass

@@ -762,7 +842,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:

    test_fixture_marks.py::test_data[1] PASSED [ 66%]
    test_fixture_marks.py::test_data[2] SKIPPED [100%]
    ======================= 2 passed, 1 skipped in 0.12s =======================
.. _`interdependent fixtures`:

@@ -774,20 +854,25 @@ can use other fixtures themselves. This contributes to a modular design

of your fixtures and allows re-use of framework-specific fixtures across
many projects. As a simple example, we can extend the previous example
and instantiate an object ``app`` where we stick the already defined
``smtp_connection`` resource into it:

.. code-block:: python

    # content of test_appsetup.py

    import pytest


    class App:
        def __init__(self, smtp_connection):
            self.smtp_connection = smtp_connection


    @pytest.fixture(scope="module")
    def app(smtp_connection):
        return App(smtp_connection)


    def test_smtp_connection_exists(app):
        assert app.smtp_connection

@@ -806,7 +891,7 @@ Here we declare an ``app`` fixture which receives the previously defined

    test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%]
    test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%]
    ============================ 2 passed in 0.12s =============================
Due to the parametrization of ``smtp_connection``, the test will run twice with two
different ``App`` instances and respective smtp servers. There is no

@@ -836,31 +921,40 @@ this eases testing of applications which create and use global state.

The following example uses two parametrized fixtures, one of which is
scoped on a per-module basis, and all the functions perform ``print`` calls
to show the setup/teardown flow:

.. code-block:: python

    # content of test_module.py
    import pytest


    @pytest.fixture(scope="module", params=["mod1", "mod2"])
    def modarg(request):
        param = request.param
        print(" SETUP modarg", param)
        yield param
        print(" TEARDOWN modarg", param)


    @pytest.fixture(scope="function", params=[1, 2])
    def otherarg(request):
        param = request.param
        print(" SETUP otherarg", param)
        yield param
        print(" TEARDOWN otherarg", param)


    def test_0(otherarg):
        print(" RUN test0 with otherarg", otherarg)


    def test_1(modarg):
        print(" RUN test1 with modarg", modarg)


    def test_2(otherarg, modarg):
        print(" RUN test2 with otherarg {} and modarg {}".format(otherarg, modarg))

Let's run the tests in verbose mode and look at the print-output:
@@ -907,7 +1001,7 @@ Let's run the tests in verbose mode and look at the print-output:

    TEARDOWN modarg mod2

    ============================ 8 passed in 0.12s =============================

You can see that the parametrized module-scoped ``modarg`` resource caused an
ordering of test execution that led to the fewest possible "active" resources.
@@ -935,7 +1029,9 @@ current working directory but otherwise do not care for the concrete

directory. Here is how you can use the standard `tempfile
<http://docs.python.org/library/tempfile.html>`_ and pytest fixtures to
achieve it. We separate the creation of the fixture into a conftest.py
file:

.. code-block:: python

    # content of conftest.py

@@ -943,19 +1039,23 @@ file:

    import tempfile
    import os


    @pytest.fixture()
    def cleandir():
        newpath = tempfile.mkdtemp()
        os.chdir(newpath)
and declare its use in a test module via a ``usefixtures`` marker:

.. code-block:: python

    # content of test_setenv.py
    import os
    import pytest


    @pytest.mark.usefixtures("cleandir")
    class TestDirectoryInit:
        def test_cwd_starts_empty(self):
            assert os.listdir(os.getcwd()) == []
            with open("myfile", "w") as f:
@@ -973,7 +1073,7 @@ to verify our fixture is activated and the tests pass:

    $ pytest -q
    .. [100%]
    2 passed in 0.12s

You can specify multiple fixtures like this:
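The example itself falls outside this hunk; a sketch consistent with the
sentence above (the second fixture name is illustrative):

.. code-block:: python

    @pytest.mark.usefixtures("cleandir", "anotherfixture")
    def test():
        ...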
@@ -1032,25 +1132,32 @@ without declaring a function argument explicitly or a `usefixtures`_ decorator.

As a practical example, suppose we have a database fixture which has a
begin/rollback/commit architecture and we want to automatically surround
each test method by a transaction and a rollback. Here is a dummy
self-contained implementation of this idea:

.. code-block:: python

    # content of test_db_transact.py
    import pytest


    class DB:
        def __init__(self):
            self.intransaction = []

        def begin(self, name):
            self.intransaction.append(name)

        def rollback(self):
            self.intransaction.pop()


    @pytest.fixture(scope="module")
    def db():
        return DB()


    class TestClass:
        @pytest.fixture(autouse=True)
        def transact(self, request, db):
            db.begin(request.function.__name__)

@@ -1074,7 +1181,7 @@ If we run it, we get two passing tests:

    $ pytest -q
    .. [100%]
    2 passed in 0.12s
Here is how autouse fixtures work in other scopes:

@@ -1098,7 +1205,9 @@ Here is how autouse fixtures work in other scopes:

Note that the above ``transact`` fixture may very well be a fixture that
you want to make available in your project without having it generally
active. The canonical way to do that is to put the transact definition
into a conftest.py file **without** using ``autouse``:

.. code-block:: python

    # content of conftest.py
    @pytest.fixture

@@ -1107,10 +1216,12 @@ into a conftest.py file **without** using ``autouse``:

        yield
        db.rollback()

and then e.g. have a TestClass using it by declaring the need:

.. code-block:: python

    @pytest.mark.usefixtures("transact")
    class TestClass:
        def test_method1(self):
            ...
View File
@ -122,4 +122,4 @@ Resources
* Google: * Google:
* `Flaky Tests at Google and How We Mitigate Them <https://testing.googleblog.com/2016/05/flaky-tests-at-google-and-how-we.html>`_ by John Micco, 2016 * `Flaky Tests at Google and How We Mitigate Them <https://testing.googleblog.com/2016/05/flaky-tests-at-google-and-how-we.html>`_ by John Micco, 2016
* `Where do Google's flaky tests come from? <https://docs.google.com/document/d/1mZ0-Kc97DI_F3tf_GBW_NB_aqka-P1jVOsFfufxqUUM/edit#heading=h.ec0r4fypsleh>`_ by Jeff Listfield, 2017 * `Where do Google's flaky tests come from? <https://testing.googleblog.com/2017/04/where-do-our-flaky-tests-come-from.html>`_ by Jeff Listfield, 2017
View File
@ -21,19 +21,23 @@ funcarg for a test function is required. If a factory wants to
re-use a resource across different scopes, it often used re-use a resource across different scopes, it often used
the ``request.cached_setup()`` helper to manage caching of the ``request.cached_setup()`` helper to manage caching of
resources. Here is a basic example of how we could implement resources. Here is a basic example of how we could implement
a per-session Database object:: a per-session Database object:
.. code-block:: python
# content of conftest.py # content of conftest.py
class Database(object): class Database:
def __init__(self): def __init__(self):
print("database instance created") print("database instance created")
def destroy(self): def destroy(self):
print("database instance destroyed") print("database instance destroyed")
def pytest_funcarg__db(request): def pytest_funcarg__db(request):
return request.cached_setup(setup=Database, return request.cached_setup(
teardown=lambda db: db.destroy(), setup=Database, teardown=lambda db: db.destroy(), scope="session"
scope="session") )
There are several limitations and difficulties with this approach: There are several limitations and difficulties with this approach:
@ -68,7 +72,9 @@ Direct scoping of fixture/funcarg factories
Instead of calling cached_setup() with a cache scope, you can use the Instead of calling cached_setup() with a cache scope, you can use the
:ref:`@pytest.fixture <pytest.fixture>` decorator and directly state :ref:`@pytest.fixture <pytest.fixture>` decorator and directly state
the scope:: the scope:
.. code-block:: python
@pytest.fixture(scope="session") @pytest.fixture(scope="session")
def db(request): def db(request):
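The body of this factory falls outside the hunk; a complete minimal sketch, reusing the ``Database`` class from the earlier conftest example:

.. code-block:: python

    import pytest


    @pytest.fixture(scope="session")
    def db(request):
        # invoked at most once per test session; the result is cached
        # and shared by every test that requests "db"
        return Database()  # Database as defined in the earlier conftest example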
@ -90,11 +96,13 @@ Previously, funcarg factories could not directly cause parametrization.
You needed to specify a ``@parametrize`` decorator on your test function You needed to specify a ``@parametrize`` decorator on your test function
or implement a ``pytest_generate_tests`` hook to perform or implement a ``pytest_generate_tests`` hook to perform
parametrization, i.e. calling a test multiple times with different value parametrization, i.e. calling a test multiple times with different value
sets. pytest-2.3 introduces a decorator for use on the factory itself:: sets. pytest-2.3 introduces a decorator for use on the factory itself:
.. code-block:: python
@pytest.fixture(params=["mysql", "pg"]) @pytest.fixture(params=["mysql", "pg"])
def db(request): def db(request):
... # use request.param ... # use request.param
Here the factory will be invoked twice (with the respective "mysql" Here the factory will be invoked twice (with the respective "mysql"
and "pg" values set as ``request.param`` attributes) and all of and "pg" values set as ``request.param`` attributes) and all of
@ -107,7 +115,9 @@ allow to re-use already written factories because effectively
parametrized via parametrized via
:py:func:`~_pytest.python.Metafunc.parametrize(indirect=True)` calls. :py:func:`~_pytest.python.Metafunc.parametrize(indirect=True)` calls.
Of course it's perfectly fine to combine parametrization and scoping:: Of course it's perfectly fine to combine parametrization and scoping:
.. code-block:: python
@pytest.fixture(scope="session", params=["mysql", "pg"]) @pytest.fixture(scope="session", params=["mysql", "pg"])
def db(request): def db(request):
@ -128,7 +138,9 @@ No ``pytest_funcarg__`` prefix when using @fixture decorator
When using the ``@fixture`` decorator the name of the function When using the ``@fixture`` decorator the name of the function
denotes the name under which the resource can be accessed as a function denotes the name under which the resource can be accessed as a function
argument:: argument:
.. code-block:: python
@pytest.fixture() @pytest.fixture()
def db(request): def db(request):
@ -137,7 +149,9 @@ argument::
The name under which the funcarg resource can be requested is ``db``. The name under which the funcarg resource can be requested is ``db``.
You can still use the "old" non-decorator way of specifying funcarg factories You can still use the "old" non-decorator way of specifying funcarg factories
aka:: aka:
.. code-block:: python
def pytest_funcarg__db(request): def pytest_funcarg__db(request):
... ...
View File
@ -28,19 +28,22 @@ Install ``pytest``
.. code-block:: bash .. code-block:: bash
$ pytest --version $ pytest --version
This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.7/site-packages/pytest.py
.. _`simpletest`: .. _`simpletest`:
Create your first test Create your first test
---------------------------------------------------------- ----------------------------------------------------------
Create a simple test function with just four lines of code:: Create a simple test function with just four lines of code:
.. code-block:: python
# content of test_sample.py # content of test_sample.py
def func(x): def func(x):
return x + 1 return x + 1
def test_answer(): def test_answer():
assert func(3) == 5 assert func(3) == 5
@ -65,8 +68,8 @@ That's it. You can now execute the test function:
E assert 4 == 5 E assert 4 == 5
E + where 4 = func(3) E + where 4 = func(3)
test_sample.py:5: AssertionError test_sample.py:6: AssertionError
========================= 1 failed in 0.12 seconds ========================= ============================ 1 failed in 0.12s =============================
This test returns a failure report because ``func(3)`` does not return ``5``. This test returns a failure report because ``func(3)`` does not return ``5``.
@ -83,13 +86,18 @@ Run multiple tests
Assert that a certain exception is raised Assert that a certain exception is raised
-------------------------------------------------------------- --------------------------------------------------------------
Use the :ref:`raises <assertraises>` helper to assert that some code raises an exception:: Use the :ref:`raises <assertraises>` helper to assert that some code raises an exception:
.. code-block:: python
# content of test_sysexit.py # content of test_sysexit.py
import pytest import pytest
def f(): def f():
raise SystemExit(1) raise SystemExit(1)
def test_mytest(): def test_mytest():
with pytest.raises(SystemExit): with pytest.raises(SystemExit):
f() f()
@ -100,22 +108,24 @@ Execute the test function with “quiet” reporting mode:
$ pytest -q test_sysexit.py $ pytest -q test_sysexit.py
. [100%] . [100%]
1 passed in 0.12 seconds 1 passed in 0.12s
Group multiple tests in a class Group multiple tests in a class
-------------------------------------------------------------- --------------------------------------------------------------
Once you develop multiple tests, you may want to group them into a class. pytest makes it easy to create a class containing more than one test:: Once you develop multiple tests, you may want to group them into a class. pytest makes it easy to create a class containing more than one test:
.. code-block:: python
# content of test_class.py # content of test_class.py
class TestClass(object): class TestClass:
def test_one(self): def test_one(self):
x = "this" x = "this"
assert 'h' in x assert "h" in x
def test_two(self): def test_two(self):
x = "hello" x = "hello"
assert hasattr(x, 'check') assert hasattr(x, "check")
``pytest`` discovers all tests following its :ref:`Conventions for Python test discovery <test discovery>`, so it finds both ``test_`` prefixed functions. There is no need to subclass anything. We can simply run the module by passing its filename: ``pytest`` discovers all tests following its :ref:`Conventions for Python test discovery <test discovery>`, so it finds both ``test_`` prefixed functions. There is no need to subclass anything. We can simply run the module by passing its filename:
@ -130,19 +140,21 @@ Once you develop multiple tests, you may want to group them into a class. pytest
def test_two(self): def test_two(self):
x = "hello" x = "hello"
> assert hasattr(x, 'check') > assert hasattr(x, "check")
E AssertionError: assert False E AssertionError: assert False
E + where False = hasattr('hello', 'check') E + where False = hasattr('hello', 'check')
test_class.py:8: AssertionError test_class.py:8: AssertionError
1 failed, 1 passed in 0.12 seconds 1 failed, 1 passed in 0.12s
The first test passed and the second failed. You can easily see the intermediate values in the assertion to help you understand the reason for the failure. The first test passed and the second failed. You can easily see the intermediate values in the assertion to help you understand the reason for the failure.
Request a unique temporary directory for functional tests Request a unique temporary directory for functional tests
-------------------------------------------------------------- --------------------------------------------------------------
``pytest`` provides `Builtin fixtures/function arguments <https://docs.pytest.org/en/latest/builtin.html#builtinfixtures>`_ to request arbitrary resources, like a unique temporary directory:: ``pytest`` provides `Builtin fixtures/function arguments <https://docs.pytest.org/en/latest/builtin.html>`_ to request arbitrary resources, like a unique temporary directory:
.. code-block:: python
# content of test_tmpdir.py # content of test_tmpdir.py
def test_needsfiles(tmpdir): def test_needsfiles(tmpdir):
@ -168,7 +180,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look
test_tmpdir.py:3: AssertionError test_tmpdir.py:3: AssertionError
--------------------------- Captured stdout call --------------------------- --------------------------- Captured stdout call ---------------------------
PYTEST_TMPDIR/test_needsfiles0 PYTEST_TMPDIR/test_needsfiles0
1 failed in 0.12 seconds 1 failed in 0.12s
More info on tmpdir handling is available at :ref:`Temporary directories and files <tmpdir handling>`. More info on tmpdir handling is available at :ref:`Temporary directories and files <tmpdir handling>`.
View File
@ -12,13 +12,17 @@ pip_ for installing your application and any dependencies,
as well as the ``pytest`` package itself. as well as the ``pytest`` package itself.
This ensures your code and dependencies are isolated from your system Python installation. This ensures your code and dependencies are isolated from your system Python installation.
Next, place a ``setup.py`` file in the root of your package with the following minimum content:: Next, place a ``setup.py`` file in the root of your package with the following minimum content:
.. code-block:: python
from setuptools import setup, find_packages from setuptools import setup, find_packages
setup(name="PACKAGENAME", packages=find_packages()) setup(name="PACKAGENAME", packages=find_packages())
Where ``PACKAGENAME`` is the name of your package. You can then install your package in "editable" mode by running from the same directory:: Where ``PACKAGENAME`` is the name of your package. You can then install your package in "editable" mode by running from the same directory:
.. code-block:: bash
pip install -e . pip install -e .
@ -60,7 +64,9 @@ Tests outside application code
Putting tests into an extra directory outside your actual application code Putting tests into an extra directory outside your actual application code
might be useful if you have many functional tests or for other reasons want might be useful if you have many functional tests or for other reasons want
to keep tests separate from actual application code (often a good idea):: to keep tests separate from actual application code (often a good idea):
.. code-block:: text
setup.py setup.py
mypkg/ mypkg/
@ -82,7 +88,7 @@ This has the following benefits:
.. note:: .. note::
See :ref:`pythonpath` for more information about the difference between calling ``pytest`` and See :ref:`pytest vs python -m pytest` for more information about the difference between calling ``pytest`` and
``python -m pytest``. ``python -m pytest``.
Note that using this scheme your test files must have **unique names**, because Note that using this scheme your test files must have **unique names**, because
@ -92,7 +98,9 @@ be imported as ``test_app`` and ``test_view`` top-level modules by adding ``test
``sys.path``. ``sys.path``.
If you need to have test modules with the same name, you might add ``__init__.py`` files to your If you need to have test modules with the same name, you might add ``__init__.py`` files to your
``tests`` folder and subfolders, changing them to packages:: ``tests`` folder and subfolders, changing them to packages:
.. code-block:: text
setup.py setup.py
mypkg/ mypkg/
@ -114,7 +122,9 @@ This is problematic if you are using a tool like `tox`_ to test your package in
because you want to test the *installed* version of your package, not the local code from the repository. because you want to test the *installed* version of your package, not the local code from the repository.
In this situation, it is **strongly** suggested to use a ``src`` layout where application root package resides in a In this situation, it is **strongly** suggested to use a ``src`` layout where application root package resides in a
sub-directory of your root:: sub-directory of your root:
.. code-block:: text
setup.py setup.py
src/ src/
@ -140,7 +150,9 @@ Tests as part of application code
Inlining test directories into your application package Inlining test directories into your application package
is useful if you have a direct relation between tests and application modules and is useful if you have a direct relation between tests and application modules and
want to distribute them along with your application:: want to distribute them along with your application:
.. code-block:: text
setup.py setup.py
mypkg/ mypkg/
@ -153,7 +165,9 @@ want to distribute them along with your application::
test_view.py test_view.py
... ...
In this scheme, it is easy to run your tests using the ``--pyargs`` option:: In this scheme, it is easy to run your tests using the ``--pyargs`` option:
.. code-block:: bash
pytest --pyargs mypkg pytest --pyargs mypkg
View File
@ -44,7 +44,7 @@ To execute it:
E + where 4 = inc(3) E + where 4 = inc(3)
test_sample.py:6: AssertionError test_sample.py:6: AssertionError
========================= 1 failed in 0.12 seconds ========================= ============================ 1 failed in 0.12s =============================
Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used.
See :ref:`Getting Started <getstarted>` for more examples. See :ref:`Getting Started <getstarted>` for more examples.
@ -61,7 +61,7 @@ Features
- Can run :ref:`unittest <unittest>` (including trial) and :ref:`nose <noseintegration>` test suites out of the box; - Can run :ref:`unittest <unittest>` (including trial) and :ref:`nose <noseintegration>` test suites out of the box;
- Python Python 3.5+ and PyPy 3; - Python 3.5+ and PyPy 3;
- Rich plugin architecture, with over 315+ `external plugins <http://plugincompat.herokuapp.com>`_ and thriving community; - Rich plugin architecture, with over 315+ `external plugins <http://plugincompat.herokuapp.com>`_ and thriving community;
View File
@ -9,7 +9,7 @@ Distributed under the terms of the `MIT`_ license, pytest is free and open sourc
The MIT License (MIT) The MIT License (MIT)
Copyright (c) 2004-2017 Holger Krekel and others Copyright (c) 2004-2019 Holger Krekel and others
Permission is hereby granted, free of charge, to any person obtaining a copy of Permission is hereby granted, free of charge, to any person obtaining a copy of
this software and associated documentation files (the "Software"), to deal in this software and associated documentation files (the "Software"), to deal in
View File
@ -14,7 +14,7 @@
.. _`distribute docs`: .. _`distribute docs`:
.. _`distribute`: https://pypi.org/project/distribute/ .. _`distribute`: https://pypi.org/project/distribute/
.. _`pip`: https://pypi.org/project/pip/ .. _`pip`: https://pypi.org/project/pip/
.. _`venv`: https://docs.python.org/3/library/venv.html/ .. _`venv`: https://docs.python.org/3/library/venv.html
.. _`virtualenv`: https://pypi.org/project/virtualenv/ .. _`virtualenv`: https://pypi.org/project/virtualenv/
.. _hudson: http://hudson-ci.org/ .. _hudson: http://hudson-ci.org/
.. _jenkins: http://jenkins-ci.org/ .. _jenkins: http://jenkins-ci.org/
View File
@ -70,7 +70,9 @@ caplog fixture
^^^^^^^^^^^^^^ ^^^^^^^^^^^^^^
Inside tests it is possible to change the log level for the captured log Inside tests it is possible to change the log level for the captured log
messages. This is supported by the ``caplog`` fixture:: messages. This is supported by the ``caplog`` fixture:
.. code-block:: python
def test_foo(caplog): def test_foo(caplog):
caplog.set_level(logging.INFO) caplog.set_level(logging.INFO)
@ -78,59 +80,69 @@ messages. This is supported by the ``caplog`` fixture::
By default the level is set on the root logger; By default the level is set on the root logger;
however, as a convenience it is also possible to set the log level of any however, as a convenience it is also possible to set the log level of any
logger:: logger:
.. code-block:: python
def test_foo(caplog): def test_foo(caplog):
caplog.set_level(logging.CRITICAL, logger='root.baz') caplog.set_level(logging.CRITICAL, logger="root.baz")
pass pass
The log levels set are restored automatically at the end of the test. The log levels set are restored automatically at the end of the test.
It is also possible to use a context manager to temporarily change the log It is also possible to use a context manager to temporarily change the log
level inside a ``with`` block:: level inside a ``with`` block:
.. code-block:: python
def test_bar(caplog): def test_bar(caplog):
with caplog.at_level(logging.INFO): with caplog.at_level(logging.INFO):
pass pass
Again, by default the level of the root logger is affected but the level of any Again, by default the level of the root logger is affected but the level of any
logger can be changed instead with:: logger can be changed instead with:
.. code-block:: python
def test_bar(caplog): def test_bar(caplog):
with caplog.at_level(logging.CRITICAL, logger='root.baz'): with caplog.at_level(logging.CRITICAL, logger="root.baz"):
pass pass
Lastly, all the logs sent to the logger during the test run are made available on Lastly, all the logs sent to the logger during the test run are made available on
the fixture in the form of both the ``logging.LogRecord`` instances and the final log text. the fixture in the form of both the ``logging.LogRecord`` instances and the final log text.
This is useful for when you want to assert on the contents of a message:: This is useful for when you want to assert on the contents of a message:
.. code-block:: python
def test_baz(caplog): def test_baz(caplog):
func_under_test() func_under_test()
for record in caplog.records: for record in caplog.records:
assert record.levelname != 'CRITICAL' assert record.levelname != "CRITICAL"
assert 'wally' not in caplog.text assert "wally" not in caplog.text
For all the available attributes of the log records see the For all the available attributes of the log records see the
``logging.LogRecord`` class. ``logging.LogRecord`` class.
You can also resort to ``record_tuples`` if all you want to do is to ensure You can also resort to ``record_tuples`` if all you want to do is to ensure
that certain messages have been logged under a given logger name with a given that certain messages have been logged under a given logger name with a given
severity and message:: severity and message:
.. code-block:: python
def test_foo(caplog): def test_foo(caplog):
logging.getLogger().info('boo %s', 'arg') logging.getLogger().info("boo %s", "arg")
assert caplog.record_tuples == [ assert caplog.record_tuples == [("root", logging.INFO, "boo arg")]
('root', logging.INFO, 'boo arg'),
]
You can call ``caplog.clear()`` to reset the captured log records in a test:: You can call ``caplog.clear()`` to reset the captured log records in a test:
.. code-block:: python
def test_something_with_clearing_records(caplog): def test_something_with_clearing_records(caplog):
some_method_that_creates_log_records() some_method_that_creates_log_records()
caplog.clear() caplog.clear()
your_test_method() your_test_method()
assert ['Foo'] == [rec.message for rec in caplog.records] assert ["Foo"] == [rec.message for rec in caplog.records]
The ``caplog.records`` attribute contains records from the current stage only, so The ``caplog.records`` attribute contains records from the current stage only, so
@ -149,7 +161,7 @@ the records for the ``setup`` and ``call`` stages during teardown like so:
yield window yield window
for when in ("setup", "call"): for when in ("setup", "call"):
messages = [ messages = [
x.message for x in caplog.get_records(when) if x.level == logging.WARNING x.message for x in caplog.get_records(when) if x.levelno == logging.WARNING
] ]
if messages: if messages:
pytest.fail( pytest.fail(
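The hunk cuts this example off mid-call; the complete shape of the idea is roughly the following sketch, where ``create_window()`` is a hypothetical setup helper standing in for the docs' fixture body:

.. code-block:: python

    import logging

    import pytest


    @pytest.fixture
    def window(caplog):
        window = create_window()  # hypothetical setup helper
        yield window
        # at teardown, inspect records captured during the setup and call stages
        for when in ("setup", "call"):
            messages = [
                x.message for x in caplog.get_records(when) if x.levelno == logging.WARNING
            ]
            if messages:
                pytest.fail(
                    "warning messages encountered during testing: {}".format(messages)
                )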
View File
@ -40,7 +40,7 @@ You can register custom marks in your ``pytest.ini`` file like this:
Note that everything after the ``:`` is an optional description. Note that everything after the ``:`` is an optional description.
Alternatively, you can register new markers programatically in a Alternatively, you can register new markers programmatically in a
:ref:`pytest_configure <initialization-hooks>` hook: :ref:`pytest_configure <initialization-hooks>` hook:
.. code-block:: python .. code-block:: python
View File
@ -46,10 +46,13 @@ environment variable is missing, or to set multiple values to a known variable.
:py:meth:`monkeypatch.setenv` and :py:meth:`monkeypatch.delenv` can be used for :py:meth:`monkeypatch.setenv` and :py:meth:`monkeypatch.delenv` can be used for
these patches. these patches.
4. Use :py:meth:`monkeypatch.syspath_prepend` to modify the system ``$PATH`` safely, and 4. Use ``monkeypatch.setenv("PATH", value, prepend=os.pathsep)`` to modify ``$PATH``, and
:py:meth:`monkeypatch.chdir` to change the context of the current working directory :py:meth:`monkeypatch.chdir` to change the context of the current working directory
during a test. during a test.
5. Use :py:meth:`monkeypatch.syspath_prepend` to modify ``sys.path`` which will also
call :py:meth:`pkg_resources.fixup_namespace_packages` and :py:meth:`importlib.invalidate_caches`.
See the `monkeypatch blog post`_ for some introduction material See the `monkeypatch blog post`_ for some introduction material
and a discussion of its motivation. and a discussion of its motivation.
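A minimal sketch of points 4 and 5 together (the paths are illustrative):

.. code-block:: python

    import os


    def test_tool_lookup(monkeypatch):
        # prepend a directory to $PATH for the duration of this test only
        monkeypatch.setenv("PATH", "/opt/mytool/bin", prepend=os.pathsep)
        # make an extra directory importable; import caches are invalidated
        monkeypatch.syspath_prepend("/opt/mytool/lib")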
@ -269,7 +272,7 @@ to do this using the ``setenv`` and ``delenv`` method. Our example code to test:
username = os.getenv("USER") username = os.getenv("USER")
if username is None: if username is None:
raise EnvironmentError("USER environment is not set.") raise OSError("USER environment is not set.")
return username.lower() return username.lower()
@ -293,7 +296,7 @@ both paths can be safely tested without impacting the running environment:
"""Remove the USER env var and assert EnvironmentError is raised.""" """Remove the USER env var and assert EnvironmentError is raised."""
monkeypatch.delenv("USER", raising=False) monkeypatch.delenv("USER", raising=False)
with pytest.raises(EnvironmentError): with pytest.raises(OSError):
_ = get_os_user_lower() _ = get_os_user_lower()
This behavior can be moved into ``fixture`` structures and shared across tests: This behavior can be moved into ``fixture`` structures and shared across tests:
@ -320,7 +323,7 @@ This behavior can be moved into ``fixture`` structures and shared across tests:
def test_raise_exception(mock_env_missing): def test_raise_exception(mock_env_missing):
with pytest.raises(EnvironmentError): with pytest.raises(OSError):
_ = get_os_user_lower() _ = get_os_user_lower()
View File
@ -75,7 +75,7 @@ them in turn:
E + where 54 = eval('6*9') E + where 54 = eval('6*9')
test_expectation.py:6: AssertionError test_expectation.py:6: AssertionError
==================== 1 failed, 2 passed in 0.12 seconds ==================== ======================= 1 failed, 2 passed in 0.12s ========================
.. note:: .. note::
@ -128,7 +128,7 @@ Let's run this:
test_expectation.py ..x [100%] test_expectation.py ..x [100%]
=================== 2 passed, 1 xfailed in 0.12 seconds ==================== ======================= 2 passed, 1 xfailed in 0.12s =======================
The one parameter set which caused a failure previously now The one parameter set which caused a failure previously now
shows up as an "xfailed (expected to fail)" test. shows up as an "xfailed (expected to fail)" test.
@ -205,7 +205,7 @@ If we now pass two stringinput values, our test will run twice:
$ pytest -q --stringinput="hello" --stringinput="world" test_strings.py $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py
.. [100%] .. [100%]
2 passed in 0.12 seconds 2 passed in 0.12s
Let's also run with a stringinput that will lead to a failing test: Let's also run with a stringinput that will lead to a failing test:
@ -225,7 +225,7 @@ Let's also run with a stringinput that will lead to a failing test:
E + where <built-in method isalpha of str object at 0xdeadbeef> = '!'.isalpha E + where <built-in method isalpha of str object at 0xdeadbeef> = '!'.isalpha
test_strings.py:4: AssertionError test_strings.py:4: AssertionError
1 failed in 0.12 seconds 1 failed in 0.12s
As expected our test function fails. As expected our test function fails.
@ -239,7 +239,7 @@ list:
s [100%] s [100%]
========================= short test summary info ========================== ========================= short test summary info ==========================
SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:2 SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:2
1 skipped in 0.12 seconds 1 skipped in 0.12s
Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across
those sets must not be duplicated; otherwise an error will be raised. those sets must not be duplicated; otherwise an error will be raised.
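A sketch of the constraint, assuming a test function that requests parameters ``x`` and ``y``:

.. code-block:: python

    # content of conftest.py (illustrative)
    def pytest_generate_tests(metafunc):
        if {"x", "y"} <= set(metafunc.fixturenames):
            metafunc.parametrize("x", [0, 1])
            metafunc.parametrize("y", [2, 3])  # distinct name: fine
            # metafunc.parametrize("x", [4, 5])  # duplicate name: raises an error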
View File
@ -8,7 +8,9 @@ Installing and Using plugins
This section talks about installing and using third party plugins. This section talks about installing and using third party plugins.
For writing your own plugins, please refer to :ref:`writing-plugins`. For writing your own plugins, please refer to :ref:`writing-plugins`.
Installing a third party plugin can be easily done with ``pip``:: Installing a third party plugin can be easily done with ``pip``:
.. code-block:: bash
pip install pytest-NAME pip install pytest-NAME
pip uninstall pytest-NAME pip uninstall pytest-NAME
@ -95,7 +97,9 @@ Finding out which plugins are active
------------------------------------ ------------------------------------
If you want to find out which plugins are active in your If you want to find out which plugins are active in your
environment you can type:: environment you can type:
.. code-block:: bash
pytest --trace-config pytest --trace-config
@ -108,7 +112,9 @@ and their names. It will also print local plugins aka
Deactivating / unregistering a plugin by name Deactivating / unregistering a plugin by name
--------------------------------------------- ---------------------------------------------
You can prevent plugins from loading or unregister them:: You can prevent plugins from loading or unregister them:
.. code-block:: bash
pytest -p no:NAME pytest -p no:NAME
View File
@ -28,7 +28,6 @@ Here are some examples of projects using ``pytest`` (please send notes via :ref:
* `sentry <https://getsentry.com/welcome/>`_, realtime app-maintenance and exception tracking * `sentry <https://getsentry.com/welcome/>`_, realtime app-maintenance and exception tracking
* `Astropy <http://www.astropy.org/>`_ and `affiliated packages <http://www.astropy.org/affiliated/index.html>`_ * `Astropy <http://www.astropy.org/>`_ and `affiliated packages <http://www.astropy.org/affiliated/index.html>`_
* `tox <http://testrun.org/tox>`_, virtualenv/Hudson integration tool * `tox <http://testrun.org/tox>`_, virtualenv/Hudson integration tool
* `PIDA <http://pida.co.uk>`_ framework for integrated development
* `PyPM <http://code.activestate.com/pypm/>`_ ActiveState's package manager * `PyPM <http://code.activestate.com/pypm/>`_ ActiveState's package manager
* `Fom <http://packages.python.org/Fom/>`_ a fluid object mapper for FluidDB * `Fom <http://packages.python.org/Fom/>`_ a fluid object mapper for FluidDB
* `applib <https://github.com/ActiveState/applib>`_ cross-platform utilities * `applib <https://github.com/ActiveState/applib>`_ cross-platform utilities
@ -37,10 +36,10 @@ Here are some examples of projects using ``pytest`` (please send notes via :ref:
* `mwlib <https://pypi.org/project/mwlib/>`_ mediawiki parser and utility library * `mwlib <https://pypi.org/project/mwlib/>`_ mediawiki parser and utility library
* `The Translate Toolkit <http://translate.sourceforge.net/wiki/toolkit/index>`_ for localization and conversion * `The Translate Toolkit <http://translate.sourceforge.net/wiki/toolkit/index>`_ for localization and conversion
* `execnet <http://codespeak.net/execnet>`_ rapid multi-Python deployment * `execnet <http://codespeak.net/execnet>`_ rapid multi-Python deployment
* `pylib <https://py.readthedocs.io>`_ cross-platform path, IO, dynamic code library * `pylib <https://pylib.readthedocs.io/en/stable/>`_ cross-platform path, IO, dynamic code library
* `Pacha <http://pacha.cafepais.com/>`_ configuration management in five minutes
* `bbfreeze <https://pypi.org/project/bbfreeze/>`_ create standalone executables from Python scripts * `bbfreeze <https://pypi.org/project/bbfreeze/>`_ create standalone executables from Python scripts
* `pdb++ <http://bitbucket.org/antocuni/pdb>`_ a fancier version of PDB * `pdb++ <https://github.com/pdbpp/pdbpp>`_ a fancier version of PDB
* `pudb <https://github.com/inducer/pudb>`_ full-screen console debugger for python
* `py-s3fuse <http://code.google.com/p/py-s3fuse/>`_ Amazon S3 FUSE based filesystem * `py-s3fuse <http://code.google.com/p/py-s3fuse/>`_ Amazon S3 FUSE based filesystem
* `waskr <http://code.google.com/p/waskr/>`_ WSGI Stats Middleware * `waskr <http://code.google.com/p/waskr/>`_ WSGI Stats Middleware
* `guachi <http://code.google.com/p/guachi/>`_ global persistent configs for Python modules * `guachi <http://code.google.com/p/guachi/>`_ global persistent configs for Python modules
@ -77,7 +76,7 @@ Some organisations using pytest
* `Tandberg <http://www.tandberg.com/>`_ * `Tandberg <http://www.tandberg.com/>`_
* `Shootq <http://web.shootq.com/>`_ * `Shootq <http://web.shootq.com/>`_
* `Stups department of Heinrich Heine University Duesseldorf <http://www.stups.uni-duesseldorf.de/projects.php>`_ * `Stups department of Heinrich Heine University Duesseldorf <http://www.stups.uni-duesseldorf.de/projects.php>`_
* `cellzome <http://www.cellzome.com/>`_ * cellzome
* `Open End, Gothenborg <http://www.openend.se>`_ * `Open End, Gothenborg <http://www.openend.se>`_
* `Laboratory of Bioinformatics, Warsaw <http://genesilico.pl/>`_ * `Laboratory of Bioinformatics, Warsaw <http://genesilico.pl/>`_
* `merlinux, Germany <http://merlinux.eu>`_ * `merlinux, Germany <http://merlinux.eu>`_
View File
@ -7,8 +7,8 @@ Python 3.4's last release is scheduled for
`March 2019 <https://www.python.org/dev/peps/pep-0429/#release-schedule>`__. pytest is one of `March 2019 <https://www.python.org/dev/peps/pep-0429/#release-schedule>`__. pytest is one of
the participating projects of https://python3statement.org. the participating projects of https://python3statement.org.
The **pytest 4.6** series will be the last to support Python 2.7 and 3.4, and is scheduled The **pytest 4.6** series is the last to support Python 2.7 and 3.4, and was released in
to be released by **mid-2019**. **pytest 5.0** and onwards will support only Python 3.5+. **June 2019**. **pytest 5.0** and onwards will support only Python 3.5+.
Thanks to the `python_requires`_ ``setuptools`` option, Thanks to the `python_requires`_ ``setuptools`` option,
Python 2.7 and Python 3.4 users using a modern ``pip`` version Python 2.7 and Python 3.4 users using a modern ``pip`` version
@ -24,3 +24,8 @@ branch will continue to exist so the community itself can contribute patches. Th
be happy to accept those patches and make new ``4.6`` releases **until mid-2020**. be happy to accept those patches and make new ``4.6`` releases **until mid-2020**.
.. _`python_requires`: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires .. _`python_requires`: https://packaging.python.org/guides/distributing-packages-using-setuptools/#python-requires
Technical Aspects
-----------------
The technical aspects of the Python 2.7 and 3.4 support plan (such as when releases will occur, how to backport fixes, etc.) are described in issue `#5275 <https://github.com/pytest-dev/pytest/issues/5275>`__.
View File
@ -22,7 +22,9 @@ Consider this file and directory layout::
|- test_foo.py |- test_foo.py
When executing:: When executing:
.. code-block:: bash
pytest root/ pytest root/
@ -54,7 +56,9 @@ Consider this file and directory layout::
|- test_foo.py |- test_foo.py
When executing:: When executing:
.. code-block:: bash
pytest root/ pytest root/
@ -68,6 +72,8 @@ imported in the global import namespace.
This is also discussed in detail in :ref:`test discovery`. This is also discussed in detail in :ref:`test discovery`.
.. _`pytest vs python -m pytest`:
Invoking ``pytest`` versus ``python -m pytest`` Invoking ``pytest`` versus ``python -m pytest``
----------------------------------------------- -----------------------------------------------
View File
@ -1,5 +1,5 @@
Reference API Reference
========= =============
This page contains the full reference to pytest's API. This page contains the full reference to pytest's API.
@ -27,6 +27,8 @@ pytest.skip
.. autofunction:: _pytest.outcomes.skip(msg, [allow_module_level=False]) .. autofunction:: _pytest.outcomes.skip(msg, [allow_module_level=False])
.. _`pytest.importorskip ref`:
pytest.importorskip pytest.importorskip
~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~
@ -57,7 +59,7 @@ pytest.raises
**Tutorial**: :ref:`assertraises`. **Tutorial**: :ref:`assertraises`.
.. autofunction:: pytest.raises(expected_exception: Exception, [match], [message]) .. autofunction:: pytest.raises(expected_exception: Exception, [match])
:with: excinfo :with: excinfo
pytest.deprecated_call pytest.deprecated_call
@ -469,9 +471,11 @@ testdir
This fixture provides a :class:`Testdir` instance useful for black-box testing of test files, making it ideal to This fixture provides a :class:`Testdir` instance useful for black-box testing of test files, making it ideal to
test plugins. test plugins.
To use it, include in your top-most ``conftest.py`` file:: To use it, include in your top-most ``conftest.py`` file:
pytest_plugins = 'pytester' .. code-block:: python
pytest_plugins = "pytester"
@ -999,7 +1003,9 @@ passed multiple times. The expected format is ``name=value``. For example::
[pytest] [pytest]
addopts = --maxfail=2 -rf # exit after 2 failures, report fail info addopts = --maxfail=2 -rf # exit after 2 failures, report fail info
issuing ``pytest test_hello.py`` actually means:: issuing ``pytest test_hello.py`` actually means:
.. code-block:: bash
pytest --maxfail=2 -rf test_hello.py pytest --maxfail=2 -rf test_hello.py
View File
@ -145,7 +145,7 @@ You can use the ``skipif`` marker (as any other marker) on classes:
.. code-block:: python .. code-block:: python
@pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows") @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
class TestPosixCalls(object): class TestPosixCalls:
def test_function(self): def test_function(self):
"will not be setup or run under 'win32' platform" "will not be setup or run under 'win32' platform"
@ -179,14 +179,17 @@ information.
Skipping on a missing import dependency Skipping on a missing import dependency
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
You can use the following helper at module level You can skip tests on a missing import by using :ref:`pytest.importorskip ref`
or within a test or test setup function:: at module level, within a test, or in a test setup function.
.. code-block:: python
docutils = pytest.importorskip("docutils") docutils = pytest.importorskip("docutils")
If ``docutils`` cannot be imported here, this will lead to a If ``docutils`` cannot be imported here, this will lead to a skip outcome of
skip outcome of the test. You can also skip based on the the test. You can also skip based on the version number of a library:
version number of a library::
.. code-block:: python
docutils = pytest.importorskip("docutils", minversion="0.3") docutils = pytest.importorskip("docutils", minversion="0.3")
@ -223,7 +226,9 @@ XFail: mark test functions as expected to fail
---------------------------------------------- ----------------------------------------------
You can use the ``xfail`` marker to indicate that you You can use the ``xfail`` marker to indicate that you
expect a test to fail:: expect a test to fail:
.. code-block:: python
@pytest.mark.xfail @pytest.mark.xfail
def test_function(): def test_function():
@ -366,7 +371,7 @@ Running it with the report-on-xfail option gives this output:
XFAIL xfail_demo.py::test_hello6 XFAIL xfail_demo.py::test_hello6
reason: reason reason: reason
XFAIL xfail_demo.py::test_hello7 XFAIL xfail_demo.py::test_hello7
======================== 7 xfailed in 0.12 seconds ========================= ============================ 7 xfailed in 0.12s ============================
.. _`skip/xfail with parametrize`: .. _`skip/xfail with parametrize`:
View File
@ -2,12 +2,6 @@
Talks and Tutorials Talks and Tutorials
========================== ==========================
.. sidebar:: Next Open Trainings
- `Training at Europython 2019 <https://ep2019.europython.eu/talks/94WEnsY-introduction-to-pytest/>`_, 8th July 2019, Basel, Switzerland.
- `Training at Workshoptage 2019 <https://workshoptage.ch/workshops/2019/test-driven-development-fuer-python-mit-pytest/>`_ (German), 10th September 2019, Rapperswil, Switzerland.
.. _`funcargs`: funcargs.html .. _`funcargs`: funcargs.html
Books Books
View File
@ -64,7 +64,7 @@ Running this would result in a passed test except for the last
E assert 0 E assert 0
test_tmp_path.py:13: AssertionError test_tmp_path.py:13: AssertionError
========================= 1 failed in 0.12 seconds ========================= ============================ 1 failed in 0.12s =============================
.. _`tmp_path_factory example`: .. _`tmp_path_factory example`:
@ -90,10 +90,14 @@ provide a temporary directory unique to the test invocation,
created in the `base temporary directory`_. created in the `base temporary directory`_.
``tmpdir`` is a `py.path.local`_ object which offers ``os.path`` methods ``tmpdir`` is a `py.path.local`_ object which offers ``os.path`` methods
and more. Here is an example test usage:: and more. Here is an example test usage:
.. code-block:: python
# content of test_tmpdir.py # content of test_tmpdir.py
import os import os
def test_create_file(tmpdir): def test_create_file(tmpdir):
p = tmpdir.mkdir("sub").join("hello.txt") p = tmpdir.mkdir("sub").join("hello.txt")
p.write("content") p.write("content")
@ -128,8 +132,8 @@ Running this would result in a passed test except for the last
> assert 0 > assert 0
E assert 0 E assert 0
test_tmpdir.py:7: AssertionError test_tmpdir.py:9: AssertionError
========================= 1 failed in 0.12 seconds ========================= ============================ 1 failed in 0.12s =============================
.. _`tmpdir factory example`: .. _`tmpdir factory example`:
View File
@ -10,7 +10,9 @@ It's meant for leveraging existing ``unittest``-based test suites
to use pytest as a test runner and also allow you to incrementally adapt to use pytest as a test runner and also allow you to incrementally adapt
the test suite to take full advantage of pytest's features. the test suite to take full advantage of pytest's features.
To run an existing ``unittest``-style test suite using ``pytest``, type:: To run an existing ``unittest``-style test suite using ``pytest``, type:
.. code-block:: bash
pytest tests pytest tests
@ -78,7 +80,9 @@ Running your unittest with ``pytest`` allows you to use its
tests. Assuming you have at least skimmed the pytest fixture features, tests. Assuming you have at least skimmed the pytest fixture features,
let's jump-start into an example that integrates a pytest ``db_class`` let's jump-start into an example that integrates a pytest ``db_class``
fixture, setting up a class-cached database object, and then reference fixture, setting up a class-cached database object, and then reference
it from a unittest-style test:: it from a unittest-style test:
.. code-block:: python
# content of conftest.py # content of conftest.py
@ -87,10 +91,12 @@ it from a unittest-style test::
import pytest import pytest
@pytest.fixture(scope="class") @pytest.fixture(scope="class")
def db_class(request): def db_class(request):
class DummyDB(object): class DummyDB:
pass pass
# set a class attribute on the invoking test context # set a class attribute on the invoking test context
request.cls.db = DummyDB() request.cls.db = DummyDB()
@ -103,21 +109,24 @@ as the ``cls`` attribute, denoting the class from which the fixture
is used. This architecture de-couples fixture writing from actual test is used. This architecture de-couples fixture writing from actual test
code and allows re-use of the fixture by a minimal reference, the fixture code and allows re-use of the fixture by a minimal reference, the fixture
name. So let's write an actual ``unittest.TestCase`` class using our name. So let's write an actual ``unittest.TestCase`` class using our
fixture definition:: fixture definition:
.. code-block:: python
# content of test_unittest_db.py # content of test_unittest_db.py
import unittest import unittest
import pytest import pytest
@pytest.mark.usefixtures("db_class") @pytest.mark.usefixtures("db_class")
class MyTest(unittest.TestCase): class MyTest(unittest.TestCase):
def test_method1(self): def test_method1(self):
assert hasattr(self, "db") assert hasattr(self, "db")
assert 0, self.db # fail for demo purposes assert 0, self.db # fail for demo purposes
def test_method2(self): def test_method2(self):
assert 0, self.db # fail for demo purposes assert 0, self.db # fail for demo purposes
The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that The ``@pytest.mark.usefixtures("db_class")`` class-decorator makes sure that
the pytest fixture function ``db_class`` is called once per class. the pytest fixture function ``db_class`` is called once per class.
@ -142,22 +151,22 @@ the ``self.db`` values in the traceback:
def test_method1(self): def test_method1(self):
assert hasattr(self, "db") assert hasattr(self, "db")
> assert 0, self.db # fail for demo purposes > assert 0, self.db # fail for demo purposes
E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef> E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
E assert 0 E assert 0
test_unittest_db.py:9: AssertionError test_unittest_db.py:10: AssertionError
___________________________ MyTest.test_method2 ____________________________ ___________________________ MyTest.test_method2 ____________________________
self = <test_unittest_db.MyTest testMethod=test_method2> self = <test_unittest_db.MyTest testMethod=test_method2>
def test_method2(self): def test_method2(self):
> assert 0, self.db # fail for demo purposes > assert 0, self.db # fail for demo purposes
E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef> E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
E assert 0 E assert 0
test_unittest_db.py:12: AssertionError test_unittest_db.py:13: AssertionError
========================= 2 failed in 0.12 seconds ========================= ============================ 2 failed in 0.12s =============================
This default pytest traceback shows that the two test methods This default pytest traceback shows that the two test methods
share the same ``self.db`` instance which was our intention share the same ``self.db`` instance which was our intention
@ -179,17 +188,19 @@ Let's look at an ``initdir`` fixture which makes all test methods of a
``TestCase`` class execute in a temporary directory with a ``TestCase`` class execute in a temporary directory with a
pre-initialized ``samplefile.ini``. Our ``initdir`` fixture itself uses pre-initialized ``samplefile.ini``. Our ``initdir`` fixture itself uses
the pytest builtin :ref:`tmpdir <tmpdir>` fixture to delegate the the pytest builtin :ref:`tmpdir <tmpdir>` fixture to delegate the
creation of a per-test temporary directory:: creation of a per-test temporary directory:
.. code-block:: python
# content of test_unittest_cleandir.py # content of test_unittest_cleandir.py
import pytest import pytest
import unittest import unittest
class MyTest(unittest.TestCase):
class MyTest(unittest.TestCase):
@pytest.fixture(autouse=True) @pytest.fixture(autouse=True)
def initdir(self, tmpdir): def initdir(self, tmpdir):
tmpdir.chdir() # change to pytest-provided temporary directory tmpdir.chdir() # change to pytest-provided temporary directory
tmpdir.join("samplefile.ini").write("# testdata") tmpdir.join("samplefile.ini").write("# testdata")
def test_method(self): def test_method(self):
@ -208,7 +219,7 @@ Running this test module ...:
$ pytest -q test_unittest_cleandir.py $ pytest -q test_unittest_cleandir.py
. [100%] . [100%]
1 passed in 0.12 seconds 1 passed in 0.12s
... gives us one passed test because the ``initdir`` fixture function ... gives us one passed test because the ``initdir`` fixture function
was executed ahead of the ``test_method``. was executed ahead of the ``test_method``.
View File
@ -33,7 +33,19 @@ Running ``pytest`` can result in six different exit codes:
:Exit code 4: pytest command line usage error :Exit code 4: pytest command line usage error
:Exit code 5: No tests were collected :Exit code 5: No tests were collected
They are represented by the :class:`_pytest.main.ExitCode` enum. They are represented by the :class:`_pytest.main.ExitCode` enum. The exit codes, being part of the public API, can be imported and accessed directly using:
.. code-block:: python
from pytest import ExitCode
.. note::
If you would like to customize the exit code in some scenarios, especially when If you would like to customize the exit code in some scenarios, especially when
no tests are collected, consider using the
`pytest-custom_exit_code <https://github.com/yashtodi94/pytest-custom_exit_code>`__
plugin.
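For example, a script driving pytest might branch on the returned code; a sketch (the test path is illustrative):

.. code-block:: python

    import pytest
    from pytest import ExitCode

    retcode = pytest.main(["tests/"])
    if retcode == ExitCode.NO_TESTS_COLLECTED:
        print("no tests were collected")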
Getting help on version, option names, environment variables Getting help on version, option names, environment variables
-------------------------------------------------------------- --------------------------------------------------------------
@ -235,7 +247,7 @@ Example:
XPASS test_example.py::test_xpass always xfail XPASS test_example.py::test_xpass always xfail
ERROR test_example.py::test_error - assert 0 ERROR test_example.py::test_error - assert 0
FAILED test_example.py::test_fail - assert 0 FAILED test_example.py::test_fail - assert 0
= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===
The ``-r`` option accepts a number of characters after it, with ``a`` used The ``-r`` option accepts a number of characters after it, with ``a`` used
above meaning "all except passes". above meaning "all except passes".
@ -285,7 +297,7 @@ More than one character can be used, so for example to only see failed and skipp
========================= short test summary info ========================== ========================= short test summary info ==========================
FAILED test_example.py::test_fail - assert 0 FAILED test_example.py::test_fail - assert 0
SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test
= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===
Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had
captured output: captured output:
@ -324,7 +336,7 @@ captured output:
ok ok
========================= short test summary info ========================== ========================= short test summary info ==========================
PASSED test_example.py::test_ok PASSED test_example.py::test_ok
= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds = == 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12s ===
.. _pdb-option: .. _pdb-option:
@ -640,7 +652,7 @@ to all tests.
record_testsuite_property("STORAGE_TYPE", "CEPH") record_testsuite_property("STORAGE_TYPE", "CEPH")
class TestMe(object): class TestMe:
def test_foo(self): def test_foo(self):
assert True assert True
@ -706,6 +718,11 @@ for example ``-x`` if you only want to send one particular failure.
Currently only pasting to the http://bpaste.net service is implemented. Currently only pasting to the http://bpaste.net service is implemented.
.. versionchanged:: 5.2
If creating the URL fails for any reason, a warning is generated instead of failing the
entire test suite.
Early loading plugins Early loading plugins
--------------------- ---------------------
@ -742,24 +759,33 @@ Calling pytest from Python code
You can invoke ``pytest`` from Python code directly:: You can invoke ``pytest`` from Python code directly:
.. code-block:: python
pytest.main() pytest.main()
this acts as if you would call "pytest" from the command line. this acts as if you would call "pytest" from the command line.
It will not raise ``SystemExit`` but return the exitcode instead. It will not raise ``SystemExit`` but return the exitcode instead.
You can pass in options and arguments:: You can pass in options and arguments:
pytest.main(['-x', 'mytestdir']) .. code-block:: python
You can specify additional plugins to ``pytest.main``:: pytest.main(["-x", "mytestdir"])
You can specify additional plugins to ``pytest.main``:
.. code-block:: python
# content of myinvoke.py # content of myinvoke.py
import pytest import pytest
class MyPlugin(object):
class MyPlugin:
def pytest_sessionfinish(self): def pytest_sessionfinish(self):
print("*** test run reporting finishing") print("*** test run reporting finishing")
pytest.main(["-qq"], plugins=[MyPlugin()]) pytest.main(["-qq"], plugins=[MyPlugin()])
Running it will show that ``MyPlugin`` was added and its Running it will show that ``MyPlugin`` was added and its
View File
@ -41,7 +41,7 @@ Running pytest now produces this output:
warnings.warn(UserWarning("api v1, should use functions from v2")) warnings.warn(UserWarning("api v1, should use functions from v2"))
-- Docs: https://docs.pytest.org/en/latest/warnings.html -- Docs: https://docs.pytest.org/en/latest/warnings.html
=================== 1 passed, 1 warnings in 0.12 seconds =================== ====================== 1 passed, 1 warnings in 0.12s =======================
The ``-W`` flag can be passed to control which warnings will be displayed or even turn The ``-W`` flag can be passed to control which warnings will be displayed or even turn
them into errors: them into errors:
@ -64,7 +64,7 @@ them into errors:
E UserWarning: api v1, should use functions from v2 E UserWarning: api v1, should use functions from v2
test_show_warnings.py:5: UserWarning test_show_warnings.py:5: UserWarning
1 failed in 0.12 seconds 1 failed in 0.12s
The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option. The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option.
For example, the configuration below will ignore all user warnings, but will transform For example, the configuration below will ignore all user warnings, but will transform
@ -127,7 +127,7 @@ decorator or to all tests in a module by setting the ``pytestmark`` variable:
*Credits go to Florian Schulze for the reference implementation in the* `pytest-warnings`_ *Credits go to Florian Schulze for the reference implementation in the* `pytest-warnings`_
*plugin.* *plugin.*
.. _`-W option`: https://docs.python.org/3/using/cmdline.html?highlight=#cmdoption-W .. _`-W option`: https://docs.python.org/3/using/cmdline.html#cmdoption-w
.. _warnings.simplefilter: https://docs.python.org/3/library/warnings.html#warnings.simplefilter .. _warnings.simplefilter: https://docs.python.org/3/library/warnings.html#warnings.simplefilter
.. _`pytest-warnings`: https://github.com/fschulze/pytest-warnings .. _`pytest-warnings`: https://github.com/fschulze/pytest-warnings
@ -180,6 +180,7 @@ This will ignore all warnings of type ``DeprecationWarning`` where the start of
the regular expression ``".*U.*mode is deprecated"``. the regular expression ``".*U.*mode is deprecated"``.
.. note:: .. note::
If warnings are configured at the interpreter level, using If warnings are configured at the interpreter level, using
the `PYTHONWARNINGS <https://docs.python.org/3/using/cmdline.html#envvar-PYTHONWARNINGS>`_ environment variable or the the `PYTHONWARNINGS <https://docs.python.org/3/using/cmdline.html#envvar-PYTHONWARNINGS>`_ environment variable or the
``-W`` command-line option, pytest will not configure any filters by default. ``-W`` command-line option, pytest will not configure any filters by default.
@ -277,7 +278,9 @@ argument ``match`` to assert that the exception matches a text or regex::
... ...
Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted... Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted...
You can also call ``pytest.warns`` on a function or code string:: You can also call ``pytest.warns`` on a function or code string:
.. code-block:: python
pytest.warns(expected_warning, func, *args, **kwargs) pytest.warns(expected_warning, func, *args, **kwargs)
pytest.warns(expected_warning, "func(*args, **kwargs)") pytest.warns(expected_warning, "func(*args, **kwargs)")
@ -404,14 +407,14 @@ defines an ``__init__`` constructor, as this prevents the class from being insta
class Test: class Test:
-- Docs: https://docs.pytest.org/en/latest/warnings.html -- Docs: https://docs.pytest.org/en/latest/warnings.html
1 warnings in 0.12 seconds 1 warnings in 0.12s
These warnings might be filtered using the same builtin mechanisms used to filter other types of warnings. These warnings might be filtered using the same builtin mechanisms used to filter other types of warnings.
Please read our :ref:`backwards-compatibility` to learn how we go about deprecating and eventually removing Please read our :ref:`backwards-compatibility` to learn how we go about deprecating and eventually removing
features. features.
The following warning types ares used by pytest and are part of the public API: The following warning types are used by pytest and are part of the public API:
.. autoclass:: pytest.PytestWarning .. autoclass:: pytest.PytestWarning
View File
@ -164,7 +164,7 @@ If a package is installed this way, ``pytest`` will load
.. note:: .. note::
Make sure to include ``Framework :: Pytest`` in your list of Make sure to include ``Framework :: Pytest`` in your list of
`PyPI classifiers <https://python-packaging-user-guide.readthedocs.io/distributing/#classifiers>`_ `PyPI classifiers <https://pypi.org/classifiers/>`_
to make it easy for users to find your plugin. to make it easy for users to find your plugin.
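A hypothetical ``setup()`` excerpt carrying that classifier (package and module names invented for illustration):

.. code-block:: python

    from setuptools import setup

    setup(
        name="pytest-myplugin",
        packages=["pytest_myplugin"],
        classifiers=["Framework :: Pytest"],
    )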
@ -442,7 +442,7 @@ additionally it is possible to copy examples for an example folder before runnin
testdir.copy_example("test_example.py") testdir.copy_example("test_example.py")
-- Docs: https://docs.pytest.org/en/latest/warnings.html -- Docs: https://docs.pytest.org/en/latest/warnings.html
=================== 2 passed, 1 warnings in 0.12 seconds =================== ====================== 2 passed, 1 warnings in 0.12s =======================
For more information about the result object that ``runpytest()`` returns, and For more information about the result object that ``runpytest()`` returns, and
the methods that it provides please check out the :py:class:`RunResult the methods that it provides please check out the :py:class:`RunResult
@ -693,7 +693,7 @@ declaring the hook functions directly in your plugin module, for example:
# contents of myplugin.py # contents of myplugin.py
class DeferPlugin(object): class DeferPlugin:
"""Simple plugin to defer pytest-xdist hook functions.""" """Simple plugin to defer pytest-xdist hook functions."""
def pytest_testnodedown(self, node, error): def pytest_testnodedown(self, node, error):
View File
@ -27,11 +27,14 @@ Module level setup/teardown
If you have multiple test functions and test classes in a single If you have multiple test functions and test classes in a single
module you can optionally implement the following fixture methods module you can optionally implement the following fixture methods
which will usually be called once for all the functions:: which will usually be called once for all the functions:
.. code-block:: python
def setup_module(module): def setup_module(module):
""" setup any state specific to the execution of the given module.""" """ setup any state specific to the execution of the given module."""
def teardown_module(module): def teardown_module(module):
""" teardown any state that was previously setup with a setup_module """ teardown any state that was previously setup with a setup_module
method. method.
@ -43,7 +46,9 @@ Class level setup/teardown
---------------------------------- ----------------------------------
Similarly, the following methods are called at class level before Similarly, the following methods are called at class level before
and after all test methods of the class are called:: and after all test methods of the class are called:
.. code-block:: python
@classmethod @classmethod
def setup_class(cls): def setup_class(cls):
@ -51,6 +56,7 @@ and after all test methods of the class are called::
usually contains tests). usually contains tests).
""" """
@classmethod @classmethod
def teardown_class(cls): def teardown_class(cls):
""" teardown any state that was previously setup with a call to """ teardown any state that was previously setup with a call to
@ -60,13 +66,16 @@ and after all test methods of the class are called::
Method and function level setup/teardown Method and function level setup/teardown
----------------------------------------------- -----------------------------------------------
Similarly, the following methods are called around each method invocation:: Similarly, the following methods are called around each method invocation:
.. code-block:: python
def setup_method(self, method): def setup_method(self, method):
""" setup any state tied to the execution of the given method in a """ setup any state tied to the execution of the given method in a
class. setup_method is invoked for every test method of a class. class. setup_method is invoked for every test method of a class.
""" """
def teardown_method(self, method): def teardown_method(self, method):
""" teardown any state that was previously setup with a setup_method """ teardown any state that was previously setup with a setup_method
call. call.
@ -75,13 +84,16 @@ Similarly, the following methods are called around each method invocation::
As of pytest-3.0, the ``method`` parameter is optional. As of pytest-3.0, the ``method`` parameter is optional.
If you would rather define test functions directly at module level If you would rather define test functions directly at module level
you can also use the following functions to implement fixtures:: you can also use the following functions to implement fixtures:
.. code-block:: python
def setup_function(function): def setup_function(function):
""" setup any state tied to the execution of the given function. """ setup any state tied to the execution of the given function.
Invoked for every test function in the module. Invoked for every test function in the module.
""" """
def teardown_function(function): def teardown_function(function):
""" teardown any state that was previously setup with a setup_function """ teardown any state that was previously setup with a setup_function
call. call.
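A runnable sketch tying the function-level hooks together (module contents invented for illustration):

.. code-block:: python

    resource = None


    def setup_function(function):
        global resource
        resource = {"name": function.__name__}


    def teardown_function(function):
        global resource
        resource = None


    def test_resource_is_prepared():
        assert resource["name"] == "test_resource_is_prepared"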
View File
@ -30,6 +30,11 @@ template = "changelog/_template.rst"
name = "Features" name = "Features"
showcontent = true showcontent = true
[[tool.towncrier.type]]
directory = "improvement"
name = "Improvements"
showcontent = true
[[tool.towncrier.type]] [[tool.towncrier.type]]
directory = "bugfix" directory = "bugfix"
name = "Bug Fixes" name = "Bug Fixes"
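With this entry in place, an improvement fragment would be a file such as ``changelog/1234.improvement.rst`` (issue number hypothetical) containing one short paragraph; towncrier then renders it under the new "Improvements" heading.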
View File
@ -0,0 +1,95 @@
"""
Script used to publish GitHub release notes extracted from CHANGELOG.rst.
This script is meant to be executed after a successful deployment in Travis.
Uses the following environment variables:
* GIT_TAG: the name of the tag of the current commit.
* GH_RELEASE_NOTES_TOKEN: a personal access token with 'repo' permissions. It should be encrypted using:
$travis encrypt GH_RELEASE_NOTES_TOKEN=<token> -r pytest-dev/pytest
And the contents pasted in the ``deploy.env.secure`` section in the ``travis.yml`` file.
The script also requires ``pandoc`` to be previously installed in the system.
Requires Python 3.6+.
"""
import os
import re
import sys
from pathlib import Path
import github3
import pypandoc
def publish_github_release(slug, token, tag_name, body):
github = github3.login(token=token)
owner, repo = slug.split("/")
repo = github.repository(owner, repo)
return repo.create_release(tag_name=tag_name, body=body)
def parse_changelog(tag_name):
p = Path(__file__).parent.parent / "CHANGELOG.rst"
changelog_lines = p.read_text(encoding="UTF-8").splitlines()
title_regex = re.compile(r"pytest (\d\.\d+\.\d+) \(\d{4}-\d{2}-\d{2}\)")
consuming_version = False
version_lines = []
for line in changelog_lines:
m = title_regex.match(line)
if m:
# found the version we want: start to consume lines until we find the next version title
if m.group(1) == tag_name:
consuming_version = True
# found a new version title while parsing the version we want: break out
elif consuming_version:
break
if consuming_version:
version_lines.append(line)
return "\n".join(version_lines)
def convert_rst_to_md(text):
return pypandoc.convert_text(text, "md", format="rst")
def main(argv):
if len(argv) > 1:
tag_name = argv[1]
else:
tag_name = os.environ.get("TRAVIS_TAG")
if not tag_name:
print("tag_name not given and $TRAVIS_TAG not set", file=sys.stderr)
return 1
token = os.environ.get("GH_RELEASE_NOTES_TOKEN")
if not token:
print("GH_RELEASE_NOTES_TOKEN not set", file=sys.stderr)
return 1
slug = os.environ.get("TRAVIS_REPO_SLUG")
if not slug:
print("TRAVIS_REPO_SLUG not set", file=sys.stderr)
return 1
rst_body = parse_changelog(tag_name)
md_body = convert_rst_to_md(rst_body)
if not publish_github_release(slug, token, tag_name, md_body):
print("Could not publish release notes:", file=sys.stderr)
print(md_body, file=sys.stderr)
return 5
print()
print(f"Release notes for {tag_name} published successfully:")
print(f"https://github.com/{slug}/releases/tag/{tag_name}")
print()
return 0
if __name__ == "__main__":
sys.exit(main(sys.argv))
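As a quick sanity check of the changelog title pattern used above (standalone sketch; the version and date are invented):

    import re

    title_regex = re.compile(r"pytest (\d\.\d+\.\d+) \(\d{4}-\d{2}-\d{2}\)")
    assert title_regex.match("pytest 5.2.0 (2019-09-28)").group(1) == "5.2.0"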
View File
@ -13,4 +13,6 @@ fi
python -m coverage combine python -m coverage combine
python -m coverage xml python -m coverage xml
python -m coverage report -m python -m coverage report -m
bash <(curl -s https://codecov.io/bash) -Z -X gcov -X coveragepy -X search -X xcode -X gcovout -X fix -f coverage.xml # Set --connect-timeout to work around https://github.com/curl/curl/issues/4461
curl -S -L --connect-timeout 5 --retry 6 -s https://codecov.io/bash -o codecov-upload.sh
bash codecov-upload.sh -Z -X fix -f coverage.xml
View File
@ -5,7 +5,7 @@ from setuptools import setup
INSTALL_REQUIRES = [ INSTALL_REQUIRES = [
"py>=1.5.0", "py>=1.5.0",
"packaging", "packaging",
"attrs>=17.4.0", "attrs>=17.4.0", # should match oldattrs tox env.
"more-itertools>=4.0.0", "more-itertools>=4.0.0",
"atomicwrites>=1.0", "atomicwrites>=1.0",
'pathlib2>=2.2.0;python_version<"3.6"', 'pathlib2>=2.2.0;python_version<"3.6"',
@ -21,7 +21,6 @@ def main():
use_scm_version={"write_to": "src/_pytest/_version.py"}, use_scm_version={"write_to": "src/_pytest/_version.py"},
setup_requires=["setuptools-scm", "setuptools>=40.0"], setup_requires=["setuptools-scm", "setuptools>=40.0"],
package_dir={"": "src"}, package_dir={"": "src"},
# fmt: off
extras_require={ extras_require={
"testing": [ "testing": [
"argcomplete", "argcomplete",
@ -29,9 +28,9 @@ def main():
"mock", "mock",
"nose", "nose",
"requests", "requests",
], "xmlschema",
]
}, },
# fmt: on
install_requires=INSTALL_REQUIRES, install_requires=INSTALL_REQUIRES,
) )
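Anything listed under the ``testing`` extra can then be pulled in with an editable install from a checkout, along the lines of::

    pip install -e ".[testing]"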
View File
@ -5,10 +5,15 @@ import traceback
from inspect import CO_VARARGS from inspect import CO_VARARGS
from inspect import CO_VARKEYWORDS from inspect import CO_VARKEYWORDS
from traceback import format_exception_only from traceback import format_exception_only
from types import CodeType
from types import TracebackType from types import TracebackType
from typing import Any
from typing import Dict
from typing import Generic from typing import Generic
from typing import List
from typing import Optional from typing import Optional
from typing import Pattern from typing import Pattern
from typing import Set
from typing import Tuple from typing import Tuple
from typing import TypeVar from typing import TypeVar
from typing import Union from typing import Union
@ -29,7 +34,7 @@ if False: # TYPE_CHECKING
class Code: class Code:
""" wrapper around Python code objects """ """ wrapper around Python code objects """
def __init__(self, rawcode): def __init__(self, rawcode) -> None:
if not hasattr(rawcode, "co_filename"): if not hasattr(rawcode, "co_filename"):
rawcode = getrawcode(rawcode) rawcode = getrawcode(rawcode)
try: try:
@ -38,7 +43,7 @@ class Code:
self.name = rawcode.co_name self.name = rawcode.co_name
except AttributeError: except AttributeError:
raise TypeError("not a code object: {!r}".format(rawcode)) raise TypeError("not a code object: {!r}".format(rawcode))
self.raw = rawcode self.raw = rawcode # type: CodeType
def __eq__(self, other): def __eq__(self, other):
return self.raw == other.raw return self.raw == other.raw
@ -351,7 +356,7 @@ class Traceback(list):
""" return the index of the frame/TracebackEntry where recursion """ return the index of the frame/TracebackEntry where recursion
originates if appropriate, None if no recursion occurred originates if appropriate, None if no recursion occurred
""" """
cache = {} cache = {} # type: Dict[Tuple[Any, int, int], List[Dict[str, Any]]]
for i, entry in enumerate(self): for i, entry in enumerate(self):
# id for the code.raw is needed to work around # id for the code.raw is needed to work around
# the strange metaprogramming in the decorator lib from pypi # the strange metaprogramming in the decorator lib from pypi
@ -443,7 +448,7 @@ class ExceptionInfo(Generic[_E]):
assert tup[1] is not None, "no current exception" assert tup[1] is not None, "no current exception"
assert tup[2] is not None, "no current exception" assert tup[2] is not None, "no current exception"
exc_info = (tup[0], tup[1], tup[2]) exc_info = (tup[0], tup[1], tup[2])
return cls.from_exc_info(exc_info) return cls.from_exc_info(exc_info, exprinfo)
@classmethod @classmethod
def for_later(cls) -> "ExceptionInfo[_E]": def for_later(cls) -> "ExceptionInfo[_E]":
@ -502,7 +507,9 @@ class ExceptionInfo(Generic[_E]):
def __repr__(self) -> str: def __repr__(self) -> str:
if self._excinfo is None: if self._excinfo is None:
return "<ExceptionInfo for raises contextmanager>" return "<ExceptionInfo for raises contextmanager>"
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback)) return "<{} {} tblen={}>".format(
self.__class__.__name__, saferepr(self._excinfo[1]), len(self.traceback)
)
def exconly(self, tryshort: bool = False) -> str: def exconly(self, tryshort: bool = False) -> str:
""" return the exception as a string """ return the exception as a string
@ -520,7 +527,9 @@ class ExceptionInfo(Generic[_E]):
text = text[len(self._striptext) :] text = text[len(self._striptext) :]
return text return text
def errisinstance(self, exc: "Type[BaseException]") -> bool: def errisinstance(
self, exc: Union["Type[BaseException]", Tuple["Type[BaseException]", ...]]
) -> bool:
""" return True if the exception is an instance of exc """ """ return True if the exception is an instance of exc """
return isinstance(self.value, exc) return isinstance(self.value, exc)
@ -589,7 +598,7 @@ class ExceptionInfo(Generic[_E]):
) )
return fmt.repr_excinfo(self) return fmt.repr_excinfo(self)
def match(self, regexp: Union[str, Pattern]) -> bool: def match(self, regexp: "Union[str, Pattern]") -> bool:
""" """
Check whether the regular expression 'regexp' is found in the string Check whether the regular expression 'regexp' is found in the string
representation of the exception using ``re.search``. If it matches representation of the exception using ``re.search``. If it matches
@ -648,7 +657,7 @@ class FormattedExcinfo:
args.append((argname, saferepr(argvalue))) args.append((argname, saferepr(argvalue)))
return ReprFuncArgs(args) return ReprFuncArgs(args)
def get_source(self, source, line_index=-1, excinfo=None, short=False): def get_source(self, source, line_index=-1, excinfo=None, short=False) -> List[str]:
""" return formatted and marked up source lines. """ """ return formatted and marked up source lines. """
import _pytest._code import _pytest._code
@ -720,7 +729,7 @@ class FormattedExcinfo:
else: else:
line_index = entry.lineno - entry.getfirstlinesource() line_index = entry.lineno - entry.getfirstlinesource()
lines = [] lines = [] # type: List[str]
style = entry._repr_style style = entry._repr_style
if style is None: if style is None:
style = self.style style = self.style
@ -797,7 +806,7 @@ class FormattedExcinfo:
exc_msg=str(e), exc_msg=str(e),
max_frames=max_frames, max_frames=max_frames,
total=len(traceback), total=len(traceback),
) ) # type: Optional[str]
traceback = traceback[:max_frames] + traceback[-max_frames:] traceback = traceback[:max_frames] + traceback[-max_frames:]
else: else:
if recursionindex is not None: if recursionindex is not None:
@ -810,10 +819,12 @@ class FormattedExcinfo:
def repr_excinfo(self, excinfo): def repr_excinfo(self, excinfo):
repr_chain = [] repr_chain = (
[]
) # type: List[Tuple[ReprTraceback, Optional[ReprFileLocation], Optional[str]]]
e = excinfo.value e = excinfo.value
descr = None descr = None
seen = set() seen = set() # type: Set[int]
while e is not None and id(e) not in seen: while e is not None and id(e) not in seen:
seen.add(id(e)) seen.add(id(e))
if excinfo: if excinfo:
@ -866,8 +877,8 @@ class TerminalRepr:
class ExceptionRepr(TerminalRepr): class ExceptionRepr(TerminalRepr):
def __init__(self): def __init__(self) -> None:
self.sections = [] self.sections = [] # type: List[Tuple[str, str, str]]
def addsection(self, name, content, sep="-"): def addsection(self, name, content, sep="-"):
self.sections.append((name, content, sep)) self.sections.append((name, content, sep))
View File
@ -7,6 +7,7 @@ import tokenize
import warnings import warnings
from ast import PyCF_ONLY_AST as _AST_FLAG from ast import PyCF_ONLY_AST as _AST_FLAG
from bisect import bisect_right from bisect import bisect_right
from typing import List
import py import py
@ -19,11 +20,11 @@ class Source:
_compilecounter = 0 _compilecounter = 0
def __init__(self, *parts, **kwargs): def __init__(self, *parts, **kwargs):
self.lines = lines = [] self.lines = lines = [] # type: List[str]
de = kwargs.get("deindent", True) de = kwargs.get("deindent", True)
for part in parts: for part in parts:
if not part: if not part:
partlines = [] partlines = [] # type: List[str]
elif isinstance(part, Source): elif isinstance(part, Source):
partlines = part.lines partlines = part.lines
elif isinstance(part, (tuple, list)): elif isinstance(part, (tuple, list)):
@ -157,8 +158,7 @@ class Source:
source = "\n".join(self.lines) + "\n" source = "\n".join(self.lines) + "\n"
try: try:
co = compile(source, filename, mode, flag) co = compile(source, filename, mode, flag)
except SyntaxError: except SyntaxError as ex:
ex = sys.exc_info()[1]
# re-represent syntax errors from parsing python strings # re-represent syntax errors from parsing python strings
msglines = self.lines[: ex.lineno] msglines = self.lines[: ex.lineno]
if ex.offset: if ex.offset:
@ -173,7 +173,8 @@ class Source:
if flag & _AST_FLAG: if flag & _AST_FLAG:
return co return co
lines = [(x + "\n") for x in self.lines] lines = [(x + "\n") for x in self.lines]
linecache.cache[filename] = (1, None, lines, filename) # Type ignored because linecache.cache is private.
linecache.cache[filename] = (1, None, lines, filename) # type: ignore
return co return co
@ -282,7 +283,7 @@ def get_statement_startend2(lineno, node):
return start, end return start, end
def getstatementrange_ast(lineno, source, assertion=False, astnode=None): def getstatementrange_ast(lineno, source: Source, assertion=False, astnode=None):
if astnode is None: if astnode is None:
content = str(source) content = str(source)
# See #4260: # See #4260:
View File
@ -2,6 +2,7 @@
support for presenting detailed information in failing assertions. support for presenting detailed information in failing assertions.
""" """
import sys import sys
from typing import Optional
from _pytest.assertion import rewrite from _pytest.assertion import rewrite
from _pytest.assertion import truncate from _pytest.assertion import truncate
@ -52,7 +53,9 @@ def register_assert_rewrite(*names):
importhook = hook importhook = hook
break break
else: else:
importhook = DummyRewriteHook() # TODO(typing): Add a protocol for mark_rewrite() and use it
# for importhook and for PytestPluginManager.rewrite_hook.
importhook = DummyRewriteHook() # type: ignore
importhook.mark_rewrite(*names) importhook.mark_rewrite(*names)
@ -69,7 +72,7 @@ class AssertionState:
def __init__(self, config, mode): def __init__(self, config, mode):
self.mode = mode self.mode = mode
self.trace = config.trace.root.get("assertion") self.trace = config.trace.root.get("assertion")
self.hook = None self.hook = None # type: Optional[rewrite.AssertionRewritingHook]
def install_importhook(config): def install_importhook(config):
@ -108,6 +111,7 @@ def pytest_runtest_setup(item):
""" """
def callbinrepr(op, left, right): def callbinrepr(op, left, right):
# type: (str, object, object) -> Optional[str]
"""Call the pytest_assertrepr_compare hook and prepare the result """Call the pytest_assertrepr_compare hook and prepare the result
This uses the first result from the hook and then ensures the This uses the first result from the hook and then ensures the
@ -133,12 +137,13 @@ def pytest_runtest_setup(item):
if item.config.getvalue("assertmode") == "rewrite": if item.config.getvalue("assertmode") == "rewrite":
res = res.replace("%", "%%") res = res.replace("%", "%%")
return res return res
return None
util._reprcompare = callbinrepr util._reprcompare = callbinrepr
if item.ihook.pytest_assertion_pass.get_hookimpls(): if item.ihook.pytest_assertion_pass.get_hookimpls():
def call_assertion_pass_hook(lineno, expl, orig): def call_assertion_pass_hook(lineno, orig, expl):
item.ihook.pytest_assertion_pass( item.ihook.pytest_assertion_pass(
item=item, lineno=lineno, orig=orig, expl=expl item=item, lineno=lineno, orig=orig, expl=expl
) )
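For context, a conftest-side consumer of this hook could look like the sketch below (signature taken from this diff; the print is illustrative, and the ``enable_assertion_pass_hook`` ini option must be set for the hook to fire):

    # contents of conftest.py
    def pytest_assertion_pass(item, lineno, orig, expl):
        print("assertion passed at line {}: {}".format(lineno, orig))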
View File
@ -2,6 +2,7 @@
import ast import ast
import errno import errno
import functools import functools
import importlib.abc
import importlib.machinery import importlib.machinery
import importlib.util import importlib.util
import io import io
@ -16,6 +17,7 @@ from typing import Dict
from typing import List from typing import List
from typing import Optional from typing import Optional
from typing import Set from typing import Set
from typing import Tuple
import atomicwrites import atomicwrites
@ -34,7 +36,7 @@ PYC_EXT = ".py" + (__debug__ and "c" or "o")
PYC_TAIL = "." + PYTEST_TAG + PYC_EXT PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
class AssertionRewritingHook: class AssertionRewritingHook(importlib.abc.MetaPathFinder):
"""PEP302/PEP451 import hook which rewrites asserts.""" """PEP302/PEP451 import hook which rewrites asserts."""
def __init__(self, config): def __init__(self, config):
@ -44,13 +46,13 @@ class AssertionRewritingHook:
except ValueError: except ValueError:
self.fnpats = ["test_*.py", "*_test.py"] self.fnpats = ["test_*.py", "*_test.py"]
self.session = None self.session = None
self._rewritten_names = set() self._rewritten_names = set() # type: Set[str]
self._must_rewrite = set() self._must_rewrite = set() # type: Set[str]
# flag to guard against trying to rewrite a pyc file while we are already writing another pyc file, # flag to guard against trying to rewrite a pyc file while we are already writing another pyc file,
# which might result in infinite recursion (#3506) # which might result in infinite recursion (#3506)
self._writing_pyc = False self._writing_pyc = False
self._basenames_to_check_rewrite = {"conftest"} self._basenames_to_check_rewrite = {"conftest"}
self._marked_for_rewrite_cache = {} self._marked_for_rewrite_cache = {} # type: Dict[str, bool]
self._session_paths_checked = False self._session_paths_checked = False
def set_session(self, session): def set_session(self, session):
@ -116,24 +118,11 @@ class AssertionRewritingHook:
write = not sys.dont_write_bytecode write = not sys.dont_write_bytecode
cache_dir = os.path.join(os.path.dirname(fn), "__pycache__") cache_dir = os.path.join(os.path.dirname(fn), "__pycache__")
if write: if write:
try: ok = try_mkdir(cache_dir)
os.mkdir(cache_dir) if not ok:
except OSError: write = False
e = sys.exc_info()[1].errno state.trace("read only directory: {}".format(os.path.dirname(fn)))
if e == errno.EEXIST:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
pass
elif e in {errno.ENOENT, errno.ENOTDIR}:
# One of the path components was not a directory, likely
# because we're in a zip file.
write = False
elif e in {errno.EACCES, errno.EROFS, errno.EPERM}:
state.trace("read only directory: %r" % os.path.dirname(fn))
write = False
else:
raise
cache_name = os.path.basename(fn)[:-3] + PYC_TAIL cache_name = os.path.basename(fn)[:-3] + PYC_TAIL
pyc = os.path.join(cache_dir, cache_name) pyc = os.path.join(cache_dir, cache_name)
# Notice that even if we're in a read-only directory, I'm going # Notice that even if we're in a read-only directory, I'm going
@ -212,7 +201,7 @@ class AssertionRewritingHook:
return self._is_marked_for_rewrite(name, state) return self._is_marked_for_rewrite(name, state)
def _is_marked_for_rewrite(self, name, state): def _is_marked_for_rewrite(self, name: str, state):
try: try:
return self._marked_for_rewrite_cache[name] return self._marked_for_rewrite_cache[name]
except KeyError: except KeyError:
@ -227,7 +216,7 @@ class AssertionRewritingHook:
self._marked_for_rewrite_cache[name] = False self._marked_for_rewrite_cache[name] = False
return False return False
def mark_rewrite(self, *names): def mark_rewrite(self, *names: str) -> None:
"""Mark import names as needing to be rewritten. """Mark import names as needing to be rewritten.
The named module or package as well as any nested modules will The named module or package as well as any nested modules will
@ -394,6 +383,7 @@ def _format_boolop(explanations, is_or):
def _call_reprcompare(ops, results, expls, each_obj): def _call_reprcompare(ops, results, expls, each_obj):
# type: (Tuple[str, ...], Tuple[bool, ...], Tuple[str, ...], Tuple[object, ...]) -> str
for i, res, expl in zip(range(len(ops)), results, expls): for i, res, expl in zip(range(len(ops)), results, expls):
try: try:
done = not res done = not res
@ -409,11 +399,13 @@ def _call_reprcompare(ops, results, expls, each_obj):
def _call_assertion_pass(lineno, orig, expl): def _call_assertion_pass(lineno, orig, expl):
# type: (int, str, str) -> None
if util._assertion_pass is not None: if util._assertion_pass is not None:
util._assertion_pass(lineno=lineno, orig=orig, expl=expl) util._assertion_pass(lineno, orig, expl)
def _check_if_assertion_pass_impl(): def _check_if_assertion_pass_impl():
# type: () -> bool
"""Checks if any plugins implement the pytest_assertion_pass hook """Checks if any plugins implement the pytest_assertion_pass hook
in order not to generate explanation unnecessarily (might be expensive)""" in order not to generate explanation unnecessarily (might be expensive)"""
return True if util._assertion_pass else False return True if util._assertion_pass else False
@ -587,7 +579,7 @@ class AssertionRewriter(ast.NodeVisitor):
def _assert_expr_to_lineno(self): def _assert_expr_to_lineno(self):
return _get_assertion_exprs(self.source) return _get_assertion_exprs(self.source)
def run(self, mod): def run(self, mod: ast.Module) -> None:
"""Find all assert statements in *mod* and rewrite them.""" """Find all assert statements in *mod* and rewrite them."""
if not mod.body: if not mod.body:
# Nothing to do. # Nothing to do.
@ -629,12 +621,12 @@ class AssertionRewriter(ast.NodeVisitor):
] ]
mod.body[pos:pos] = imports mod.body[pos:pos] = imports
# Collect asserts. # Collect asserts.
nodes = [mod] nodes = [mod] # type: List[ast.AST]
while nodes: while nodes:
node = nodes.pop() node = nodes.pop()
for name, field in ast.iter_fields(node): for name, field in ast.iter_fields(node):
if isinstance(field, list): if isinstance(field, list):
new = [] new = [] # type: List
for i, child in enumerate(field): for i, child in enumerate(field):
if isinstance(child, ast.Assert): if isinstance(child, ast.Assert):
# Transform assert. # Transform assert.
@ -708,7 +700,7 @@ class AssertionRewriter(ast.NodeVisitor):
.explanation_param(). .explanation_param().
""" """
self.explanation_specifiers = {} self.explanation_specifiers = {} # type: Dict[str, ast.expr]
self.stack.append(self.explanation_specifiers) self.stack.append(self.explanation_specifiers)
def pop_format_context(self, expl_expr): def pop_format_context(self, expl_expr):
@ -751,7 +743,8 @@ class AssertionRewriter(ast.NodeVisitor):
from _pytest.warning_types import PytestAssertRewriteWarning from _pytest.warning_types import PytestAssertRewriteWarning
import warnings import warnings
warnings.warn_explicit( # Ignore type: typeshed bug https://github.com/python/typeshed/pull/3121
warnings.warn_explicit( # type: ignore
PytestAssertRewriteWarning( PytestAssertRewriteWarning(
"assertion is always true, perhaps remove parentheses?" "assertion is always true, perhaps remove parentheses?"
), ),
@ -760,15 +753,15 @@ class AssertionRewriter(ast.NodeVisitor):
lineno=assert_.lineno, lineno=assert_.lineno,
) )
self.statements = [] self.statements = [] # type: List[ast.stmt]
self.variables = [] self.variables = [] # type: List[str]
self.variable_counter = itertools.count() self.variable_counter = itertools.count()
if self.enable_assertion_pass_hook: if self.enable_assertion_pass_hook:
self.format_variables = [] self.format_variables = [] # type: List[str]
self.stack = [] self.stack = [] # type: List[Dict[str, ast.expr]]
self.expl_stmts = [] self.expl_stmts = [] # type: List[ast.stmt]
self.push_format_context() self.push_format_context()
# Rewrite assert into a bunch of statements. # Rewrite assert into a bunch of statements.
top_condition, explanation = self.visit(assert_.test) top_condition, explanation = self.visit(assert_.test)
@ -867,10 +860,7 @@ class AssertionRewriter(ast.NodeVisitor):
internally already. internally already.
See issue #3191 for more details. See issue #3191 for more details.
""" """
val_is_none = ast.Compare(node, [ast.Is()], [ast.NameConstant(None)])
# Using parse because it is different between py2 and py3.
AST_NONE = ast.parse("None").body[0].value
val_is_none = ast.Compare(node, [ast.Is()], [AST_NONE])
send_warning = ast.parse( send_warning = ast.parse(
"""\ """\
from _pytest.warning_types import PytestAssertRewriteWarning from _pytest.warning_types import PytestAssertRewriteWarning
@ -909,7 +899,7 @@ warn_explicit(
# Process each operand, short-circuiting if needed. # Process each operand, short-circuiting if needed.
for i, v in enumerate(boolop.values): for i, v in enumerate(boolop.values):
if i: if i:
fail_inner = [] fail_inner = [] # type: List[ast.stmt]
# cond is set in a prior loop iteration below # cond is set in a prior loop iteration below
self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa self.expl_stmts.append(ast.If(cond, fail_inner, [])) # noqa
self.expl_stmts = fail_inner self.expl_stmts = fail_inner
@ -920,10 +910,10 @@ warn_explicit(
call = ast.Call(app, [expl_format], []) call = ast.Call(app, [expl_format], [])
self.expl_stmts.append(ast.Expr(call)) self.expl_stmts.append(ast.Expr(call))
if i < levels: if i < levels:
cond = res cond = res # type: ast.expr
if is_or: if is_or:
cond = ast.UnaryOp(ast.Not(), cond) cond = ast.UnaryOp(ast.Not(), cond)
inner = [] inner = [] # type: List[ast.stmt]
self.statements.append(ast.If(cond, inner, [])) self.statements.append(ast.If(cond, inner, []))
self.statements = body = inner self.statements = body = inner
self.statements = save self.statements = save
@ -989,7 +979,7 @@ warn_explicit(
expl = pat % (res_expl, res_expl, value_expl, attr.attr) expl = pat % (res_expl, res_expl, value_expl, attr.attr)
return res, expl return res, expl
def visit_Compare(self, comp): def visit_Compare(self, comp: ast.Compare):
self.push_format_context() self.push_format_context()
left_res, left_expl = self.visit(comp.left) left_res, left_expl = self.visit(comp.left)
if isinstance(comp.left, (ast.Compare, ast.BoolOp)): if isinstance(comp.left, (ast.Compare, ast.BoolOp)):
@ -1022,7 +1012,30 @@ warn_explicit(
ast.Tuple(results, ast.Load()), ast.Tuple(results, ast.Load()),
) )
if len(comp.ops) > 1: if len(comp.ops) > 1:
res = ast.BoolOp(ast.And(), load_names) res = ast.BoolOp(ast.And(), load_names) # type: ast.expr
else: else:
res = load_names[0] res = load_names[0]
return res, self.explanation_param(self.pop_format_context(expl_call)) return res, self.explanation_param(self.pop_format_context(expl_call))
def try_mkdir(cache_dir):
"""Attempts to create the given directory, returns True if successful"""
try:
os.mkdir(cache_dir)
except FileExistsError:
# Either the __pycache__ directory already exists (the
# common case) or it's blocked by a non-dir node. In the
# latter case, we'll ignore it in _write_pyc.
return True
except (FileNotFoundError, NotADirectoryError):
# One of the path components was not a directory, likely
# because we're in a zip file.
return False
except PermissionError:
return False
except OSError as e:
# as of now, EROFS doesn't have an equivalent OSError-subclass
if e.errno == errno.EROFS:
return False
raise
return True
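# Illustrative expectations for the helper above (paths hypothetical):
#   try_mkdir("build/__pycache__")       -> True  (created, or already there)
#   try_mkdir("bundle.zip/__pycache__")  -> False (a path component is not a directory)
#   try_mkdir("/readonly/__pycache__")   -> False (PermissionError or EROFS)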
View File
@ -1,20 +1,24 @@
"""Utilities for assertion debugging""" """Utilities for assertion debugging"""
import pprint import pprint
from collections.abc import Sequence from collections.abc import Sequence
from typing import Callable
from typing import List
from typing import Optional
import _pytest._code import _pytest._code
from _pytest import outcomes from _pytest import outcomes
from _pytest._io.saferepr import saferepr from _pytest._io.saferepr import saferepr
from _pytest.compat import ATTRS_EQ_FIELD
# The _reprcompare attribute on the util module is used by the new assertion # The _reprcompare attribute on the util module is used by the new assertion
# interpretation code and assertion rewriter to detect this plugin was # interpretation code and assertion rewriter to detect this plugin was
# loaded and in turn call the hooks defined here as part of the # loaded and in turn call the hooks defined here as part of the
# DebugInterpreter. # DebugInterpreter.
_reprcompare = None _reprcompare = None # type: Optional[Callable[[str, object, object], Optional[str]]]
# Works similarly as _reprcompare attribute. Is populated with the hook call # Works similarly as _reprcompare attribute. Is populated with the hook call
# when pytest_runtest_setup is called. # when pytest_runtest_setup is called.
_assertion_pass = None _assertion_pass = None # type: Optional[Callable[[int, str, str], None]]
def format_explanation(explanation): def format_explanation(explanation):
@ -119,9 +123,9 @@ def isiterable(obj):
def assertrepr_compare(config, op, left, right): def assertrepr_compare(config, op, left, right):
"""Return specialised explanations for some operators/operands""" """Return specialised explanations for some operators/operands"""
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op maxsize = (80 - 15 - len(op) - 2) // 2 # 15 chars indentation, 1 space around op
left_repr = saferepr(left, maxsize=int(width // 2)) left_repr = saferepr(left, maxsize=maxsize)
right_repr = saferepr(right, maxsize=width - len(left_repr)) right_repr = saferepr(right, maxsize=maxsize)
summary = "{} {} {}".format(left_repr, op, right_repr) summary = "{} {} {}".format(left_repr, op, right_repr)
@ -177,7 +181,7 @@ def _diff_text(left, right, verbose=0):
""" """
from difflib import ndiff from difflib import ndiff
explanation = [] explanation = [] # type: List[str]
def escape_for_readable_diff(binary_text): def escape_for_readable_diff(binary_text):
""" """
@ -235,13 +239,25 @@ def _compare_eq_verbose(left, right):
left_lines = repr(left).splitlines(keepends) left_lines = repr(left).splitlines(keepends)
right_lines = repr(right).splitlines(keepends) right_lines = repr(right).splitlines(keepends)
explanation = [] explanation = [] # type: List[str]
explanation += ["-" + line for line in left_lines] explanation += ["-" + line for line in left_lines]
explanation += ["+" + line for line in right_lines] explanation += ["+" + line for line in right_lines]
return explanation return explanation
def _surrounding_parens_on_own_lines(lines): # type: (List) -> None
"""Move opening/closing parenthesis/bracket to own lines."""
opening = lines[0][:1]
if opening in ["(", "[", "{"]:
lines[0] = " " + lines[0][1:]
lines[:] = [opening] + lines
closing = lines[-1][-1:]
if closing in [")", "]", "}"]:
lines[-1] = lines[-1][:-1] + ","
lines[:] = lines + [closing]
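# Worked example (input invented): given lines == ["[1,", " 2]"], the helper
# mutates it in place to ["[", " 1,", " 2,", "]"], so the brackets get their
# own lines and the ndiff in _compare_eq_iterable below lines up element-wise.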
def _compare_eq_iterable(left, right, verbose=0): def _compare_eq_iterable(left, right, verbose=0):
if not verbose: if not verbose:
return ["Use -v to get the full diff"] return ["Use -v to get the full diff"]
@ -250,16 +266,34 @@ def _compare_eq_iterable(left, right, verbose=0):
left_formatting = pprint.pformat(left).splitlines() left_formatting = pprint.pformat(left).splitlines()
right_formatting = pprint.pformat(right).splitlines() right_formatting = pprint.pformat(right).splitlines()
# Re-format for different output lengths.
lines_left = len(left_formatting)
lines_right = len(right_formatting)
if lines_left != lines_right:
if lines_left > lines_right:
max_width = min(len(x) for x in left_formatting)
right_formatting = pprint.pformat(right, width=max_width).splitlines()
lines_right = len(right_formatting)
else:
max_width = min(len(x) for x in right_formatting)
left_formatting = pprint.pformat(left, width=max_width).splitlines()
lines_left = len(left_formatting)
if lines_left > 1 or lines_right > 1:
_surrounding_parens_on_own_lines(left_formatting)
_surrounding_parens_on_own_lines(right_formatting)
explanation = ["Full diff:"] explanation = ["Full diff:"]
explanation.extend( explanation.extend(
line.strip() for line in difflib.ndiff(left_formatting, right_formatting) line.rstrip() for line in difflib.ndiff(left_formatting, right_formatting)
) )
return explanation return explanation
def _compare_eq_sequence(left, right, verbose=0): def _compare_eq_sequence(left, right, verbose=0):
comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes) comparing_bytes = isinstance(left, bytes) and isinstance(right, bytes)
explanation = [] explanation = [] # type: List[str]
len_left = len(left) len_left = len(left)
len_right = len(right) len_right = len(right)
for i in range(min(len_left, len_right)): for i in range(min(len_left, len_right)):
@ -327,7 +361,7 @@ def _compare_eq_set(left, right, verbose=0):
def _compare_eq_dict(left, right, verbose=0): def _compare_eq_dict(left, right, verbose=0):
explanation = [] explanation = [] # type: List[str]
set_left = set(left) set_left = set(left)
set_right = set(right) set_right = set(right)
common = set_left.intersection(set_right) common = set_left.intersection(set_right)
@ -372,7 +406,9 @@ def _compare_eq_cls(left, right, verbose, type_fns):
fields_to_check = [field for field, info in all_fields.items() if info.compare] fields_to_check = [field for field, info in all_fields.items() if info.compare]
elif isattrs(left): elif isattrs(left):
all_fields = left.__attrs_attrs__ all_fields = left.__attrs_attrs__
fields_to_check = [field.name for field in all_fields if field.cmp] fields_to_check = [
field.name for field in all_fields if getattr(field, ATTRS_EQ_FIELD)
]
same = [] same = []
diff = [] diff = []
View File
@ -14,7 +14,7 @@ import py
import pytest import pytest
from .pathlib import Path from .pathlib import Path
from .pathlib import resolve_from_str from .pathlib import resolve_from_str
from .pathlib import rmtree from .pathlib import rm_rf
README_CONTENT = """\ README_CONTENT = """\
# pytest cache directory # # pytest cache directory #
@ -44,7 +44,7 @@ class Cache:
def for_config(cls, config): def for_config(cls, config):
cachedir = cls.cache_dir_from_config(config) cachedir = cls.cache_dir_from_config(config)
if config.getoption("cacheclear") and cachedir.exists(): if config.getoption("cacheclear") and cachedir.exists():
rmtree(cachedir, force=True) rm_rf(cachedir)
cachedir.mkdir() cachedir.mkdir()
return cls(cachedir, config) return cls(cachedir, config)
@ -135,7 +135,7 @@ class Cache:
readme_path.write_text(README_CONTENT) readme_path.write_text(README_CONTENT)
gitignore_path = self._cachedir.joinpath(".gitignore") gitignore_path = self._cachedir.joinpath(".gitignore")
msg = "# Created by pytest automatically.\n*" msg = "# Created by pytest automatically.\n*\n"
gitignore_path.write_text(msg, encoding="UTF-8") gitignore_path.write_text(msg, encoding="UTF-8")
cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG") cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
Some files were not shown because too many files have changed in this diff.