Merge remote-tracking branch 'upstream/master' into mm

commit c28e428249
@@ -16,3 +16,11 @@ source = src/
 */lib/python*/site-packages/
 */pypy*/site-packages/
 *\Lib\site-packages\
+
+[report]
+skip_covered = True
+show_missing = True
+exclude_lines =
+    \#\s*pragma: no cover
+    ^\s*raise NotImplementedError\b
+    ^\s*return NotImplemented\b
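The ``exclude_lines`` patterns added above are regular expressions that coverage.py matches against individual source lines. As a minimal, hypothetical sketch (this class is ours, not from the pytest code base), these are the kinds of lines the new patterns would omit from coverage reports:

.. code-block:: python

    class Codec:
        def dump_state(self):  # pragma: no cover
            # the "def" line above matches the pragma pattern, so coverage skips this helper
            print("debugging helper, never measured")

        def encode(self, value):
            raise NotImplementedError  # matches ^\s*raise NotImplementedError\b

        def __eq__(self, other):
            if not isinstance(other, Codec):
                return NotImplemented  # matches ^\s*return NotImplemented\b
            return True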
@@ -26,7 +26,7 @@ repos:
   hooks:
   - id: flake8
     language_version: python3
-    additional_dependencies: [flake8-typing-imports]
+    additional_dependencies: [flake8-typing-imports==1.3.0]
 - repo: https://github.com/asottile/reorder_python_imports
   rev: v1.4.0
   hooks:
@@ -43,7 +43,8 @@ jobs:
     python: 'pypy3'

   - env: TOXENV=py35-xdist
-    python: '3.5'
+    dist: trusty
+    python: '3.5.0'

   # Coverage for:
   # - pytester's LsofFdLeakChecker
CHANGELOG.rst (167 changed lines)

@@ -18,6 +18,173 @@ with advance notice in the **Deprecations** section of releases.

 .. towncrier release notes start

+pytest 5.1.1 (2019-08-20)
+=========================
+
+Bug Fixes
+---------
+
+- `#5751 <https://github.com/pytest-dev/pytest/issues/5751>`_: Fixed ``TypeError`` when importing pytest on Python 3.5.0 and 3.5.1.
+
+
+pytest 5.1.0 (2019-08-15)
+=========================
+
+Removals
+--------
+
+- `#5180 <https://github.com/pytest-dev/pytest/issues/5180>`_: As per our policy, the following features have been deprecated in the 4.X series and are now
+  removed:
+
+  * ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead.
+
+  * ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument.
+
+  * ``message`` parameter of ``pytest.raises``.
+
+  * ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only
+    syntax. This might change the exception message from previous versions, but they still raise
+    ``TypeError`` on unknown keyword arguments as before.
+
+  * ``pytest.config`` global variable.
+
+  * ``tmpdir_factory.ensuretemp`` method.
+
+  * ``pytest_logwarning`` hook.
+
+  * ``RemovedInPytest4Warning`` warning type.
+
+  * ``request`` is now a reserved name for fixtures.
+
+  For more information consult
+  `Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs.
+
+
+- `#5565 <https://github.com/pytest-dev/pytest/issues/5565>`_: Removed unused support code for `unittest2 <https://pypi.org/project/unittest2/>`__.
+
+  The ``unittest2`` backport module is no longer
+  necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem
+  to be used: after removed, all tests still pass unchanged.
+
+  Although our policy is to introduce a deprecation period before removing any features or support
+  for third party libraries, because this code is apparently not used
+  at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to
+  remove it in this release.
+
+  If you experience a regression because of this, please
+  `file an issue <https://github.com/pytest-dev/pytest/issues/new>`__.
+
+
+- `#5615 <https://github.com/pytest-dev/pytest/issues/5615>`_: ``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument.
+
+  This was supported for Python 2 where it was tempting to use ``"message"``
+  instead of ``u"message"``.
+
+  Python 3 code is unlikely to pass ``bytes`` to these functions. If you do,
+  please decode it to an ``str`` beforehand.
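As a hedged illustration of the keyword-only calling convention mentioned in the ``#5180`` entry above (the division test itself is our own example, not taken from the changelog), ``match`` and similar options must now be spelled as keywords, and the old form that passed a string as the second argument to ``pytest.raises`` no longer works:

.. code-block:: python

    import pytest


    def test_division_by_zero():
        # pytest.raises(ZeroDivisionError, "1 / 0") was removed with the 4.x deprecations;
        # the context-manager form with a keyword-only ``match`` argument replaces it.
        with pytest.raises(ZeroDivisionError, match="division by zero"):
            1 / 0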
+
+
+Features
+--------
+
+- `#5564 <https://github.com/pytest-dev/pytest/issues/5564>`_: New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.
+
+
+- `#5576 <https://github.com/pytest-dev/pytest/issues/5576>`_: New `NUMBER <https://docs.pytest.org/en/latest/doctest.html#using-doctest-options>`__
+  option for doctests to ignore irrelevant differences in floating-point numbers.
+  Inspired by Sébastien Boisgérault's `numtest <https://github.com/boisgera/numtest>`__
+  extension for doctest.
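A small sketch of the ``NUMBER`` option from the ``#5576`` entry above (the ``circumference`` function is a made-up example, collected with ``pytest --doctest-modules``): floating-point digits beyond the precision written in the expected output are ignored, so the actual value ``6.283185...`` satisfies the expectation ``6.28``.

.. code-block:: python

    import math


    def circumference(radius):
        """Return the circumference of a circle of the given radius.

        >>> circumference(1.0)  # doctest: +NUMBER
        6.28
        """
        return 2 * math.pi * radius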
+
+
+Improvements
+------------
+
+- `#5471 <https://github.com/pytest-dev/pytest/issues/5471>`_: JUnit XML now includes a timestamp and hostname in the testsuite tag.
+
+
+- `#5707 <https://github.com/pytest-dev/pytest/issues/5707>`_: Time taken to run the test suite now includes a human-readable representation when it takes over
+  60 seconds, for example::
+
+      ===== 2 failed in 102.70s (0:01:42) =====
+
+
+Bug Fixes
+---------
+
+- `#4344 <https://github.com/pytest-dev/pytest/issues/4344>`_: Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.
+
+
+- `#5115 <https://github.com/pytest-dev/pytest/issues/5115>`_: Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest.
+
+
+- `#5477 <https://github.com/pytest-dev/pytest/issues/5477>`_: The XML file produced by ``--junitxml`` now correctly contain a ``<testsuites>`` root element.
+
+
+- `#5523 <https://github.com/pytest-dev/pytest/issues/5523>`_: Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+.
+
+
+- `#5524 <https://github.com/pytest-dev/pytest/issues/5524>`_: Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
+  which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.
+
+
+- `#5537 <https://github.com/pytest-dev/pytest/issues/5537>`_: Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
+  standard library on Python 3.8+.
+
+
+- `#5578 <https://github.com/pytest-dev/pytest/issues/5578>`_: Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc)
+  so they provide better error messages when users meant to use marks (for example ``@pytest.xfail``
+  instead of ``@pytest.mark.xfail``).
+
+
+- `#5606 <https://github.com/pytest-dev/pytest/issues/5606>`_: Fixed internal error when test functions were patched with objects that cannot be compared
+  for truth values against others, like ``numpy`` arrays.
+
+
+- `#5634 <https://github.com/pytest-dev/pytest/issues/5634>`_: ``pytest.exit`` is now correctly handled in ``unittest`` cases.
+  This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly.
+
+
+- `#5650 <https://github.com/pytest-dev/pytest/issues/5650>`_: Improved output when parsing an ini configuration file fails.
+
+
+- `#5701 <https://github.com/pytest-dev/pytest/issues/5701>`_: Fix collection of ``staticmethod`` objects defined with ``functools.partial``.
+
+
+- `#5734 <https://github.com/pytest-dev/pytest/issues/5734>`_: Skip async generator test functions, and update the warning message to refer to ``async def`` functions.
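To make the distinction behind the ``#5578`` entry above concrete (a hedged sketch; both test bodies are ours): ``@pytest.mark.xfail`` declares an expected failure when the test is collected, while ``pytest.xfail()`` is an imperative call that aborts the running test with an xfail outcome. Applying the latter as a decorator is the mistake that the improved message now points out.

.. code-block:: python

    import pytest


    @pytest.mark.xfail(reason="known bug, declared at collection time")
    def test_declared_xfail():
        assert 1 + 1 == 3


    def test_imperative_xfail():
        # pytest.xfail() raises an internal exception and ends the test immediately
        pytest.xfail("environment not suitable for this check")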
+
+
+Improved Documentation
+----------------------
+
+- `#5669 <https://github.com/pytest-dev/pytest/issues/5669>`_: Add docstring for ``Testdir.copy_example``.
+
+
+Trivial/Internal Changes
+------------------------
+
+- `#5095 <https://github.com/pytest-dev/pytest/issues/5095>`_: XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite
+  to avoid future regressions.
+
+
+- `#5516 <https://github.com/pytest-dev/pytest/issues/5516>`_: Cache node splitting function which can improve collection performance in very large test suites.
+
+
+- `#5603 <https://github.com/pytest-dev/pytest/issues/5603>`_: Simplified internal ``SafeRepr`` class and removed some dead code.
+
+
+- `#5664 <https://github.com/pytest-dev/pytest/issues/5664>`_: When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``,
+  the ``test_xfail_handling`` test no longer fails.
+
+
+- `#5684 <https://github.com/pytest-dev/pytest/issues/5684>`_: Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.).
+
+
 pytest 5.0.1 (2019-07-04)
 =========================
@@ -1 +0,0 @@
-Fix RuntimeError/StopIteration when trying to collect package with "__init__.py" only.

@@ -1,2 +0,0 @@
-XML files of the ``xunit2`` family are now validated against the schema by pytest's own test suite
-to avoid future regressions.

@@ -1 +0,0 @@
-Warnings issued during ``pytest_configure`` are explicitly not treated as errors, even if configured as such, because it otherwise completely breaks pytest.

@@ -1,26 +0,0 @@
-As per our policy, the following features have been deprecated in the 4.X series and are now
-removed:
-
-* ``Request.getfuncargvalue``: use ``Request.getfixturevalue`` instead.
-
-* ``pytest.raises`` and ``pytest.warns`` no longer support strings as the second argument.
-
-* ``message`` parameter of ``pytest.raises``.
-
-* ``pytest.raises``, ``pytest.warns`` and ``ParameterSet.param`` now use native keyword-only
-  syntax. This might change the exception message from previous versions, but they still raise
-  ``TypeError`` on unknown keyword arguments as before.
-
-* ``pytest.config`` global variable.
-
-* ``tmpdir_factory.ensuretemp`` method.
-
-* ``pytest_logwarning`` hook.
-
-* ``RemovedInPytest4Warning`` warning type.
-
-* ``request`` is now a reserved name for fixtures.
-
-
-For more information consult
-`Deprecations and Removals <https://docs.pytest.org/en/latest/deprecations.html>`__ in the docs.

@@ -1 +0,0 @@
-JUnit XML now includes a timestamp and hostname in the testsuite tag.

@@ -1 +0,0 @@
-The XML file produced by ``--junitxml`` now correctly contain a ``<testsuites>`` root element.

@@ -1 +0,0 @@
-Cache node splitting function which can improve collection performance in very large test suites.

@@ -1 +0,0 @@
-Fixed using multiple short options together in the command-line (for example ``-vs``) in Python 3.8+.

@@ -1,2 +0,0 @@
-Fix issue where ``tmp_path`` and ``tmpdir`` would not remove directories containing files marked as read-only,
-which could lead to pytest crashing when executed a second time with the ``--basetemp`` option.

@@ -1,2 +0,0 @@
-Replace ``importlib_metadata`` backport with ``importlib.metadata`` from the
-standard library on Python 3.8+.

@@ -1 +0,0 @@
-New ``Config.invocation_args`` attribute containing the unchanged arguments passed to ``pytest.main()``.

@@ -1,13 +0,0 @@
-Removed unused support code for `unittest2 <https://pypi.org/project/unittest2/>`__.
-
-The ``unittest2`` backport module is no longer
-necessary since Python 3.3+, and the small amount of code in pytest to support it also doesn't seem
-to be used: after removed, all tests still pass unchanged.
-
-Although our policy is to introduce a deprecation period before removing any features or support
-for third party libraries, because this code is apparently not used
-at all (even if ``unittest2`` is used by a test suite executed by pytest), it was decided to
-remove it in this release.
-
-If you experience a regression because of this, please
-`file an issue <https://github.com/pytest-dev/pytest/issues/new>`__.

@@ -1,4 +0,0 @@
-New `NUMBER <https://docs.pytest.org/en/latest/doctest.html#using-doctest-options>`__
-option for doctests to ignore irrelevant differences in floating-point numbers.
-Inspired by Sébastien Boisgérault's `numtest <https://github.com/boisgera/numtest>`__
-extension for doctest.

@@ -1,3 +0,0 @@
-Improve type checking for some exception-raising functions (``pytest.xfail``, ``pytest.skip``, etc)
-so they provide better error messages when users meant to use marks (for example ``@pytest.xfail``
-instead of ``@pytest.mark.xfail``).

@@ -1 +0,0 @@
-Simplified internal ``SafeRepr`` class and removed some dead code.

@@ -1,2 +0,0 @@
-Fixed internal error when test functions were patched with objects that cannot be compared
-for truth values against others, like ``numpy`` arrays.

@@ -1,7 +0,0 @@
-``pytest.fail``, ``pytest.xfail`` and ``pytest.skip`` no longer support bytes for the message argument.
-
-This was supported for Python 2 where it was tempting to use ``"message"``
-instead of ``u"message"``.
-
-Python 3 code is unlikely to pass ``bytes`` to these functions. If you do,
-please decode it to an ``str`` beforehand.

@@ -1,2 +0,0 @@
-``pytest.exit`` is now correctly handled in ``unittest`` cases.
-This makes ``unittest`` cases handle ``quit`` from pytest's pdb correctly.

@@ -1 +0,0 @@
-Improved output when parsing an ini configuration file fails.

@@ -1,2 +0,0 @@
-When invoking pytest's own testsuite with ``PYTHONDONTWRITEBYTECODE=1``,
-the ``test_xfail_handling`` test no longer fails.

@@ -1 +0,0 @@
-Add docstring for ``Testdir.copy_example``.

@@ -1 +0,0 @@
-Replace manual handling of ``OSError.errno`` in the codebase by new ``OSError`` subclasses (``PermissionError``, ``FileNotFoundError``, etc.).

@@ -1 +0,0 @@
-Fix collection of ``staticmethod`` objects defined with ``functools.partial``.

@@ -1,4 +0,0 @@
-Time taken to run the test suite now includes a human-readable representation when it takes over
-60 seconds, for example::
-
-    ===== 2 failed in 102.70s (0:01:42) =====

@@ -1 +0,0 @@
-Skip async generator test functions, and update the warning message to refer to ``async def`` functions.
@@ -0,0 +1 @@
+Fix decoding error when printing an error response from ``--pastebin``.
@@ -0,0 +1,7 @@
+coverage:
+  status:
+    project: true
+    patch: true
+    changes: true
+
+comment: off
@@ -6,6 +6,8 @@ Release announcements
    :maxdepth: 2


+   release-5.1.1
+   release-5.1.0
    release-5.0.1
    release-5.0.0
    release-4.6.5
@@ -0,0 +1,56 @@
+pytest-5.1.0
+=======================================
+
+The pytest team is proud to announce the 5.1.0 release!
+
+pytest is a mature Python testing tool with more than 2000 tests
+against itself, passing on many different interpreters and platforms.
+
+This release contains a number of bug fixes and improvements, so users are encouraged
+to take a look at the CHANGELOG:
+
+    https://docs.pytest.org/en/latest/changelog.html
+
+For complete documentation, please visit:
+
+    https://docs.pytest.org/en/latest/
+
+As usual, you can upgrade from PyPI via:
+
+    pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+* Albert Tugushev
+* Alexey Zankevich
+* Anthony Sottile
+* Bruno Oliveira
+* Daniel Hahler
+* David Röthlisberger
+* Florian Bruhin
+* Ilya Stepin
+* Jon Dufresne
+* Kaiqi
+* Max R
+* Miro Hrončok
+* Oliver Bestwalter
+* Ran Benita
+* Ronny Pfannschmidt
+* Samuel Searles-Bryant
+* Semen Zhydenko
+* Steffen Schroeder
+* Thomas Grainger
+* Tim Hoffmann
+* William Woodall
+* Wojtek Erbetowski
+* Xixi Zhao
+* Yash Todi
+* boris
+* dmitry.dygalo
+* helloocc
+* martbln
+* mei-li
+
+
+Happy testing,
+The Pytest Development Team
@@ -0,0 +1,24 @@
+pytest-5.1.1
+=======================================
+
+pytest 5.1.1 has just been released to PyPI.
+
+This is a bug-fix release, being a drop-in replacement. To upgrade::
+
+  pip install --upgrade pytest
+
+The full changelog is available at https://docs.pytest.org/en/latest/changelog.html.
+
+Thanks to all who contributed to this release, among them:
+
+* Anthony Sottile
+* Bruno Oliveira
+* Daniel Hahler
+* Florian Bruhin
+* Hugo van Kemenade
+* Ran Benita
+* Ronny Pfannschmidt
+
+
+Happy testing,
+The pytest Development Team
@@ -47,7 +47,7 @@ you will see the return value of the function call:
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.02s =============================

@@ -208,7 +208,7 @@ if you run this module:
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.02s =============================

@@ -279,7 +279,7 @@ the conftest file:
-1 failed in 0.12 seconds
+1 failed in 0.02s

@@ -160,7 +160,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a
-no tests ran in 0.12 seconds
+no tests ran in 0.00s
@@ -60,10 +60,10 @@ If you run this for the first time you will see two failures:
-test_50.py:6: Failed
+test_50.py:7: Failed

@@ -71,11 +71,11 @@ If you run this for the first time you will see two failures:
-test_50.py:6: Failed
+test_50.py:7: Failed
-2 failed, 48 passed in 0.12 seconds
+2 failed, 48 passed in 0.08s

@@ -99,10 +99,10 @@ If you then run it with ``--lf``:
-test_50.py:6: Failed
+test_50.py:7: Failed

@@ -110,11 +110,11 @@ If you then run it with ``--lf``:
-test_50.py:6: Failed
+test_50.py:7: Failed
-================= 2 failed, 48 deselected in 0.12 seconds ==================
+===================== 2 failed, 48 deselected in 0.02s =====================

@@ -143,10 +143,10 @@ of ``FF`` and dots):
-test_50.py:6: Failed
+test_50.py:7: Failed

@@ -154,11 +154,11 @@ of ``FF`` and dots):
-test_50.py:6: Failed
+test_50.py:7: Failed
-=================== 2 failed, 48 passed in 0.12 seconds ====================
+======================= 2 failed, 48 passed in 0.07s =======================

@@ -227,10 +227,10 @@ If you run this command for the first time, you can see the print statement:
-test_caching.py:17: AssertionError
+test_caching.py:20: AssertionError
-1 failed in 0.12 seconds
+1 failed in 0.02s

@@ -248,8 +248,8 @@ the cache and nothing will be printed:
-test_caching.py:17: AssertionError
+test_caching.py:20: AssertionError
-1 failed in 0.12 seconds
+1 failed in 0.02s

@@ -283,7 +283,7 @@ You can always peek at the content of the cache using the
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================

@@ -300,7 +300,7 @@ filtering:
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================
@@ -88,10 +88,10 @@ of the failing function and hide the other one:
-test_module.py:9: AssertionError
+test_module.py:12: AssertionError
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.02s ========================

@@ -36,7 +36,7 @@ then you can just invoke ``pytest`` directly:
-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.01s =============================

@@ -66,7 +66,7 @@ and functions, including from test modules:
-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.01s =============================
@@ -52,7 +52,7 @@ You can then restrict a test run to only run tests marked with ``webtest``:
-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.01s ======================

@@ -69,7 +69,7 @@ Or the inverse, running all tests except the webtest ones:
-================== 3 passed, 1 deselected in 0.12 seconds ==================
+===================== 3 passed, 1 deselected in 0.01s ======================

@@ -89,7 +89,7 @@ tests based on their module, class, method, or function name:
-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.01s =============================

@@ -104,7 +104,7 @@ You can also select on the class:
-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.01s =============================

@@ -120,7 +120,7 @@ Or select multiple nodes:
-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.01s =============================

@@ -159,7 +159,7 @@ select tests based on their names:
-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.01s ======================

@@ -176,7 +176,7 @@ And you can also run all tests except the ones that match the keyword:
-================== 3 passed, 1 deselected in 0.12 seconds ==================
+===================== 3 passed, 1 deselected in 0.01s ======================

@@ -192,7 +192,7 @@ Or to select "http" and "quick" tests:
-================== 2 passed, 2 deselected in 0.12 seconds ==================
+===================== 2 passed, 2 deselected in 0.01s ======================

@@ -413,7 +413,7 @@ the test needs:
-======================== 1 skipped in 0.12 seconds =========================
+============================ 1 skipped in 0.00s ============================

@@ -428,7 +428,7 @@ and here is one that specifies exactly the environment needed:
-========================= 1 passed in 0.12 seconds =========================
+============================ 1 passed in 0.01s =============================

@@ -499,7 +499,7 @@ The output is as follows:
-1 passed in 0.12 seconds
+1 passed in 0.00s

@@ -551,7 +551,7 @@ Let's run this without capturing output and see what we get:
-1 passed in 0.12 seconds
+1 passed in 0.01s

@@ -623,7 +623,7 @@ then you will see two tests skipped and two executed tests as expected:
-=================== 2 passed, 2 skipped in 0.12 seconds ====================
+======================= 2 passed, 2 skipped in 0.01s =======================

@@ -638,7 +638,7 @@ Note that if you specify a platform via the marker-command line option like this:
-================== 1 passed, 3 deselected in 0.12 seconds ==================
+===================== 1 passed, 3 deselected in 0.01s ======================

@@ -711,7 +711,7 @@ We can now use the ``-m option`` to select one set:
-================== 2 failed, 2 deselected in 0.12 seconds ==================
+===================== 2 failed, 2 deselected in 0.02s ======================

@@ -739,4 +739,4 @@ or to select both "event" and "interface" tests:
-================== 3 failed, 1 deselected in 0.12 seconds ==================
+===================== 3 failed, 1 deselected in 0.03s ======================
@@ -41,7 +41,7 @@ now execute the test specification:
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.02s ========================

@@ -77,7 +77,7 @@ consulted when reporting in ``verbose`` mode:
-==================== 1 failed, 1 passed in 0.12 seconds ====================
+======================= 1 failed, 1 passed in 0.02s ========================

@@ -97,4 +97,4 @@ interesting to just look at the collection tree:
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.02s ===========================
|
@ -54,7 +54,7 @@ This means that we only run 2 tests if we do not pass ``--all``:
|
||||||
|
|
||||||
$ pytest -q test_compute.py
|
$ pytest -q test_compute.py
|
||||||
.. [100%]
|
.. [100%]
|
||||||
2 passed in 0.12 seconds
|
2 passed in 0.01s
|
||||||
|
|
||||||
We run only two computations, so we see two dots.
|
We run only two computations, so we see two dots.
|
||||||
let's run the full monty:
|
let's run the full monty:
|
||||||
|
@ -72,8 +72,8 @@ let's run the full monty:
|
||||||
> assert param1 < 4
|
> assert param1 < 4
|
||||||
E assert 4 < 4
|
E assert 4 < 4
|
||||||
|
|
||||||
test_compute.py:3: AssertionError
|
test_compute.py:4: AssertionError
|
||||||
1 failed, 4 passed in 0.12 seconds
|
1 failed, 4 passed in 0.02s
|
||||||
|
|
||||||
As expected when running the full range of ``param1`` values
|
As expected when running the full range of ``param1`` values
|
||||||
we'll get an error on the last one.
|
we'll get an error on the last one.
|
||||||
|
@ -172,7 +172,7 @@ objects, they are still using the default pytest representation:
|
||||||
<Function test_timedistance_v3[forward]>
|
<Function test_timedistance_v3[forward]>
|
||||||
<Function test_timedistance_v3[backward]>
|
<Function test_timedistance_v3[backward]>
|
||||||
|
|
||||||
======================= no tests ran in 0.12 seconds =======================
|
========================== no tests ran in 0.01s ===========================
|
||||||
|
|
||||||
In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
|
In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
|
||||||
together with the actual data, instead of listing them separately.
|
together with the actual data, instead of listing them separately.
|
||||||
|
@ -229,7 +229,7 @@ this is a fully self-contained example which you can run with:
|
||||||
|
|
||||||
test_scenarios.py .... [100%]
|
test_scenarios.py .... [100%]
|
||||||
|
|
||||||
========================= 4 passed in 0.12 seconds =========================
|
============================ 4 passed in 0.01s =============================
|
||||||
|
|
||||||
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:
|
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:
|
||||||
|
|
||||||
|
@ -248,7 +248,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
|
||||||
<Function test_demo1[advanced]>
|
<Function test_demo1[advanced]>
|
||||||
<Function test_demo2[advanced]>
|
<Function test_demo2[advanced]>
|
||||||
|
|
||||||
======================= no tests ran in 0.12 seconds =======================
|
========================== no tests ran in 0.01s ===========================
|
||||||
|
|
||||||
Note that we told ``metafunc.parametrize()`` that your scenario values
|
Note that we told ``metafunc.parametrize()`` that your scenario values
|
||||||
should be considered class-scoped. With pytest-2.3 this leads to a
|
should be considered class-scoped. With pytest-2.3 this leads to a
|
||||||
|
@ -262,8 +262,8 @@ Deferring the setup of parametrized resources
|
||||||
The parametrization of test functions happens at collection
|
The parametrization of test functions happens at collection
|
||||||
time. It is a good idea to setup expensive resources like DB
|
time. It is a good idea to setup expensive resources like DB
|
||||||
connections or subprocess only when the actual test is run.
|
connections or subprocess only when the actual test is run.
|
||||||
Here is a simple example how you can achieve that, first
|
Here is a simple example how you can achieve that. This test
|
||||||
the actual test requiring a ``db`` object:
|
requires a ``db`` object fixture:
|
||||||
|
|
||||||
.. code-block:: python
|
.. code-block:: python
|
||||||
|
|
||||||
|
@ -323,7 +323,7 @@ Let's first see how it looks like at collection time:
|
||||||
<Function test_db_initialized[d1]>
|
<Function test_db_initialized[d1]>
|
||||||
<Function test_db_initialized[d2]>
|
<Function test_db_initialized[d2]>
|
||||||
|
|
||||||
======================= no tests ran in 0.12 seconds =======================
|
========================== no tests ran in 0.00s ===========================
|
||||||
|
|
||||||
And then when we run the test:
|
And then when we run the test:
|
||||||
|
|
||||||
|
@ -342,8 +342,8 @@ And then when we run the test:
|
||||||
> pytest.fail("deliberately failing for demo purposes")
|
> pytest.fail("deliberately failing for demo purposes")
|
||||||
E Failed: deliberately failing for demo purposes
|
E Failed: deliberately failing for demo purposes
|
||||||
|
|
||||||
test_backends.py:6: Failed
|
test_backends.py:8: Failed
|
||||||
1 failed, 1 passed in 0.12 seconds
|
1 failed, 1 passed in 0.02s
|
||||||
|
|
||||||
The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase.
|
The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase.
|
||||||
|
|
||||||
|
@ -394,7 +394,7 @@ The result of this test will be successful:
|
||||||
<Module test_indirect_list.py>
|
<Module test_indirect_list.py>
|
||||||
<Function test_indirect[a-b]>
|
<Function test_indirect[a-b]>
|
||||||
|
|
||||||
======================= no tests ran in 0.12 seconds =======================
|
========================== no tests ran in 0.00s ===========================
|
||||||
|
|
||||||
.. regendoc:wipe
|
.. regendoc:wipe
|
||||||
|
|
||||||
|
@ -453,8 +453,8 @@ argument sets to use for each test function. Let's run it:
|
||||||
> assert a == b
|
> assert a == b
|
||||||
E assert 1 == 2
|
E assert 1 == 2
|
||||||
|
|
||||||
test_parametrize.py:18: AssertionError
|
test_parametrize.py:21: AssertionError
|
||||||
1 failed, 2 passed in 0.12 seconds
|
1 failed, 2 passed in 0.03s
|
||||||
|
|
||||||
Indirect parametrization with multiple fixtures
|
Indirect parametrization with multiple fixtures
|
||||||
--------------------------------------------------------------
|
--------------------------------------------------------------
|
||||||
|
@ -475,11 +475,10 @@ Running it results in some skips if we don't have all the python interpreters in
|
||||||
.. code-block:: pytest
|
.. code-block:: pytest
|
||||||
|
|
||||||
. $ pytest -rs -q multipython.py
|
. $ pytest -rs -q multipython.py
|
||||||
ssssssssssss...ssssssssssss [100%]
|
ssssssssssss......sss...... [100%]
|
||||||
========================= short test summary info ==========================
|
========================= short test summary info ==========================
|
||||||
SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found
|
SKIPPED [15] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.5' not found
|
||||||
SKIPPED [12] $REGENDOC_TMPDIR/CWD/multipython.py:30: 'python3.7' not found
|
12 passed, 15 skipped in 0.62s
|
||||||
3 passed, 24 skipped in 0.12 seconds
|
|
||||||
|
|
||||||
Indirect parametrization of optional implementations/imports
|
Indirect parametrization of optional implementations/imports
|
||||||
--------------------------------------------------------------------
|
--------------------------------------------------------------------
|
||||||
|
@ -547,8 +546,8 @@ If you run this with reporting for skips enabled:
|
||||||
test_module.py .s [100%]
|
test_module.py .s [100%]
|
||||||
|
|
||||||
========================= short test summary info ==========================
|
========================= short test summary info ==========================
|
||||||
SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2': No module named 'opt2'
|
SKIPPED [1] $REGENDOC_TMPDIR/conftest.py:13: could not import 'opt2': No module named 'opt2'
|
||||||
=================== 1 passed, 1 skipped in 0.12 seconds ====================
|
======================= 1 passed, 1 skipped in 0.01s =======================
|
||||||
|
|
||||||
You'll see that we don't have an ``opt2`` module and thus the second test run
|
You'll see that we don't have an ``opt2`` module and thus the second test run
|
||||||
of our ``test_func1`` was skipped. A few notes:
|
of our ``test_func1`` was skipped. A few notes:
|
||||||
|
@ -610,7 +609,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker:
|
||||||
test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%]
|
test_pytest_param_example.py::test_eval[basic_2+4] PASSED [ 66%]
|
||||||
test_pytest_param_example.py::test_eval[basic_6*9] XFAIL [100%]
|
test_pytest_param_example.py::test_eval[basic_6*9] XFAIL [100%]
|
||||||
|
|
||||||
============ 2 passed, 15 deselected, 1 xfailed in 0.12 seconds ============
|
=============== 2 passed, 15 deselected, 1 xfailed in 0.08s ================
|
||||||
|
|
||||||
As the result:
|
As the result:
|
||||||
|
|
||||||
|
|
|
@@ -158,7 +158,7 @@ The test collection would look like this:
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================

@@ -221,7 +221,7 @@ You can always peek at the collection tree without running tests like this:
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================

@@ -297,7 +297,7 @@ file will be left out:
-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================
@@ -119,7 +119,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
-E AssertionError: assert '111111111111...2222222222222' == '1111111111111...2222222222222'
+E AssertionError: assert '111111111111...2222222222222' == '111111111111...2222222222222'

@@ -136,7 +136,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
-E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n'
+E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n...n2\n2\n2\n2\n'

@@ -235,7 +235,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
-E AssertionError: assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail'
+E AssertionError: assert 'foo' not in 'some multil...nand a\ntail'

@@ -267,7 +267,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
-E AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail '
+E AssertionError: assert 'foo' not in 'head head h...l tail tail '

@@ -280,7 +280,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
-E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail '
+E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head h...l tail tail '

@@ -301,7 +301,7 @@ Here is a nice run of several failures and how ``pytest`` presents things:
-E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialise...oo(a=1, b='c')
+E AssertionError: assert TestSpecialis...oo(a=1, b='b') == TestSpecialis...oo(a=1, b='c')

@@ -650,4 +650,4 @@ Here is a nice run of several failures and how ``pytest`` presents things:
-======================== 44 failed in 0.12 seconds =========================
+============================ 44 failed in 0.26s ============================
@@ -65,7 +65,7 @@ Let's run this without supplying our new option:
 test_sample.py:6: AssertionError
 --------------------------- Captured stdout call ---------------------------
 first
-1 failed in 0.12 seconds
+1 failed in 0.02s

 And now with supplying a command line option:


@@ -89,7 +89,7 @@ And now with supplying a command line option:
 test_sample.py:6: AssertionError
 --------------------------- Captured stdout call ---------------------------
 second
-1 failed in 0.12 seconds
+1 failed in 0.02s

 You can see that the command line option arrived in our test. This
 completes the basic pattern. However, one often rather wants to process

@@ -132,7 +132,7 @@ directory with the above conftest.py:
 rootdir: $REGENDOC_TMPDIR
 collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================

 .. _`excontrolskip`:


@@ -201,7 +201,7 @@ and when running it will see a skipped "slow" test:

 ========================= short test summary info ==========================
 SKIPPED [1] test_module.py:8: need --runslow option to run
-=================== 1 passed, 1 skipped in 0.12 seconds ====================
+======================= 1 passed, 1 skipped in 0.01s =======================

 Or run it including the ``slow`` marked test:


@@ -216,7 +216,7 @@ Or run it including the ``slow`` marked test:

 test_module.py .. [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.01s =============================

 Writing well integrated assertion helpers
 --------------------------------------------------

@@ -261,7 +261,7 @@ Let's run our little function:
 E Failed: not configured: 42

 test_checkconfig.py:11: Failed
-1 failed in 0.12 seconds
+1 failed in 0.02s

 If you only want to hide certain exceptions, you can set ``__tracebackhide__``
 to a callable which gets the ``ExceptionInfo`` object. You can for example use

@@ -358,7 +358,7 @@ which will add the string to the test header accordingly:
 rootdir: $REGENDOC_TMPDIR
 collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================

 .. regendoc:wipe


@@ -388,7 +388,7 @@ which will add info only when run with "--v":
 rootdir: $REGENDOC_TMPDIR
 collecting ... collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================

 and nothing when run plainly:


@@ -401,7 +401,7 @@ and nothing when run plainly:
 rootdir: $REGENDOC_TMPDIR
 collected 0 items

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.00s ===========================

 profiling test duration
 --------------------------

@@ -447,7 +447,7 @@ Now we can profile which test functions execute the slowest:
 0.30s call test_some_are_slow.py::test_funcslow2
 0.20s call test_some_are_slow.py::test_funcslow1
 0.10s call test_some_are_slow.py::test_funcfast
-========================= 3 passed in 0.12 seconds =========================
+============================ 3 passed in 0.61s =============================

 incremental testing - test steps
 ---------------------------------------------------

@@ -531,7 +531,7 @@ If we run this:
 ========================= short test summary info ==========================
 XFAIL test_step.py::TestUserHandling::test_deletion
 reason: previous test failed (test_modification)
-============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds ===============
+================== 1 failed, 2 passed, 1 xfailed in 0.03s ==================

 We'll see that ``test_deletion`` was not executed because ``test_modification``
 failed. It is reported as an "expected failure".

@@ -644,7 +644,7 @@ We can run this:
 E assert 0

 a/test_db2.py:2: AssertionError
-========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ==========
+============= 3 failed, 2 passed, 1 xfailed, 1 error in 0.05s ==============

 The two test modules in the ``a`` directory see the same ``db`` fixture instance
 while the one test in the sister-directory ``b`` doesn't see it. We could of course

@@ -733,7 +733,7 @@ and run them:
 E assert 0

 test_module.py:6: AssertionError
-========================= 2 failed in 0.12 seconds =========================
+============================ 2 failed in 0.02s =============================

 you will have a "failures" file which contains the failing test ids:


@@ -848,7 +848,7 @@ and run it:
 E assert 0

 test_module.py:19: AssertionError
-==================== 2 failed, 1 error in 0.12 seconds =====================
+======================== 2 failed, 1 error in 0.02s ========================

 You'll see that the fixture finalizers could use the precise reporting
 information.
@@ -81,4 +81,4 @@ If you run this without output capturing:
 .test other
 .test_unit1 method called
 .
-4 passed in 0.12 seconds
+4 passed in 0.01s
@@ -92,11 +92,11 @@ marked ``smtp_connection`` fixture function. Running the test looks like this:
 def test_ehlo(smtp_connection):
 response, msg = smtp_connection.ehlo()
 assert response == 250
 > assert 0 # for demo purposes
 E assert 0

-test_smtpsimple.py:11: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+test_smtpsimple.py:14: AssertionError
+============================ 1 failed in 0.18s =============================

 In the failure traceback we see that the test function was called with a
 ``smtp_connection`` argument, the ``smtplib.SMTP()`` instance created by the fixture

@@ -246,7 +246,7 @@ inspect what is going on and can now run the tests:
 > assert 0 # for demo purposes
 E assert 0

-test_module.py:6: AssertionError
+test_module.py:7: AssertionError
 ________________________________ test_noop _________________________________

 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -257,8 +257,8 @@ inspect what is going on and can now run the tests:
 > assert 0 # for demo purposes
 E assert 0

-test_module.py:11: AssertionError
-========================= 2 failed in 0.12 seconds =========================
+test_module.py:13: AssertionError
+============================ 2 failed in 0.20s =============================

 You see the two ``assert 0`` failing and more importantly you can also see
 that the same (module-scoped) ``smtp_connection`` object was passed into the

@@ -315,15 +315,15 @@ Consider the code below:

 .. literalinclude:: example/fixtures/test_fixtures_order.py

-The fixtures requested by ``test_foo`` will be instantiated in the following order:
+The fixtures requested by ``test_order`` will be instantiated in the following order:

 1. ``s1``: is the highest-scoped fixture (``session``).
 2. ``m1``: is the second highest-scoped fixture (``module``).
 3. ``a1``: is a ``function``-scoped ``autouse`` fixture: it will be instantiated before other fixtures
 within the same scope.
 4. ``f3``: is a ``function``-scoped fixture, required by ``f1``: it needs to be instantiated at this point
-5. ``f1``: is the first ``function``-scoped fixture in ``test_foo`` parameter list.
-6. ``f2``: is the last ``function``-scoped fixture in ``test_foo`` parameter list.
+5. ``f1``: is the first ``function``-scoped fixture in ``test_order`` parameter list.
+6. ``f2``: is the last ``function``-scoped fixture in ``test_order`` parameter list.


 .. _`finalization`:

@@ -361,7 +361,7 @@ Let's execute it:
 $ pytest -s -q --tb=no
 FFteardown smtp

-2 failed in 0.12 seconds
+2 failed in 0.20s

 We see that the ``smtp_connection`` instance is finalized after the two
 tests finished execution. Note that if we decorated our fixture

@@ -515,7 +515,7 @@ again, nothing much has changed:
 $ pytest -s -q --tb=no
 FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)

-2 failed in 0.12 seconds
+2 failed in 0.21s

 Let's quickly create another test module that actually sets the
 server URL in its module namespace:

@@ -538,7 +538,7 @@ Running it:
 F [100%]
 ================================= FAILURES =================================
 ______________________________ test_showhelo _______________________________
-test_anothersmtp.py:5: in test_showhelo
+test_anothersmtp.py:6: in test_showhelo
 assert 0, smtp_connection.helo()
 E AssertionError: (250, b'mail.python.org')
 E assert 0

@@ -654,7 +654,7 @@ So let's just do another run:
 > assert 0 # for demo purposes
 E assert 0

-test_module.py:6: AssertionError
+test_module.py:7: AssertionError
 ________________________ test_noop[smtp.gmail.com] _________________________

 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -665,7 +665,7 @@ So let's just do another run:
 > assert 0 # for demo purposes
 E assert 0

-test_module.py:11: AssertionError
+test_module.py:13: AssertionError
 ________________________ test_ehlo[mail.python.org] ________________________

 smtp_connection = <smtplib.SMTP object at 0xdeadbeef>

@@ -676,7 +676,7 @@ So let's just do another run:
 > assert b"smtp.gmail.com" in msg
 E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8\nCHUNKING'

-test_module.py:5: AssertionError
+test_module.py:6: AssertionError
 -------------------------- Captured stdout setup ---------------------------
 finalizing <smtplib.SMTP object at 0xdeadbeef>
 ________________________ test_noop[mail.python.org] ________________________

@@ -689,10 +689,10 @@ So let's just do another run:
 > assert 0 # for demo purposes
 E assert 0

-test_module.py:11: AssertionError
+test_module.py:13: AssertionError
 ------------------------- Captured stdout teardown -------------------------
 finalizing <smtplib.SMTP object at 0xdeadbeef>
-4 failed in 0.12 seconds
+4 failed in 0.89s

 We see that our two test functions each ran twice, against the different
 ``smtp_connection`` instances. Note also, that with the ``mail.python.org``

@@ -771,7 +771,7 @@ Running the above tests results in the following test IDs being used:
 <Function test_ehlo[mail.python.org]>
 <Function test_noop[mail.python.org]>

-======================= no tests ran in 0.12 seconds =======================
+========================== no tests ran in 0.01s ===========================

 .. _`fixture-parametrize-marks`:


@@ -812,7 +812,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:
 test_fixture_marks.py::test_data[1] PASSED [ 66%]
 test_fixture_marks.py::test_data[2] SKIPPED [100%]

-=================== 2 passed, 1 skipped in 0.12 seconds ====================
+======================= 2 passed, 1 skipped in 0.01s =======================

 .. _`interdependent fixtures`:


@@ -861,7 +861,7 @@ Here we declare an ``app`` fixture which receives the previously defined
 test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%]
 test_appsetup.py::test_smtp_connection_exists[mail.python.org] PASSED [100%]

-========================= 2 passed in 0.12 seconds =========================
+============================ 2 passed in 0.44s =============================

 Due to the parametrization of ``smtp_connection``, the test will run twice with two
 different ``App`` instances and respective smtp servers. There is no

@@ -971,7 +971,7 @@ Let's run the tests in verbose mode and with looking at the print-output:
 TEARDOWN modarg mod2


-========================= 8 passed in 0.12 seconds =========================
+============================ 8 passed in 0.01s =============================

 You can see that the parametrized module-scoped ``modarg`` resource caused an
 ordering of test execution that lead to the fewest possible "active" resources.

@@ -1043,7 +1043,7 @@ to verify our fixture is activated and the tests pass:

 $ pytest -q
 .. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.01s

 You can specify multiple fixtures like this:


@@ -1151,7 +1151,7 @@ If we run it, we get two passing tests:

 $ pytest -q
 .. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.01s

 Here is how autouse fixtures work in other scopes:

@@ -28,7 +28,7 @@ Install ``pytest``
 .. code-block:: bash

 $ pytest --version
-This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.x/site-packages/pytest.py
+This is pytest version 5.x.y, imported from $PYTHON_PREFIX/lib/python3.6/site-packages/pytest.py

 .. _`simpletest`:


@@ -68,8 +68,8 @@ That’s it. You can now execute the test function:
 E assert 4 == 5
 E + where 4 = func(3)

-test_sample.py:5: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+test_sample.py:6: AssertionError
+============================ 1 failed in 0.02s =============================

 This test returns a failure report because ``func(3)`` does not return ``5``.


@@ -108,7 +108,7 @@ Execute the test function with “quiet” reporting mode:

 $ pytest -q test_sysexit.py
 . [100%]
-1 passed in 0.12 seconds
+1 passed in 0.00s

 Group multiple tests in a class
 --------------------------------------------------------------

@@ -140,12 +140,12 @@ Once you develop multiple tests, you may want to group them into a class. pytest

 def test_two(self):
 x = "hello"
-> assert hasattr(x, 'check')
+> assert hasattr(x, "check")
 E AssertionError: assert False
 E + where False = hasattr('hello', 'check')

 test_class.py:8: AssertionError
-1 failed, 1 passed in 0.12 seconds
+1 failed, 1 passed in 0.02s

 The first test passed and the second failed. You can easily see the intermediate values in the assertion to help you understand the reason for the failure.


@@ -180,7 +180,7 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look
 test_tmpdir.py:3: AssertionError
 --------------------------- Captured stdout call ---------------------------
 PYTEST_TMPDIR/test_needsfiles0
-1 failed in 0.12 seconds
+1 failed in 0.02s

 More info on tmpdir handling is available at :ref:`Temporary directories and files <tmpdir handling>`.

@@ -44,7 +44,7 @@ To execute it:
 E + where 4 = inc(3)

 test_sample.py:6: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.02s =============================

 Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used.
 See :ref:`Getting Started <getstarted>` for more examples.
@@ -50,7 +50,7 @@ these patches.
 :py:meth:`monkeypatch.chdir` to change the context of the current working directory
 during a test.

-5. Use py:meth:`monkeypatch.syspath_prepend` to modify ``sys.path`` which will also
+5. Use :py:meth:`monkeypatch.syspath_prepend` to modify ``sys.path`` which will also
 call :py:meth:`pkg_resources.fixup_namespace_packages` and :py:meth:`importlib.invalidate_caches`.

 See the `monkeypatch blog post`_ for some introduction material
@@ -75,7 +75,7 @@ them in turn:
 E + where 54 = eval('6*9')

 test_expectation.py:6: AssertionError
-==================== 1 failed, 2 passed in 0.12 seconds ====================
+======================= 1 failed, 2 passed in 0.02s ========================

 .. note::


@@ -128,7 +128,7 @@ Let's run this:

 test_expectation.py ..x [100%]

-=================== 2 passed, 1 xfailed in 0.12 seconds ====================
+======================= 2 passed, 1 xfailed in 0.02s =======================

 The one parameter set which caused a failure previously now
 shows up as an "xfailed (expected to fail)" test.

@@ -205,7 +205,7 @@ If we now pass two stringinput values, our test will run twice:

 $ pytest -q --stringinput="hello" --stringinput="world" test_strings.py
 .. [100%]
-2 passed in 0.12 seconds
+2 passed in 0.01s

 Let's also run with a stringinput that will lead to a failing test:


@@ -225,7 +225,7 @@ Let's also run with a stringinput that will lead to a failing test:
 E + where <built-in method isalpha of str object at 0xdeadbeef> = '!'.isalpha

 test_strings.py:4: AssertionError
-1 failed in 0.12 seconds
+1 failed in 0.02s

 As expected our test function fails.


@@ -239,7 +239,7 @@ list:
 s [100%]
 ========================= short test summary info ==========================
 SKIPPED [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:2
-1 skipped in 0.12 seconds
+1 skipped in 0.00s

 Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across
 those sets cannot be duplicated, otherwise an error will be raised.
@@ -7,8 +7,8 @@ Python 3.4's last release is scheduled for
 `March 2019 <https://www.python.org/dev/peps/pep-0429/#release-schedule>`__. pytest is one of
 the participating projects of the https://python3statement.org.

-The **pytest 4.6** series will be the last to support Python 2.7 and 3.4, and is scheduled
-to be released by **mid-2019**. **pytest 5.0** and onwards will support only Python 3.5+.
+The **pytest 4.6** series is the last to support Python 2.7 and 3.4, and was released in
+**June 2019**. **pytest 5.0** and onwards will support only Python 3.5+.

 Thanks to the `python_requires`_ ``setuptools`` option,
 Python 2.7 and Python 3.4 users using a modern ``pip`` version
@@ -371,7 +371,7 @@ Running it with the report-on-xfail option gives this output:
 XFAIL xfail_demo.py::test_hello6
 reason: reason
 XFAIL xfail_demo.py::test_hello7
-======================== 7 xfailed in 0.12 seconds =========================
+============================ 7 xfailed in 0.05s ============================

 .. _`skip/xfail with parametrize`:

@@ -4,7 +4,6 @@ Talks and Tutorials

 .. sidebar:: Next Open Trainings

-   - `Training at Workshoptage 2019 <https://workshoptage.ch/workshops/2019/test-driven-development-fuer-python-mit-pytest/>`_ (German), 10th September 2019, Rapperswil, Switzerland.
    - `3 day hands-on workshop covering pytest, tox and devpi: "Professional Testing with Python" <https://python-academy.com/courses/specialtopics/python_course_testing.html>`_ (English), October 21 - 23, 2019, Leipzig, Germany.

 .. _`funcargs`: funcargs.html
@@ -64,7 +64,7 @@ Running this would result in a passed test except for the last
 E assert 0

 test_tmp_path.py:13: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+============================ 1 failed in 0.02s =============================

 .. _`tmp_path_factory example`:


@@ -132,8 +132,8 @@ Running this would result in a passed test except for the last
 > assert 0
 E assert 0

-test_tmpdir.py:7: AssertionError
-========================= 1 failed in 0.12 seconds =========================
+test_tmpdir.py:9: AssertionError
+============================ 1 failed in 0.02s =============================

 .. _`tmpdir factory example`:

@@ -151,22 +151,22 @@ the ``self.db`` values in the traceback:

 def test_method1(self):
 assert hasattr(self, "db")
 > assert 0, self.db # fail for demo purposes
 E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
 E assert 0

-test_unittest_db.py:9: AssertionError
+test_unittest_db.py:10: AssertionError
 ___________________________ MyTest.test_method2 ____________________________

 self = <test_unittest_db.MyTest testMethod=test_method2>

 def test_method2(self):
 > assert 0, self.db # fail for demo purposes
 E AssertionError: <conftest.db_class.<locals>.DummyDB object at 0xdeadbeef>
 E assert 0

-test_unittest_db.py:12: AssertionError
-========================= 2 failed in 0.12 seconds =========================
+test_unittest_db.py:13: AssertionError
+============================ 2 failed in 0.02s =============================

 This default pytest traceback shows that the two test methods
 share the same ``self.db`` instance which was our intention

@@ -219,7 +219,7 @@ Running this test module ...:

 $ pytest -q test_unittest_cleandir.py
 . [100%]
-1 passed in 0.12 seconds
+1 passed in 0.01s

 ... gives us one passed test because the ``initdir`` fixture function
 was executed ahead of the ``test_method``.
@@ -247,7 +247,7 @@ Example:
 XPASS test_example.py::test_xpass always xfail
 ERROR test_example.py::test_error - assert 0
 FAILED test_example.py::test_fail - assert 0
-= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
+== 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s ===

 The ``-r`` options accepts a number of characters after it, with ``a`` used
 above meaning "all except passes".

@@ -297,7 +297,7 @@ More than one character can be used, so for example to only see failed and skipp
 ========================= short test summary info ==========================
 FAILED test_example.py::test_fail - assert 0
 SKIPPED [1] $REGENDOC_TMPDIR/test_example.py:23: skipping this test
-= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
+== 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s ===

 Using ``p`` lists the passing tests, whilst ``P`` adds an extra section "PASSES" with those tests that passed but had
 captured output:

@@ -336,7 +336,7 @@ captured output:
 ok
 ========================= short test summary info ==========================
 PASSED test_example.py::test_ok
-= 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.12 seconds =
+== 1 failed, 1 passed, 1 skipped, 1 xfailed, 1 xpassed, 1 error in 0.03s ===

 .. _pdb-option:

@@ -41,7 +41,7 @@ Running pytest now produces this output:
 warnings.warn(UserWarning("api v1, should use functions from v2"))

 -- Docs: https://docs.pytest.org/en/latest/warnings.html
-=================== 1 passed, 1 warnings in 0.12 seconds ===================
+====================== 1 passed, 1 warnings in 0.00s =======================

 The ``-W`` flag can be passed to control which warnings will be displayed or even turn
 them into errors:

@@ -64,7 +64,7 @@ them into errors:
 E UserWarning: api v1, should use functions from v2

 test_show_warnings.py:5: UserWarning
-1 failed in 0.12 seconds
+1 failed in 0.02s

 The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option.
 For example, the configuration below will ignore all user warnings, but will transform

@@ -407,7 +407,7 @@ defines an ``__init__`` constructor, as this prevents the class from being insta
 class Test:

 -- Docs: https://docs.pytest.org/en/latest/warnings.html
-1 warnings in 0.12 seconds
+1 warnings in 0.00s

 These warnings might be filtered using the same builtin mechanisms used to filter other types of warnings.

@@ -442,7 +442,7 @@ additionally it is possible to copy examples for an example folder before runnin
 testdir.copy_example("test_example.py")

 -- Docs: https://docs.pytest.org/en/latest/warnings.html
-=================== 2 passed, 1 warnings in 0.12 seconds ===================
+====================== 2 passed, 1 warnings in 0.12s =======================

 For more information about the result object that ``runpytest()`` returns, and
 the methods that it provides please check out the :py:class:`RunResult
@@ -596,7 +596,7 @@ class ExceptionInfo(Generic[_E]):
         )
         return fmt.repr_excinfo(self)

-    def match(self, regexp: Union[str, Pattern]) -> bool:
+    def match(self, regexp: "Union[str, Pattern]") -> bool:
         """
         Check whether the regular expression 'regexp' is found in the string
         representation of the exception using ``re.search``. If it matches
@@ -35,9 +35,6 @@ PYTEST_TAG = "{}-pytest-{}".format(sys.implementation.cache_tag, version)
 PYC_EXT = ".py" + (__debug__ and "c" or "o")
 PYC_TAIL = "." + PYTEST_TAG + PYC_EXT

-AST_IS = ast.Is()
-AST_NONE = ast.NameConstant(None)
-

 class AssertionRewritingHook(importlib.abc.MetaPathFinder):
     """PEP302/PEP451 import hook which rewrites asserts."""

@@ -863,7 +860,7 @@ class AssertionRewriter(ast.NodeVisitor):
         internally already.
         See issue #3191 for more details.
         """
-        val_is_none = ast.Compare(node, [AST_IS], [AST_NONE])
+        val_is_none = ast.Compare(node, [ast.Is()], [ast.NameConstant(None)])
         send_warning = ast.parse(
             """\
 from _pytest.warning_types import PytestAssertRewriteWarning
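A note on the assertion-rewriter change above: instead of reusing the module-level ``AST_IS``/``AST_NONE`` node singletons, fresh operator and constant nodes are now built each time the ``is None`` comparison is generated, so no AST node object is shared between generated trees. A minimal sketch of the same pattern (the ``is_none_check`` helper name is illustrative, not part of pytest):

.. code-block:: python

    import ast

    def is_none_check(expr: ast.expr) -> ast.Compare:
        # Fresh ast.Is()/ast.NameConstant(None) nodes are created per call,
        # rather than being shared module-level constants.
        return ast.Compare(expr, [ast.Is()], [ast.NameConstant(None)])

    tree = ast.Expression(body=is_none_check(ast.Name(id="x", ctx=ast.Load())))
    ast.fix_missing_locations(tree)
    print(eval(compile(tree, "<demo>", "eval"), {"x": None}))  # True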
@ -9,6 +9,7 @@ import sys
|
||||||
from contextlib import contextmanager
|
from contextlib import contextmanager
|
||||||
from inspect import Parameter
|
from inspect import Parameter
|
||||||
from inspect import signature
|
from inspect import signature
|
||||||
|
from typing import overload
|
||||||
|
|
||||||
import attr
|
import attr
|
||||||
import py
|
import py
|
||||||
|
@ -27,9 +28,9 @@ MODULE_NOT_FOUND_ERROR = (
|
||||||
|
|
||||||
|
|
||||||
if sys.version_info >= (3, 8):
|
if sys.version_info >= (3, 8):
|
||||||
from importlib import metadata as importlib_metadata # noqa
|
from importlib import metadata as importlib_metadata # noqa: F401
|
||||||
else:
|
else:
|
||||||
import importlib_metadata # noqa
|
import importlib_metadata # noqa: F401
|
||||||
|
|
||||||
|
|
||||||
def _format_args(func):
|
def _format_args(func):
|
||||||
|
@ -347,3 +348,9 @@ class FuncargnamesCompatAttr:
|
||||||
|
|
||||||
warnings.warn(FUNCARGNAMES, stacklevel=2)
|
warnings.warn(FUNCARGNAMES, stacklevel=2)
|
||||||
return self.fixturenames
|
return self.fixturenames
|
||||||
|
|
||||||
|
|
||||||
|
if sys.version_info < (3, 5, 2): # pragma: no cover
|
||||||
|
|
||||||
|
def overload(f): # noqa: F811
|
||||||
|
return f
|
||||||
|
|
|
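The hunk above routes ``overload`` through ``_pytest.compat``: it re-exports ``typing.overload`` and falls back to an identity decorator on interpreters older than 3.5.2, while the ``# noqa: F811`` comments silence flake8's redefinition warning for the usual stub-plus-implementation overload pattern. A small self-contained sketch of that pattern (the ``double`` function is only an illustration):

.. code-block:: python

    import sys
    from typing import Union, overload

    if sys.version_info < (3, 5, 2):  # pragma: no cover
        def overload(f):  # noqa: F811
            return f  # identity fallback: the typed stubs are simply ignored

    @overload
    def double(x: int) -> int:
        ...  # pragma: no cover

    @overload  # noqa: F811
    def double(x: str) -> str:
        ...  # pragma: no cover

    def double(x: Union[int, str]) -> Union[int, str]:  # noqa: F811
        return x + x  # the single runtime implementation backing both stubs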
@@ -72,7 +72,7 @@ def create_new_paste(contents):
     if m:
         return "{}/show/{}".format(url, m.group(1))
     else:
-        return "bad response: " + response
+        return "bad response: " + response.decode("utf-8")


 def pytest_terminal_summary(terminalreporter):
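The pastebin fix above matters because the response body read from ``urlopen()`` is ``bytes`` while the prefix is a ``str``; without ``.decode("utf-8")``, building the error message raises ``TypeError`` instead of reporting the bad response. A minimal illustration:

.. code-block:: python

    body = b"something bad occurred"  # an HTTP response body is bytes
    # "bad response: " + body        # TypeError: can only concatenate str (not "bytes") to str
    message = "bad response: " + body.decode("utf-8")
    assert message == "bad response: something bad occurred"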
@@ -13,7 +13,6 @@ from typing import Callable
 from typing import cast
 from typing import Generic
 from typing import Optional
-from typing import overload
 from typing import Pattern
 from typing import Tuple
 from typing import TypeVar

@@ -22,12 +21,14 @@ from typing import Union
 from more_itertools.more import always_iterable

 import _pytest._code
+from _pytest.compat import overload
 from _pytest.compat import STRING_TYPES
 from _pytest.outcomes import fail

 if False:  # TYPE_CHECKING
     from typing import Type  # noqa: F401 (used in type string)


 BASE_TYPE = (type, STRING_TYPES)



@@ -547,12 +548,12 @@ _E = TypeVar("_E", bound=BaseException)
 def raises(
     expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
     *,
-    match: Optional[Union[str, Pattern]] = ...
+    match: "Optional[Union[str, Pattern]]" = ...
 ) -> "RaisesContext[_E]":
     ...  # pragma: no cover


-@overload
+@overload  # noqa: F811
 def raises(
     expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
     func: Callable,

@@ -563,10 +564,10 @@ def raises(
     ...  # pragma: no cover


-def raises(
+def raises(  # noqa: F811
     expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
     *args: Any,
-    match: Optional[Union[str, Pattern]] = None,
+    match: Optional[Union[str, "Pattern"]] = None,
     **kwargs: Any
 ) -> Union["RaisesContext[_E]", Optional[_pytest._code.ExceptionInfo[_E]]]:
     r"""

@@ -724,7 +725,7 @@ class RaisesContext(Generic[_E]):
         self,
         expected_exception: Union["Type[_E]", Tuple["Type[_E]", ...]],
         message: str,
-        match_expr: Optional[Union[str, Pattern]] = None,
+        match_expr: Optional[Union[str, "Pattern"]] = None,
     ) -> None:
         self.expected_exception = expected_exception
         self.message = message
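Throughout these hunks the ``Pattern`` annotations are wrapped in quotes. A quoted annotation is stored as a plain string and only resolved on demand (for example by ``typing.get_type_hints``), so it is never evaluated at import time; together with the ``sys.version_info < (3, 5, 2)`` guards seen elsewhere in this commit, that is what keeps these modules importable on very old Python 3.5 point releases. A small illustration, using the same ``typing`` imports as the code above:

.. code-block:: python

    from typing import Pattern, Union, get_type_hints

    def match(regexp: "Union[str, Pattern]") -> bool:
        # The quoted annotation survives as a raw string in __annotations__
        # and is only turned into a typing object when explicitly requested.
        return True

    print(match.__annotations__["regexp"])  # 'Union[str, Pattern]'
    print(get_type_hints(match)["regexp"])  # typing.Union[str, typing.Pattern]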
@@ -7,11 +7,11 @@ from typing import Callable
 from typing import Iterator
 from typing import List
 from typing import Optional
-from typing import overload
 from typing import Pattern
 from typing import Tuple
 from typing import Union

+from _pytest.compat import overload
 from _pytest.fixtures import yield_fixture
 from _pytest.outcomes import fail


@@ -58,26 +58,26 @@ def deprecated_call(func=None, *args, **kwargs):
 def warns(
     expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
     *,
-    match: Optional[Union[str, Pattern]] = ...
+    match: "Optional[Union[str, Pattern]]" = ...
 ) -> "WarningsChecker":
     ...  # pragma: no cover


-@overload
+@overload  # noqa: F811
 def warns(
     expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
     func: Callable,
     *args: Any,
-    match: Optional[Union[str, Pattern]] = ...,
+    match: Optional[Union[str, "Pattern"]] = ...,
     **kwargs: Any
 ) -> Union[Any]:
     ...  # pragma: no cover


-def warns(
+def warns(  # noqa: F811
     expected_warning: Union["Type[Warning]", Tuple["Type[Warning]", ...]],
     *args: Any,
-    match: Optional[Union[str, Pattern]] = None,
+    match: Optional[Union[str, "Pattern"]] = None,
     **kwargs: Any
 ) -> Union["WarningsChecker", Any]:
     r"""Assert that code raises a particular class of warning.

@@ -207,7 +207,7 @@ class WarningsChecker(WarningsRecorder):
         expected_warning: Optional[
             Union["Type[Warning]", Tuple["Type[Warning]", ...]]
         ] = None,
-        match_expr: Optional[Union[str, Pattern]] = None,
+        match_expr: Optional[Union[str, "Pattern"]] = None,
     ) -> None:
         super().__init__()

@@ -13,22 +13,22 @@ def test_getfuncargnames_functions():
     """Test getfuncargnames for normal functions"""

     def f():
-        pass
+        raise NotImplementedError()

     assert not fixtures.getfuncargnames(f)

     def g(arg):
-        pass
+        raise NotImplementedError()

     assert fixtures.getfuncargnames(g) == ("arg",)

     def h(arg1, arg2="hello"):
-        pass
+        raise NotImplementedError()

     assert fixtures.getfuncargnames(h) == ("arg1",)

     def j(arg1, arg2, arg3="hello"):
-        pass
+        raise NotImplementedError()

     assert fixtures.getfuncargnames(j) == ("arg1", "arg2")


@@ -38,7 +38,7 @@ def test_getfuncargnames_methods():

     class A:
         def f(self, arg1, arg2="hello"):
-            pass
+            raise NotImplementedError()

     assert fixtures.getfuncargnames(A().f) == ("arg1",)


@@ -49,7 +49,7 @@ def test_getfuncargnames_staticmethod():
     class A:
         @staticmethod
         def static(arg1, arg2, x=1):
-            pass
+            raise NotImplementedError()

     assert fixtures.getfuncargnames(A.static, cls=A) == ("arg1", "arg2")


@@ -59,7 +59,7 @@ def test_getfuncargnames_partial():
     import functools

     def check(arg1, arg2, i):
-        pass
+        raise NotImplementedError()

     class T:
         test_ok = functools.partial(check, i=2)

@@ -73,7 +73,7 @@ def test_getfuncargnames_staticmethod_partial():
     import functools

     def check(arg1, arg2, i):
-        pass
+        raise NotImplementedError()

     class T:
         test_ok = staticmethod(functools.partial(check, i=2))

@@ -3325,7 +3325,7 @@ class TestShowFixtures:
 @pytest.fixture
 @pytest.fixture
 def foo():
-    pass
+    raise NotImplementedError()


 class TestContextManagerFixtureFuncs:

@@ -3951,7 +3951,7 @@ def test_call_fixture_function_error():

     @pytest.fixture
     def fix():
-        return 1
+        raise NotImplementedError()

     with pytest.raises(pytest.fail.Exception):
         assert fix() == 1
@@ -163,9 +163,16 @@ class TestRaises:

         class T:
             def __call__(self):
+                # Early versions of Python 3.5 have some bug causing the
+                # __call__ frame to still refer to t even after everything
+                # is done. This makes the test pass for them.
+                if sys.version_info < (3, 5, 2):  # pragma: no cover
+                    del self
                 raise ValueError

         t = T()
+        refcount = len(gc.get_referrers(t))

         if method == "function":
             pytest.raises(ValueError, t)
         else:

@@ -175,14 +182,7 @@ class TestRaises:
         # ensure both forms of pytest.raises don't leave exceptions in sys.exc_info()
         assert sys.exc_info() == (None, None, None)

-        del t
-        # Make sure this does get updated in locals dict
-        # otherwise it could keep a reference
-        locals()
-
-        # ensure the t instance is not stuck in a cyclic reference
-        for o in gc.get_objects():
-            assert type(o) is not T
+        assert refcount == len(gc.get_referrers(t))

     def test_raises_match(self):
         msg = r"with base \d+"
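The reworked test above replaces the old ``gc.get_objects()`` scan with a before/after comparison of ``gc.get_referrers(t)``: if ``pytest.raises`` leaked a reference to the callable, the referrer count would grow. Roughly the same check in isolation (a sketch under that assumption, not the pytest test itself):

.. code-block:: python

    import gc

    class T:
        def __call__(self):
            raise ValueError

    t = T()
    refcount = len(gc.get_referrers(t))

    try:
        t()
    except ValueError:
        pass

    # Exception handling must not leave additional references to ``t`` behind.
    assert refcount == len(gc.get_referrers(t))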
@@ -490,7 +490,6 @@ class TestAssert_reprcompare:
         assert len(expl) > 1

     def test_Sequence(self):
-
         if not hasattr(collections_abc, "MutableSequence"):
             pytest.skip("cannot import MutableSequence")
         MutableSequence = collections_abc.MutableSequence

@@ -806,9 +805,6 @@ class TestFormatExplanation:


 class TestTruncateExplanation:
-
-    """ Confirm assertion output is truncated as expected """
-
     # The number of lines in the truncation explanation message. Used
     # to calculate that results have the expected length.
     LINES_IN_TRUNCATION_MSG = 2

@@ -969,7 +965,13 @@ def test_pytest_assertrepr_compare_integration(testdir):
     )
     result = testdir.runpytest()
     result.stdout.fnmatch_lines(
-        ["*def test_hello():*", "*assert x == y*", "*E*Extra items*left*", "*E*50*"]
+        [
+            "*def test_hello():*",
+            "*assert x == y*",
+            "*E*Extra items*left*",
+            "*E*50*",
+            "*= 1 failed in*",
+        ]
     )


@@ -1302,3 +1304,23 @@ def test_exit_from_assertrepr_compare(monkeypatch):

     with pytest.raises(outcomes.Exit, match="Quitting debugger"):
         callequal(1, 1)
+
+
+def test_assertion_location_with_coverage(testdir):
+    """This used to report the wrong location when run with coverage (#5754)."""
+    p = testdir.makepyfile(
+        """
+        def test():
+            assert False, 1
+            assert False, 2
+        """
+    )
+    result = testdir.runpytest(str(p))
+    result.stdout.fnmatch_lines(
+        [
+            "> assert False, 1",
+            "E AssertionError: 1",
+            "E assert False",
+            "*= 1 failed in*",
+        ]
+    )
@@ -116,3 +116,15 @@ class TestPaste:
         assert "lexer=%s" % lexer in data.decode()
         assert "code=full-paste-contents" in data.decode()
         assert "expiry=1week" in data.decode()
+
+    def test_create_new_paste_failure(self, pastebin, monkeypatch):
+        import io
+        import urllib.request
+
+        def response(url, data):
+            stream = io.BytesIO(b"something bad occurred")
+            return stream
+
+        monkeypatch.setattr(urllib.request, "urlopen", response)
+        result = pastebin.create_new_paste(b"full-paste-contents")
+        assert result == "bad response: something bad occurred"

tox.ini
@@ -118,7 +118,7 @@ commands = python scripts/release.py {posargs}
 description = create GitHub release after deployment
 basepython = python3.6
 usedevelop = True
-passenv = GH_RELEASE_NOTES_TOKEN TRAVIS_TAG
+passenv = GH_RELEASE_NOTES_TOKEN TRAVIS_TAG TRAVIS_REPO_SLUG
 deps =
     github3.py
     pypandoc
