commit 49d690d137

.travis.yml (13 changes)
@@ -6,8 +6,13 @@ stages:
    if: repo = pytest-dev/pytest AND tag IS NOT present
  - name: deploy
    if: repo = pytest-dev/pytest AND tag IS present
    python:
    - '3.7'
    python: '3.7'
    cache: false

env:
  global:
    - PYTEST_ADDOPTS=-vv

install:
  - python -m pip install --upgrade --pre tox

@@ -57,7 +62,8 @@ jobs:
      # - pytester's LsofFdLeakChecker
      # - TestArgComplete (linux only)
      # - numpy
    - env: TOXENV=py37-lsof-numpy-xdist PYTEST_COVERAGE=1
      # Empty PYTEST_ADDOPTS to run this non-verbose.
    - env: TOXENV=py37-lsof-numpy-xdist PYTEST_COVERAGE=1 PYTEST_ADDOPTS=

    # Specialized factors for py27.
    - env: TOXENV=py27-nobyte-numpy-xdist

@@ -147,4 +153,3 @@ notifications:
    skip_join: true
  email:
    - pytest-commit@python.org
  cache: false

AUTHORS (2 changes)
@@ -222,6 +222,7 @@ Steffen Allner
Stephan Obermann
Sven-Hendrik Haase
Tadek Teleżyński
Takafumi Arakaki
Tarcisio Fischer
Tareq Alayan
Ted Xiao

@@ -241,6 +242,7 @@ Vidar T. Fauske
Virgil Dupras
Vitaly Lashmanov
Vlad Dragos
Volodymyr Piskun
Wil Cooley
William Lee
Wim Glenn

CHANGELOG.rst (158 changes)
@@ -18,6 +18,164 @@ with advance notice in the **Deprecations** section of releases.

.. towncrier release notes start

pytest 4.4.0 (2019-03-29)
=========================

Features
--------

- `#2224 <https://github.com/pytest-dev/pytest/issues/2224>`_: ``async`` test functions are skipped and a warning is emitted when a suitable
  async plugin is not installed (such as ``pytest-asyncio`` or ``pytest-trio``).

  Previously ``async`` functions would not execute at all but still be marked as "passed".


- `#2482 <https://github.com/pytest-dev/pytest/issues/2482>`_: Include new ``disable_test_id_escaping_and_forfeit_all_rights_to_community_support`` option to disable ascii-escaping in parametrized values. This may cause a series of problems and, as the name makes clear, use it at your own risk.


- `#4718 <https://github.com/pytest-dev/pytest/issues/4718>`_: The ``-p`` option can now be used to early-load plugins by entry-point name as well,
  instead of just by module name.

  This makes it possible to early load external plugins like ``pytest-cov`` in the command-line::

      pytest -p pytest_cov


- `#4855 <https://github.com/pytest-dev/pytest/issues/4855>`_: The ``--pdbcls`` option now handles classes via module attributes (e.g.
  ``pdb:pdb.Pdb`` with `pdb++`_), and its validation was improved.

  .. _pdb++: https://pypi.org/project/pdbpp/


- `#4875 <https://github.com/pytest-dev/pytest/issues/4875>`_: The `testpaths <https://docs.pytest.org/en/latest/reference.html#confval-testpaths>`__ configuration option is now displayed next
  to the ``rootdir`` and ``inifile`` lines in the pytest header if the option is in effect, i.e., directories or file names were
  not explicitly passed in the command line.

  Also, ``inifile`` is only displayed if there's a configuration file, instead of an empty ``inifile:`` string.


- `#4911 <https://github.com/pytest-dev/pytest/issues/4911>`_: Doctests can now be skipped dynamically using ``pytest.skip()``.
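
  A minimal sketch of the new behavior (a hedged illustration; the module and
  its doctest are hypothetical, run with ``pytest --doctest-modules``)::

      def parse(text):
          """Parse a document.

          >>> import pytest
          >>> pytest.skip("skipping this doctest dynamically")
          >>> parse("x")  # never reached
          """
          return text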

- `#4920 <https://github.com/pytest-dev/pytest/issues/4920>`_: Internal refactorings have been made in order to make the implementation of the
  `pytest-subtests <https://github.com/pytest-dev/pytest-subtests>`__ plugin
  possible, which adds unittest sub-test support and a new ``subtests`` fixture as discussed in
  `#1367 <https://github.com/pytest-dev/pytest/issues/1367>`__.

  For details on the internal refactorings, please see the related PR.


- `#4931 <https://github.com/pytest-dev/pytest/issues/4931>`_: pytester's ``LineMatcher`` now asserts that the passed lines are a sequence.
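
  As a hedged illustration of what this assertion guards against (the test
  below is hypothetical), matcher methods such as ``fnmatch_lines`` should
  receive a sequence of lines rather than a bare string::

      def test_line_matcher_takes_a_sequence(testdir):
          testdir.makepyfile("def test_ok(): pass")
          result = testdir.runpytest("-v")
          result.stdout.fnmatch_lines(["*test_ok PASSED*"])  # a list, not a string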

- `#4936 <https://github.com/pytest-dev/pytest/issues/4936>`_: Handle ``-p plug`` after ``-p no:plug``.

  This can be used to override a blocked plugin (e.g. in "addopts") from the
  command line etc.


- `#4951 <https://github.com/pytest-dev/pytest/issues/4951>`_: Output capturing is handled correctly when only capturing via fixtures (``capsys``, ``capfd``) with ``pdb.set_trace()``.


- `#4956 <https://github.com/pytest-dev/pytest/issues/4956>`_: ``pytester`` sets ``$HOME`` and ``$USERPROFILE`` to the temporary directory during test runs.

  This ensures that configuration files are not loaded from the real user's home directory.


- `#4980 <https://github.com/pytest-dev/pytest/issues/4980>`_: Namespace packages are handled better with ``monkeypatch.syspath_prepend`` and ``testdir.syspathinsert`` (via ``pkg_resources.fixup_namespace_packages``).


- `#4993 <https://github.com/pytest-dev/pytest/issues/4993>`_: The stepwise plugin now reports status information.


- `#5008 <https://github.com/pytest-dev/pytest/issues/5008>`_: If a ``setup.cfg`` file contains ``[tool:pytest]`` and also the no longer supported ``[pytest]`` section, pytest will use ``[tool:pytest]``, ignoring ``[pytest]``. Previously it would unconditionally error out.

  This makes it simpler for plugins to support old pytest versions.


Bug Fixes
---------

- `#1895 <https://github.com/pytest-dev/pytest/issues/1895>`_: Fix bug where fixtures requested dynamically via ``request.getfixturevalue()`` might be torn down
  before the requesting fixture.


- `#4851 <https://github.com/pytest-dev/pytest/issues/4851>`_: pytester now unsets ``PYTEST_ADDOPTS``, to avoid using the outer run's options with ``testdir.runpytest()``.


- `#4903 <https://github.com/pytest-dev/pytest/issues/4903>`_: Use the correct modified time for years after 2038 in rewritten ``.pyc`` files.


- `#4928 <https://github.com/pytest-dev/pytest/issues/4928>`_: Fix line offsets with ``ScopeMismatch`` errors.


- `#4957 <https://github.com/pytest-dev/pytest/issues/4957>`_: ``-p no:plugin`` is now handled correctly for default (internal) plugins, e.g. with ``-p no:capture``.

  Previously they were always loaded (imported), making e.g. the ``capfd`` fixture available.


- `#4968 <https://github.com/pytest-dev/pytest/issues/4968>`_: The pdb ``quit`` command is handled properly when used after the ``debug`` command with `pdb++`_.

  .. _pdb++: https://pypi.org/project/pdbpp/


- `#4975 <https://github.com/pytest-dev/pytest/issues/4975>`_: Fix the interpretation of the ``-qq`` option, which was being treated as ``-v`` instead.


- `#4978 <https://github.com/pytest-dev/pytest/issues/4978>`_: ``outcomes.Exit`` is no longer swallowed in ``assertrepr_compare``.


- `#4988 <https://github.com/pytest-dev/pytest/issues/4988>`_: Close logging's file handler explicitly when the session finishes.


- `#5003 <https://github.com/pytest-dev/pytest/issues/5003>`_: Fix line offset with mark collection error (off by one).


Improved Documentation
----------------------

- `#4974 <https://github.com/pytest-dev/pytest/issues/4974>`_: Update docs for the ``pytest_cmdline_parse`` hook to note availability limitations.


Trivial/Internal Changes
------------------------

- `#4718 <https://github.com/pytest-dev/pytest/issues/4718>`_: ``pluggy>=0.9`` is now required.


- `#4815 <https://github.com/pytest-dev/pytest/issues/4815>`_: ``funcsigs>=1.0`` is now required for Python 2.7.


- `#4829 <https://github.com/pytest-dev/pytest/issues/4829>`_: Some left-over internal code related to ``yield`` tests has been removed.


- `#4890 <https://github.com/pytest-dev/pytest/issues/4890>`_: Remove internally unused ``anypython`` fixture from the pytester plugin.


- `#4912 <https://github.com/pytest-dev/pytest/issues/4912>`_: Remove deprecated Sphinx directive ``add_description_unit()``,
  pin sphinx-removed-in to >= 0.2.0 to support Sphinx 2.0.


- `#4913 <https://github.com/pytest-dev/pytest/issues/4913>`_: Fix invocation of pytest's own tests with a custom ``PYTHONPATH``.


- `#4965 <https://github.com/pytest-dev/pytest/issues/4965>`_: New ``pytest_report_to_serializable`` and ``pytest_report_from_serializable`` **experimental** hooks.

  These hooks will be used by ``pytest-xdist``, ``pytest-subtests``, and the replacement for
  resultlog to serialize and customize reports.

  They are experimental, meaning that their details might change or even be removed
  completely in future patch releases without warning.

  Feedback is welcome from plugin authors and users alike.


- `#4987 <https://github.com/pytest-dev/pytest/issues/4987>`_: ``Collector.repr_failure`` respects the ``--tb`` option, but only defaults to ``short`` now (with ``auto``).


pytest 4.3.1 (2019-03-11)
=========================

@@ -3,7 +3,7 @@ trigger:
- features

variables:
  PYTEST_ADDOPTS: "--junitxml=build/test-results/$(tox.env).xml"
  PYTEST_ADDOPTS: "--junitxml=build/test-results/$(tox.env).xml -vv"
  python.needs_vc: False
  python.exe: "python"
  COVERAGE_FILE: "$(Build.Repository.LocalPath)/.coverage"

@@ -1 +0,0 @@
Use the correct modified time for years after 2038 in rewritten ``.pyc`` files.

@@ -1,2 +0,0 @@
Remove deprecated Sphinx directive ``add_description_unit()``,
pin sphinx-removed-in to >= 0.2.0 to support Sphinx 2.0.

@@ -1 +0,0 @@
Fix invocation of pytest's own tests with a custom ``PYTHONPATH``.

@@ -1 +0,0 @@
Fix line offsets with ``ScopeMismatch`` errors.

@@ -1 +0,0 @@
Update docs for the ``pytest_cmdline_parse`` hook to note availability limitations.

@@ -6,6 +6,7 @@ Release announcements
   :maxdepth: 2

   release-4.4.0
   release-4.3.1
   release-4.3.0
   release-4.2.1

@@ -0,0 +1,39 @@
pytest-4.4.0
============

The pytest team is proud to announce the 4.4.0 release!

pytest is a mature Python testing tool with more than 2000 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    https://docs.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    https://docs.pytest.org/en/latest/

As usual, you can upgrade from PyPI via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* ApaDoctor
* Bernhard M. Wiedemann
* Brian Skinn
* Bruno Oliveira
* Daniel Hahler
* Gary Tyler
* Jeong YunWon
* Miro Hrončok
* Takafumi Arakaki
* henrykironde
* smheidrich

Happy testing,
The Pytest Development Team

@@ -30,7 +30,7 @@ you will see the return value of the function call:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_assert1.py F [100%]

@@ -165,7 +165,7 @@ if you run this module:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_assert2.py F [100%]

@@ -28,25 +28,29 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a

    Values can be any object handled by the json stdlib module.

capsys
    Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make
    captured output available via ``capsys.readouterr()`` method calls
    which return a ``(out, err)`` namedtuple. ``out`` and ``err`` will be ``text``
    objects.
    Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.

    The captured output is made available via ``capsys.readouterr()`` method
    calls, which return a ``(out, err)`` namedtuple.
    ``out`` and ``err`` will be ``text`` objects.

capsysbinary
    Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make
    captured output available via ``capsys.readouterr()`` method calls
    which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes``
    objects.
    Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.

    The captured output is made available via ``capsysbinary.readouterr()``
    method calls, which return a ``(out, err)`` namedtuple.
    ``out`` and ``err`` will be ``bytes`` objects.

capfd
    Enable capturing of writes to file descriptors ``1`` and ``2`` and make
    captured output available via ``capfd.readouterr()`` method calls
    which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text``
    objects.
    Enable text capturing of writes to file descriptors ``1`` and ``2``.

    The captured output is made available via ``capfd.readouterr()`` method
    calls, which return a ``(out, err)`` namedtuple.
    ``out`` and ``err`` will be ``text`` objects.

capfdbinary
    Enable capturing of writes to file descriptors 1 and 2 and make
    captured output available via ``capfdbinary.readouterr`` method calls
    which return a ``(out, err)`` tuple. ``out`` and ``err`` will be
    ``bytes`` objects.
    Enable bytes capturing of writes to file descriptors ``1`` and ``2``.

    The captured output is made available via ``capfdbinary.readouterr()``
    method calls, which return a ``(out, err)`` namedtuple.
    ``out`` and ``err`` will be ``bytes`` objects.

doctest_namespace
    Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests.

pytestconfig

@@ -55,7 +59,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a

    Example::

        def test_foo(pytestconfig):
            if pytestconfig.getoption("verbose"):
            if pytestconfig.getoption("verbose") > 0:
                ...

record_property
    Add extra properties to the calling test.
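
A minimal usage sketch for the ``capsys`` fixture documented above (the test
itself is hypothetical)::

    def test_greeting(capsys):
        print("hello")
        out, err = capsys.readouterr()
        assert out == "hello\n"
        assert err == ""
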
@@ -82,7 +82,7 @@ If you then run it with ``--lf``:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 50 items / 48 deselected / 2 selected
run-last-failure: rerun previous 2 failures

@@ -126,7 +126,7 @@ of ``FF`` and dots):

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 50 items
run-last-failure: rerun previous 2 failures first

@@ -218,12 +218,8 @@ If you run this command for the first time, you can see the print statement:

    def test_function(mydata):
>       assert mydata == 23
E       assert 42 == 23
E        -42
E        +23

test_caching.py:17: AssertionError
-------------------------- Captured stdout setup ---------------------------
running expensive computation...
1 failed in 0.12 seconds

If you run it a second time the value will be retrieved from

@@ -241,8 +237,6 @@ the cache and nothing will be printed:

    def test_function(mydata):
>       assert mydata == 23
E       assert 42 == 23
E        -42
E        +23

test_caching.py:17: AssertionError
1 failed in 0.12 seconds

@@ -262,16 +256,96 @@ You can always peek at the content of the cache using the

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
cachedir: $PYTHON_PREFIX/.pytest_cache
------------------------------- cache values -------------------------------
cache/lastfailed contains:
  {'test_50.py::test_num[17]': True,
  {'a/test_db.py::test_a1': True,
   'a/test_db2.py::test_a2': True,
   'b/test_error.py::test_root': True,
   'failure_demo.py::TestCustomAssertMsg::test_custom_repr': True,
   'failure_demo.py::TestCustomAssertMsg::test_multiline': True,
   'failure_demo.py::TestCustomAssertMsg::test_single_line': True,
   'failure_demo.py::TestFailing::test_not': True,
   'failure_demo.py::TestFailing::test_simple': True,
   'failure_demo.py::TestFailing::test_simple_multiline': True,
   'failure_demo.py::TestMoreErrors::test_compare': True,
   'failure_demo.py::TestMoreErrors::test_complex_error': True,
   'failure_demo.py::TestMoreErrors::test_global_func': True,
   'failure_demo.py::TestMoreErrors::test_instance': True,
   'failure_demo.py::TestMoreErrors::test_startswith': True,
   'failure_demo.py::TestMoreErrors::test_startswith_nested': True,
   'failure_demo.py::TestMoreErrors::test_try_finally': True,
   'failure_demo.py::TestMoreErrors::test_z1_unpack_error': True,
   'failure_demo.py::TestMoreErrors::test_z2_type_error': True,
   'failure_demo.py::TestRaises::test_raise': True,
   'failure_demo.py::TestRaises::test_raises': True,
   'failure_demo.py::TestRaises::test_raises_doesnt': True,
   'failure_demo.py::TestRaises::test_reinterpret_fails_with_print_for_the_fun_of_it': True,
   'failure_demo.py::TestRaises::test_some_error': True,
   'failure_demo.py::TestRaises::test_tupleerror': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_attrs': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_dataclass': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_dict': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_list': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_list_long': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_long_text': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_long_text_multiline': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_longer_list': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_multiline_text': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_set': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_similar_text': True,
   'failure_demo.py::TestSpecialisedExplanations::test_eq_text': True,
   'failure_demo.py::TestSpecialisedExplanations::test_in_list': True,
   'failure_demo.py::TestSpecialisedExplanations::test_not_in_text_multiline': True,
   'failure_demo.py::TestSpecialisedExplanations::test_not_in_text_single': True,
   'failure_demo.py::TestSpecialisedExplanations::test_not_in_text_single_long': True,
   'failure_demo.py::TestSpecialisedExplanations::test_not_in_text_single_long_term': True,
   'failure_demo.py::test_attribute': True,
   'failure_demo.py::test_attribute_failure': True,
   'failure_demo.py::test_attribute_instance': True,
   'failure_demo.py::test_attribute_multiple': True,
   'failure_demo.py::test_dynamic_compile_shows_nicely': True,
   'failure_demo.py::test_generative[3-6]': True,
   'test_50.py::test_num[17]': True,
   'test_50.py::test_num[25]': True,
   'test_anothersmtp.py::test_showhelo': True,
   'test_assert1.py::test_function': True,
   'test_assert2.py::test_set_comparison': True,
   'test_backends.py::test_db_initialized[d2]': True,
   'test_caching.py::test_function': True,
   'test_foocompare.py::test_compare': True}
   'test_checkconfig.py::test_something': True,
   'test_class.py::TestClass::test_two': True,
   'test_compute.py::test_compute[4]': True,
   'test_example.py::test_error': True,
   'test_example.py::test_fail': True,
   'test_foocompare.py::test_compare': True,
   'test_module.py::test_call_fails': True,
   'test_module.py::test_ehlo': True,
   'test_module.py::test_ehlo[mail.python.org]': True,
   'test_module.py::test_ehlo[smtp.gmail.com]': True,
   'test_module.py::test_event_simple': True,
   'test_module.py::test_fail1': True,
   'test_module.py::test_fail2': True,
   'test_module.py::test_func2': True,
   'test_module.py::test_interface_complex': True,
   'test_module.py::test_interface_simple': True,
   'test_module.py::test_noop': True,
   'test_module.py::test_noop[mail.python.org]': True,
   'test_module.py::test_noop[smtp.gmail.com]': True,
   'test_module.py::test_setup_fails': True,
   'test_parametrize.py::TestClass::test_equals[1-2]': True,
   'test_sample.py::test_answer': True,
   'test_show_warnings.py::test_one': True,
   'test_simple.yml::hello': True,
   'test_smtpsimple.py::test_ehlo': True,
   'test_step.py::TestUserHandling::test_modification': True,
   'test_strings.py::test_valid_string[!]': True,
   'test_tmp_path.py::test_create_file': True,
   'test_tmpdir.py::test_create_file': True,
   'test_tmpdir.py::test_needsfiles': True,
   'test_unittest_db.py::MyTest::test_method1': True,
   'test_unittest_db.py::MyTest::test_method2': True}
cache/nodeids contains:
  ['test_caching.py::test_function']
cache/stepwise contains:

@@ -71,7 +71,7 @@ of the failing function and hide the other one:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py .F [100%]

@@ -35,7 +35,7 @@ You can then restrict a test run to only run tests marked with ``webtest``:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 4 items / 3 deselected / 1 selected

test_server.py::test_send_http PASSED [100%]

@@ -50,7 +50,7 @@ Or the inverse, running all tests except the webtest ones:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 4 items / 1 deselected / 3 selected

test_server.py::test_something_quick PASSED [ 33%]

@@ -72,7 +72,7 @@ tests based on their module, class, method, or function name:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 1 item

test_server.py::TestClass::test_method PASSED [100%]

@@ -87,7 +87,7 @@ You can also select on the class:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 1 item

test_server.py::TestClass::test_method PASSED [100%]

@@ -102,7 +102,7 @@ Or select multiple nodes:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 2 items

test_server.py::TestClass::test_method PASSED [ 50%]

@@ -142,7 +142,7 @@ select tests based on their names:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 4 items / 3 deselected / 1 selected

test_server.py::test_send_http PASSED [100%]

@@ -157,7 +157,7 @@ And you can also run all tests except the ones that match the keyword:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 4 items / 1 deselected / 3 selected

test_server.py::test_something_quick PASSED [ 33%]

@@ -174,7 +174,7 @@ Or to select "http" and "quick" tests:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 4 items / 2 deselected / 2 selected

test_server.py::test_send_http PASSED [ 50%]

@@ -370,7 +370,7 @@ the test needs:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_someenv.py s [100%]

@@ -385,7 +385,7 @@ and here is one that specifies exactly the environment needed:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_someenv.py . [100%]

@@ -555,7 +555,7 @@ then you will see two tests skipped and two executed tests as expected:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 4 items

test_plat.py s.s. [100%]

@@ -572,7 +572,7 @@ Note that if you specify a platform via the marker-command line option like this:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 4 items / 3 deselected / 1 selected

test_plat.py . [100%]

@@ -626,7 +626,7 @@ We can now use the ``-m option`` to select one set:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 4 items / 2 deselected / 2 selected

test_module.py FF [100%]

@@ -650,7 +650,7 @@ or to select both "event" and "interface" tests:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 4 items / 1 deselected / 3 selected

test_module.py FFF [100%]

@@ -31,7 +31,7 @@ now execute the test specification:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
rootdir: $REGENDOC_TMPDIR/nonpython
collected 2 items

test_simple.yml F. [100%]

@@ -66,7 +66,7 @@ consulted when reporting in ``verbose`` mode:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
rootdir: $REGENDOC_TMPDIR/nonpython
collecting ... collected 2 items

test_simple.yml::hello FAILED [ 50%]

@@ -90,7 +90,7 @@ interesting to just look at the collection tree:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
rootdir: $REGENDOC_TMPDIR/nonpython
collected 2 items
<Package $REGENDOC_TMPDIR/nonpython>
  <YamlFile test_simple.yml>

@@ -146,7 +146,7 @@ objects, they are still using the default pytest representation:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 8 items
<Module test_time.py>
  <Function test_timedistance_v0[a0-b0-expected0]>

@@ -205,7 +205,7 @@ this is a fully self-contained example which you can run with:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 4 items

test_scenarios.py .... [100%]

@@ -220,7 +220,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 4 items
<Module test_scenarios.py>
  <Class TestSampleWithScenarios>

@@ -287,7 +287,7 @@ Let's first see how it looks like at collection time:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 2 items
<Module test_backends.py>
  <Function test_db_initialized[d1]>

@@ -353,7 +353,7 @@ The result of this test will be successful:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item
<Module test_indirect_list.py>
  <Function test_indirect[a-b]>

@@ -411,8 +411,6 @@ argument sets to use for each test function. Let's run it:

    def test_equals(self, a, b):
>       assert a == b
E       assert 1 == 2
E        -1
E        +2

test_parametrize.py:18: AssertionError
1 failed, 2 passed in 0.12 seconds

@@ -490,7 +488,7 @@ If you run this with reporting for skips enabled:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py .s [100%]

@@ -548,7 +546,7 @@ Then run ``pytest`` with verbose mode and with only the ``basic`` marker:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 17 items / 14 deselected / 3 selected

test_pytest_param_example.py::test_eval[1+7-8] PASSED [ 33%]

@@ -15,7 +15,7 @@ get on the terminal - we are working on that):

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/assertion, inifile:
rootdir: $REGENDOC_TMPDIR/assertion
collected 44 items

failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%]

@@ -129,7 +129,7 @@ directory with the above conftest.py:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 0 items

======================= no tests ran in 0.12 seconds =======================

@@ -190,7 +190,7 @@ and when running it will see a skipped "slow" test:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py .s [100%]

@@ -207,7 +207,7 @@ Or run it including the ``slow`` marked test:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py .. [100%]

@@ -351,7 +351,7 @@ which will add the string to the test header accordingly:

platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
project deps: mylib-1.1
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 0 items

======================= no tests ran in 0.12 seconds =======================

@@ -381,7 +381,7 @@ which will add info only when run with "--v":

cachedir: $PYTHON_PREFIX/.pytest_cache
info1: did you know that ...
did you?
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 0 items

======================= no tests ran in 0.12 seconds =======================

@@ -394,7 +394,7 @@ and nothing when run plainly:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 0 items

======================= no tests ran in 0.12 seconds =======================

@@ -434,7 +434,7 @@ Now we can profile which test functions execute the slowest:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 3 items

test_some_are_slow.py ... [100%]

@@ -509,7 +509,7 @@ If we run this:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 4 items

test_step.py .Fx. [100%]

@@ -593,7 +593,7 @@ We can run this:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 7 items

test_step.py .Fx. [ 57%]

@@ -707,7 +707,7 @@ and run them:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py FF [100%]

@@ -811,7 +811,7 @@ and run it:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 3 items

test_module.py Esetting up a test failed! test_module.py::test_setup_fails

@@ -74,7 +74,7 @@ marked ``smtp_connection`` fixture function. Running the test looks like this:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_smtpsimple.py F [100%]

@@ -217,7 +217,7 @@ inspect what is going on and can now run the tests:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_module.py FF [100%]

@@ -710,7 +710,7 @@ Running the above tests results in the following test IDs being used:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 10 items
<Module test_anothersmtp.py>
  <Function test_showhelo[smtp.gmail.com]>

@@ -755,7 +755,7 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 3 items

test_fixture_marks.py::test_data[0] PASSED [ 33%]

@@ -800,7 +800,7 @@ Here we declare an ``app`` fixture which receives the previously defined

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 2 items

test_appsetup.py::test_smtp_connection_exists[smtp.gmail.com] PASSED [ 50%]

@@ -871,7 +871,7 @@ Let's run the tests in verbose mode and with looking at the print-output:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collecting ... collected 8 items

test_module.py::test_0[1] SETUP otherarg 1

@@ -52,7 +52,7 @@ That’s it. You can now execute the test function:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_sample.py F [100%]

@@ -30,7 +30,7 @@ To execute it:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_sample.py F [100%]

@@ -58,7 +58,7 @@ them in turn:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 3 items

test_expectation.py ..F [100%]

@@ -81,6 +81,21 @@ them in turn:

test_expectation.py:8: AssertionError
==================== 1 failed, 2 passed in 0.12 seconds ====================

.. note::

   pytest by default escapes any non-ascii characters used in unicode strings
   for the parametrization because it has several downsides.
   If however you would like to use unicode strings in parametrization and see them in the terminal as is (non-escaped), use this option in your ``pytest.ini``:

   .. code-block:: ini

      [pytest]
      disable_test_id_escaping_and_forfeit_all_rights_to_community_support = True

   Keep in mind however that this might cause unwanted side effects and
   even bugs depending on the OS used and plugins currently installed, so use it at your own risk.

As designed in this example, only one pair of input/output values fails
the simple test function. And as usual with test function arguments,
you can see the ``input`` and ``output`` values in the traceback.
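
A hedged sketch of the escaping behavior the note describes (the test module is
hypothetical): by default the non-ascii parameter below is ascii-escaped in the
generated test ID, unless the ini option above is set::

    import pytest

    @pytest.mark.parametrize("greeting", [u"hello", u"hallå"])
    def test_greets(greeting):
        assert greeting
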
@@ -110,7 +125,7 @@ Let's run this:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 3 items

test_expectation.py ..x [100%]

@@ -27,7 +27,7 @@ Here is a little annotated list for some popular plugins:
  for `twisted <http://twistedmatrix.com>`_ apps, starting a reactor and
  processing deferreds from test functions.

* `pytest-cov <https://pypi.org/project/pytest-cov/>`_:
* `pytest-cov <https://pypi.org/project/pytest-cov/>`__:
  coverage reporting, compatible with distributed testing

* `pytest-xdist <https://pypi.org/project/pytest-xdist/>`_:

@@ -1,4 +1,3 @@

Reference
=========

@@ -49,7 +48,7 @@ pytest.main

.. autofunction:: _pytest.config.main

pytest.param
~~~~~~~~~~~~~
~~~~~~~~~~~~

.. autofunction:: pytest.param(*values, [id], [marks])

@@ -335,7 +335,7 @@ Running it with the report-on-xfail option gives this output:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR/example, inifile:
rootdir: $REGENDOC_TMPDIR/example
collected 7 items

xfail_demo.py xxxxxxx [100%]

@@ -43,7 +43,7 @@ Running this would result in a passed test except for the last

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_tmp_path.py F [100%]

@@ -110,7 +110,7 @@ Running this would result in a passed test except for the last

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_tmpdir.py F [100%]

@@ -130,7 +130,7 @@ the ``self.db`` values in the traceback:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 2 items

test_unittest_db.py FF [100%]

@@ -204,7 +204,7 @@ Example:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 6 items

test_example.py .FEsxX [100%]

@@ -256,7 +256,7 @@ More than one character can be used, so for example to only see failed and skipp

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 6 items

test_example.py .FEsxX [100%]

@@ -292,7 +292,7 @@ captured output:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 6 items

test_example.py .FEsxX [100%]

@@ -384,10 +384,8 @@ in your code and pytest automatically disables its output capture for that test:

* Output capture in other tests is not affected.
* Any prior test output that has already been captured will be processed as
  such.
* Any later output produced within the same test will not be captured and will
  instead get sent directly to ``sys.stdout``. Note that this holds true even
  for test output occurring after you exit the interactive PDB_ tracing session
  and continue with the regular test run.
* Output capture gets resumed when ending the debugger session (via the
  ``continue`` command).


.. _`breakpoint-builtin`:

@@ -680,6 +678,22 @@ for example ``-x`` if you only want to send one particular failure.

Currently only pasting to the http://bpaste.net service is implemented.

Early loading plugins
---------------------

You can early-load plugins (internal and external) explicitly in the command-line with the ``-p`` option::

    pytest -p mypluginmodule

The option receives a ``name`` parameter, which can be:

* A full module dotted name, for example ``myproject.plugins``. This dotted name must be importable.
* The entry-point name of a plugin. This is the name passed to ``setuptools`` when the plugin is
  registered. For example to early-load the `pytest-cov <https://pypi.org/project/pytest-cov/>`__ plugin you can use::

      pytest -p pytest_cov


Disabling plugins
-----------------

@@ -26,7 +26,7 @@ Running pytest now produces this output:

=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-4.x.y, py-1.x.y, pluggy-0.x.y
cachedir: $PYTHON_PREFIX/.pytest_cache
rootdir: $REGENDOC_TMPDIR, inifile:
rootdir: $REGENDOC_TMPDIR
collected 1 item

test_show_warnings.py . [100%]

setup.py (4 changes)
@@ -10,10 +10,10 @@ INSTALL_REQUIRES = [
    'more-itertools>=4.0.0,<6.0.0;python_version<="2.7"',
    'more-itertools>=4.0.0;python_version>"2.7"',
    "atomicwrites>=1.0",
    'funcsigs;python_version<"3.0"',
    'funcsigs>=1.0;python_version<"3.0"',
    'pathlib2>=2.2.0;python_version<"3.6"',
    'colorama;sys_platform=="win32"',
    "pluggy>=0.7",
    "pluggy>=0.9",
]
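
The conditional dependencies above rely on PEP 508 environment markers, which
pip evaluates at install time. A hedged sketch of that evaluation using the
third-party ``packaging`` library (an assumption for illustration; setup.py
itself just hands these strings to setuptools):

    from packaging.markers import Marker

    # On a Python 3 interpreter this prints False, so funcsigs would be skipped.
    print(Marker('python_version < "3.0"').evaluate())
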
@@ -241,25 +241,20 @@ class TracebackEntry(object):

    def ishidden(self):
        """ return True if the current frame has a var __tracebackhide__
        resolving to True
        resolving to True.

        If __tracebackhide__ is a callable, it gets called with the
        ExceptionInfo instance and can decide whether to hide the traceback.

        mostly for internal use
        """
        try:
            tbh = self.frame.f_locals["__tracebackhide__"]
        except KeyError:
            try:
                tbh = self.frame.f_globals["__tracebackhide__"]
            except KeyError:
                return False

        if callable(tbh):
        f = self.frame
        tbh = f.f_locals.get(
            "__tracebackhide__", f.f_globals.get("__tracebackhide__", False)
        )
        if tbh and callable(tbh):
            return tbh(None if self._excinfo is None else self._excinfo())
        else:
            return tbh
        return tbh

    def __str__(self):
        try:
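
For context, a minimal sketch of the ``__tracebackhide__`` variable that
``ishidden`` resolves (the helper and test below are hypothetical):

    import pytest

    def assert_positive(value):
        __tracebackhide__ = True  # hide this helper frame from failure tracebacks
        if value <= 0:
            pytest.fail("expected a positive value, got {!r}".format(value))

    def test_answer():
        assert_positive(-1)  # the reported traceback points here, not at the helper
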
@@ -418,6 +413,7 @@ class ExceptionInfo(object):
        to the exception message/``__str__()``
        """
        tup = sys.exc_info()
        assert tup[0] is not None, "no current exception"
        _striptext = ""
        if exprinfo is None and isinstance(tup[1], AssertionError):
            exprinfo = getattr(tup[1], "msg", None)

@@ -203,7 +203,9 @@ def compile_(source, filename=None, mode="exec", flags=0, dont_inherit=0):

def getfslineno(obj):
    """ Return source location (path, lineno) for the given object.
    If the source cannot be determined return ("", -1)
    If the source cannot be determined return ("", -1).

    The line number is 0-based.
    """
    from .code import Code
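
A hedged usage sketch of this internal helper (assuming pytest 4.x, where it
lives in ``_pytest._code.source``; internal APIs may change without notice):

    from _pytest._code.source import getfslineno

    def sample():
        pass

    fspath, lineno = getfslineno(sample)
    print(fspath, lineno)  # lineno is 0-based, per the docstring above
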
@@ -21,6 +21,9 @@ import six

from _pytest._io.saferepr import saferepr
from _pytest.assertion import util
from _pytest.assertion.util import (  # noqa: F401
    format_explanation as _format_explanation,
)
from _pytest.compat import spec_from_file_location
from _pytest.pathlib import fnmatch_ex
from _pytest.pathlib import PurePath

@@ -485,9 +488,6 @@ def _saferepr(obj):
    return r.replace(u"\n", u"\\n")


from _pytest.assertion.util import format_explanation as _format_explanation  # noqa


def _format_assertmsg(obj):
    """Format the custom assertion message given.

|
|||
|
||||
import _pytest._code
|
||||
from ..compat import Sequence
|
||||
from _pytest import outcomes
|
||||
from _pytest._io.saferepr import saferepr
|
||||
|
||||
# The _reprcompare attribute on the util module is used by the new assertion
|
||||
|
@ -102,6 +103,38 @@ except NameError:
|
|||
basestring = str
|
||||
|
||||
|
||||
def issequence(x):
|
||||
return isinstance(x, Sequence) and not isinstance(x, basestring)
|
||||
|
||||
|
||||
def istext(x):
|
||||
return isinstance(x, basestring)
|
||||
|
||||
|
||||
def isdict(x):
|
||||
return isinstance(x, dict)
|
||||
|
||||
|
||||
def isset(x):
|
||||
return isinstance(x, (set, frozenset))
|
||||
|
||||
|
||||
def isdatacls(obj):
|
||||
return getattr(obj, "__dataclass_fields__", None) is not None
|
||||
|
||||
|
||||
def isattrs(obj):
|
||||
return getattr(obj, "__attrs_attrs__", None) is not None
|
||||
|
||||
|
||||
def isiterable(obj):
|
||||
try:
|
||||
iter(obj)
|
||||
return not istext(obj)
|
||||
except TypeError:
|
||||
return False
|
||||
|
||||
|
||||
def assertrepr_compare(config, op, left, right):
|
||||
"""Return specialised explanations for some operators/operands"""
|
||||
width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
|
||||
|
@@ -110,31 +143,6 @@ def assertrepr_compare(config, op, left, right):

     summary = u"%s %s %s" % (ecu(left_repr), op, ecu(right_repr))

-    def issequence(x):
-        return isinstance(x, Sequence) and not isinstance(x, basestring)
-
-    def istext(x):
-        return isinstance(x, basestring)
-
-    def isdict(x):
-        return isinstance(x, dict)
-
-    def isset(x):
-        return isinstance(x, (set, frozenset))
-
-    def isdatacls(obj):
-        return getattr(obj, "__dataclass_fields__", None) is not None
-
-    def isattrs(obj):
-        return getattr(obj, "__attrs_attrs__", None) is not None
-
-    def isiterable(obj):
-        try:
-            iter(obj)
-            return not istext(obj)
-        except TypeError:
-            return False
-
     verbose = config.getoption("verbose")
     explanation = None
     try:
@@ -151,7 +159,7 @@ def assertrepr_compare(config, op, left, right):
         elif type(left) == type(right) and (isdatacls(left) or isattrs(left)):
             type_fn = (isdatacls, isattrs)
             explanation = _compare_eq_cls(left, right, verbose, type_fn)
-        elif verbose:
+        elif verbose > 0:
             explanation = _compare_eq_verbose(left, right)
         if isiterable(left) and isiterable(right):
             expl = _compare_eq_iterable(left, right, verbose)
@@ -162,6 +170,8 @@ def assertrepr_compare(config, op, left, right):
         elif op == "not in":
             if istext(left) and istext(right):
                 explanation = _notin_text(left, right, verbose)
+    except outcomes.Exit:
+        raise
     except Exception:
         explanation = [
             u"(pytest_assertion plugin: representation of details failed. "
@@ -175,8 +185,8 @@ def assertrepr_compare(config, op, left, right):
     return [summary] + explanation


-def _diff_text(left, right, verbose=False):
-    """Return the explanation for the diff between text or bytes
+def _diff_text(left, right, verbose=0):
+    """Return the explanation for the diff between text or bytes.

     Unless --verbose is used this will skip leading and trailing
     characters which are identical to keep the diff minimal.
@@ -202,7 +212,7 @@ def _diff_text(left, right, verbose=False):
         left = escape_for_readable_diff(left)
     if isinstance(right, bytes):
         right = escape_for_readable_diff(right)
-    if not verbose:
+    if verbose < 1:
         i = 0  # just in case left or right has zero length
         for i in range(min(len(left), len(right))):
             if left[i] != right[i]:
@@ -250,7 +260,7 @@ def _compare_eq_verbose(left, right):
     return explanation


-def _compare_eq_iterable(left, right, verbose=False):
+def _compare_eq_iterable(left, right, verbose=0):
     if not verbose:
         return [u"Use -v to get the full diff"]
     # dynamic import to speedup pytest
@@ -273,7 +283,7 @@ def _compare_eq_iterable(left, right, verbose=False):
     return explanation


-def _compare_eq_sequence(left, right, verbose=False):
+def _compare_eq_sequence(left, right, verbose=0):
     explanation = []
     for i in range(min(len(left), len(right))):
         if left[i] != right[i]:
@@ -292,7 +302,7 @@ def _compare_eq_sequence(left, right, verbose=False):
     return explanation


-def _compare_eq_set(left, right, verbose=False):
+def _compare_eq_set(left, right, verbose=0):
     explanation = []
     diff_left = left - right
     diff_right = right - left
@@ -307,7 +317,7 @@ def _compare_eq_set(left, right, verbose=False):
     return explanation


-def _compare_eq_dict(left, right, verbose=False):
+def _compare_eq_dict(left, right, verbose=0):
     explanation = []
     common = set(left).intersection(set(right))
     same = {k: left[k] for k in common if left[k] == right[k]}
@@ -368,7 +378,7 @@ def _compare_eq_cls(left, right, verbose, type_fns):
     return explanation


-def _notin_text(term, text, verbose=False):
+def _notin_text(term, text, verbose=0):
     index = text.find(term)
     head = text[:index]
     tail = text[index + len(term) :]
@@ -121,10 +121,12 @@ class Cache(object):
                 cache_dir_exists_already = True
             else:
                 cache_dir_exists_already = self._cachedir.exists()
-            path.parent.mkdir(exist_ok=True, parents=True)
+                path.parent.mkdir(exist_ok=True, parents=True)
         except (IOError, OSError):
             self.warn("could not create cache path {path}", path=path)
             return
+        if not cache_dir_exists_already:
+            self._ensure_supporting_files()
         try:
             f = path.open("wb" if PY2 else "w")
         except (IOError, OSError):
@@ -132,24 +134,18 @@ class Cache(object):
         else:
             with f:
                 json.dump(value, f, indent=2, sort_keys=True)
-                if not cache_dir_exists_already:
-                    self._ensure_supporting_files()

     def _ensure_supporting_files(self):
         """Create supporting files in the cache dir that are not really part of the cache."""
-        if self._cachedir.is_dir():
-            readme_path = self._cachedir / "README.md"
-            if not readme_path.is_file():
-                readme_path.write_text(README_CONTENT)
+        readme_path = self._cachedir / "README.md"
+        readme_path.write_text(README_CONTENT)

-            gitignore_path = self._cachedir.joinpath(".gitignore")
-            if not gitignore_path.is_file():
-                msg = u"# Created by pytest automatically.\n*"
-                gitignore_path.write_text(msg, encoding="UTF-8")
+        gitignore_path = self._cachedir.joinpath(".gitignore")
+        msg = u"# Created by pytest automatically.\n*"
+        gitignore_path.write_text(msg, encoding="UTF-8")

-            cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
-            if not cachedir_tag_path.is_file():
-                cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)
+        cachedir_tag_path = self._cachedir.joinpath("CACHEDIR.TAG")
+        cachedir_tag_path.write_bytes(CACHEDIR_TAG_CONTENT)


 class LFPlugin(object):
@@ -344,7 +340,7 @@ def cache(request):

 def pytest_report_header(config):
     """Display cachedir with --cache-show and if non-default."""
-    if config.option.verbose or config.getini("cache_dir") != ".pytest_cache":
+    if config.option.verbose > 0 or config.getini("cache_dir") != ".pytest_cache":
         cachedir = config.cache._cachedir
         # TODO: evaluate generating upward relative paths
         # starting with .., ../.. if sensible
@@ -91,6 +91,13 @@ class CaptureManager(object):
         self._global_capturing = None
         self._current_item = None

+    def __repr__(self):
+        return "<CaptureManager _method=%r _global_capturing=%r _current_item=%r>" % (
+            self._method,
+            self._global_capturing,
+            self._current_item,
+        )
+
     def _getcapture(self, method):
         if method == "fd":
             return MultiCapture(out=True, err=True, Capture=FDCapture)
@@ -98,8 +105,17 @@ class CaptureManager(object):
             return MultiCapture(out=True, err=True, Capture=SysCapture)
         elif method == "no":
             return MultiCapture(out=False, err=False, in_=False)
         else:
-            raise ValueError("unknown capturing method: %r" % method)
+            raise ValueError("unknown capturing method: %r" % method)  # pragma: no cover
+
+    def is_capturing(self):
+        if self.is_globally_capturing():
+            return "global"
+        capture_fixture = getattr(self._current_item, "_capture_fixture", None)
+        if capture_fixture is not None:
+            return (
+                "fixture %s" % self._current_item._capture_fixture.request.fixturename
+            )
+        return False

     # Global capturing control

@@ -128,6 +144,15 @@ class CaptureManager(object):
         if cap is not None:
             cap.suspend_capturing(in_=in_)

+    def suspend(self, in_=False):
+        # Need to undo local capsys-et-al if it exists before disabling global capture.
+        self.suspend_fixture(self._current_item)
+        self.suspend_global_capture(in_)
+
+    def resume(self):
+        self.resume_global_capture()
+        self.resume_fixture(self._current_item)
+
     def read_global_capture(self):
         return self._global_capturing.readouterr()
@@ -161,15 +186,12 @@ class CaptureManager(object):

     @contextlib.contextmanager
     def global_and_fixture_disabled(self):
-        """Context manager to temporarily disables global and current fixture capturing."""
-        # Need to undo local capsys-et-al if exists before disabling global capture
-        self.suspend_fixture(self._current_item)
-        self.suspend_global_capture(in_=False)
+        """Context manager to temporarily disable global and current fixture capturing."""
+        self.suspend()
         try:
             yield
         finally:
-            self.resume_global_capture()
-            self.resume_fixture(self._current_item)
+            self.resume()

     @contextlib.contextmanager
     def item_capture(self, when, item):
@@ -247,10 +269,11 @@ def _ensure_only_one_capture_fixture(request, name):

 @pytest.fixture
 def capsys(request):
-    """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make
-    captured output available via ``capsys.readouterr()`` method calls
-    which return a ``(out, err)`` namedtuple.  ``out`` and ``err`` will be ``text``
-    objects.
+    """Enable text capturing of writes to ``sys.stdout`` and ``sys.stderr``.
+
+    The captured output is made available via ``capsys.readouterr()`` method
+    calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``text`` objects.
     """
     _ensure_only_one_capture_fixture(request, "capsys")
     with _install_capture_fixture_on_item(request, SysCapture) as fixture:
@@ -259,26 +282,28 @@ def capsys(request):

 @pytest.fixture
 def capsysbinary(request):
-    """Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make
-    captured output available via ``capsys.readouterr()`` method calls
-    which return a ``(out, err)`` tuple.  ``out`` and ``err`` will be ``bytes``
-    objects.
+    """Enable bytes capturing of writes to ``sys.stdout`` and ``sys.stderr``.
+
+    The captured output is made available via ``capsysbinary.readouterr()``
+    method calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``bytes`` objects.
     """
     _ensure_only_one_capture_fixture(request, "capsysbinary")
     # Currently, the implementation uses the python3 specific `.buffer`
     # property of CaptureIO.
     if sys.version_info < (3,):
-        raise request.raiseerror("capsysbinary is only supported on python 3")
+        raise request.raiseerror("capsysbinary is only supported on Python 3")
     with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture:
         yield fixture


 @pytest.fixture
 def capfd(request):
-    """Enable capturing of writes to file descriptors ``1`` and ``2`` and make
-    captured output available via ``capfd.readouterr()`` method calls
-    which return a ``(out, err)`` tuple.  ``out`` and ``err`` will be ``text``
-    objects.
+    """Enable text capturing of writes to file descriptors ``1`` and ``2``.
+
+    The captured output is made available via ``capfd.readouterr()`` method
+    calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``text`` objects.
     """
     _ensure_only_one_capture_fixture(request, "capfd")
     if not hasattr(os, "dup"):
@@ -291,10 +316,11 @@ def capfd(request):

 @pytest.fixture
 def capfdbinary(request):
-    """Enable capturing of write to file descriptors 1 and 2 and make
-    captured output available via ``capfdbinary.readouterr`` method calls
-    which return a ``(out, err)`` tuple.  ``out`` and ``err`` will be
-    ``bytes`` objects.
+    """Enable bytes capturing of writes to file descriptors ``1`` and ``2``.
+
+    The captured output is made available via ``capfd.readouterr()`` method
+    calls, which return a ``(out, err)`` namedtuple.
+    ``out`` and ``err`` will be ``byte`` objects.
     """
     _ensure_only_one_capture_fixture(request, "capfdbinary")
     if not hasattr(os, "dup"):
@@ -316,9 +342,9 @@ def _install_capture_fixture_on_item(request, capture_class):
     """
     request.node._capture_fixture = fixture = CaptureFixture(capture_class, request)
     capmanager = request.config.pluginmanager.getplugin("capturemanager")
-    # need to active this fixture right away in case it is being used by another fixture (setup phase)
-    # if this fixture is being used only by a test function (call phase), then we wouldn't need this
-    # activation, but it doesn't hurt
+    # Need to active this fixture right away in case it is being used by another fixture (setup phase).
+    # If this fixture is being used only by a test function (call phase), then we wouldn't need this
+    # activation, but it doesn't hurt.
     capmanager.activate_fixture(request.node)
     yield fixture
     fixture.close()
@@ -357,7 +383,7 @@ class CaptureFixture(object):
     def readouterr(self):
        """Read and return the captured output so far, resetting the internal buffer.

-        :return: captured content as a namedtuple with ``out`` and ``err`` string attributes
+        :return: captured content as a namedtuple with ``out`` and ``err`` string attributes
        """
        captured_out, captured_err = self._captured_out, self._captured_err
        if self._capture is not None:
@@ -446,6 +472,9 @@ class MultiCapture(object):
         if err:
             self.err = Capture(2)

+    def __repr__(self):
+        return "<MultiCapture out=%r err=%r in_=%r>" % (self.out, self.err, self.in_)
+
     def start_capturing(self):
         if self.in_:
             self.in_.start()
@@ -593,7 +622,7 @@ class FDCapture(FDCaptureBinary):
     EMPTY_BUFFER = str()

     def snap(self):
-        res = FDCaptureBinary.snap(self)
+        res = super(FDCapture, self).snap()
         enc = getattr(self.tmpfile, "encoding", None)
         if enc and isinstance(res, bytes):
             res = six.text_type(res, enc, "replace")
|
@ -696,13 +725,11 @@ def _colorama_workaround():
|
|||
first import of colorama while I/O capture is active, colorama will
|
||||
fail in various ways.
|
||||
"""
|
||||
|
||||
if not sys.platform.startswith("win32"):
|
||||
return
|
||||
try:
|
||||
import colorama # noqa
|
||||
except ImportError:
|
||||
pass
|
||||
if sys.platform.startswith("win32"):
|
||||
try:
|
||||
import colorama # noqa: F401
|
||||
except ImportError:
|
||||
pass
|
||||
|
||||
|
||||
def _readline_workaround():
|
||||
|
@@ -723,13 +750,11 @@ def _readline_workaround():

     See https://github.com/pytest-dev/pytest/pull/1281
     """
-
-    if not sys.platform.startswith("win32"):
-        return
-    try:
-        import readline  # noqa
-    except ImportError:
-        pass
+    if sys.platform.startswith("win32"):
+        try:
+            import readline  # noqa: F401
+        except ImportError:
+            pass


 def _py36_windowsconsoleio_workaround(stream):
@@ -140,6 +140,7 @@ default_plugins = (
     "stepwise",
     "warnings",
     "logging",
+    "reports",
 )


@@ -147,10 +148,15 @@ builtin_plugins = set(default_plugins)
 builtin_plugins.add("pytester")


-def get_config():
+def get_config(args=None):
     # subsequent calls to main will create a fresh instance
     pluginmanager = PytestPluginManager()
     config = Config(pluginmanager)
+
+    if args is not None:
+        # Handle any "-p no:plugin" args.
+        pluginmanager.consider_preparse(args)
+
     for spec in default_plugins:
         pluginmanager.import_plugin(spec)
     return config
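Illustrative sketch (not part of the diff): because get_config() now pre-parses the arguments, a "-p no:plugin" option given on the command line can block even a default plugin before it is imported. An equivalent programmatic invocation, assuming pytest 4.4 is installed:

    import pytest

    # Same effect as running "pytest -p no:cacheprovider" from a shell:
    # the cacheprovider plugin is blocked before the default plugins load.
    pytest.main(["-p", "no:cacheprovider", "--collect-only"])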
@@ -178,7 +184,7 @@ def _prepareconfig(args=None, plugins=None):
         msg = "`args` parameter expected to be a list or tuple of strings, got: {!r} (type: {})"
         raise TypeError(msg.format(args, type(args)))

-    config = get_config()
+    config = get_config(args)
     pluginmanager = config.pluginmanager
     try:
         if plugins:
@@ -476,7 +482,10 @@ class PytestPluginManager(PluginManager):
             i += 1
             if isinstance(opt, six.string_types):
                 if opt == "-p":
-                    parg = args[i]
+                    try:
+                        parg = args[i]
+                    except IndexError:
+                        return
                     i += 1
                 elif opt.startswith("-p"):
                     parg = opt[2:]
@@ -496,7 +505,15 @@ class PytestPluginManager(PluginManager):
             if not name.startswith("pytest_"):
                 self.set_blocked("pytest_" + name)
         else:
-            self.import_plugin(arg)
+            name = arg
+            # Unblock the plugin.  None indicates that it has been blocked.
+            # There is no interface with pluggy for this.
+            if self._name2plugin.get(name, -1) is None:
+                del self._name2plugin[name]
+            if not name.startswith("pytest_"):
+                if self._name2plugin.get("pytest_" + name, -1) is None:
+                    del self._name2plugin["pytest_" + name]
+            self.import_plugin(arg, consider_entry_points=True)

     def consider_conftest(self, conftestmodule):
         self.register(conftestmodule, name=conftestmodule.__file__)
|
@ -512,7 +529,11 @@ class PytestPluginManager(PluginManager):
|
|||
for import_spec in plugins:
|
||||
self.import_plugin(import_spec)
|
||||
|
||||
def import_plugin(self, modname):
|
||||
def import_plugin(self, modname, consider_entry_points=False):
|
||||
"""
|
||||
Imports a plugin with ``modname``. If ``consider_entry_points`` is True, entry point
|
||||
names are also considered to find a plugin.
|
||||
"""
|
||||
# most often modname refers to builtin modules, e.g. "pytester",
|
||||
# "terminal" or "capture". Those plugins are registered under their
|
||||
# basename for historic purposes but must be imported with the
|
||||
|
@@ -523,22 +544,26 @@ class PytestPluginManager(PluginManager):
         modname = str(modname)
         if self.is_blocked(modname) or self.get_plugin(modname) is not None:
             return
-        if modname in builtin_plugins:
-            importspec = "_pytest." + modname
-        else:
-            importspec = modname
+
+        importspec = "_pytest." + modname if modname in builtin_plugins else modname
         self.rewrite_hook.mark_rewrite(importspec)
+
+        if consider_entry_points:
+            loaded = self.load_setuptools_entrypoints("pytest11", name=modname)
+            if loaded:
+                return
+
         try:
             __import__(importspec)
         except ImportError as e:
-            new_exc_type = ImportError
             new_exc_message = 'Error importing plugin "%s": %s' % (
                 modname,
                 safe_str(e.args[0]),
             )
-            new_exc = new_exc_type(new_exc_message)
+            new_exc = ImportError(new_exc_message)
+            tb = sys.exc_info()[2]

-            six.reraise(new_exc_type, new_exc, sys.exc_info()[2])
+            six.reraise(ImportError, new_exc, tb)

         except Skipped as e:
             from _pytest.warnings import _issue_warning_captured
@@ -697,7 +722,7 @@ class Config(object):
     @classmethod
     def fromdictargs(cls, option_dict, args):
         """ constructor useable for subprocesses. """
-        config = get_config()
+        config = get_config(args)
         config.option.__dict__.update(option_dict)
         config.parse(args, addopts=False)
         for x in config.option.plugins:
@@ -741,7 +766,7 @@ class Config(object):
         by the importhook.
         """
         ns, unknown_args = self._parser.parse_known_and_unknown_args(args)
-        mode = ns.assertmode
+        mode = getattr(ns, "assertmode", "plain")
         if mode == "rewrite":
             try:
                 hook = _pytest.assertion.install_importhook(self)
@@ -33,7 +33,12 @@ def getcfg(args, config=None):
             p = base.join(inibasename)
             if exists(p):
                 iniconfig = py.iniconfig.IniConfig(p)
-                if "pytest" in iniconfig.sections:
+                if (
+                    inibasename == "setup.cfg"
+                    and "tool:pytest" in iniconfig.sections
+                ):
+                    return base, p, iniconfig["tool:pytest"]
+                elif "pytest" in iniconfig.sections:
                     if inibasename == "setup.cfg" and config is not None:

                         fail(
@@ -41,11 +46,6 @@ def getcfg(args, config=None):
                             pytrace=False,
                         )
                     return base, p, iniconfig["pytest"]
-                if (
-                    inibasename == "setup.cfg"
-                    and "tool:pytest" in iniconfig.sections
-                ):
-                    return base, p, iniconfig["tool:pytest"]
                 elif inibasename == "pytest.ini":
                     # allowed to be empty
                     return base, p, {}
@@ -3,6 +3,7 @@ from __future__ import absolute_import
 from __future__ import division
 from __future__ import print_function

+import argparse
 import pdb
 import sys
 from doctest import UnexpectedException
@@ -11,6 +12,31 @@ from _pytest import outcomes
 from _pytest.config import hookimpl


+def _validate_usepdb_cls(value):
+    try:
+        modname, classname = value.split(":")
+    except ValueError:
+        raise argparse.ArgumentTypeError(
+            "{!r} is not in the format 'modname:classname'".format(value)
+        )
+
+    try:
+        __import__(modname)
+        mod = sys.modules[modname]
+
+        # Handle --pdbcls=pdb:pdb.Pdb (useful e.g. with pdbpp).
+        parts = classname.split(".")
+        pdb_cls = getattr(mod, parts[0])
+        for part in parts[1:]:
+            pdb_cls = getattr(pdb_cls, part)
+
+        return pdb_cls
+    except Exception as exc:
+        raise argparse.ArgumentTypeError(
+            "could not get pdb class for {!r}: {}".format(value, exc)
+        )
+
+
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group._addoption(
@@ -23,6 +49,7 @@ def pytest_addoption(parser):
         "--pdbcls",
         dest="usepdb_cls",
         metavar="modulename:classname",
+        type=_validate_usepdb_cls,
         help="start a custom interactive Python debugger on errors. "
         "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb",
     )
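Illustrative sketch (not part of the diff): with type=_validate_usepdb_cls wired in above, a bad --pdbcls value now fails during argument parsing, and dotted attribute paths resolve. Assumes pytest >= 4.4 is installed:

    from _pytest.debugging import _validate_usepdb_cls

    pdb_cls = _validate_usepdb_cls("pdb:Pdb")  # module "pdb", class "Pdb"
    assert pdb_cls.__name__ == "Pdb"
    # An invalid value such as "pdb" (no colon) raises argparse.ArgumentTypeError.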
@@ -35,11 +62,8 @@ def pytest_addoption(parser):


 def pytest_configure(config):
-    if config.getvalue("usepdb_cls"):
-        modname, classname = config.getvalue("usepdb_cls").split(":")
-        __import__(modname)
-        pdb_cls = getattr(sys.modules[modname], classname)
-    else:
+    pdb_cls = config.getvalue("usepdb_cls")
+    if not pdb_cls:
         pdb_cls = pdb.Pdb

     if config.getvalue("trace"):
|
@ -77,6 +101,12 @@ class pytestPDB(object):
|
|||
_saved = []
|
||||
_recursive_debug = 0
|
||||
|
||||
@classmethod
|
||||
def _is_capturing(cls, capman):
|
||||
if capman:
|
||||
return capman.is_capturing()
|
||||
return False
|
||||
|
||||
@classmethod
|
||||
def _init_pdb(cls, *args, **kwargs):
|
||||
""" Initialize PDB debugging, dropping any IO capturing. """
|
||||
|
@@ -85,7 +115,7 @@ class pytestPDB(object):
         if cls._pluginmanager is not None:
             capman = cls._pluginmanager.getplugin("capturemanager")
             if capman:
-                capman.suspend_global_capture(in_=True)
+                capman.suspend(in_=True)
             tw = _pytest.config.create_terminal_writer(cls._config)
             tw.line()
             if cls._recursive_debug == 0:
@@ -93,10 +123,19 @@ class pytestPDB(object):
                 header = kwargs.pop("header", None)
                 if header is not None:
                     tw.sep(">", header)
-                elif capman and capman.is_globally_capturing():
-                    tw.sep(">", "PDB set_trace (IO-capturing turned off)")
                 else:
-                    tw.sep(">", "PDB set_trace")
+                    capturing = cls._is_capturing(capman)
+                    if capturing:
+                        if capturing == "global":
+                            tw.sep(">", "PDB set_trace (IO-capturing turned off)")
+                        else:
+                            tw.sep(
+                                ">",
+                                "PDB set_trace (IO-capturing turned off for %s)"
+                                % capturing,
+                            )
+                    else:
+                        tw.sep(">", "PDB set_trace")

         class _PdbWrapper(cls._pdb_cls, object):
             _pytest_capman = capman
@@ -110,15 +149,24 @@ class pytestPDB(object):

             def do_continue(self, arg):
                 ret = super(_PdbWrapper, self).do_continue(arg)
-                if self._pytest_capman:
+                if cls._recursive_debug == 0:
                     tw = _pytest.config.create_terminal_writer(cls._config)
                     tw.line()
-                    if cls._recursive_debug == 0:
-                        if self._pytest_capman.is_globally_capturing():
+
+                    capman = self._pytest_capman
+                    capturing = pytestPDB._is_capturing(capman)
+                    if capturing:
+                        if capturing == "global":
                             tw.sep(">", "PDB continue (IO-capturing resumed)")
                         else:
-                            tw.sep(">", "PDB continue")
-                    self._pytest_capman.resume_global_capture()
+                            tw.sep(
+                                ">",
+                                "PDB continue (IO-capturing resumed for %s)"
+                                % capturing,
+                            )
+                        capman.resume()
+                    else:
+                        tw.sep(">", "PDB continue")
                 cls._pluginmanager.hook.pytest_leave_pdb(
                     config=cls._config, pdb=self
                 )
@@ -128,8 +176,15 @@ class pytestPDB(object):
             do_c = do_cont = do_continue

             def set_quit(self):
+                """Raise Exit outcome when quit command is used in pdb.
+
+                This is a bit of a hack - it would be better if BdbQuit
+                could be handled, but this would require to wrap the
+                whole pytest run, and adjust the report etc.
+                """
                 super(_PdbWrapper, self).set_quit()
-                outcomes.exit("Quitting debugger")
+                if cls._recursive_debug == 0:
+                    outcomes.exit("Quitting debugger")

             def setup(self, f, tb):
                 """Suspend on setup().
|
|||
_pdb = pytestPDB._init_pdb()
|
||||
testfunction = pyfuncitem.obj
|
||||
pyfuncitem.obj = _pdb.runcall
|
||||
if pyfuncitem._isyieldedfunction():
|
||||
arg_list = list(pyfuncitem._args)
|
||||
arg_list.insert(0, testfunction)
|
||||
pyfuncitem._args = tuple(arg_list)
|
||||
else:
|
||||
if "func" in pyfuncitem._fixtureinfo.argnames:
|
||||
raise ValueError("--trace can't be used with a fixture named func!")
|
||||
pyfuncitem.funcargs["func"] = testfunction
|
||||
new_list = list(pyfuncitem._fixtureinfo.argnames)
|
||||
new_list.append("func")
|
||||
pyfuncitem._fixtureinfo.argnames = tuple(new_list)
|
||||
if "func" in pyfuncitem._fixtureinfo.argnames: # noqa
|
||||
raise ValueError("--trace can't be used with a fixture named func!")
|
||||
pyfuncitem.funcargs["func"] = testfunction
|
||||
new_list = list(pyfuncitem._fixtureinfo.argnames)
|
||||
new_list.append("func")
|
||||
pyfuncitem._fixtureinfo.argnames = tuple(new_list)
|
||||
|
||||
|
||||
def _enter_pdb(node, excinfo, rep):
|
||||
|
@@ -244,9 +294,9 @@ def _find_last_non_hidden_frame(stack):


 def post_mortem(t):
-    class Pdb(pytestPDB._pdb_cls):
+    class Pdb(pytestPDB._pdb_cls, object):
         def get_stack(self, f, t):
-            stack, i = pdb.Pdb.get_stack(self, f, t)
+            stack, i = super(Pdb, self).get_stack(f, t)
             if f is None:
                 i = _find_last_non_hidden_frame(stack)
             return stack, i
@@ -15,6 +15,7 @@ from _pytest._code.code import ReprFileLocation
 from _pytest._code.code import TerminalRepr
 from _pytest.compat import safe_getattr
 from _pytest.fixtures import FixtureRequest
+from _pytest.outcomes import Skipped

 DOCTEST_REPORT_CHOICE_NONE = "none"
 DOCTEST_REPORT_CHOICE_CDIFF = "cdiff"
@@ -153,6 +154,8 @@ def _init_runner_class():
             raise failure

         def report_unexpected_exception(self, out, test, example, exc_info):
+            if isinstance(exc_info[1], Skipped):
+                raise exc_info[1]
             failure = doctest.UnexpectedException(test, example, exc_info)
             if self.continue_on_failure:
                 out.append(failure)
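Illustrative sketch (not part of the diff): letting Skipped propagate out of the doctest runner is what makes dynamic skips work inside doctests, e.g. when run with --doctest-modules (the function below is invented for illustration):

    def platform_specific():
        """
        >>> import sys, pytest
        >>> if sys.platform.startswith("win"):
        ...     pytest.skip("this doctest does not run on Windows")
        >>> 1 + 1
        2
        """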
@@ -585,11 +585,13 @@ class FixtureRequest(FuncargnamesCompatAttr):
             # call the fixture function
             fixturedef.execute(request=subrequest)
         finally:
-            # if fixture function failed it might have registered finalizers
-            self.session._setupstate.addfinalizer(
-                functools.partial(fixturedef.finish, request=subrequest),
-                subrequest.node,
-            )
+            self._schedule_finalizers(fixturedef, subrequest)

+    def _schedule_finalizers(self, fixturedef, subrequest):
+        # if fixture function failed it might have registered finalizers
+        self.session._setupstate.addfinalizer(
+            functools.partial(fixturedef.finish, request=subrequest), subrequest.node
+        )
+
     def _check_scope(self, argname, invoking_scope, requested_scope):
         if argname == "request":
@@ -659,6 +661,16 @@ class SubRequest(FixtureRequest):
     def addfinalizer(self, finalizer):
         self._fixturedef.addfinalizer(finalizer)

+    def _schedule_finalizers(self, fixturedef, subrequest):
+        # if the executing fixturedef was not explicitly requested in the argument list (via
+        # getfixturevalue inside the fixture call) then ensure this fixture def will be finished
+        # first
+        if fixturedef.argname not in self.funcargnames:
+            fixturedef.addfinalizer(
+                functools.partial(self._fixturedef.finish, request=self)
+            )
+        super(SubRequest, self)._schedule_finalizers(fixturedef, subrequest)
+

 scopes = "session package module class function".split()
 scopenum_function = scopes.index("function")
|
@ -1053,7 +1065,7 @@ def pytestconfig(request):
|
|||
Example::
|
||||
|
||||
def test_foo(pytestconfig):
|
||||
if pytestconfig.getoption("verbose"):
|
||||
if pytestconfig.getoption("verbose") > 0:
|
||||
...
|
||||
|
||||
"""
|
||||
|
|
|
@@ -60,7 +60,7 @@ def pytest_addoption(parser):
         dest="plugins",
         default=[],
         metavar="name",
-        help="early-load given plugin (multi-allowed). "
+        help="early-load given plugin module name or entry point (multi-allowed). "
         "To avoid loading of plugins, use the `no:` prefix, e.g. "
         "`no:doctest`.",
     )
@@ -376,6 +376,41 @@ def pytest_runtest_logreport(report):
     the respective phase of executing a test. """


+@hookspec(firstresult=True)
+def pytest_report_to_serializable(config, report):
+    """
+    .. warning::
+        This hook is experimental and subject to change between pytest releases, even
+        bug fixes.
+
+        The intent is for this to be used by plugins maintained by the core-devs, such
+        as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal
+        'resultlog' plugin.
+
+        In the future it might become part of the public hook API.
+
+    Serializes the given report object into a data structure suitable for sending
+    over the wire, e.g. converted to JSON.
+    """
+
+
+@hookspec(firstresult=True)
+def pytest_report_from_serializable(config, data):
+    """
+    .. warning::
+        This hook is experimental and subject to change between pytest releases, even
+        bug fixes.
+
+        The intent is for this to be used by plugins maintained by the core-devs, such
+        as ``pytest-xdist``, ``pytest-subtests``, and as a replacement for the internal
+        'resultlog' plugin.
+
+        In the future it might become part of the public hook API.
+
+    Restores a report object previously serialized with pytest_report_to_serializable().
+    """
+
+
 # -------------------------------------------------------------------------
 # Fixture related hooks
 # -------------------------------------------------------------------------
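Illustrative sketch (not part of the diff): how a distribution plugin such as pytest-xdist is expected to round-trip a report through the two hookspecs above. The helper names here are hypothetical:

    def send_report(config, report):
        # Serialize to plain builtins, e.g. for a JSON wire format.
        return config.hook.pytest_report_to_serializable(config=config, report=report)

    def receive_report(config, data):
        # Restore a TestReport/CollectReport on the receiving side.
        return config.hook.pytest_report_from_serializable(config=config, data=data)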
@@ -389,7 +389,7 @@ class LoggingPlugin(object):
         self._config = config

         # enable verbose output automatically if live logging is enabled
-        if self._log_cli_enabled() and not config.getoption("verbose"):
+        if self._log_cli_enabled() and config.getoption("verbose") < 1:
             config.option.verbose = 1

         self.print_logs = get_option_ini(config, "log_print")
@@ -577,8 +577,15 @@ class LoggingPlugin(object):
         if self.log_cli_handler:
             self.log_cli_handler.set_when("sessionfinish")
         if self.log_file_handler is not None:
-            with catching_logs(self.log_file_handler, level=self.log_file_level):
-                yield
+            try:
+                with catching_logs(
+                    self.log_file_handler, level=self.log_file_level
+                ):
+                    yield
+            finally:
+                # Close the FileHandler explicitly.
+                # (logging.shutdown might have lost the weakref?!)
+                self.log_file_handler.close()
         else:
             yield

@@ -225,7 +225,7 @@ def wrap_session(config, doit):
             config.notify_exception(excinfo, config.option)
             session.exitstatus = EXIT_INTERNALERROR
             if excinfo.errisinstance(SystemExit):
-                sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
+                sys.stderr.write("mainloop: caught unexpected SystemExit!\n")

     finally:
         excinfo = None  # Explicitly break reference cycle.
@@ -441,6 +441,15 @@ class Session(nodes.FSCollector):

         self.config.pluginmanager.register(self, name="session")

+    def __repr__(self):
+        return "<%s %s exitstatus=%r testsfailed=%d testscollected=%d>" % (
+            self.__class__.__name__,
+            self.name,
+            getattr(self, "exitstatus", "<UNSET>"),
+            self.testsfailed,
+            self.testscollected,
+        )
+
     def _node_location_to_relpath(self, node_path):
         # bestrelpath is a quite slow function
         return self._bestrelpathcache[node_path]
@@ -548,7 +557,7 @@ class Session(nodes.FSCollector):
         # Start with a Session root, and delve to argpath item (dir or file)
         # and stack all Packages found on the way.
         # No point in finding packages when collecting doctests
-        if not self.config.option.doctestmodules:
+        if not self.config.getoption("doctestmodules", False):
             pm = self.config.pluginmanager
             for parent in reversed(argpath.parts()):
                 if pm._confcutdir and pm._confcutdir.relto(parent):
@@ -44,7 +44,7 @@ def get_empty_parameterset_mark(config, argnames, func):
         f_name = func.__name__
         _, lineno = getfslineno(func)
         raise Collector.CollectError(
-            "Empty parameter set in '%s' at line %d" % (f_name, lineno)
+            "Empty parameter set in '%s' at line %d" % (f_name, lineno + 1)
         )
     else:
         raise LookupError(requested_mark)
@@ -262,10 +262,15 @@ class MonkeyPatch(object):

     def syspath_prepend(self, path):
         """ Prepend ``path`` to ``sys.path`` list of import locations. """
+        from pkg_resources import fixup_namespace_packages
+
         if self._savesyspath is None:
             self._savesyspath = sys.path[:]
         sys.path.insert(0, str(path))
+
+        # https://github.com/pypa/setuptools/blob/d8b901bc/docs/pkg_resources.txt#L162-L171
+        fixup_namespace_packages(str(path))

     def chdir(self, path):
         """ Change the current working directory to the specified path.
         Path can be a string or a py.path.local object.
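Illustrative sketch (not part of the diff): a test relying on the fixup, importing a module generated under a freshly prepended directory. The test and module names are invented:

    def test_import_generated_module(monkeypatch, tmp_path):
        (tmp_path / "generated_mod.py").write_text(u"VALUE = 42")
        monkeypatch.syspath_prepend(str(tmp_path))
        import generated_mod  # found via the prepended sys.path entry
        assert generated_mod.VALUE == 42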
@@ -325,7 +325,14 @@ class Collector(Node):
         if excinfo.errisinstance(self.CollectError):
             exc = excinfo.value
             return str(exc.args[0])
-        return self._repr_failure_py(excinfo, style="short")
+
+        # Respect explicit tbstyle option, but default to "short"
+        # (None._repr_failure_py defaults to "long" without "fulltrace" option).
+        tbstyle = self.config.getoption("tbstyle")
+        if tbstyle == "auto":
+            tbstyle = "short"
+
+        return self._repr_failure_py(excinfo, style=tbstyle)

     def _prunetraceback(self, excinfo):
         if hasattr(self, "fspath"):
@@ -80,7 +80,8 @@ def skip(msg="", **kwargs):
     Skip an executing test with the given message.

     This function should be called only during testing (setup, call or teardown) or
-    during collection by using the ``allow_module_level`` flag.
+    during collection by using the ``allow_module_level`` flag.  This function can
+    be called in doctests as well.

     :kwarg bool allow_module_level: allows this function to be called at
         module level, skipping the rest of the module. Default to False.
@@ -89,6 +90,9 @@ def skip(msg="", **kwargs):
         It is better to use the :ref:`pytest.mark.skipif ref` marker when possible to declare a test to be
         skipped under certain conditions like mismatching platforms or
         dependencies.
+        Similarly, use the ``# doctest: +SKIP`` directive (see `doctest.SKIP
+        <https://docs.python.org/3/library/doctest.html#doctest.SKIP>`_)
+        to skip a doctest statically.
     """
     __tracebackhide__ = True
     allow_module_level = kwargs.pop("allow_module_level", False)
@@ -4,7 +4,6 @@ from __future__ import division
 from __future__ import print_function

 import codecs
-import distutils.spawn
 import gc
 import os
 import platform
@@ -26,9 +25,11 @@ from _pytest.assertion.rewrite import AssertionRewritingHook
 from _pytest.capture import MultiCapture
 from _pytest.capture import SysCapture
 from _pytest.compat import safe_str
+from _pytest.compat import Sequence
 from _pytest.main import EXIT_INTERRUPTED
+from _pytest.main import EXIT_OK
 from _pytest.main import Session
 from _pytest.monkeypatch import MonkeyPatch
 from _pytest.pathlib import Path

 IGNORE_PAM = [  # filenames added when obtaining details about the current user
@@ -151,47 +152,6 @@ winpymap = {
 }


-def getexecutable(name, cache={}):
-    try:
-        return cache[name]
-    except KeyError:
-        executable = distutils.spawn.find_executable(name)
-        if executable:
-            import subprocess
-
-            popen = subprocess.Popen(
-                [str(executable), "--version"],
-                universal_newlines=True,
-                stderr=subprocess.PIPE,
-            )
-            out, err = popen.communicate()
-            if name == "jython":
-                if not err or "2.5" not in err:
-                    executable = None
-                if "2.5.2" in err:
-                    executable = None  # http://bugs.jython.org/issue1790
-            elif popen.returncode != 0:
-                # handle pyenv's 127
-                executable = None
-        cache[name] = executable
-        return executable
-
-
-@pytest.fixture(params=["python2.7", "python3.4", "pypy", "pypy3"])
-def anypython(request):
-    name = request.param
-    executable = getexecutable(name)
-    if executable is None:
-        if sys.platform == "win32":
-            executable = winpymap.get(name, None)
-            if executable:
-                executable = py.path.local(executable)
-                if executable.check():
-                    return executable
-        pytest.skip("no suitable %s found" % (name,))
-    return executable
-
-
 # used at least by pytest-xdist plugin

@@ -518,6 +478,7 @@ class Testdir(object):
         self.test_tmproot = tmpdir_factory.mktemp("tmp-" + name, numbered=True)
         os.environ["PYTEST_DEBUG_TEMPROOT"] = str(self.test_tmproot)
         os.environ.pop("TOX_ENV_DIR", None)  # Ensure that it is not used for caching.
+        os.environ.pop("PYTEST_ADDOPTS", None)  # Do not use outer options.
         self.plugins = []
         self._cwd_snapshot = CwdSnapshot()
         self._sys_path_snapshot = SysPathsSnapshot()
@@ -641,11 +602,16 @@ class Testdir(object):

        This is undone automatically when this object dies at the end of each
        test.

        """
+        from pkg_resources import fixup_namespace_packages
+
        if path is None:
            path = self.tmpdir
-        sys.path.insert(0, str(path))
+
+        dirname = str(path)
+        sys.path.insert(0, dirname)
+        fixup_namespace_packages(dirname)

        # a call to syspathinsert() usually means that the caller wants to
        # import some dynamically created files, thus with python3 we
        # invalidate its import caches
@@ -654,12 +620,10 @@ class Testdir(object):
     def _possibly_invalidate_import_caches(self):
         # invalidate caches if we can (py33 and above)
         try:
-            import importlib
+            from importlib import invalidate_caches
         except ImportError:
-            pass
-        else:
-            if hasattr(importlib, "invalidate_caches"):
-                importlib.invalidate_caches()
+            return
+        invalidate_caches()

     def mkdir(self, name):
         """Create a new (sub)directory."""
@@ -835,6 +799,12 @@ class Testdir(object):
         """
         finalizers = []
         try:
+            # Do not load user config.
+            monkeypatch = MonkeyPatch()
+            monkeypatch.setenv("HOME", str(self.tmpdir))
+            monkeypatch.setenv("USERPROFILE", str(self.tmpdir))
+            finalizers.append(monkeypatch.undo)
+
             # When running pytest inline any plugins active in the main test
             # process are already imported.  So this disables the warning which
             # will trigger to say they can no longer be rewritten, which is
@@ -1065,6 +1035,9 @@ class Testdir(object):
             env["PYTHONPATH"] = os.pathsep.join(
                 filter(None, [os.getcwd(), env.get("PYTHONPATH", "")])
             )
+            # Do not load user config.
+            env["HOME"] = str(self.tmpdir)
+            env["USERPROFILE"] = env["HOME"]
             kw["env"] = env

             popen = subprocess.Popen(
@@ -1375,6 +1348,7 @@ class LineMatcher(object):
        will be logged to stdout when a match occurs

        """
+        assert isinstance(lines2, Sequence)
        lines2 = self._getlines(lines2)
        lines1 = self.lines[:]
        nextline = None
@@ -43,6 +43,7 @@ from _pytest.mark import MARK_GEN
 from _pytest.mark.structures import get_unpacked_marks
 from _pytest.mark.structures import normalize_mark_list
 from _pytest.outcomes import fail
+from _pytest.outcomes import skip
 from _pytest.pathlib import parts
 from _pytest.warning_types import PytestWarning

@@ -101,6 +102,13 @@ def pytest_addoption(parser):
         default=["test"],
         help="prefixes or glob names for Python test function and method discovery",
     )
+    parser.addini(
+        "disable_test_id_escaping_and_forfeit_all_rights_to_community_support",
+        type="bool",
+        default=False,
+        help="disable string escape non-ascii characters, might cause unwanted "
+        "side effects(use at your own risk)",
+    )

     group.addoption(
         "--import-mode",
@@ -156,14 +164,18 @@ def pytest_configure(config):
 @hookimpl(trylast=True)
 def pytest_pyfunc_call(pyfuncitem):
     testfunction = pyfuncitem.obj
-    if pyfuncitem._isyieldedfunction():
-        testfunction(*pyfuncitem._args)
-    else:
-        funcargs = pyfuncitem.funcargs
-        testargs = {}
-        for arg in pyfuncitem._fixtureinfo.argnames:
-            testargs[arg] = funcargs[arg]
-        testfunction(**testargs)
+    iscoroutinefunction = getattr(inspect, "iscoroutinefunction", None)
+    if iscoroutinefunction is not None and iscoroutinefunction(testfunction):
+        msg = "Coroutine functions are not natively supported and have been skipped.\n"
+        msg += "You need to install a suitable plugin for your async framework, for example:\n"
+        msg += "  - pytest-asyncio\n"
+        msg += "  - pytest-trio\n"
+        msg += "  - pytest-tornasync"
+        warnings.warn(PytestWarning(msg.format(pyfuncitem.nodeid)))
+        skip(msg="coroutine function and no async plugin installed (see warnings)")
+    funcargs = pyfuncitem.funcargs
+    testargs = {arg: funcargs[arg] for arg in pyfuncitem._fixtureinfo.argnames}
+    testfunction(**testargs)
     return True

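Illustrative sketch (not part of the diff): with no async plugin installed, a coroutine test like this one is now skipped with a PytestWarning naming pytest-asyncio/pytest-trio/pytest-tornasync, instead of silently "passing" without ever being awaited:

    async def test_needs_async_plugin():  # collected, warned about, then skipped
        assert True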
@@ -1151,6 +1163,16 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):
     return "function"


+def _ascii_escaped_by_config(val, config):
+    if config is None:
+        escape_option = False
+    else:
+        escape_option = config.getini(
+            "disable_test_id_escaping_and_forfeit_all_rights_to_community_support"
+        )
+    return val if escape_option else ascii_escaped(val)
+
+
 def _idval(val, argname, idx, idfn, item, config):
     if idfn:
         try:
@@ -1172,7 +1194,7 @@ def _idval(val, argname, idx, idfn, item, config):
         return hook_id

     if isinstance(val, STRING_TYPES):
-        return ascii_escaped(val)
+        return _ascii_escaped_by_config(val, config)
     elif isinstance(val, (float, int, bool, NoneType)):
         return str(val)
     elif isinstance(val, REGEX_TYPE):
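Illustrative sketch (not part of the diff): opting out of ID escaping for parametrized values. A pytest.ini along these lines:

    [pytest]
    disable_test_id_escaping_and_forfeit_all_rights_to_community_support = True

and a parametrized test whose IDs then keep their non-ascii characters rather than the ascii-escaped forms:

    import pytest

    @pytest.mark.parametrize("value", [u"ação", u"josé"])
    def test_keeps_unicode_id(value):
        assert value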
@@ -1404,7 +1426,7 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):

         if fixtureinfo is None:
             fixtureinfo = self.session._fixturemanager.getfixtureinfo(
-                self, self.obj, self.cls, funcargs=not self._isyieldedfunction()
+                self, self.obj, self.cls, funcargs=True
             )
         self._fixtureinfo = fixtureinfo
         self.fixturenames = fixtureinfo.names_closure
@@ -1418,16 +1440,6 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):

     def _initrequest(self):
         self.funcargs = {}
-        if self._isyieldedfunction():
-            assert not hasattr(
-                self, "callspec"
-            ), "yielded functions (deprecated) cannot have funcargs"
-        else:
-            if hasattr(self, "callspec"):
-                callspec = self.callspec
-                assert not callspec.funcargs
-                if hasattr(callspec, "param"):
-                    self.param = callspec.param
         self._request = fixtures.FixtureRequest(self)

     @property
@@ -1447,9 +1459,6 @@ class Function(FunctionMixin, nodes.Item, fixtures.FuncargnamesCompatAttr):
         "(compatonly) for code expecting pytest-2.2 style request objects"
         return self

-    def _isyieldedfunction(self):
-        return getattr(self, "_args", None) is not None
-
     def runtest(self):
         """ execute the underlying test function. """
         self.ihook.pytest_pyfunc_call(pyfuncitem=self)
@@ -1,6 +1,19 @@
-import py
+from pprint import pprint
+
+import py
+import six

 from _pytest._code.code import ExceptionInfo
+from _pytest._code.code import ReprEntry
+from _pytest._code.code import ReprEntryNative
+from _pytest._code.code import ReprExceptionInfo
+from _pytest._code.code import ReprFileLocation
+from _pytest._code.code import ReprFuncArgs
+from _pytest._code.code import ReprLocals
+from _pytest._code.code import ReprTraceback
 from _pytest._code.code import TerminalRepr
 from _pytest.outcomes import skip
+from _pytest.pathlib import Path


 def getslaveinfoline(node):
@@ -20,6 +33,7 @@ def getslaveinfoline(node):

 class BaseReport(object):
     when = None
+    location = None

     def __init__(self, **kw):
         self.__dict__.update(kw)
@@ -97,12 +111,173 @@ class BaseReport(object):
     def fspath(self):
         return self.nodeid.split("::")[0]

+    @property
+    def count_towards_summary(self):
+        """
+        **Experimental**
+
+        Returns True if this report should be counted towards the totals shown at the end of the
+        test session: "1 passed, 1 failure, etc".
+
+        .. note::
+
+            This function is considered **experimental**, so beware that it is subject to changes
+            even in patch releases.
+        """
+        return True
+
+    @property
+    def head_line(self):
+        """
+        **Experimental**
+
+        Returns the head line shown with longrepr output for this report, more commonly during
+        traceback representation during failures::
+
+            ________ Test.foo ________
+
+
+        In the example above, the head_line is "Test.foo".
+
+        .. note::
+
+            This function is considered **experimental**, so beware that it is subject to changes
+            even in patch releases.
+        """
+        if self.location is not None:
+            fspath, lineno, domain = self.location
+            return domain
+
+    def _to_json(self):
+        """
+        This was originally the serialize_report() function from xdist (ca03269).
+
+        Returns the contents of this report as a dict of builtin entries, suitable for
+        serialization.
+
+        Experimental method.
+        """
+
+        def disassembled_report(rep):
+            reprtraceback = rep.longrepr.reprtraceback.__dict__.copy()
+            reprcrash = rep.longrepr.reprcrash.__dict__.copy()
+
+            new_entries = []
+            for entry in reprtraceback["reprentries"]:
+                entry_data = {
+                    "type": type(entry).__name__,
+                    "data": entry.__dict__.copy(),
+                }
+                for key, value in entry_data["data"].items():
+                    if hasattr(value, "__dict__"):
+                        entry_data["data"][key] = value.__dict__.copy()
+                new_entries.append(entry_data)
+
+            reprtraceback["reprentries"] = new_entries
+
+            return {
+                "reprcrash": reprcrash,
+                "reprtraceback": reprtraceback,
+                "sections": rep.longrepr.sections,
+            }
+
+        d = self.__dict__.copy()
+        if hasattr(self.longrepr, "toterminal"):
+            if hasattr(self.longrepr, "reprtraceback") and hasattr(
+                self.longrepr, "reprcrash"
+            ):
+                d["longrepr"] = disassembled_report(self)
+            else:
+                d["longrepr"] = six.text_type(self.longrepr)
+        else:
+            d["longrepr"] = self.longrepr
+        for name in d:
+            if isinstance(d[name], (py.path.local, Path)):
+                d[name] = str(d[name])
+            elif name == "result":
+                d[name] = None  # for now
+        return d
+
+    @classmethod
+    def _from_json(cls, reportdict):
+        """
+        This was originally the serialize_report() function from xdist (ca03269).
+
+        Factory method that returns either a TestReport or CollectReport, depending on the calling
+        class.  It's the callers responsibility to know which class to pass here.
+
+        Experimental method.
+        """
+        if reportdict["longrepr"]:
+            if (
+                "reprcrash" in reportdict["longrepr"]
+                and "reprtraceback" in reportdict["longrepr"]
+            ):
+
+                reprtraceback = reportdict["longrepr"]["reprtraceback"]
+                reprcrash = reportdict["longrepr"]["reprcrash"]
+
+                unserialized_entries = []
+                reprentry = None
+                for entry_data in reprtraceback["reprentries"]:
+                    data = entry_data["data"]
+                    entry_type = entry_data["type"]
+                    if entry_type == "ReprEntry":
+                        reprfuncargs = None
+                        reprfileloc = None
+                        reprlocals = None
+                        if data["reprfuncargs"]:
+                            reprfuncargs = ReprFuncArgs(**data["reprfuncargs"])
+                        if data["reprfileloc"]:
+                            reprfileloc = ReprFileLocation(**data["reprfileloc"])
+                        if data["reprlocals"]:
+                            reprlocals = ReprLocals(data["reprlocals"]["lines"])
+
+                        reprentry = ReprEntry(
+                            lines=data["lines"],
+                            reprfuncargs=reprfuncargs,
+                            reprlocals=reprlocals,
+                            filelocrepr=reprfileloc,
+                            style=data["style"],
+                        )
+                    elif entry_type == "ReprEntryNative":
+                        reprentry = ReprEntryNative(data["lines"])
+                    else:
+                        _report_unserialization_failure(entry_type, cls, reportdict)
+                    unserialized_entries.append(reprentry)
+                reprtraceback["reprentries"] = unserialized_entries
+
+                exception_info = ReprExceptionInfo(
+                    reprtraceback=ReprTraceback(**reprtraceback),
+                    reprcrash=ReprFileLocation(**reprcrash),
+                )
+
+                for section in reportdict["longrepr"]["sections"]:
+                    exception_info.addsection(*section)
+                reportdict["longrepr"] = exception_info
+
+        return cls(**reportdict)
+
+
+def _report_unserialization_failure(type_name, report_class, reportdict):
+    url = "https://github.com/pytest-dev/pytest/issues"
+    stream = py.io.TextIO()
+    pprint("-" * 100, stream=stream)
+    pprint("INTERNALERROR: Unknown entry type returned: %s" % type_name, stream=stream)
+    pprint("report_name: %s" % report_class, stream=stream)
+    pprint(reportdict, stream=stream)
+    pprint("Please report this bug at %s" % url, stream=stream)
+    pprint("-" * 100, stream=stream)
+    raise RuntimeError(stream.getvalue())
+
+
 class TestReport(BaseReport):
     """ Basic test report object (also used for setup and teardown calls if
     they fail).
     """

     __test__ = False

     def __init__(
         self,
         nodeid,
@@ -159,6 +334,49 @@ class TestReport(BaseReport):
             self.outcome,
         )

+    @classmethod
+    def from_item_and_call(cls, item, call):
+        """
+        Factory method to create and fill a TestReport with standard item and call info.
+        """
+        when = call.when
+        duration = call.stop - call.start
+        keywords = {x: 1 for x in item.keywords}
+        excinfo = call.excinfo
+        sections = []
+        if not call.excinfo:
+            outcome = "passed"
+            longrepr = None
+        else:
+            if not isinstance(excinfo, ExceptionInfo):
+                outcome = "failed"
+                longrepr = excinfo
+            elif excinfo.errisinstance(skip.Exception):
+                outcome = "skipped"
+                r = excinfo._getreprcrash()
+                longrepr = (str(r.path), r.lineno, r.message)
+            else:
+                outcome = "failed"
+                if call.when == "call":
+                    longrepr = item.repr_failure(excinfo)
+                else:  # exception in setup or teardown
+                    longrepr = item._repr_failure_py(
+                        excinfo, style=item.config.option.tbstyle
+                    )
+        for rwhen, key, content in item._report_sections:
+            sections.append(("Captured %s %s" % (key, rwhen), content))
+        return cls(
+            item.nodeid,
+            item.location,
+            keywords,
+            outcome,
+            longrepr,
+            when,
+            sections,
+            duration,
+            user_properties=item.user_properties,
+        )
+
+
 class CollectReport(BaseReport):
     when = "collect"
@@ -189,3 +407,21 @@ class CollectErrorRepr(TerminalRepr):

     def toterminal(self, out):
         out.line(self.longrepr, red=True)
+
+
+def pytest_report_to_serializable(report):
+    if isinstance(report, (TestReport, CollectReport)):
+        data = report._to_json()
+        data["_report_type"] = report.__class__.__name__
+        return data
+
+
+def pytest_report_from_serializable(data):
+    if "_report_type" in data:
+        if data["_report_type"] == "TestReport":
+            return TestReport._from_json(data)
+        elif data["_report_type"] == "CollectReport":
+            return CollectReport._from_json(data)
+        assert False, "Unknown report_type unserialize data: {}".format(
+            data["_report_type"]
+        )
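Illustrative sketch (not part of the diff): a TestReport surviving a JSON round trip through the private helpers above. Assumes pytest >= 4.4 is installed; the report values are made up:

    import json
    from _pytest.reports import TestReport

    report = TestReport(
        nodeid="test_x.py::test_ok",
        location=("test_x.py", 0, "test_ok"),
        keywords={},
        outcome="passed",
        longrepr=None,
        when="call",
        sections=[],
        duration=0.01,
    )
    data = json.loads(json.dumps(report._to_json()))
    assert TestReport._from_json(data).outcome == "passed"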
@@ -87,9 +87,9 @@ def runtestprotocol(item, log=True, nextitem=None):
     rep = call_and_report(item, "setup", log)
     reports = [rep]
     if rep.passed:
-        if item.config.option.setupshow:
+        if item.config.getoption("setupshow", False):
             show_test_item(item)
-        if not item.config.option.setuponly:
+        if not item.config.getoption("setuponly", False):
             reports.append(call_and_report(item, "call", log))
     reports.append(call_and_report(item, "teardown", log, nextitem=nextitem))
     # after all teardown hooks have been called
@@ -192,7 +192,7 @@ def call_runtest_hook(item, when, **kwds):
     hookname = "pytest_runtest_" + when
     ihook = getattr(item.ihook, hookname)
     reraise = (Exit,)
-    if not item.config.getvalue("usepdb"):
+    if not item.config.getoption("usepdb", False):
         reraise += (KeyboardInterrupt,)
     return CallInfo.from_call(
         lambda: ihook(item=item, **kwds), when=when, reraise=reraise
@ -246,43 +246,7 @@ class CallInfo(object):
|
|||
|
||||
|
||||
def pytest_runtest_makereport(item, call):
|
||||
when = call.when
|
||||
duration = call.stop - call.start
|
||||
keywords = {x: 1 for x in item.keywords}
|
||||
excinfo = call.excinfo
|
||||
sections = []
|
||||
if not call.excinfo:
|
||||
outcome = "passed"
|
||||
longrepr = None
|
||||
else:
|
||||
if not isinstance(excinfo, ExceptionInfo):
|
||||
outcome = "failed"
|
||||
longrepr = excinfo
|
||||
elif excinfo.errisinstance(skip.Exception):
|
||||
outcome = "skipped"
|
||||
r = excinfo._getreprcrash()
|
||||
longrepr = (str(r.path), r.lineno, r.message)
|
||||
else:
|
||||
outcome = "failed"
|
||||
if call.when == "call":
|
||||
longrepr = item.repr_failure(excinfo)
|
||||
else: # exception in setup or teardown
|
||||
longrepr = item._repr_failure_py(
|
||||
excinfo, style=item.config.option.tbstyle
|
||||
)
|
||||
for rwhen, key, content in item._report_sections:
|
||||
sections.append(("Captured %s %s" % (key, rwhen), content))
|
||||
return TestReport(
|
||||
item.nodeid,
|
||||
item.location,
|
||||
keywords,
|
||||
outcome,
|
||||
longrepr,
|
||||
when,
|
||||
sections,
|
||||
duration,
|
||||
user_properties=item.user_properties,
|
||||
)
|
||||
return TestReport.from_item_and_call(item, call)
|
||||
|
||||
|
||||
def pytest_make_collect_report(collector):
|
||||
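With the construction logic moved into ``TestReport.from_item_and_call``, plugins can still post-process the generated report through pytest's standard hookwrapper mechanism; a small illustrative example (hook name and decorator are pytest's public API, the section text is invented)::

    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        report = outcome.get_result()
        if report.when == "call" and report.failed:
            # attach an extra section to the report built by from_item_and_call
            report.sections.append(("notes", "extra diagnostics could go here"))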

@@ -207,20 +207,22 @@ def pytest_terminal_summary(terminalreporter):
def show_simple(terminalreporter, lines, stat):
    failed = terminalreporter.stats.get(stat)
    if failed:
        config = terminalreporter.config
        for rep in failed:
            verbose_word = _get_report_str(terminalreporter, rep)
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            verbose_word = _get_report_str(config, rep)
            pos = _get_pos(config, rep)
            lines.append("%s %s" % (verbose_word, pos))


def show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        config = terminalreporter.config
        for rep in xfailed:
            verbose_word = _get_report_str(terminalreporter, rep)
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            reason = rep.wasxfail
            verbose_word = _get_report_str(config, rep)
            pos = _get_pos(config, rep)
            lines.append("%s %s" % (verbose_word, pos))
            reason = rep.wasxfail
            if reason:
                lines.append(" " + str(reason))

@@ -228,9 +230,10 @@ def show_xfailed(terminalreporter, lines):
def show_xpassed(terminalreporter, lines):
    xpassed = terminalreporter.stats.get("xpassed")
    if xpassed:
        config = terminalreporter.config
        for rep in xpassed:
            verbose_word = _get_report_str(terminalreporter, rep)
            pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
            verbose_word = _get_report_str(config, rep)
            pos = _get_pos(config, rep)
            reason = rep.wasxfail
            lines.append("%s %s %s" % (verbose_word, pos, reason))

@@ -261,9 +264,9 @@ def show_skipped(terminalreporter, lines):
    tr = terminalreporter
    skipped = tr.stats.get("skipped", [])
    if skipped:
        verbose_word = _get_report_str(terminalreporter, report=skipped[0])
        fskips = folded_skips(skipped)
        if fskips:
            verbose_word = _get_report_str(terminalreporter.config, report=skipped[0])
            for num, fspath, lineno, reason in fskips:
                if reason.startswith("Skipped: "):
                    reason = reason[9:]

@@ -283,13 +286,18 @@ def shower(stat):
    return show_


def _get_report_str(terminalreporter, report):
    _category, _short, verbose = terminalreporter.config.hook.pytest_report_teststatus(
        report=report, config=terminalreporter.config
def _get_report_str(config, report):
    _category, _short, verbose = config.hook.pytest_report_teststatus(
        report=report, config=config
    )
    return verbose


def _get_pos(config, rep):
    nodeid = config.cwd_relative_nodeid(rep.nodeid)
    return nodeid


REPORTCHAR_ACTIONS = {
    "x": show_xfailed,
    "X": show_xpassed,
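The refactored helpers now only need a ``config`` rather than the full terminal reporter. For reference, a hedged sketch of the hook whose verbose word ``_get_report_str`` extracts (a standard ``conftest.py`` hook; the custom word is invented for illustration)::

    def pytest_report_teststatus(report, config):
        # Returns (category, short letter, verbose word); the third value
        # is what _get_report_str picks up for the summary lines.
        if report.when == "call" and report.passed:
            return "passed", ".", "OK"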

@@ -8,7 +8,7 @@ def pytest_addoption(parser):
        "--stepwise",
        action="store_true",
        dest="stepwise",
        help="exit on test fail and continue from last failing test next time",
        help="exit on test failure and continue from last failing test next time",
    )
    group.addoption(
        "--stepwise-skip",

@@ -37,7 +37,10 @@ class StepwisePlugin:
        self.session = session

    def pytest_collection_modifyitems(self, session, config, items):
        if not self.active or not self.lastfailed:
        if not self.active:
            return
        if not self.lastfailed:
            self.report_status = "no previously failed tests, not skipping."
            return

        already_passed = []

@@ -54,7 +57,12 @@ class StepwisePlugin:
        # If the previously failed test was not found among the test items,
        # do not skip any tests.
        if not found:
            self.report_status = "previously failed test not found, not skipping."
            already_passed = []
        else:
            self.report_status = "skipping {} already passed items.".format(
                len(already_passed)
            )

        for item in already_passed:
            items.remove(item)

@@ -94,6 +102,10 @@ class StepwisePlugin:
        if report.nodeid == self.lastfailed:
            self.lastfailed = None

    def pytest_report_collectionfinish(self):
        if self.active and self.config.getoption("verbose") >= 0:
            return "stepwise: %s" % self.report_status

    def pytest_sessionfinish(self, session):
        if self.active:
            self.config.cache.set("cache/stepwise", self.lastfailed)
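For context, the plugin drives the existing ``--stepwise`` workflow; an illustrative session (test layout invented)::

    pytest --stepwise          # stop at the first failing test, remember it
    pytest --stepwise          # rerun: tests that already passed are skipped
    pytest --stepwise-skip     # ignore one failing test and continue from the next

With this change the collection summary also explains itself, e.g. ``stepwise: no previously failed tests, not skipping.``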

@@ -26,6 +26,8 @@ from _pytest.main import EXIT_OK
from _pytest.main import EXIT_TESTSFAILED
from _pytest.main import EXIT_USAGEERROR

REPORT_COLLECTING_RESOLUTION = 0.5


class MoreQuietAction(argparse.Action):
    """

@@ -197,6 +199,7 @@ class WarningReport(object):
    message = attr.ib()
    nodeid = attr.ib(default=None)
    fslocation = attr.ib(default=None)
    count_towards_summary = True

    def get_location(self, config):
        """

@@ -245,10 +248,10 @@ class TerminalReporter(object):
    def _determine_show_progress_info(self):
        """Return True if we should display progress information based on the current config"""
        # do not show progress if we are not capturing output (#3038)
        if self.config.getoption("capture") == "no":
        if self.config.getoption("capture", "no") == "no":
            return False
        # do not show progress if we are showing fixture setup/teardown
        if self.config.getoption("setupshow"):
        if self.config.getoption("setupshow", False):
            return False
        return self.config.getini("console_output_style") in ("progress", "count")

@@ -383,6 +386,7 @@ class TerminalReporter(object):
        self.write_fspath_result(fsid, "")

    def pytest_runtest_logreport(self, report):
        self._tests_ran = True
        rep = report
        res = self.config.hook.pytest_report_teststatus(report=rep, config=self.config)
        category, letter, word = res

@@ -391,7 +395,6 @@ class TerminalReporter(object):
        else:
            markup = None
        self.stats.setdefault(category, []).append(rep)
        self._tests_ran = True
        if not letter and not word:
            # probably passed setup/teardown
            return

@@ -455,8 +458,6 @@ class TerminalReporter(object):
        self._tw.write(msg + "\n", cyan=True)

    def _get_progress_information_message(self):
        if self.config.getoption("capture") == "no":
            return ""
        collected = self._session.testscollected
        if self.config.getini("console_output_style") == "count":
            if collected:

@@ -513,7 +514,7 @@ class TerminalReporter(object):
        t = time.time()
        if (
            self._collect_report_last_write is not None
            and self._collect_report_last_write > t - 0.5
            and self._collect_report_last_write > t - REPORT_COLLECTING_RESOLUTION
        ):
            return
        self._collect_report_last_write = t

@@ -583,16 +584,21 @@ class TerminalReporter(object):
            self.write_line(line)

    def pytest_report_header(self, config):
        inifile = ""
        line = "rootdir: %s" % config.rootdir

        if config.inifile:
            inifile = " " + config.rootdir.bestrelpath(config.inifile)
        lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)]
            line += ", inifile: " + config.rootdir.bestrelpath(config.inifile)

        testpaths = config.getini("testpaths")
        if testpaths and config.args == testpaths:
            rel_paths = [config.rootdir.bestrelpath(x) for x in testpaths]
            line += ", testpaths: {}".format(", ".join(rel_paths))
        result = [line]

        plugininfo = config.pluginmanager.list_plugin_distinfo()
        if plugininfo:

            lines.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
        return lines
            result.append("plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
        return result

    def pytest_collection_finish(self, session):
        if self.config.getoption("collectonly"):

@@ -719,9 +725,8 @@ class TerminalReporter(object):
        return res + " "

    def _getfailureheadline(self, rep):
        if hasattr(rep, "location"):
            fspath, lineno, domain = rep.location
            return domain
        if rep.head_line:
            return rep.head_line
        else:
            return "test session"  # XXX?

@@ -869,18 +874,23 @@ class TerminalReporter(object):


def build_summary_stats_line(stats):
    keys = ("failed passed skipped deselected xfailed xpassed warnings error").split()
    unknown_key_seen = False
    for key in stats.keys():
        if key not in keys:
            if key:  # setup/teardown reports have an empty key, ignore them
                keys.append(key)
                unknown_key_seen = True
    known_types = (
        "failed passed skipped deselected xfailed xpassed warnings error".split()
    )
    unknown_type_seen = False
    for found_type in stats:
        if found_type not in known_types:
            if found_type:  # setup/teardown reports have an empty key, ignore them
                known_types.append(found_type)
                unknown_type_seen = True
    parts = []
    for key in keys:
        val = stats.get(key, None)
        if val:
            parts.append("%d %s" % (len(val), key))
    for key in known_types:
        reports = stats.get(key, None)
        if reports:
            count = sum(
                1 for rep in reports if getattr(rep, "count_towards_summary", True)
            )
            parts.append("%d %s" % (count, key))

    if parts:
        line = ", ".join(parts)

@@ -889,14 +899,14 @@ def build_summary_stats_line(stats):

    if "failed" in stats or "error" in stats:
        color = "red"
    elif "warnings" in stats or unknown_key_seen:
    elif "warnings" in stats or unknown_type_seen:
        color = "yellow"
    elif "passed" in stats:
        color = "green"
    else:
        color = "yellow"

    return (line, color)
    return line, color
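A small self-contained sketch of the new counting behavior (``FakeReport`` is an invented stand-in; only the ``count_towards_summary`` attribute matters here)::

    class FakeReport(object):
        # reports without the attribute default to counting, per getattr above
        count_towards_summary = True

    stats = {"passed": [FakeReport(), FakeReport()], "deselected": [FakeReport()]}
    line, color = build_summary_stats_line(stats)
    # line == "2 passed, 1 deselected"; color == "green" since nothing
    # failed, errored, or warned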


def _plugin_nameversions(plugininfo):

@@ -103,8 +103,9 @@ def catch_warnings_for_item(config, ihook, when, item):


def warning_record_to_str(warning_message):
    """Convert a warnings.WarningMessage to a string, taking in account a lot of unicode shenaningans in Python 2.
    """Convert a warnings.WarningMessage to a string.

    This takes lot of unicode shenaningans into account for Python 2.
    When Python 2 support is dropped this function can be greatly simplified.
    """
    warn_msg = warning_message.message

@@ -8,6 +8,7 @@ import sys
import textwrap
import types

import attr
import py
import six

@@ -108,6 +109,60 @@ class TestGeneralUsage(object):
        assert result.ret == 0
        result.stdout.fnmatch_lines(["*1 passed*"])

    @pytest.mark.parametrize("load_cov_early", [True, False])
    def test_early_load_setuptools_name(self, testdir, monkeypatch, load_cov_early):
        pkg_resources = pytest.importorskip("pkg_resources")

        testdir.makepyfile(mytestplugin1_module="")
        testdir.makepyfile(mytestplugin2_module="")
        testdir.makepyfile(mycov_module="")
        testdir.syspathinsert()

        loaded = []

        @attr.s
        class DummyEntryPoint(object):
            name = attr.ib()
            module = attr.ib()
            version = "1.0"

            @property
            def project_name(self):
                return self.name

            def load(self):
                __import__(self.module)
                loaded.append(self.name)
                return sys.modules[self.module]

            @property
            def dist(self):
                return self

            def _get_metadata(self, *args):
                return []

        entry_points = [
            DummyEntryPoint("myplugin1", "mytestplugin1_module"),
            DummyEntryPoint("myplugin2", "mytestplugin2_module"),
            DummyEntryPoint("mycov", "mycov_module"),
        ]

        def my_iter(group, name=None):
            assert group == "pytest11"
            for ep in entry_points:
                if name is not None and ep.name != name:
                    continue
                yield ep

        monkeypatch.setattr(pkg_resources, "iter_entry_points", my_iter)
        params = ("-p", "mycov") if load_cov_early else ()
        testdir.runpytest_inprocess(*params)
        if load_cov_early:
            assert loaded == ["mycov", "myplugin1", "myplugin2"]
        else:
            assert loaded == ["myplugin1", "myplugin2", "mycov"]

    def test_assertion_magic(self, testdir):
        p = testdir.makepyfile(
            """

@@ -622,6 +677,8 @@ class TestInvocationVariants(object):
    def test_cmdline_python_namespace_package(self, testdir, monkeypatch):
        """
        test --pyargs option with namespace packages (#1567)

        Ref: https://packaging.python.org/guides/packaging-namespace-packages/
        """
        monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)

@@ -978,7 +1035,7 @@ def test_pytest_plugins_as_module(testdir):
        }
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("* 1 passed in *")
    result.stdout.fnmatch_lines(["* 1 passed in *"])


def test_deferred_hook_checking(testdir):

@@ -1118,9 +1175,37 @@ def test_fixture_mock_integration(testdir):
    """Test that decorators applied to fixture are left working (#3774)"""
    p = testdir.copy_example("acceptance/fixture_mock_integration.py")
    result = testdir.runpytest(p)
    result.stdout.fnmatch_lines("*1 passed*")
    result.stdout.fnmatch_lines(["*1 passed*"])


def test_usage_error_code(testdir):
    result = testdir.runpytest("-unknown-option-")
    assert result.ret == EXIT_USAGEERROR


@pytest.mark.skipif(
    sys.version_info[:2] < (3, 5), reason="async def syntax python 3.5+ only"
)
@pytest.mark.filterwarnings("default")
def test_warn_on_async_function(testdir):
    testdir.makepyfile(
        test_async="""
        async def test_1():
            pass
        async def test_2():
            pass
    """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(
        [
            "test_async.py::test_1",
            "test_async.py::test_2",
            "*Coroutine functions are not natively supported*",
            "*2 skipped, 2 warnings in*",
        ]
    )
    # ensure our warning message appears only once
    assert (
        result.stdout.str().count("Coroutine functions are not natively supported") == 1
    )

@@ -172,6 +172,10 @@ class TestExceptionInfo(object):
        exci = _pytest._code.ExceptionInfo.from_current()
        assert exci.getrepr()

    def test_from_current_with_missing(self):
        with pytest.raises(AssertionError, match="no current exception"):
            _pytest._code.ExceptionInfo.from_current()


class TestTracebackEntry(object):
    def test_getsource(self):

@@ -147,7 +147,7 @@ def test_pytest_plugins_in_non_top_level_conftest_unsupported_pyargs(
    if use_pyargs:
        assert msg not in res.stdout.str()
    else:
        res.stdout.fnmatch_lines("*{msg}*".format(msg=msg))
        res.stdout.fnmatch_lines(["*{msg}*".format(msg=msg)])


def test_pytest_plugins_in_non_top_level_conftest_unsupported_no_top_level_conftest(

@@ -54,6 +54,7 @@ def test_root_logger_affected(testdir):
        """
        import logging
        logger = logging.getLogger()

        def test_foo():
            logger.info('info text ' + 'going to logger')
            logger.warning('warning text ' + 'going to logger')

@@ -66,15 +67,14 @@ def test_root_logger_affected(testdir):
    result = testdir.runpytest("--log-level=ERROR", "--log-file=pytest.log")
    assert result.ret == 1

    # the capture log calls in the stdout section only contain the
    # logger.error msg, because --log-level=ERROR
    # The capture log calls in the stdout section only contain the
    # logger.error msg, because of --log-level=ERROR.
    result.stdout.fnmatch_lines(["*error text going to logger*"])
    with pytest.raises(pytest.fail.Exception):
        result.stdout.fnmatch_lines(["*warning text going to logger*"])
    with pytest.raises(pytest.fail.Exception):
        result.stdout.fnmatch_lines(["*info text going to logger*"])
    stdout = result.stdout.str()
    assert "warning text going to logger" not in stdout
    assert "info text going to logger" not in stdout

    # the log file should contain the warning and the error log messages and
    # The log file should contain the warning and the error log messages and
    # not the info one, because the default level of the root logger is
    # WARNING.
    assert os.path.isfile(log_file)

@@ -635,7 +635,6 @@ def test_log_cli_auto_enable(testdir, request, cli_args):
    """
    testdir.makepyfile(
        """
        import pytest
        import logging

        def test_log_1():

@@ -653,6 +652,7 @@ def test_log_cli_auto_enable(testdir, request, cli_args):
    )

    result = testdir.runpytest(cli_args)
    stdout = result.stdout.str()
    if cli_args == "--log-cli-level=WARNING":
        result.stdout.fnmatch_lines(
            [

@@ -663,13 +663,13 @@ def test_log_cli_auto_enable(testdir, request, cli_args):
                "=* 1 passed in *=",
            ]
        )
        assert "INFO" not in result.stdout.str()
        assert "INFO" not in stdout
    else:
        result.stdout.fnmatch_lines(
            ["*test_log_cli_auto_enable*100%*", "=* 1 passed in *="]
        )
        assert "INFO" not in result.stdout.str()
        assert "WARNING" not in result.stdout.str()
        assert "INFO" not in stdout
        assert "WARNING" not in stdout


def test_log_file_cli(testdir):

@@ -747,7 +747,7 @@ def test_log_level_not_changed_by_default(testdir):
    """
    )
    result = testdir.runpytest("-s")
    result.stdout.fnmatch_lines("* 1 passed in *")
    result.stdout.fnmatch_lines(["* 1 passed in *"])


def test_log_file_ini(testdir):

@@ -34,8 +34,6 @@ class TestModule(object):
        )

    def test_import_prepend_append(self, testdir, monkeypatch):
        syspath = list(sys.path)
        monkeypatch.setattr(sys, "path", syspath)
        root1 = testdir.mkdir("root1")
        root2 = testdir.mkdir("root2")
        root1.ensure("x456.py")

@@ -560,7 +558,7 @@ class TestFunction(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *")
        result.stdout.fnmatch_lines(["* 2 passed, 1 skipped in *"])

    def test_parametrize_skip(self, testdir):
        testdir.makepyfile(

@@ -575,7 +573,7 @@ class TestFunction(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("* 2 passed, 1 skipped in *")
        result.stdout.fnmatch_lines(["* 2 passed, 1 skipped in *"])

    def test_parametrize_skipif_no_skip(self, testdir):
        testdir.makepyfile(

@@ -590,7 +588,7 @@ class TestFunction(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("* 1 failed, 2 passed in *")
        result.stdout.fnmatch_lines(["* 1 failed, 2 passed in *"])

    def test_parametrize_xfail(self, testdir):
        testdir.makepyfile(

@@ -605,7 +603,7 @@ class TestFunction(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("* 2 passed, 1 xfailed in *")
        result.stdout.fnmatch_lines(["* 2 passed, 1 xfailed in *"])

    def test_parametrize_passed(self, testdir):
        testdir.makepyfile(

@@ -620,7 +618,7 @@ class TestFunction(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("* 2 passed, 1 xpassed in *")
        result.stdout.fnmatch_lines(["* 2 passed, 1 xpassed in *"])

    def test_parametrize_xfail_passed(self, testdir):
        testdir.makepyfile(

@@ -635,7 +633,7 @@ class TestFunction(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("* 3 passed in *")
        result.stdout.fnmatch_lines(["* 3 passed in *"])

    def test_function_original_name(self, testdir):
        items = testdir.getitems(

@@ -833,7 +831,7 @@ class TestConftestCustomization(object):
        )
        # Use runpytest_subprocess, since we're futzing with sys.meta_path.
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines("*1 passed*")
        result.stdout.fnmatch_lines(["*1 passed*"])


def test_setup_only_available_in_subdir(testdir):

@@ -1298,14 +1296,14 @@ def test_keep_duplicates(testdir):
def test_package_collection_infinite_recursion(testdir):
    testdir.copy_example("collect/package_infinite_recursion")
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("*1 passed*")
    result.stdout.fnmatch_lines(["*1 passed*"])


def test_package_collection_init_given_as_argument(testdir):
    """Regression test for #3749"""
    p = testdir.copy_example("collect/package_init_given_as_arg")
    result = testdir.runpytest(p / "pkg" / "__init__.py")
    result.stdout.fnmatch_lines("*1 passed*")
    result.stdout.fnmatch_lines(["*1 passed*"])


def test_package_with_modules(testdir):

@@ -536,7 +536,7 @@ class TestRequestBasic(object):
        """
        )
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines("* 1 passed in *")
        result.stdout.fnmatch_lines(["* 1 passed in *"])

    def test_getfixturevalue_recursive(self, testdir):
        testdir.makeconftest(

@@ -562,6 +562,44 @@ class TestRequestBasic(object):
        reprec = testdir.inline_run()
        reprec.assertoutcome(passed=1)

    def test_getfixturevalue_teardown(self, testdir):
        """
        Issue #1895

        `test_inner` requests `inner` fixture, which in turn requests `resource`
        using `getfixturevalue`. `test_func` then requests `resource`.

        `resource` is teardown before `inner` because the fixture mechanism won't consider
        `inner` dependent on `resource` when it is used via `getfixturevalue`: `test_func`
        will then cause the `resource`'s finalizer to be called first because of this.
        """
        testdir.makepyfile(
            """
            import pytest

            @pytest.fixture(scope='session')
            def resource():
                r = ['value']
                yield r
                r.pop()

            @pytest.fixture(scope='session')
            def inner(request):
                resource = request.getfixturevalue('resource')
                assert resource == ['value']
                yield
                assert resource == ['value']

            def test_inner(inner):
                pass

            def test_func(resource):
                pass
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["* 2 passed in *"])

    @pytest.mark.parametrize("getfixmethod", ("getfixturevalue", "getfuncargvalue"))
    def test_getfixturevalue(self, testdir, getfixmethod):
        item = testdir.getitem(

@@ -749,7 +787,7 @@ class TestRequestBasic(object):
        """Regression test for #3057"""
        testdir.copy_example("fixtures/test_getfixturevalue_dynamic.py")
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*1 passed*")
        result.stdout.fnmatch_lines(["*1 passed*"])

    def test_funcargnames_compatattr(self, testdir):
        testdir.makepyfile(

@@ -1490,7 +1528,7 @@ class TestFixtureManagerParseFactories(object):
    def test_collect_custom_items(self, testdir):
        testdir.copy_example("fixtures/custom_item")
        result = testdir.runpytest("foo")
        result.stdout.fnmatch_lines("*passed*")
        result.stdout.fnmatch_lines(["*passed*"])


class TestAutouseDiscovery(object):

@@ -2572,7 +2610,7 @@ class TestFixtureMarker(object):
        )
        reprec = testdir.runpytest("-s")
        for test in ["test_browser"]:
            reprec.stdout.fnmatch_lines("*Finalized*")
            reprec.stdout.fnmatch_lines(["*Finalized*"])

    def test_class_scope_with_normal_tests(self, testdir):
        testpath = testdir.makepyfile(

@@ -3413,7 +3451,7 @@ class TestContextManagerFixtureFuncs(object):
        """
        )
        result = testdir.runpytest("-s")
        result.stdout.fnmatch_lines("*mew*")
        result.stdout.fnmatch_lines(["*mew*"])


class TestParameterizedSubRequest(object):

@@ -11,6 +11,7 @@ import six

import _pytest.assertion as plugin
import pytest
from _pytest import outcomes
from _pytest.assertion import truncate
from _pytest.assertion import util


@@ -1305,3 +1306,13 @@ def test_issue_1944(testdir):
        "AttributeError: 'Module' object has no attribute '_obj'"
        not in result.stdout.str()
    )


def test_exit_from_assertrepr_compare(monkeypatch):
    def raise_exit(obj):
        outcomes.exit("Quitting debugger")

    monkeypatch.setattr(util, "istext", raise_exit)

    with pytest.raises(outcomes.Exit, match="Quitting debugger"):
        callequal(1, 1)

@@ -127,7 +127,7 @@ class TestAssertionRewrite(object):
        result = testdir.runpytest_subprocess()
        assert "warnings" not in "".join(result.outlines)

    def test_name(self):
    def test_name(self, request):
        def f():
            assert False

@@ -147,17 +147,41 @@ class TestAssertionRewrite(object):
        def f():
            assert sys == 42

        assert getmsg(f, {"sys": sys}) == "assert sys == 42"
        verbose = request.config.getoption("verbose")
        msg = getmsg(f, {"sys": sys})
        if verbose > 0:
            assert msg == (
                "assert <module 'sys' (built-in)> == 42\n"
                " -<module 'sys' (built-in)>\n"
                " +42"
            )
        else:
            assert msg == "assert sys == 42"

        def f():
            assert cls == 42  # noqa
            assert cls == 42  # noqa: F821

        class X(object):
            pass

        assert getmsg(f, {"cls": X}) == "assert cls == 42"
        msg = getmsg(f, {"cls": X}).splitlines()
        if verbose > 0:
            if six.PY2:
                assert msg == [
                    "assert <class 'test_assertrewrite.X'> == 42",
                    " -<class 'test_assertrewrite.X'>",
                    " +42",
                ]
            else:
                assert msg == [
                    "assert <class 'test_...e.<locals>.X'> == 42",
                    " -<class 'test_assertrewrite.TestAssertionRewrite.test_name.<locals>.X'>",
                    " +42",
                ]
        else:
            assert msg == ["assert cls == 42"]

    def test_dont_rewrite_if_hasattr_fails(self):
    def test_dont_rewrite_if_hasattr_fails(self, request):
        class Y(object):
            """ A class whos getattr fails, but not with `AttributeError` """

@@ -173,10 +197,16 @@ class TestAssertionRewrite(object):
        def f():
            assert cls().foo == 2  # noqa

        message = getmsg(f, {"cls": Y})
        assert "assert 3 == 2" in message
        assert "+ where 3 = Y.foo" in message
        assert "+ where Y = cls()" in message
        # XXX: looks like the "where" should also be there in verbose mode?!
        message = getmsg(f, {"cls": Y}).splitlines()
        if request.config.getoption("verbose") > 0:
            assert message == ["assert 3 == 2", " -3", " +2"]
        else:
            assert message == [
                "assert 3 == 2",
                " + where 3 = Y.foo",
                " + where Y = cls()",
            ]

    def test_assert_already_has_message(self):
        def f():

@@ -552,15 +582,16 @@ class TestAssertionRewrite(object):

        getmsg(f, must_pass=True)

    def test_len(self):
    def test_len(self, request):
        def f():
            values = list(range(10))
            assert len(values) == 11

        assert getmsg(f).startswith(
            """assert 10 == 11
 + where 10 = len(["""
        )
        msg = getmsg(f)
        if request.config.getoption("verbose") > 0:
            assert msg == "assert 10 == 11\n -10\n +11"
        else:
            assert msg == "assert 10 == 11\n + where 10 = len([0, 1, 2, 3, 4, 5, ...])"

    def test_custom_reprcompare(self, monkeypatch):
        def my_reprcompare(op, left, right):

@@ -608,7 +639,7 @@ class TestAssertionRewrite(object):

        assert getmsg(f).startswith("assert '%test' == 'test'")

    def test_custom_repr(self):
    def test_custom_repr(self, request):
        def f():
            class Foo(object):
                a = 1

@@ -619,7 +650,11 @@ class TestAssertionRewrite(object):
            f = Foo()
            assert 0 == f.a

        assert r"where 1 = \n{ \n~ \n}.a" in util._format_lines([getmsg(f)])[0]
        lines = util._format_lines([getmsg(f)])
        if request.config.getoption("verbose") > 0:
            assert lines == ["assert 0 == 1\n -0\n +1"]
        else:
            assert lines == ["assert 0 == 1\n + where 1 = \\n{ \\n~ \\n}.a"]

    def test_custom_repr_non_ascii(self):
        def f():

@@ -796,7 +831,7 @@ def test_rewritten():
        )
        # needs to be a subprocess because pytester explicitly disables this warning
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines("*Module already imported*: _pytest")
        result.stdout.fnmatch_lines(["*Module already imported*: _pytest"])

    def test_rewrite_module_imported_from_conftest(self, testdir):
        testdir.makeconftest(

@@ -1123,7 +1158,7 @@ class TestAssertionRewriteHookDetails(object):
        )
        path.join("data.txt").write("Hey")
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*1 passed*")
        result.stdout.fnmatch_lines(["*1 passed*"])


def test_issue731(testdir):

@@ -1154,7 +1189,7 @@ class TestIssue925(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*E*assert (False == False) == False")
        result.stdout.fnmatch_lines(["*E*assert (False == False) == False"])

    def test_long_case(self, testdir):
        testdir.makepyfile(

@@ -1164,7 +1199,7 @@ class TestIssue925(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*E*assert (False == True) == True")
        result.stdout.fnmatch_lines(["*E*assert (False == True) == True"])

    def test_many_brackets(self, testdir):
        testdir.makepyfile(

@@ -1174,7 +1209,7 @@ class TestIssue925(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*E*assert True == ((False == True) == True)")
        result.stdout.fnmatch_lines(["*E*assert True == ((False == True) == True)"])


class TestIssue2121:

@@ -1194,7 +1229,7 @@ class TestIssue2121:
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*E*assert (1 + 1) == 3")
        result.stdout.fnmatch_lines(["*E*assert (1 + 1) == 3"])


@pytest.mark.parametrize("offset", [-1, +1])

@@ -1335,7 +1370,7 @@ class TestEarlyRewriteBailout(object):
        # Setup conditions for py's fspath trying to import pathlib on py34
        # always (previously triggered via xdist only).
        # Ref: https://github.com/pytest-dev/py/pull/207
        monkeypatch.setattr(sys, "path", [""] + sys.path)
        monkeypatch.syspath_prepend("")
        monkeypatch.delitem(sys.modules, "pathlib", raising=False)

        testdir.makepyfile(

@@ -1356,4 +1391,4 @@ class TestEarlyRewriteBailout(object):
            }
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("* 1 passed in *")
        result.stdout.fnmatch_lines(["* 1 passed in *"])

@@ -393,7 +393,7 @@ class TestLastFailed(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*1 failed in*")
        result.stdout.fnmatch_lines(["*1 failed in*"])

    def test_terminal_report_lastfailed(self, testdir):
        test_a = testdir.makepyfile(

@@ -574,7 +574,7 @@ class TestLastFailed(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*1 xfailed*")
        result.stdout.fnmatch_lines(["*1 xfailed*"])
        assert self.get_cached_last_failed(testdir) == []

    def test_xfail_strict_considered_failure(self, testdir):

@@ -587,7 +587,7 @@ class TestLastFailed(object):
        """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines("*1 failed*")
        result.stdout.fnmatch_lines(["*1 failed*"])
        assert self.get_cached_last_failed(testdir) == [
            "test_xfail_strict_considered_failure.py::test"
        ]

@@ -680,12 +680,12 @@ class TestLastFailed(object):
        """
        )
        result = testdir.runpytest(test_bar)
        result.stdout.fnmatch_lines("*2 passed*")
        result.stdout.fnmatch_lines(["*2 passed*"])
        # ensure cache does not forget that test_foo_4 failed once before
        assert self.get_cached_last_failed(testdir) == ["test_foo.py::test_foo_4"]

        result = testdir.runpytest("--last-failed")
        result.stdout.fnmatch_lines("*1 failed, 3 deselected*")
        result.stdout.fnmatch_lines(["*1 failed, 3 deselected*"])
        assert self.get_cached_last_failed(testdir) == ["test_foo.py::test_foo_4"]

        # 3. fix test_foo_4, run only test_foo.py

@@ -698,11 +698,11 @@ class TestLastFailed(object):
        """
        )
        result = testdir.runpytest(test_foo, "--last-failed")
        result.stdout.fnmatch_lines("*1 passed, 1 deselected*")
        result.stdout.fnmatch_lines(["*1 passed, 1 deselected*"])
        assert self.get_cached_last_failed(testdir) == []

        result = testdir.runpytest("--last-failed")
        result.stdout.fnmatch_lines("*4 passed*")
        result.stdout.fnmatch_lines(["*4 passed*"])
        assert self.get_cached_last_failed(testdir) == []

    def test_lastfailed_no_failures_behavior_all_passed(self, testdir):

@@ -884,7 +884,7 @@ class TestReadme(object):
    def test_readme_failed(self, testdir):
        testdir.makepyfile(
            """
            def test_always_passes():
            def test_always_fails():
                assert 0
        """
        )

@@ -576,7 +576,7 @@ class TestCaptureFixture(object):
        result.stdout.fnmatch_lines(
            [
                "*test_hello*",
                "*capsysbinary is only supported on python 3*",
                "*capsysbinary is only supported on Python 3*",
                "*1 error in*",
            ]
        )

@@ -683,7 +683,7 @@ class TestCaptureFixture(object):
            )
        )
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines("*1 passed*")
        result.stdout.fnmatch_lines(["*1 passed*"])
        assert "stdout contents begin" not in result.stdout.str()
        assert "stderr contents begin" not in result.stdout.str()


@@ -11,6 +11,7 @@ import py

import pytest
from _pytest.main import _in_venv
from _pytest.main import EXIT_INTERRUPTED
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import Session


@@ -350,10 +351,10 @@ class TestCustomConftests(object):
        p = testdir.makepyfile("def test_hello(): pass")
        result = testdir.runpytest(p)
        assert result.ret == 0
        result.stdout.fnmatch_lines("*1 passed*")
        result.stdout.fnmatch_lines(["*1 passed*"])
        result = testdir.runpytest()
        assert result.ret == EXIT_NOTESTSCOLLECTED
        result.stdout.fnmatch_lines("*collected 0 items*")
        result.stdout.fnmatch_lines(["*collected 0 items*"])

    def test_collectignore_exclude_on_option(self, testdir):
        testdir.makeconftest(

@@ -390,10 +391,10 @@ class TestCustomConftests(object):
        testdir.makepyfile(test_welt="def test_hallo(): pass")
        result = testdir.runpytest()
        assert result.ret == EXIT_NOTESTSCOLLECTED
        result.stdout.fnmatch_lines("*collected 0 items*")
        result.stdout.fnmatch_lines(["*collected 0 items*"])
        result = testdir.runpytest("--XX")
        assert result.ret == 0
        result.stdout.fnmatch_lines("*2 passed*")
        result.stdout.fnmatch_lines(["*2 passed*"])

    def test_pytest_fs_collect_hooks_are_seen(self, testdir):
        testdir.makeconftest(

@@ -1234,3 +1235,20 @@ def test_collect_sub_with_symlinks(use_pkg, testdir):
            "*2 passed in*",
        ]
    )


def test_collector_respects_tbstyle(testdir):
    p1 = testdir.makepyfile("assert 0")
    result = testdir.runpytest(p1, "--tb=native")
    assert result.ret == EXIT_INTERRUPTED
    result.stdout.fnmatch_lines(
        [
            "*_ ERROR collecting test_collector_respects_tbstyle.py _*",
            "Traceback (most recent call last):",
            '  File "*/test_collector_respects_tbstyle.py", line 1, in <module>',
            "    assert 0",
            "AssertionError: assert 0",
            "*! Interrupted: 1 errors during collection !*",
            "*= 1 error in *",
        ]
    )

@@ -18,10 +18,10 @@ from _pytest.outcomes import OutcomeException

def test_is_generator():
    def zap():
        yield
        yield  # pragma: no cover

    def foo():
        pass
        pass  # pragma: no cover

    assert is_generator(zap)
    assert not is_generator(foo)

@@ -37,15 +37,20 @@ def test_real_func_loop_limit():

        def __getattr__(self, attr):
            if not self.left:
                raise RuntimeError("its over")
                raise RuntimeError("it's over")  # pragma: no cover
            self.left -= 1
            return self

    evil = Evil()

    with pytest.raises(ValueError):
        res = get_real_func(evil)
        print(res)
    with pytest.raises(
        ValueError,
        match=(
            "could not find real function of <Evil left=800>\n"
            "stopped at <Evil left=800>"
        ),
    ):
        get_real_func(evil)


def test_get_real_func():

@@ -54,14 +59,14 @@ def test_get_real_func():
    def decorator(f):
        @wraps(f)
        def inner():
            pass
            pass  # pragma: no cover

        if six.PY2:
            inner.__wrapped__ = f
        return inner

    def func():
        pass
        pass  # pragma: no cover

    wrapped_func = decorator(decorator(func))
    assert get_real_func(wrapped_func) is func

@@ -5,6 +5,8 @@ from __future__ import print_function
import sys
import textwrap

import attr

import _pytest._code
import pytest
from _pytest.config import _iter_rewritable_modules

@@ -13,6 +15,8 @@ from _pytest.config.findpaths import determine_setup
from _pytest.config.findpaths import get_common_ancestor
from _pytest.config.findpaths import getcfg
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import EXIT_OK
from _pytest.main import EXIT_TESTSFAILED
from _pytest.main import EXIT_USAGEERROR


@@ -42,6 +46,22 @@ class TestParseIni(object):
        """correctly handle zero length arguments (a la pytest '')"""
        getcfg([""])

    def test_setupcfg_uses_toolpytest_with_pytest(self, testdir):
        p1 = testdir.makepyfile("def test(): pass")
        testdir.makefile(
            ".cfg",
            setup="""
                [tool:pytest]
                testpaths=%s
                [pytest]
                testpaths=ignored
        """
            % p1.basename,
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["*, inifile: setup.cfg, *", "* 1 passed in *"])
        assert result.ret == 0

    def test_append_parse_args(self, testdir, tmpdir, monkeypatch):
        monkeypatch.setenv("PYTEST_ADDOPTS", '--color no -rs --tb="short"')
        tmpdir.join("pytest.ini").write(

@@ -622,7 +642,28 @@ def test_disable_plugin_autoload(testdir, monkeypatch, parse_args, should_load):
    pkg_resources = pytest.importorskip("pkg_resources")

    def my_iter(group, name=None):
        raise AssertionError("Should not be called")
        assert group == "pytest11"
        assert name == "mytestplugin"
        return iter([DummyEntryPoint()])

    @attr.s
    class DummyEntryPoint(object):
        name = "mytestplugin"
        version = "1.0"

        @property
        def project_name(self):
            return self.name

        def load(self):
            return sys.modules[self.name]

        @property
        def dist(self):
            return self

        def _get_metadata(self, *args):
            return []

    class PseudoPlugin(object):
        x = 42

@@ -674,9 +715,9 @@ def test_invalid_options_show_extra_information(testdir):
        ["-v", "dir2", "dir1"],
    ],
)
def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args):
def test_consider_args_after_options_for_rootdir(testdir, args):
    """
    Consider all arguments in the command-line for rootdir and inifile
    Consider all arguments in the command-line for rootdir
    discovery, even if they happen to occur after an option. #949
    """
    # replace "dir1" and "dir2" from "args" into their real directory

@@ -690,7 +731,7 @@ def test_consider_args_after_options_for_rootdir(testdir, args):
        args[i] = d2
    with root.as_cwd():
        result = testdir.runpytest(*args)
    result.stdout.fnmatch_lines(["*rootdir: *myroot, inifile:"])
    result.stdout.fnmatch_lines(["*rootdir: *myroot"])


@pytest.mark.skipif("sys.platform == 'win32'")

@@ -778,7 +819,7 @@ def test_collect_pytest_prefix_bug_integration(testdir):
    """Integration test for issue #3775"""
    p = testdir.copy_example("config/collect_pytest_prefix")
    result = testdir.runpytest(p)
    result.stdout.fnmatch_lines("* 1 passed *")
    result.stdout.fnmatch_lines(["* 1 passed *"])


def test_collect_pytest_prefix_bug(pytestconfig):

@@ -1145,3 +1186,53 @@ def test_help_and_version_after_argument_error(testdir):
        ["*pytest*{}*imported from*".format(pytest.__version__)]
    )
    assert result.ret == EXIT_USAGEERROR


def test_config_does_not_load_blocked_plugin_from_args(testdir):
    """This tests that pytest's config setup handles "-p no:X"."""
    p = testdir.makepyfile("def test(capfd): pass")
    result = testdir.runpytest(str(p), "-pno:capture")
    result.stdout.fnmatch_lines(["E fixture 'capfd' not found"])
    assert result.ret == EXIT_TESTSFAILED

    result = testdir.runpytest(str(p), "-pno:capture", "-s")
    result.stderr.fnmatch_lines(["*: error: unrecognized arguments: -s"])
    assert result.ret == EXIT_USAGEERROR


@pytest.mark.parametrize(
    "plugin",
    [
        x
        for x in _pytest.config.default_plugins
        if x
        not in [
            "fixtures",
            "helpconfig",  # Provides -p.
            "main",
            "mark",
            "python",
            "runner",
            "terminal",  # works in OK case (no output), but not with failures.
        ]
    ],
)
def test_config_blocked_default_plugins(testdir, plugin):
    if plugin == "debugging":
        # https://github.com/pytest-dev/pytest-xdist/pull/422
        try:
            import xdist  # noqa: F401
        except ImportError:
            pass
        else:
            pytest.skip("does not work with xdist currently")

    p = testdir.makepyfile("def test(): pass")
    result = testdir.runpytest(str(p), "-pno:%s" % plugin)
    assert result.ret == EXIT_OK
    result.stdout.fnmatch_lines(["* 1 passed in *"])

    p = testdir.makepyfile("def test(): assert 0")
    result = testdir.runpytest(str(p), "-pno:%s" % plugin)
    assert result.ret == EXIT_TESTSFAILED
    result.stdout.fnmatch_lines(["* 1 failed in *"])
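These tests exercise ``-p no:NAME`` blocking. For reference, the same mechanism is available on any pytest command line (the plugin names here are real built-ins)::

    pytest -p no:capture          # run with the capture plugin disabled
    pytest -p no:cacheprovider    # run without the cache plugin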

@@ -188,6 +188,18 @@ class TestDoctests(object):
            ]
        )

    def test_doctest_skip(self, testdir):
        testdir.maketxtfile(
            """
            >>> 1
            1
            >>> import pytest
            >>> pytest.skip("")
        """
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines(["*1 skipped*"])

    def test_docstring_partial_context_around_error(self, testdir):
        """Test that we show some context before the actual line of a failing
        doctest.

@@ -956,7 +968,7 @@ class TestDoctestAutoUseFixtures(object):
        """
        )
        result = testdir.runpytest("--doctest-modules")
        result.stdout.fnmatch_lines("*2 passed*")
        result.stdout.fnmatch_lines(["*2 passed*"])

    @pytest.mark.parametrize("scope", SCOPES)
    @pytest.mark.parametrize("enable_doctest", [True, False])

@@ -818,14 +818,11 @@ def test_invalid_xml_escape():

def test_logxml_path_expansion(tmpdir, monkeypatch):
    home_tilde = py.path.local(os.path.expanduser("~")).join("test.xml")

    xml_tilde = LogXML("~%stest.xml" % tmpdir.sep, None)
    assert xml_tilde.logfile == home_tilde

    # this is here for when $HOME is not set correct
    monkeypatch.setenv("HOME", str(tmpdir))
    home_var = os.path.normpath(os.path.expandvars("$HOME/test.xml"))

    xml_var = LogXML("$HOME%stest.xml" % tmpdir.sep, None)
    assert xml_var.logfile == home_var


@@ -8,6 +8,7 @@ import sys
import six

import pytest
from _pytest.main import EXIT_INTERRUPTED
from _pytest.mark import EMPTY_PARAMETERSET_OPTION
from _pytest.mark import MarkGenerator as Mark
from _pytest.nodes import Collector

@@ -859,20 +860,34 @@ def test_parameterset_for_fail_at_collect(testdir):

    config = testdir.parseconfig()
    from _pytest.mark import pytest_configure, get_empty_parameterset_mark
    from _pytest.compat import getfslineno

    pytest_configure(config)

    test_func = all
    func_name = test_func.__name__
    _, func_lineno = getfslineno(test_func)
    expected_errmsg = r"Empty parameter set in '%s' at line %d" % (
        func_name,
        func_lineno,
    )
    with pytest.raises(
        Collector.CollectError,
        match=r"Empty parameter set in 'pytest_configure' at line \d\d+",
    ):
        get_empty_parameterset_mark(config, ["a"], pytest_configure)

    with pytest.raises(Collector.CollectError, match=expected_errmsg):
        get_empty_parameterset_mark(config, ["a"], test_func)
    p1 = testdir.makepyfile(
        """
        import pytest

        @pytest.mark.parametrize("empty", [])
        def test():
            pass
        """
    )
    result = testdir.runpytest(str(p1))
    result.stdout.fnmatch_lines(
        [
            "collected 0 items / 1 errors",
            "* ERROR collecting test_parameterset_for_fail_at_collect.py *",
            "Empty parameter set in 'test' at line 3",
            "*= 1 error in *",
        ]
    )
    assert result.ret == EXIT_INTERRUPTED


def test_parameterset_for_parametrize_bad_markname(testdir):

@@ -30,8 +30,9 @@ def test_fileimport(modfile):
        stderr=subprocess.PIPE,
    )
    (out, err) = p.communicate()
    if p.returncode != 0:
        pytest.fail(
            "importing %s failed (exitcode %d): out=%r, err=%r"
            % (modfile, p.returncode, out, err)
        )
    assert p.returncode == 0, "importing %s failed (exitcode %d): out=%r, err=%r" % (
        modfile,
        p.returncode,
        out,
        err,
    )

@@ -437,3 +437,28 @@ def test_context():
        m.setattr(functools, "partial", 3)
        assert not inspect.isclass(functools.partial)
    assert inspect.isclass(functools.partial)


def test_syspath_prepend_with_namespace_packages(testdir, monkeypatch):
    for dirname in "hello", "world":
        d = testdir.mkdir(dirname)
        ns = d.mkdir("ns_pkg")
        ns.join("__init__.py").write(
            "__import__('pkg_resources').declare_namespace(__name__)"
        )
        lib = ns.mkdir(dirname)
        lib.join("__init__.py").write("def check(): return %r" % dirname)

    monkeypatch.syspath_prepend("hello")
    import ns_pkg.hello

    assert ns_pkg.hello.check() == "hello"

    with pytest.raises(ImportError):
        import ns_pkg.world

    # Prepending should call fixup_namespace_packages.
    monkeypatch.syspath_prepend("world")
    import ns_pkg.world

    assert ns_pkg.world.check() == "world"

@@ -380,4 +380,4 @@ def test_skip_test_with_unicode(testdir):
        """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("* 1 skipped *")
    result.stdout.fnmatch_lines(["* 1 skipped *"])

@@ -2,12 +2,14 @@ from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

import argparse
import os
import platform
import sys

import _pytest._code
import pytest
from _pytest.debugging import _validate_usepdb_cls

try:
    breakpoint

@@ -468,9 +470,6 @@ class TestPDB(object):
            '''
    """
        )
        # Prevent ~/.pdbrc etc to output anything.
        monkeypatch.setenv("HOME", str(testdir))

        child = testdir.spawn_pytest("--doctest-modules --pdb %s" % p1)
        child.expect("Pdb")

@@ -520,7 +519,10 @@ class TestPDB(object):
        assert "1 failed" in rest
        self.flush(child)

    def test_pdb_interaction_continue_recursive(self, testdir):
    def test_pdb_with_injected_do_debug(self, testdir):
        """Simulates pdbpp, which injects Pdb into do_debug, and uses
        self.__class__ in do_continue.
        """
        p1 = testdir.makepyfile(
            mytest="""
            import pdb

@@ -528,8 +530,6 @@ class TestPDB(object):

            count_continue = 0

            # Simulates pdbpp, which injects Pdb into do_debug, and uses
            # self.__class__ in do_continue.
            class CustomPdb(pdb.Pdb, object):
                def do_debug(self, arg):
                    import sys

@@ -577,7 +577,16 @@ class TestPDB(object):
        child.sendline("c")
        child.expect("LEAVING RECURSIVE DEBUGGER")
        assert b"PDB continue" not in child.before
        assert b"print_from_foo" in child.before
        # No extra newline.
        assert child.before.endswith(b"c\r\nprint_from_foo\r\n")

        # set_debug should not raise outcomes.Exit, if used recrursively.
        child.sendline("debug 42")
        child.sendline("q")
        child.expect("LEAVING RECURSIVE DEBUGGER")
        assert b"ENTERING RECURSIVE DEBUGGER" in child.before
        assert b"Quitting debugger" not in child.before

        child.sendline("c")
        child.expect(r"PDB continue \(IO-capturing resumed\)")
        rest = child.read().decode("utf8")

@@ -604,6 +613,98 @@ class TestPDB(object):
        child.expect("1 passed")
        self.flush(child)

    @pytest.mark.parametrize("capture_arg", ("", "-s", "-p no:capture"))
    def test_pdb_continue_with_recursive_debug(self, capture_arg, testdir):
        """Full coverage for do_debug without capturing.

        This is very similar to test_pdb_interaction_continue_recursive in general,
        but mocks out ``pdb.set_trace`` for providing more coverage.
        """
        p1 = testdir.makepyfile(
            """
            try:
                input = raw_input
            except NameError:
                pass

            def set_trace():
                __import__('pdb').set_trace()

            def test_1(monkeypatch):
                import _pytest.debugging

                class pytestPDBTest(_pytest.debugging.pytestPDB):
                    @classmethod
                    def set_trace(cls, *args, **kwargs):
                        # Init _PdbWrapper to handle capturing.
                        _pdb = cls._init_pdb(*args, **kwargs)

                        # Mock out pdb.Pdb.do_continue.
                        import pdb
                        pdb.Pdb.do_continue = lambda self, arg: None

                        print("=== SET_TRACE ===")
                        assert input() == "debug set_trace()"

                        # Simulate _PdbWrapper.do_debug
                        cls._recursive_debug += 1
                        print("ENTERING RECURSIVE DEBUGGER")
                        print("=== SET_TRACE_2 ===")

                        assert input() == "c"
                        _pdb.do_continue("")
                        print("=== SET_TRACE_3 ===")

                        # Simulate _PdbWrapper.do_debug
                        print("LEAVING RECURSIVE DEBUGGER")
                        cls._recursive_debug -= 1

                        print("=== SET_TRACE_4 ===")
                        assert input() == "c"
                        _pdb.do_continue("")

                    def do_continue(self, arg):
                        print("=== do_continue")
                        # _PdbWrapper.do_continue("")

                monkeypatch.setattr(_pytest.debugging, "pytestPDB", pytestPDBTest)

                import pdb
                monkeypatch.setattr(pdb, "set_trace", pytestPDBTest.set_trace)

                set_trace()
        """
        )
        child = testdir.spawn_pytest("%s %s" % (p1, capture_arg))
        child.expect("=== SET_TRACE ===")
        before = child.before.decode("utf8")
        if not capture_arg:
            assert ">>> PDB set_trace (IO-capturing turned off) >>>" in before
        else:
            assert ">>> PDB set_trace >>>" in before
        child.sendline("debug set_trace()")
        child.expect("=== SET_TRACE_2 ===")
        before = child.before.decode("utf8")
        assert "\r\nENTERING RECURSIVE DEBUGGER\r\n" in before
        child.sendline("c")
        child.expect("=== SET_TRACE_3 ===")

        # No continue message with recursive debugging.
        before = child.before.decode("utf8")
        assert ">>> PDB continue " not in before

        child.sendline("c")
        child.expect("=== SET_TRACE_4 ===")
        before = child.before.decode("utf8")
        assert "\r\nLEAVING RECURSIVE DEBUGGER\r\n" in before
        child.sendline("c")
        rest = child.read().decode("utf8")
        if not capture_arg:
            assert "> PDB continue (IO-capturing resumed) >" in rest
        else:
            assert "> PDB continue >" in rest
        assert "1 passed in" in rest

    def test_pdb_used_outside_test(self, testdir):
        p1 = testdir.makepyfile(
            """

@@ -693,6 +794,23 @@ class TestPDB(object):
        result.stdout.fnmatch_lines(["*NameError*xxx*", "*1 error*"])
        assert custom_pdb_calls == ["init", "reset", "interaction"]

    def test_pdb_custom_cls_invalid(self, testdir):
        result = testdir.runpytest_inprocess("--pdbcls=invalid")
        result.stderr.fnmatch_lines(
            [
                "*: error: argument --pdbcls: 'invalid' is not in the format 'modname:classname'"
            ]
        )

    def test_pdb_validate_usepdb_cls(self, testdir):
        assert _validate_usepdb_cls("os.path:dirname.__name__") == "dirname"

        with pytest.raises(
            argparse.ArgumentTypeError,
            match=r"^could not get pdb class for 'pdb:DoesNotExist': .*'DoesNotExist'",
        ):
            _validate_usepdb_cls("pdb:DoesNotExist")

    def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls):
        p1 = testdir.makepyfile("""xxx """)
        result = testdir.runpytest_inprocess("--pdbcls=_pytest:_CustomPdb", p1)

@@ -954,3 +1072,52 @@ def test_quit_with_swallowed_SystemExit(testdir):
    rest = child.read().decode("utf8")
    assert "no tests ran" in rest
    TestPDB.flush(child)


@pytest.mark.parametrize("fixture", ("capfd", "capsys"))
def test_pdb_suspends_fixture_capturing(testdir, fixture):
    """Using "-s" with pytest should suspend/resume fixture capturing."""
    p1 = testdir.makepyfile(
        """
        def test_inner({fixture}):
            import sys

            print("out_inner_before")
            sys.stderr.write("err_inner_before\\n")

            __import__("pdb").set_trace()

            print("out_inner_after")
            sys.stderr.write("err_inner_after\\n")

            out, err = {fixture}.readouterr()
            assert out =="out_inner_before\\nout_inner_after\\n"
            assert err =="err_inner_before\\nerr_inner_after\\n"
        """.format(
            fixture=fixture
        )
    )

    child = testdir.spawn_pytest(str(p1) + " -s")

    child.expect("Pdb")
    before = child.before.decode("utf8")
    assert (
        "> PDB set_trace (IO-capturing turned off for fixture %s) >" % (fixture)
        in before
    )

    # Test that capturing is really suspended.
    child.sendline("p 40 + 2")
    child.expect("Pdb")
    assert "\r\n42\r\n" in child.before.decode("utf8")

    child.sendline("c")
    rest = child.read().decode("utf8")
    assert "out_inner" not in rest
    assert "err_inner" not in rest

    TestPDB.flush(child)
    assert child.exitstatus == 0
    assert "= 1 passed in " in rest
    assert "> PDB continue (IO-capturing resumed for fixture %s) >" % (fixture) in rest
|
|
|
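For background on the fixtures parametrized above: ``capsys`` captures ``sys.stdout``/``sys.stderr`` and ``capfd`` captures at the file-descriptor level; both expose ``readouterr()``. A self-contained usage sketch:

    def test_output(capsys):
        print("hello")
        out, err = capsys.readouterr()  # snapshot and reset the captured streams
        assert out == "hello\n"
        assert err == ""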
@ -4,7 +4,6 @@ from __future__ import division
from __future__ import print_function

import os
import re
import sys
import types

@ -164,10 +163,10 @@ def test_importplugin_error_message(testdir, pytestpm):
    with pytest.raises(ImportError) as excinfo:
        pytestpm.import_plugin("qwe")

    expected_message = '.*Error importing plugin "qwe": Not possible to import: .'
    expected_traceback = ".*in test_traceback"
    assert re.match(expected_message, str(excinfo.value))
    assert re.match(expected_traceback, str(excinfo.traceback[-1]))
    assert str(excinfo.value).endswith(
        'Error importing plugin "qwe": Not possible to import: ☺'
    )
    assert "in test_traceback" in str(excinfo.traceback[-1])


class TestPytestPluginManager(object):
@ -312,6 +311,9 @@ class TestPytestPluginManagerBootstrapming(object):
        assert '"hello123"' in excinfo.value.args[0]
        pytestpm.consider_preparse(["-pno:hello123"])

        # Handles -p without following arg (when used without argparse).
        pytestpm.consider_preparse(["-p"])

    def test_plugin_prevent_register(self, pytestpm):
        pytestpm.consider_preparse(["xyz", "-p", "no:abc"])
        l1 = pytestpm.get_plugins()

@ -345,3 +347,10 @@ class TestPytestPluginManagerBootstrapming(object):
        l2 = pytestpm.get_plugins()
        assert 42 not in l2
        assert 43 not in l2

    def test_blocked_plugin_can_be_used(self, pytestpm):
        pytestpm.consider_preparse(["xyz", "-p", "no:abc", "-p", "abc"])

        assert pytestpm.has_plugin("abc")
        assert not pytestpm.is_blocked("abc")
        assert not pytestpm.is_blocked("pytest_abc")
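In practice, the preparsing covered here is what makes plugin blocking work from the command line; ``cacheprovider`` below is just an example of a built-in plugin:

    # block a plugin for one run:
    #   pytest -p no:cacheprovider
    # a later "-p NAME" wins over an earlier "-p no:NAME",
    # which is what test_blocked_plugin_can_be_used exercises:
    #   pytest -p no:cacheprovider -p cacheprovider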
@ -17,6 +17,7 @@ from _pytest.main import EXIT_OK
from _pytest.main import EXIT_TESTSFAILED
from _pytest.pytester import CwdSnapshot
from _pytest.pytester import HookRecorder
from _pytest.pytester import LineMatcher
from _pytest.pytester import SysModulesSnapshot
from _pytest.pytester import SysPathsSnapshot

@ -453,3 +454,18 @@ def test_testdir_run_timeout_expires(testdir):
    )
    with pytest.raises(testdir.TimeoutExpired):
        testdir.runpytest_subprocess(testfile, timeout=1)


def test_linematcher_with_nonlist():
    """Test LineMatcher with regard to passing in a set (accidentally)."""
    lm = LineMatcher([])

    with pytest.raises(AssertionError):
        lm.fnmatch_lines(set())
    with pytest.raises(AssertionError):
        lm.fnmatch_lines({})
    lm.fnmatch_lines([])
    lm.fnmatch_lines(())

    assert lm._getlines({}) == {}
    assert lm._getlines(set()) == set()
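For context, ``LineMatcher`` is the helper behind ``result.stdout.fnmatch_lines(...)``, and the test above pins down that it rejects unordered containers. A minimal usage sketch (the sample lines are made up):

    from _pytest.pytester import LineMatcher

    lm = LineMatcher(["collected 2 items", "2 passed in 0.12 seconds"])
    lm.fnmatch_lines(["*2 passed*"])  # fnmatch-style patterns; raises AssertionError on mismatch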
@ -0,0 +1,313 @@
import pytest
from _pytest.pathlib import Path
from _pytest.reports import CollectReport
from _pytest.reports import TestReport


class TestReportSerialization(object):
    def test_xdist_longrepr_to_str_issue_241(self, testdir):
        """
        Regarding issue pytest-xdist#241

        This test came originally from test_remote.py in xdist (ca03269).
        """
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 6
        test_a_call = reports[1]
        assert test_a_call.when == "call"
        assert test_a_call.outcome == "failed"
        assert test_a_call._to_json()["longrepr"]["reprtraceback"]["style"] == "long"
        test_b_call = reports[4]
        assert test_b_call.when == "call"
        assert test_b_call.outcome == "passed"
        assert test_b_call._to_json()["longrepr"] is None

    def test_xdist_report_longrepr_reprcrash_130(self, testdir):
        """Regarding issue pytest-xdist#130

        This test came originally from test_remote.py in xdist (ca03269).
        """
        reprec = testdir.inline_runsource(
            """
            def test_fail():
                assert False, 'Expected Message'
        """
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        added_section = ("Failure Metadata", str("metadata metadata"), "*")
        rep.longrepr.sections.append(added_section)
        d = rep._to_json()
        a = TestReport._from_json(d)
        # Check assembled == rep
        assert a.__dict__.keys() == rep.__dict__.keys()
        for key in rep.__dict__.keys():
            if key != "longrepr":
                assert getattr(a, key) == getattr(rep, key)
        assert rep.longrepr.reprcrash.lineno == a.longrepr.reprcrash.lineno
        assert rep.longrepr.reprcrash.message == a.longrepr.reprcrash.message
        assert rep.longrepr.reprcrash.path == a.longrepr.reprcrash.path
        assert rep.longrepr.reprtraceback.entrysep == a.longrepr.reprtraceback.entrysep
        assert (
            rep.longrepr.reprtraceback.extraline == a.longrepr.reprtraceback.extraline
        )
        assert rep.longrepr.reprtraceback.style == a.longrepr.reprtraceback.style
        assert rep.longrepr.sections == a.longrepr.sections
        # Missing section attribute PR171
        assert added_section in a.longrepr.sections

    def test_reprentries_serialization_170(self, testdir):
        """Regarding issue pytest-xdist#170

        This test came originally from test_remote.py in xdist (ca03269).
        """
        from _pytest._code.code import ReprEntry

        reprec = testdir.inline_runsource(
            """
            def test_repr_entry():
                x = 0
                assert x
        """,
            "--showlocals",
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        d = rep._to_json()
        a = TestReport._from_json(d)

        rep_entries = rep.longrepr.reprtraceback.reprentries
        a_entries = a.longrepr.reprtraceback.reprentries
        for i in range(len(a_entries)):
            assert isinstance(rep_entries[i], ReprEntry)
            assert rep_entries[i].lines == a_entries[i].lines
            assert rep_entries[i].reprfileloc.lineno == a_entries[i].reprfileloc.lineno
            assert (
                rep_entries[i].reprfileloc.message == a_entries[i].reprfileloc.message
            )
            assert rep_entries[i].reprfileloc.path == a_entries[i].reprfileloc.path
            assert rep_entries[i].reprfuncargs.args == a_entries[i].reprfuncargs.args
            assert rep_entries[i].reprlocals.lines == a_entries[i].reprlocals.lines
            assert rep_entries[i].style == a_entries[i].style

    def test_reprentries_serialization_196(self, testdir):
        """Regarding issue pytest-xdist#196

        This test came originally from test_remote.py in xdist (ca03269).
        """
        from _pytest._code.code import ReprEntryNative

        reprec = testdir.inline_runsource(
            """
            def test_repr_entry_native():
                x = 0
                assert x
        """,
            "--tb=native",
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        rep = reports[1]
        d = rep._to_json()
        a = TestReport._from_json(d)

        rep_entries = rep.longrepr.reprtraceback.reprentries
        a_entries = a.longrepr.reprtraceback.reprentries
        for i in range(len(a_entries)):
            assert isinstance(rep_entries[i], ReprEntryNative)
            assert rep_entries[i].lines == a_entries[i].lines

    def test_itemreport_outcomes(self, testdir):
        """
        This test came originally from test_remote.py in xdist (ca03269).
        """
        reprec = testdir.inline_runsource(
            """
            import py
            def test_pass(): pass
            def test_fail(): 0/0
            @py.test.mark.skipif("True")
            def test_skip(): pass
            def test_skip_imperative():
                py.test.skip("hello")
            @py.test.mark.xfail("True")
            def test_xfail(): 0/0
            def test_xfail_imperative():
                py.test.xfail("hello")
        """
        )
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 17  # with setup/teardown "passed" reports
        for rep in reports:
            d = rep._to_json()
            newrep = TestReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if newrep.skipped and not hasattr(newrep, "wasxfail"):
                assert len(newrep.longrepr) == 3
            assert newrep.outcome == rep.outcome
            assert newrep.when == rep.when
            assert newrep.keywords == rep.keywords
            if rep.failed:
                assert newrep.longreprtext == rep.longreprtext

    def test_collectreport_passed(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        reprec = testdir.inline_runsource("def test_func(): pass")
        reports = reprec.getreports("pytest_collectreport")
        for rep in reports:
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped

    def test_collectreport_fail(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_extended_report_deserialization(self, testdir):
        """This test came originally from test_remote.py in xdist (ca03269)."""
        reprec = testdir.inline_runsource("qwe abc")
        reports = reprec.getreports("pytest_collectreport")
        assert reports
        for rep in reports:
            rep.extra = True
            d = rep._to_json()
            newrep = CollectReport._from_json(d)
            assert newrep.extra
            assert newrep.passed == rep.passed
            assert newrep.failed == rep.failed
            assert newrep.skipped == rep.skipped
            if rep.failed:
                assert newrep.longrepr == str(rep.longrepr)

    def test_paths_support(self, testdir):
        """Report attributes which are py.path or pathlib objects should become strings."""
        testdir.makepyfile(
            """
            def test_a():
                assert False
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        test_a_call = reports[1]
        test_a_call.path1 = testdir.tmpdir
        test_a_call.path2 = Path(testdir.tmpdir)
        data = test_a_call._to_json()
        assert data["path1"] == str(testdir.tmpdir)
        assert data["path2"] == str(testdir.tmpdir)

    def test_unserialization_failure(self, testdir):
        """Check handling of failure during unserialization of report types."""
        testdir.makepyfile(
            """
            def test_a():
                assert False
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3
        test_a_call = reports[1]
        data = test_a_call._to_json()
        entry = data["longrepr"]["reprtraceback"]["reprentries"][0]
        assert entry["type"] == "ReprEntry"

        entry["type"] = "Unknown"
        with pytest.raises(
            RuntimeError, match="INTERNALERROR: Unknown entry type returned: Unknown"
        ):
            TestReport._from_json(data)


class TestHooks:
    """Test that the hooks are working correctly for plugins"""

    def test_test_report(self, testdir, pytestconfig):
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 6
        for rep in reports:
            data = pytestconfig.hook.pytest_report_to_serializable(
                config=pytestconfig, report=rep
            )
            assert data["_report_type"] == "TestReport"
            new_rep = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
            assert new_rep.nodeid == rep.nodeid
            assert new_rep.when == rep.when
            assert new_rep.outcome == rep.outcome

    def test_collect_report(self, testdir, pytestconfig):
        testdir.makepyfile(
            """
            def test_a(): assert False
            def test_b(): pass
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports("pytest_collectreport")
        assert len(reports) == 2
        for rep in reports:
            data = pytestconfig.hook.pytest_report_to_serializable(
                config=pytestconfig, report=rep
            )
            assert data["_report_type"] == "CollectReport"
            new_rep = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
            assert new_rep.nodeid == rep.nodeid
            assert new_rep.when == "collect"
            assert new_rep.outcome == rep.outcome

    @pytest.mark.parametrize(
        "hook_name", ["pytest_runtest_logreport", "pytest_collectreport"]
    )
    def test_invalid_report_types(self, testdir, pytestconfig, hook_name):
        testdir.makepyfile(
            """
            def test_a(): pass
        """
        )
        reprec = testdir.inline_run()
        reports = reprec.getreports(hook_name)
        assert reports
        rep = reports[0]
        data = pytestconfig.hook.pytest_report_to_serializable(
            config=pytestconfig, report=rep
        )
        data["_report_type"] = "Unknown"
        with pytest.raises(AssertionError):
            _ = pytestconfig.hook.pytest_report_from_serializable(
                config=pytestconfig, data=data
            )
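The hook pair exercised by ``TestHooks`` (experimental in this release) is what lets a plugin such as pytest-xdist move reports between processes. A hedged sketch of the round-trip, assuming ``config`` is a live ``Config`` and ``rep`` a report object:

    data = config.hook.pytest_report_to_serializable(config=config, report=rep)
    # ``data`` is a JSON-serializable dict tagged with "_report_type"
    new_rep = config.hook.pytest_report_from_serializable(config=config, data=data)
    assert new_rep.nodeid == rep.nodeid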
@ -640,7 +640,7 @@ def test_pytest_fail_notrace_non_ascii(testdir, str_prefix):

def test_pytest_no_tests_collected_exit_status(testdir):
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("*collected 0 items*")
    result.stdout.fnmatch_lines(["*collected 0 items*"])
    assert result.ret == main.EXIT_NOTESTSCOLLECTED

    testdir.makepyfile(

@ -650,13 +650,13 @@ def test_pytest_no_tests_collected_exit_status(testdir):
    """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("*collected 1 item*")
    result.stdout.fnmatch_lines("*1 passed*")
    result.stdout.fnmatch_lines(["*collected 1 item*"])
    result.stdout.fnmatch_lines(["*1 passed*"])
    assert result.ret == main.EXIT_OK

    result = testdir.runpytest("-k nonmatch")
    result.stdout.fnmatch_lines("*collected 1 item*")
    result.stdout.fnmatch_lines("*1 deselected*")
    result.stdout.fnmatch_lines(["*collected 1 item*"])
    result.stdout.fnmatch_lines(["*1 deselected*"])
    assert result.ret == main.EXIT_NOTESTSCOLLECTED
@ -68,9 +68,7 @@ class SessionTests(object):
        passed, skipped, failed = reprec.listoutcomes()
        assert len(failed) == 1
        out = failed[0].longrepr.reprcrash.message
        if not out.find("DID NOT RAISE") != -1:
            print(out)
            pytest.fail("incorrect raises() output")
        assert "DID NOT RAISE" in out

    def test_syntax_error_module(self, testdir):
        reprec = testdir.inline_runsource("this is really not python")

@ -127,14 +125,14 @@ class SessionTests(object):
        )
        reprec = testdir.inline_run(p)
        passed, skipped, failed = reprec.listoutcomes()
        assert len(failed) == 1
        assert (len(passed), len(skipped), len(failed)) == (1, 0, 1)
        out = failed[0].longrepr.reprcrash.message
        assert (
            out.find(
                """[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]"""
            )
            != -1
        )  # '
        )

    def test_skip_file_by_conftest(self, testdir):
        testdir.makepyfile(

@ -149,7 +147,7 @@ class SessionTests(object):
        )
        try:
            reprec = testdir.inline_run(testdir.tmpdir)
        except pytest.skip.Exception:
        except pytest.skip.Exception:  # pragma: no cover
            pytest.fail("wrong skipped caught")
        reports = reprec.getreports("pytest_collectreport")
        assert len(reports) == 1

@ -333,7 +331,7 @@ def test_rootdir_option_arg(testdir, monkeypatch, path):
    result = testdir.runpytest("--rootdir={}".format(path))
    result.stdout.fnmatch_lines(
        [
            "*rootdir: {}/root, inifile:*".format(testdir.tmpdir),
            "*rootdir: {}/root".format(testdir.tmpdir),
            "root/test_rootdir_option_arg.py *",
            "*1 passed*",
        ]
@ -331,7 +331,7 @@ class TestXFail(object):
        result = testdir.runpytest(p, "-rx")
        result.stdout.fnmatch_lines(["*XFAIL*test_this*", "*reason:*hello*"])
        result = testdir.runpytest(p, "--runxfail")
        result.stdout.fnmatch_lines("*1 pass*")
        result.stdout.fnmatch_lines(["*1 pass*"])

    def test_xfail_imperative_in_setup_function(self, testdir):
        p = testdir.makepyfile(

@ -477,7 +477,7 @@ class TestXFail(object):
            % strict
        )
        result = testdir.runpytest(p, "-rxX")
        result.stdout.fnmatch_lines("*1 passed*")
        result.stdout.fnmatch_lines(["*1 passed*"])
        assert result.ret == 0

    @pytest.mark.parametrize("strict", [True, False])

@ -493,7 +493,7 @@ class TestXFail(object):
            % strict
        )
        result = testdir.runpytest(p, "-rxX")
        result.stdout.fnmatch_lines("*1 passed*")
        result.stdout.fnmatch_lines(["*1 passed*"])
        assert result.ret == 0

    @pytest.mark.parametrize("strict_val", ["true", "false"])

@ -515,7 +515,7 @@ class TestXFail(object):
        )
        result = testdir.runpytest(p, "-rxX")
        strict = strict_val == "true"
        result.stdout.fnmatch_lines("*1 failed*" if strict else "*1 xpassed*")
        result.stdout.fnmatch_lines(["*1 failed*" if strict else "*1 xpassed*"])
        assert result.ret == (1 if strict else 0)


@ -1130,7 +1130,9 @@ def test_module_level_skip_error(testdir):
    """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("*Using pytest.skip outside of a test is not allowed*")
    result.stdout.fnmatch_lines(
        ["*Using pytest.skip outside of a test is not allowed*"]
    )


def test_module_level_skip_with_allow_module_level(testdir):

@ -1147,7 +1149,7 @@ def test_module_level_skip_with_allow_module_level(testdir):
    """
    )
    result = testdir.runpytest("-rxs")
    result.stdout.fnmatch_lines("*SKIP*skip_module_level")
    result.stdout.fnmatch_lines(["*SKIP*skip_module_level"])


def test_invalid_skip_keyword_parameter(testdir):

@ -1164,7 +1166,7 @@ def test_invalid_skip_keyword_parameter(testdir):
    """
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("*TypeError:*['unknown']*")
    result.stdout.fnmatch_lines(["*TypeError:*['unknown']*"])


def test_mark_xfail_item(testdir):
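As a reminder of the semantics pinned down by the ``strict`` parametrizations above: a strict xfail turns an unexpected pass into a failure, while a non-strict one only reports XPASS. A self-contained example:

    import pytest

    @pytest.mark.xfail(strict=True, reason="known bug")
    def test_known_bug():
        assert 1 == 2  # XFAIL while failing; with strict=True a pass becomes FAIL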
@ -15,6 +15,7 @@ import py

import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.reports import BaseReport
from _pytest.terminal import _plugin_nameversions
from _pytest.terminal import build_summary_stats_line
from _pytest.terminal import getreportopt

@ -24,15 +25,14 @@ DistInfo = collections.namedtuple("DistInfo", ["project_name", "version"])


class Option(object):
    def __init__(self, verbose=False, fulltrace=False):
        self.verbose = verbose
    def __init__(self, verbosity=0, fulltrace=False):
        self.verbosity = verbosity
        self.fulltrace = fulltrace

    @property
    def args(self):
        values = []
        if self.verbose:
            values.append("-v")
        values.append("--verbosity=%d" % self.verbosity)
        if self.fulltrace:
            values.append("--fulltrace")
        return values

@ -40,9 +40,9 @@ class Option(object):

@pytest.fixture(
    params=[
        Option(verbose=False),
        Option(verbose=True),
        Option(verbose=-1),
        Option(verbosity=0),
        Option(verbosity=1),
        Option(verbosity=-1),
        Option(fulltrace=True),
    ],
    ids=["default", "verbose", "quiet", "fulltrace"],
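The reworked ``Option`` helper models verbosity as an integer, which matches how the command line behaves (``-v`` increments the same counter that ``--verbosity`` sets, and ``-q`` decrements it):

    # roughly equivalent invocations:
    #   pytest -v             ->  config.option.verbose == 1
    #   pytest --verbosity=1  ->  config.option.verbose == 1
    #   pytest -q             ->  config.option.verbose == -1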
@ -86,7 +86,7 @@ class TestTerminal(object):
        """
        )
        result = testdir.runpytest(*option.args)
        if option.verbose:
        if option.verbosity > 0:
            result.stdout.fnmatch_lines(
                [
                    "*test_pass_skip_fail.py::test_ok PASS*",

@ -94,8 +94,10 @@ class TestTerminal(object):
                    "*test_pass_skip_fail.py::test_func FAIL*",
                ]
            )
        else:
        elif option.verbosity == 0:
            result.stdout.fnmatch_lines(["*test_pass_skip_fail.py .sF*"])
        else:
            result.stdout.fnmatch_lines([".sF*"])
        result.stdout.fnmatch_lines(
            [" def test_func():", "> assert 0", "E assert 0"]
        )

@ -141,6 +143,31 @@ class TestTerminal(object):
        child.sendeof()
        child.kill(15)

    def test_report_collect_after_half_a_second(self, testdir):
        """Test for "collecting" being updated after 0.5s"""

        testdir.makepyfile(
            **{
                "test1.py": """
                import _pytest.terminal

                _pytest.terminal.REPORT_COLLECTING_RESOLUTION = 0

                def test_1():
                    pass
                """,
                "test2.py": "def test_2(): pass",
            }
        )

        child = testdir.spawn_pytest("-v test1.py test2.py")
        child.expect(r"collecting \.\.\.")
        child.expect(r"collecting 1 item")
        child.expect(r"collecting 2 items")
        child.expect(r"collected 2 items")
        rest = child.read().decode("utf8")
        assert "2 passed in" in rest

    def test_itemreport_subclasses_show_subclassed_file(self, testdir):
        testdir.makepyfile(
            test_p1="""

@ -567,6 +594,35 @@ class TestTerminalFunctional(object):
        if request.config.pluginmanager.list_plugin_distinfo():
            result.stdout.fnmatch_lines(["plugins: *"])

    def test_header(self, testdir, request):
        testdir.tmpdir.join("tests").ensure_dir()
        testdir.tmpdir.join("gui").ensure_dir()

        # no ini file
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["rootdir: *test_header0"])

        # with inifile
        testdir.makeini("""[pytest]""")
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(["rootdir: *test_header0, inifile: tox.ini"])

        # with testpaths option, and not passing anything in the command-line
        testdir.makeini(
            """
            [pytest]
            testpaths = tests gui
            """
        )
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(
            ["rootdir: *test_header0, inifile: tox.ini, testpaths: tests, gui"]
        )

        # with testpaths option, passing directory in command-line: do not show testpaths then
        result = testdir.runpytest("tests")
        result.stdout.fnmatch_lines(["rootdir: *test_header0, inifile: tox.ini"])

    def test_showlocals(self, testdir):
        p1 = testdir.makepyfile(
            """

@ -605,7 +661,6 @@ class TestTerminalFunctional(object):
        )

    def test_verbose_reporting(self, verbose_testfile, testdir, pytestconfig):

        result = testdir.runpytest(
            verbose_testfile, "-v", "-Walways::pytest.PytestWarning"
        )
@ -1199,13 +1254,18 @@ def test_summary_stats(exp_line, exp_color, stats_arg):
    assert color == exp_color


def test_no_trailing_whitespace_after_inifile_word(testdir):
    result = testdir.runpytest("")
    assert "inifile:\n" in result.stdout.str()

    testdir.makeini("[pytest]")
    result = testdir.runpytest("")
    assert "inifile: tox.ini\n" in result.stdout.str()


def test_skip_counting_towards_summary():
    class DummyReport(BaseReport):
        count_towards_summary = True

    r1 = DummyReport()
    r2 = DummyReport()
    res = build_summary_stats_line({"failed": (r1, r2)})
    assert res == ("2 failed", "red")

    r1.count_towards_summary = False
    res = build_summary_stats_line({"failed": (r1, r2)})
    assert res == ("1 failed", "red")


class TestClassicOutputStyle(object):
@ -16,7 +16,7 @@ from _pytest.warnings import SHOW_PYTEST_WARNINGS_ARG
def test_tmpdir_fixture(testdir):
    p = testdir.copy_example("tmpdir/tmpdir_fixture.py")
    results = testdir.runpytest(p)
    results.stdout.fnmatch_lines("*1 passed*")
    results.stdout.fnmatch_lines(["*1 passed*"])


def test_ensuretemp(recwarn):
@ -794,7 +794,7 @@ def test_unittest_setup_interaction(testdir, fix_type, stmt):
        )
    )
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("*3 passed*")
    result.stdout.fnmatch_lines(["*3 passed*"])


def test_non_unittest_no_setupclass_support(testdir):

@ -1040,4 +1040,4 @@ def test_setup_inheritance_skipping(testdir, test_name, expected_outcome):
    """Issue #4700"""
    testdir.copy_example("unittest/{}".format(test_name))
    result = testdir.runpytest()
    result.stdout.fnmatch_lines("* {} in *".format(expected_outcome))
    result.stdout.fnmatch_lines(["* {} in *".format(expected_outcome)])
2
tox.ini
2
tox.ini
@ -73,7 +73,6 @@ commands = pre-commit run --all-files --show-diff-on-failure

[testenv:docs]
basepython = python3
skipsdist = True
usedevelop = True
changedir = doc/en
deps = -r{toxinidir}/doc/en/requirements.txt

@ -127,7 +126,6 @@ commands =
[testenv:release]
description = do a release, required posarg of the version number
basepython = python3.6
skipsdist = True
usedevelop = True
passenv = *
deps =
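Per the ``description`` above, this env expects the version as a positional argument; the exact invocation depends on the env's ``commands``, but it would be along the lines of:

    #   tox -e release -- 4.4.0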