Merge branch 'release-3.1'

commit d343f9497c
@@ -18,6 +18,9 @@ include/
*~
.hypothesis/

# autogenerated
_pytest/_version.py
# setuptools
.eggs/

doc/*/_build
AUTHORS (9 changed lines)
@@ -14,6 +14,7 @@ Andrzej Ostrowski
Andy Freeland
Anthon van der Neut
Antony Lee
Anthony Sottile
Armin Rigo
Aron Curzon
Aviv Palivoda

@@ -47,6 +48,7 @@ David Vierra
Denis Kirisov
Diego Russo
Dmitry Dygalo
Dmitry Pribysh
Duncan Betts
Edison Gustavo Muenz
Edoardo Batini

@@ -78,6 +80,7 @@ Javier Romero
Jeff Widman
John Towler
Jon Sonesen
Jonas Obrist
Jordan Guymon
Joshua Bronson
Jurko Gospodnetić

@@ -93,6 +96,8 @@ Lukas Bednar
Luke Murphy
Maciek Fijalkowski
Maho
Mandeep Bhutani
Manuel Krebber
Marc Schlaich
Marcin Bachry
Mark Abramowitz

@@ -102,6 +107,7 @@ Martin K. Scherer
Martin Prusse
Mathieu Clabaut
Matt Bachmann
Matt Duck
Matt Williams
Matthias Hafner
mbyt

@@ -109,6 +115,7 @@ Michael Aquilina
Michael Birtwell
Michael Droettboom
Michael Seifert
Michal Wajszczuk
Mike Lundy
Ned Batchelder
Neven Mundar

@@ -127,6 +134,7 @@ Ralf Schmitt
Ran Benita
Raphael Pierzina
Raquel Alegre
Ravi Chandra
Roberto Polli
Romain Dorgueil
Roman Bolshakov

@@ -150,6 +158,7 @@ Trevor Bekolay
Tyler Goodlet
Vasily Kuznetsov
Victor Uriarte
Vlad Dragos
Vidar T. Fauske
Vitaly Lashmanov
Wouter van Ackooy
CHANGELOG.rst (138 changed lines)
@@ -1,6 +1,101 @@
-3.0.8 (unreleased)
+3.1.0 (2017-05-20)
==================


New Features
------------

* The ``pytest-warnings`` plugin has been integrated into the core, so ``pytest`` now automatically
  captures and displays warnings at the end of the test session.
  Thanks `@nicoddemus`_ for the PR.

* Added a ``junit_suite_name`` ini option to specify the root ``<testsuite>`` name for JUnit XML reports (`#533`_).

* Added an ini option ``doctest_encoding`` to specify which encoding to use for doctest files.
  Thanks `@wheerd`_ for the PR (`#2101`_).

* ``pytest.warns`` now checks for subclass relationship rather than
  class equality. Thanks `@lesteve`_ for the PR (`#2166`_).

* ``pytest.raises`` now asserts that the error message matches a text or regex
  with the ``match`` keyword argument. Thanks `@Kriechi`_ for the PR.

* ``pytest.param`` can be used to declare test parameter sets with marks and test ids.
  Thanks `@RonnyPfannschmidt`_ for the PR. A sketch of these three APIs follows this list.
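A minimal sketch of the three new APIs above; the test bodies and the ``MyError`` class are invented for illustration::

    import warnings
    import pytest


    class MyError(ValueError):
        pass  # invented exception for the sketch


    def test_raises_match():
        # the new match= keyword checks the message against a regex
        with pytest.raises(MyError, match=r"bad value: \d+"):
            raise MyError("bad value: 42")


    def test_warns_subclass():
        # pytest.warns now accepts subclasses of the given warning category
        with pytest.warns(Warning):
            warnings.warn("deprecated", UserWarning)


    @pytest.mark.parametrize("value", [
        pytest.param(1, id="one"),
        pytest.param(-1, id="negative", marks=pytest.mark.xfail(reason="invented example")),
    ])
    def test_values(value):
        assert value >= 0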
Changes
-------

* Remove all internal uses of the ``pytest_namespace`` hook; this prepares for the
  removal of preloadconfig in pytest 4.0.
  Thanks to `@RonnyPfannschmidt`_ for the PR.

* pytest now warns when a callable ``ids`` function raises in a parametrized test. Thanks `@fogo`_ for the PR.

* It is now possible to skip test classes from being collected by setting a
  ``__test__`` attribute to ``False`` in the class body (`#2007`_). Thanks
  to `@syre`_ for the report and `@lwm`_ for the PR. A sketch follows this list.

* Change junitxml.py to produce reports that comply with the JUnit XML schema.
  If the same test fails with a failure in call and then errors in teardown,
  we split the testcase element into two, one containing the error and the other
  the failure (`#2228`_). Thanks to `@kkoukiou`_ for the PR.

* Testcase reports with a ``url`` attribute will now properly write this to junitxml.
  Thanks `@fushi`_ for the PR (`#1874`_).

* Remove common items from dict comparison output when verbosity=1. Also update
  the truncation message to make it clearer that pytest truncates all
  assertion messages if verbosity < 2 (`#1512`_).
  Thanks `@mattduck`_ for the PR.

* ``--pdbcls`` no longer implies ``--pdb``. This makes it possible to use
  ``addopts=--pdbcls=module.SomeClass`` in ``pytest.ini``. Thanks `@davidszotten`_ for
  the PR (`#1952`_).

* Change the exception raised by ``capture.DontReadFromInput.fileno()`` from ``ValueError``
  to ``io.UnsupportedOperation``. Thanks `@vlad-dragos`_ for the PR.

* Fix `#2013`_: turn ``RecordedWarning`` into a ``namedtuple``,
  to give it a comprehensible repr while preventing unwarranted modification.

* Fix `#2208`_: ensure an iteration limit for ``_pytest.compat.get_real_func``.
  Thanks `@RonnyPfannschmidt`_ for the report and PR.

* Hooks are now verified after collection is complete, rather than right after loading installed plugins. This
  makes it easy to write hooks for plugins which will be loaded during collection, for example using the
  ``pytest_plugins`` special variable (`#1821`_).
  Thanks `@nicoddemus`_ for the PR.

* Modify the ``pytest_make_parametrize_id()`` hook to accept ``argname`` as an
  additional parameter.
  Thanks `@unsignedint`_ for the PR.

* Add ``venv`` to the default ``norecursedirs`` setting.
  Thanks `@The-Compiler`_ for the PR.

* ``PluginManager.import_plugin`` now accepts unicode plugin names in Python 2.
  Thanks `@reutsharabani`_ for the PR.

* Fix `#2308`_: when using both ``--lf`` and ``--ff``, only the last failed tests are run.
  Thanks `@ojii`_ for the PR.

* Replace minor/patch level version numbers in the documentation with placeholders.
  This significantly reduces change-noise as different contributors regenerate
  the documentation on different platforms.
  Thanks `@RonnyPfannschmidt`_ for the PR.

* Fix `#2391`_: consider ``pytest_plugins`` on all plugin modules.
  Thanks `@RonnyPfannschmidt`_ for the PR.
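A minimal sketch of the new ``__test__`` collection switch mentioned in the list above; the class names are invented::

    class TestHelpers:
        # not collected: __test__ is False in the class body (#2007)
        __test__ = False

        def test_never_runs(self):
            assert False


    class TestCollected:
        def test_runs(self):
            assert True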
Bug Fixes
---------

* Fix ``AttributeError`` on ``sys.stdout.buffer`` / ``sys.stderr.buffer``
  while using the ``capsys`` fixture in Python 3 (`#1407`_).
  Thanks to `@asottile`_.

* Change capture.py's ``DontReadFromInput`` class to throw ``io.UnsupportedOperation`` errors rather
  than ``ValueError`` in the ``fileno`` method (`#2276`_).
  Thanks `@metasyn`_ for the PR.

@@ -8,7 +103,7 @@
* Fix exception formatting while importing modules when the exception message
  contains non-ascii characters (`#2336`_).
  Thanks `@fabioz`_ for the report and `@nicoddemus`_ for the PR.

* Added documentation related to issue (`#1937`_).
  Thanks `@skylarjhdownes`_ for the PR.

@@ -18,26 +113,45 @@
* Show the correct error message when collecting a "parametrize" func with wrong args (`#2383`_).
  Thanks `@The-Compiler`_ for the report and `@robin0371`_ for the PR.


.. _@skylarjhdownes: https://github.com/skylarjhdownes
.. _@davidszotten: https://github.com/davidszotten
.. _@fabioz: https://github.com/fabioz
.. _@metasyn: https://github.com/metasyn
.. _@fogo: https://github.com/fogo
.. _@fushi: https://github.com/fushi
.. _@Kodiologist: https://github.com/Kodiologist
.. _@Kriechi: https://github.com/Kriechi
.. _@mandeep: https://github.com/mandeep
.. _@mattduck: https://github.com/mattduck
.. _@MichalTHEDUDE: https://github.com/MichalTHEDUDE
.. _@ojii: https://github.com/ojii
.. _@reutsharabani: https://github.com/reutsharabani
.. _@robin0371: https://github.com/robin0371
.. _@unsignedint: https://github.com/unsignedint
.. _@wheerd: https://github.com/wheerd


.. _#1407: https://github.com/pytest-dev/pytest/issues/1407
.. _#1512: https://github.com/pytest-dev/pytest/issues/1512
.. _#1821: https://github.com/pytest-dev/pytest/issues/1821
.. _#1874: https://github.com/pytest-dev/pytest/pull/1874
.. _#1937: https://github.com/pytest-dev/pytest/issues/1937
.. _#1952: https://github.com/pytest-dev/pytest/pull/1952
.. _#2007: https://github.com/pytest-dev/pytest/issues/2007
.. _#2013: https://github.com/pytest-dev/pytest/issues/2013
.. _#2101: https://github.com/pytest-dev/pytest/pull/2101
.. _#2166: https://github.com/pytest-dev/pytest/pull/2166
.. _#2208: https://github.com/pytest-dev/pytest/issues/2208
.. _#2228: https://github.com/pytest-dev/pytest/issues/2228
.. _#2276: https://github.com/pytest-dev/pytest/issues/2276
.. _#2308: https://github.com/pytest-dev/pytest/issues/2308
.. _#2336: https://github.com/pytest-dev/pytest/issues/2336
.. _#2369: https://github.com/pytest-dev/pytest/issues/2369
.. _#2383: https://github.com/pytest-dev/pytest/issues/2383
.. _#2391: https://github.com/pytest-dev/pytest/issues/2391
.. _#533: https://github.com/pytest-dev/pytest/issues/533



3.0.7 (2017-03-14)

@@ -191,6 +305,7 @@
* Cope gracefully with a .pyc file with no matching .py file (`#2038`_). Thanks
  `@nedbat`_.

.. _@syre: https://github.com/syre
.. _@adler-j: https://github.com/adler-j
.. _@d-b-w: https://bitbucket.org/d-b-w/
.. _@DuncanBetts: https://github.com/DuncanBetts

@@ -298,6 +413,7 @@
.. _@raquel-ucl: https://github.com/raquel-ucl
.. _@axil: https://github.com/axil
.. _@tgoodlet: https://github.com/tgoodlet
.. _@vlad-dragos: https://github.com/vlad-dragos

.. _#1853: https://github.com/pytest-dev/pytest/issues/1853
.. _#1905: https://github.com/pytest-dev/pytest/issues/1905
@@ -206,12 +206,12 @@ but here is a simple overview:

 #. Run all the tests

-   You need to have Python 2.7 and 3.5 available in your system. Now
+   You need to have Python 2.7 and 3.6 available in your system. Now
    running tests is as simple as issuing this command::

-    $ tox -e linting,py27,py35
+    $ tox -e linting,py27,py36

-   This command will run tests via the "tox" tool against Python 2.7 and 3.5
+   This command will run tests via the "tox" tool against Python 2.7 and 3.6
    and also perform "lint" coding-style checks.

 #. You can now edit your local working copy.

@@ -223,9 +223,9 @@ but here is a simple overview:

    $ tox -e py27 -- --pdb

-   Or to only run tests in a particular test module on Python 3.5::
+   Or to only run tests in a particular test module on Python 3.6::

-    $ tox -e py35 -- testing/test_config.py
+    $ tox -e py36 -- testing/test_config.py

 #. Commit and push once your tests pass and you are happy with your change(s)::
@@ -1,29 +1,30 @@
 How to release pytest
 --------------------------------------------

-Note: this assumes you have already registered on pypi.
+.. important::

-1. Bump version numbers in ``_pytest/__init__.py`` (``setup.py`` reads it).
+   pytest releases must be prepared on **Linux**, because the docs and examples expect
+   to be executed on that platform.

-2. Check and finalize ``CHANGELOG.rst``.
+#. Install development dependencies in a virtual environment with::

-3. Write ``doc/en/announce/release-VERSION.txt`` and include
-   it in ``doc/en/announce/index.txt``. Run this command to list names of authors involved::
+    pip3 install -r tasks/requirements.txt

-    git log $(git describe --abbrev=0 --tags)..HEAD --format='%aN' | sort -u
+#. Create a branch ``release-X.Y.Z`` with the version for the release. Make sure it is up to date
+   with the latest ``master`` (for patch releases) and with the latest ``features`` merged with
+   the latest ``master`` (for minor releases). Ensure you are in a clean work tree.

-4. Regenerate the docs examples using tox::
+#. Check and finalize ``CHANGELOG.rst`` (will be automated soon).

-    tox -e regen
+#. Execute the following to automatically generate docs and announcements and upload a package to
+   your ``devpi`` staging server::

-5. At this point, open a PR named ``release-X`` so others can help find regressions or provide suggestions.
+    invoke generate.pre_release <VERSION> <DEVPI USER> --password <DEVPI PASSWORD>

-6. Use devpi for uploading a release tarball to a staging area::
+   If ``--password`` is not given, it is assumed the user is already logged in. If you don't have
+   an account, please ask for one!

-    devpi use https://devpi.net/USER/dev
-    devpi upload --formats sdist,bdist_wheel

-7. Run from multiple machines::
+#. Run from multiple machines::

     devpi use https://devpi.net/USER/dev
     devpi test pytest==VERSION

@@ -31,27 +32,27 @@ Note: this assumes you have already registered on pypi.
    Alternatively, you can use `devpi-cloud-tester <https://github.com/nicoddemus/devpi-cloud-tester>`_ to test
    the package on AppVeyor and Travis (follow the instructions in the ``README``).

-8. Check that tests pass for relevant combinations with::
+#. Check that tests pass for relevant combinations with::

     devpi list pytest

    or look at failures with "devpi list -f pytest".

-9. Feeling confident? Publish to pypi::
+#. Feeling confident? Publish to pypi::

     devpi push pytest==VERSION pypi:NAME

    where NAME is the name of pypi.python.org as configured in your ``~/.pypirc``
    file `for devpi <http://doc.devpi.net/latest/quickstart-releaseprocess.html?highlight=pypirc#devpi-push-releasing-to-an-external-index>`_.

-10. Tag the release::
+#. Tag the release::

     git tag VERSION <hash>
     git push origin VERSION

    Make sure ``<hash>`` is **exactly** the git hash at the time the package was created.

-11. Send release announcement to mailing lists:
+#. Send the release announcement to the mailing lists:

    - pytest-dev@python.org
    - python-announce-list@python.org

@@ -59,7 +60,7 @@ Note: this assumes you have already registered on pypi.

    And announce the release on Twitter, making sure to add the hashtag ``#pytest``.

-12. **After the release**
+#. **After the release**

    a. **patch release (2.8.3)**:

@@ -81,5 +82,3 @@ Note: this assumes you have already registered on pypi.
    9. Push ``master`` and ``features``.

    c. **major release (3.0.0)**: same steps as for a **minor release**
@@ -20,6 +20,7 @@ recursive-include extra *.py
 graft testing
 graft doc
 prune doc/en/_build
+graft tasks

 exclude _pytest/impl
@@ -1,2 +1,8 @@
-#
-__version__ = '3.0.8.dev'
+__all__ = ['__version__']
+
+try:
+    from ._version import version as __version__
+except ImportError:
+    # broken installation, we don't even try
+    # unknown only works because we do poor man's version compare
+    __version__ = 'unknown'
@@ -57,7 +57,7 @@ If things do not work right away:
 which should throw a KeyError: 'COMPLINE' (which is properly set by the
 global argcomplete script).
 """
+from __future__ import absolute_import, division, print_function
 import sys
 import os
 from glob import glob
@@ -1,4 +1,5 @@
 """ python inspection/code generation API """
+from __future__ import absolute_import, division, print_function
 from .code import Code  # noqa
 from .code import ExceptionInfo  # noqa
 from .code import Frame  # noqa
@@ -2,6 +2,7 @@
 # CHANGES:
 # - some_str is replaced, trying to create unicode strings
 #
+from __future__ import absolute_import, division, print_function
 import types

 def format_exception_only(etype, value):
@@ -1,3 +1,4 @@
+from __future__ import absolute_import, division, print_function
 import sys
 from inspect import CO_VARARGS, CO_VARKEYWORDS
 import re
@@ -1,4 +1,4 @@
-from __future__ import generators
+from __future__ import absolute_import, division, generators, print_function

 from bisect import bisect_right
 import sys
@@ -2,7 +2,7 @@
 imports symbols from vendored "pluggy" if available, otherwise
 falls back to importing "pluggy" from the default namespace.
 """
+from __future__ import absolute_import, division, print_function
 try:
     from _pytest.vendored_packages.pluggy import *  # noqa
     from _pytest.vendored_packages.pluggy import __version__  # noqa
@@ -1,12 +1,13 @@
 """
 support for presenting detailed information in failing assertions.
 """
+from __future__ import absolute_import, division, print_function
 import py
 import os
 import sys

 from _pytest.assertion import util
 from _pytest.assertion import rewrite
+from _pytest.assertion import truncate


 def pytest_addoption(parser):

@@ -24,9 +25,6 @@ def pytest_addoption(parser):
                 expression information.""")


-def pytest_namespace():
-    return {'register_assert_rewrite': register_assert_rewrite}
-
 def register_assert_rewrite(*names):
     """Register one or more module names to be rewritten on import.

@@ -100,12 +98,6 @@ def pytest_collection(session):
         assertstate.hook.set_session(session)


-def _running_on_ci():
-    """Check if we're currently running on a CI system."""
-    env_vars = ['CI', 'BUILD_NUMBER']
-    return any(var in os.environ for var in env_vars)
-
 def pytest_runtest_setup(item):
     """Setup the pytest_assertrepr_compare hook

@@ -119,8 +111,8 @@ def pytest_runtest_setup(item):

     This uses the first result from the hook and then ensures the
     following:
-    * Overly verbose explanations are dropped unless -vv was used or
-      running on a CI.
+    * Overly verbose explanations are truncated unless configured otherwise
+      (eg. if running in verbose mode).
     * Embedded newlines are escaped to help util.format_explanation()
       later.
     * If the rewrite mode is used embedded %-characters are replaced

@@ -133,14 +125,7 @@ def pytest_runtest_setup(item):
             config=item.config, op=op, left=left, right=right)
         for new_expl in hook_result:
             if new_expl:
-                if (sum(len(p) for p in new_expl[1:]) > 80*8 and
-                        item.config.option.verbose < 2 and
-                        not _running_on_ci()):
-                    show_max = 10
-                    truncated_lines = len(new_expl) - show_max
-                    new_expl[show_max:] = [py.builtin._totext(
-                        'Detailed information truncated (%d more lines)'
-                        ', use "-vv" to show' % truncated_lines)]
+                new_expl = truncate.truncate_if_required(new_expl, item)
                 new_expl = [line.replace("\n", "\\n") for line in new_expl]
                 res = py.builtin._totext("\n~").join(new_expl)
                 if item.config.getvalue("assertmode") == "rewrite":
@@ -1,5 +1,5 @@
 """Rewrite assertion AST to produce nice error messages"""
-
+from __future__ import absolute_import, division, print_function
 import ast
 import _ast
 import errno

@@ -337,7 +337,7 @@ def _rewrite_test(config, fn):
         return None, None
     rewrite_asserts(tree, fn, config)
     try:
-        co = compile(tree, fn.strpath, "exec")
+        co = compile(tree, fn.strpath, "exec", dont_inherit=True)
     except SyntaxError:
         # It's possible that this error is from some bug in the
         # assertion rewriting, but I don't know of a fast way to tell.
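For context, a minimal sketch (not part of the commit) of what ``dont_inherit=True`` changes: without it, ``compile`` lets the compiled code inherit any ``__future__`` flags active in the calling module, here the rewriter itself. On Python 2 the two calls below differ::

    from __future__ import division

    # inherits the division flag from this module: prints 0.5 on Python 2
    exec(compile("print(1 / 2)", "<test>", "exec"))

    # ignores the caller's flags, so the compiled module keeps its own
    # semantics: prints 0 on Python 2
    exec(compile("print(1 / 2)", "<test>", "exec", dont_inherit=True))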
@@ -0,0 +1,102 @@
"""
Utilities for truncating assertion output.

Current default behaviour is to truncate assertion explanations at
~8 terminal lines, unless running in "-vv" mode or running on CI.
"""
from __future__ import absolute_import, division, print_function
import os

import py


DEFAULT_MAX_LINES = 8
DEFAULT_MAX_CHARS = 8 * 80
USAGE_MSG = "use '-vv' to show"


def truncate_if_required(explanation, item, max_length=None):
    """
    Truncate this assertion explanation if the given test item is eligible.
    """
    if _should_truncate_item(item):
        return _truncate_explanation(explanation)
    return explanation


def _should_truncate_item(item):
    """
    Whether or not this test item is eligible for truncation.
    """
    verbose = item.config.option.verbose
    return verbose < 2 and not _running_on_ci()


def _running_on_ci():
    """Check if we're currently running on a CI system."""
    env_vars = ['CI', 'BUILD_NUMBER']
    return any(var in os.environ for var in env_vars)


def _truncate_explanation(input_lines, max_lines=None, max_chars=None):
    """
    Truncate given list of strings that makes up the assertion explanation.

    Truncates to either 8 lines, or 640 characters - whichever the input reaches
    first. The remaining lines will be replaced by a usage message.
    """
    if max_lines is None:
        max_lines = DEFAULT_MAX_LINES
    if max_chars is None:
        max_chars = DEFAULT_MAX_CHARS

    # Check if truncation required
    input_char_count = len("".join(input_lines))
    if len(input_lines) <= max_lines and input_char_count <= max_chars:
        return input_lines

    # Truncate first to max_lines, and then truncate to max_chars if max_chars
    # is exceeded.
    truncated_explanation = input_lines[:max_lines]
    truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars)

    # Add ellipsis to final line
    truncated_explanation[-1] = truncated_explanation[-1] + "..."

    # Append useful message to explanation
    truncated_line_count = len(input_lines) - len(truncated_explanation)
    truncated_line_count += 1  # Account for the part-truncated final line
    msg = '...Full output truncated'
    if truncated_line_count == 1:
        msg += ' ({0} line hidden)'.format(truncated_line_count)
    else:
        msg += ' ({0} lines hidden)'.format(truncated_line_count)
    msg += ", {0}".format(USAGE_MSG)
    truncated_explanation.extend([
        py.builtin._totext(""),
        py.builtin._totext(msg),
    ])
    return truncated_explanation


def _truncate_by_char_count(input_lines, max_chars):
    # Check if truncation required
    if len("".join(input_lines)) <= max_chars:
        return input_lines

    # Find point at which input length exceeds total allowed length
    iterated_char_count = 0
    for iterated_index, input_line in enumerate(input_lines):
        if iterated_char_count + len(input_line) > max_chars:
            break
        iterated_char_count += len(input_line)

    # Create truncated explanation with modified final line
    truncated_result = input_lines[:iterated_index]
    final_line = input_lines[iterated_index]
    if final_line:
        final_line_truncate_point = max_chars - iterated_char_count
        final_line = final_line[:final_line_truncate_point]
    truncated_result.append(final_line)
    return truncated_result
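A quick illustration (not part of the commit) of how the helper behaves, assuming the module is importable as below::

    from _pytest.assertion.truncate import _truncate_explanation

    # 20 lines blow the default 8-line budget
    explanation = ["line %d" % i for i in range(20)]
    result = _truncate_explanation(explanation, max_lines=8, max_chars=640)

    assert result[7].endswith("...")              # part-truncated final line
    assert "Full output truncated" in result[-1]  # summary with the "-vv" hint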
@@ -1,4 +1,5 @@
 """Utilities for assertion debugging"""
+from __future__ import absolute_import, division, print_function
 import pprint

 import _pytest._code

@@ -8,7 +9,7 @@ try:
 except ImportError:
     Sequence = list

 BuiltinAssertionError = py.builtin.builtins.AssertionError

 u = py.builtin._totext

 # The _reprcompare attribute on the util module is used by the new assertion

@@ -256,8 +257,8 @@ def _compare_eq_dict(left, right, verbose=False):
     explanation = []
     common = set(left).intersection(set(right))
     same = dict((k, left[k]) for k in common if left[k] == right[k])
-    if same and not verbose:
-        explanation += [u('Omitting %s identical items, use -v to show') %
+    if same and verbose < 2:
+        explanation += [u('Omitting %s identical items, use -vv to show') %
                         len(same)]
     elif same:
         explanation += [u('Common items:')]
@@ -4,7 +4,7 @@ merged implementation of the cache provider
 the name cache was not chosen to ensure pluggy automatically
 ignores the external pytest-cache
 """
+from __future__ import absolute_import, division, print_function
 import py
 import pytest
 import json

@@ -139,11 +139,11 @@ class LFPlugin:
             # running a subset of all tests with recorded failures outside
             # of the set of tests currently executing
             pass
         elif self.config.getvalue("failedfirst"):
             items[:] = previously_failed + previously_passed
-        else:
+        elif self.config.getvalue("lf"):
             items[:] = previously_failed
             config.hook.pytest_deselected(items=previously_passed)
+        else:
+            items[:] = previously_failed + previously_passed

     def pytest_sessionfinish(self, session):
         config = self.config
@@ -2,18 +2,18 @@
 per-test stdout/stderr capturing mechanism.

 """
-from __future__ import with_statement
+from __future__ import absolute_import, division, print_function

 import contextlib
 import sys
 import os
+from io import UnsupportedOperation
 from tempfile import TemporaryFile

 import py
 import pytest
+from _pytest.compat import CaptureIO

 from py.io import TextIO
-from io import UnsupportedOperation
 unicode = py.builtin.text

 patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}

@@ -403,7 +403,7 @@ class SysCapture:
         if name == "stdin":
             tmpfile = DontReadFromInput()
         else:
-            tmpfile = TextIO()
+            tmpfile = CaptureIO()
         self.tmpfile = tmpfile

     def start(self):

@@ -449,7 +449,8 @@ class DontReadFromInput:
     __iter__ = read

     def fileno(self):
-        raise UnsupportedOperation("redirected Stdin is pseudofile, has no fileno()")
+        raise UnsupportedOperation("redirected stdin is pseudofile, "
+                                   "has no fileno()")

     def isatty(self):
         return False
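A minimal sketch (not from the commit) of what the new exception type means for test code that pokes at ``stdin`` while capture is active::

    import io
    import sys

    import pytest


    def test_stdin_has_no_fileno():
        # under default capture, sys.stdin is a DontReadFromInput
        # pseudofile, so fileno() now raises io.UnsupportedOperation
        # instead of ValueError
        with pytest.raises(io.UnsupportedOperation):
            sys.stdin.fileno()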
@@ -1,6 +1,7 @@
 """
 python version compatibility code
 """
+from __future__ import absolute_import, division, print_function
 import sys
 import inspect
 import types

@@ -123,7 +124,7 @@ if sys.version_info[:2] == (2, 6):

 if _PY3:
     import codecs
-
+    imap = map
     STRING_TYPES = bytes, str

     def _escape_strings(val):

@@ -157,6 +158,8 @@ if _PY3:
 else:
     STRING_TYPES = bytes, str, unicode

+    from itertools import imap  # NOQA
+
     def _escape_strings(val):
         """In py2 bytes and str are the same type, so return if it's a bytes
         object, return it unchanged if it is a full ascii string,

@@ -179,8 +182,18 @@ def get_real_func(obj):
     """ gets the real function object of the (possibly) wrapped object by
     functools.wraps or functools.partial.
     """
-    while hasattr(obj, "__wrapped__"):
-        obj = obj.__wrapped__
+    start_obj = obj
+    for i in range(100):
+        new_obj = getattr(obj, '__wrapped__', None)
+        if new_obj is None:
+            break
+        obj = new_obj
+    else:
+        raise ValueError(
+            ("could not find real function of {start}"
+             "\nstopped at {current}").format(
+                start=py.io.saferepr(start_obj),
+                current=py.io.saferepr(obj)))
     if isinstance(obj, functools.partial):
         obj = obj.func
     return obj
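A small sketch, invented for illustration, of the pathological case the 100-iteration cap guards against: an object whose ``__wrapped__`` chain never ends would previously spin this loop forever::

    import pytest
    from _pytest.compat import get_real_func


    class Evil(object):
        # every lookup of __wrapped__ yields yet another wrapper,
        # so the unwrapping loop can never terminate on its own
        @property
        def __wrapped__(self):
            return Evil()


    def test_get_real_func_gives_up():
        # the capped loop raises after 100 iterations instead of hanging
        with pytest.raises(ValueError):
            get_real_func(Evil())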
@@ -210,7 +223,7 @@ def safe_getattr(object, name, default):
     """ Like getattr but return default upon any Exception.

     Attribute access can potentially fail for 'evil' Python objects.
-    See issue214
+    See issue #214.
     """
     try:
         return getattr(object, name, default)

@@ -242,3 +255,51 @@ else:
         v = unicode(v)
         errors = 'replace'
     return v.encode('utf-8', errors)
+
+
+COLLECT_FAKEMODULE_ATTRIBUTES = (
+    'Collector',
+    'Module',
+    'Generator',
+    'Function',
+    'Instance',
+    'Session',
+    'Item',
+    'Class',
+    'File',
+    '_fillfuncargs',
+)
+
+
+def _setup_collect_fakemodule():
+    from types import ModuleType
+    import pytest
+    pytest.collect = ModuleType('pytest.collect')
+    pytest.collect.__all__ = []  # used for setns
+    for attr in COLLECT_FAKEMODULE_ATTRIBUTES:
+        setattr(pytest.collect, attr, getattr(pytest, attr))
+
+
+if _PY2:
+    from py.io import TextIO as CaptureIO
+else:
+    import io
+
+    class CaptureIO(io.TextIOWrapper):
+        def __init__(self):
+            super(CaptureIO, self).__init__(
+                io.BytesIO(),
+                encoding='UTF-8', newline='', write_through=True,
+            )
+
+        def getvalue(self):
+            return self.buffer.getvalue().decode('UTF-8')
+
+
+class FuncargnamesCompatAttr(object):
+    """ helper class so that Metafunc, Function and FixtureRequest
+    don't need to each define the "funcargnames" compatibility attribute.
+    """
+    @property
+    def funcargnames(self):
+        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
+        return self.fixturenames
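A tiny sketch (not part of the commit) of the new ``CaptureIO`` behaviour: a text stream over an in-memory byte buffer whose ``getvalue()`` decodes the captured bytes::

    from _pytest.compat import CaptureIO

    stream = CaptureIO()
    stream.write(u"grüße\n")  # text in, UTF-8 bytes stored underneath
    assert stream.getvalue() == u"grüße\n"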
@@ -1,4 +1,5 @@
 """ command line options, ini-file and conftest.py processing. """
+from __future__ import absolute_import, division, print_function
 import argparse
 import shlex
 import traceback

@@ -7,7 +8,8 @@ import warnings

 import py
 # DON'T import pytest here because it causes import cycle troubles
-import sys, os
+import sys
+import os
 import _pytest._code
 import _pytest.hookspec  # the extension point definitions
 import _pytest.assertion

@@ -53,7 +55,6 @@ def main(args=None, plugins=None):
             return 4
         else:
             try:
-                config.pluginmanager.check_pending()
                 return config.hook.pytest_cmdline_main(config=config)
             finally:
                 config._ensure_unconfigure()

@@ -98,7 +99,8 @@ default_plugins = (
     "mark main terminal runner python fixtures debugging unittest capture skipping "
     "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion "
     "junitxml resultlog doctest cacheprovider freeze_support "
-    "setuponly setupplan").split()
+    "setuponly setupplan warnings").split()


 builtin_plugins = set(default_plugins)
 builtin_plugins.add("pytester")

@@ -251,6 +253,9 @@ class PytestPluginManager(PluginManager):
         if ret:
             self.hook.pytest_plugin_registered.call_historic(
                 kwargs=dict(plugin=plugin, manager=self))
+
+            if isinstance(plugin, types.ModuleType):
+                self.consider_module(plugin)
         return ret

     def getplugin(self, name):

@@ -395,8 +400,7 @@ class PytestPluginManager(PluginManager):
             self.import_plugin(arg)

     def consider_conftest(self, conftestmodule):
-        if self.register(conftestmodule, name=conftestmodule.__file__):
-            self.consider_module(conftestmodule)
+        self.register(conftestmodule, name=conftestmodule.__file__)

     def consider_env(self):
         self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))

@@ -414,7 +418,8 @@ class PytestPluginManager(PluginManager):
         # "terminal" or "capture".  Those plugins are registered under their
         # basename for historic purposes but must be imported with the
         # _pytest prefix.
-        assert isinstance(modname, str), "module name as string required, got %r" % modname
+        assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname
         modname = str(modname)
         if self.get_plugin(modname) is not None:
             return
         if modname in builtin_plugins:

@@ -439,7 +444,6 @@ class PytestPluginManager(PluginManager):
         else:
             mod = sys.modules[importspec]
         self.register(mod, modname)
-        self.consider_module(mod)


 def _get_plugin_specs_as_list(specs):

@@ -910,11 +914,11 @@ class Config(object):
             fin = self._cleanup.pop()
             fin()

-    def warn(self, code, message, fslocation=None):
+    def warn(self, code, message, fslocation=None, nodeid=None):
         """ generate a warning for this test session. """
         self.hook.pytest_logwarning.call_historic(kwargs=dict(
             code=code, message=message,
-            fslocation=fslocation, nodeid=None))
+            fslocation=fslocation, nodeid=nodeid))

     def get_terminal_writer(self):
         return self.pluginmanager.get_plugin("terminalreporter")._tw
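A short sketch, with an invented warning code, of what the new ``nodeid`` parameter enables from plugin code::

    def pytest_collection_modifyitems(config, items):
        # attribute a session warning to a specific test item instead
        # of leaving nodeid as None (the pre-3.1 behaviour)
        if items:
            config.warn('X1', 'example warning', nodeid=items[0].nodeid)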
@@ -1,9 +1,8 @@
 """ interactive debugging with PDB, the Python Debugger. """
-from __future__ import absolute_import
+from __future__ import absolute_import, division, print_function
 import pdb
 import sys

-import pytest


 def pytest_addoption(parser):

@@ -16,19 +15,17 @@ def pytest_addoption(parser):
          help="start a custom interactive Python debugger on errors. "
               "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb")

-def pytest_namespace():
-    return {'set_trace': pytestPDB().set_trace}

 def pytest_configure(config):
-    if config.getvalue("usepdb") or config.getvalue("usepdb_cls"):
+    if config.getvalue("usepdb_cls"):
+        modname, classname = config.getvalue("usepdb_cls").split(":")
+        __import__(modname)
+        pdb_cls = getattr(sys.modules[modname], classname)
+    else:
+        pdb_cls = pdb.Pdb
+
+    if config.getvalue("usepdb"):
         config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
-        if config.getvalue("usepdb_cls"):
-            modname, classname = config.getvalue("usepdb_cls").split(":")
-            __import__(modname)
-            pdb_cls = getattr(sys.modules[modname], classname)
-        else:
-            pdb_cls = pdb.Pdb
-        pytestPDB._pdb_cls = pdb_cls
+    pytestPDB._pdb_cls = pdb_cls

     old = (pdb.set_trace, pytestPDB._pluginmanager)

@@ -37,9 +34,10 @@ def pytest_configure(config):
         pytestPDB._config = None
         pytestPDB._pdb_cls = pdb.Pdb

-    pdb.set_trace = pytest.set_trace
+    pdb.set_trace = pytestPDB.set_trace
     pytestPDB._pluginmanager = config.pluginmanager
     pytestPDB._config = config
+    pytestPDB._pdb_cls = pdb_cls
     config._cleanup.append(fin)


 class pytestPDB:

@@ -48,19 +46,20 @@ class pytestPDB:
     _config = None
     _pdb_cls = pdb.Pdb

-    def set_trace(self):
+    @classmethod
+    def set_trace(cls):
         """ invoke PDB set_trace debugging, dropping any IO capturing. """
         import _pytest.config
         frame = sys._getframe().f_back
-        if self._pluginmanager is not None:
-            capman = self._pluginmanager.getplugin("capturemanager")
+        if cls._pluginmanager is not None:
+            capman = cls._pluginmanager.getplugin("capturemanager")
             if capman:
                 capman.suspendcapture(in_=True)
-            tw = _pytest.config.create_terminal_writer(self._config)
+            tw = _pytest.config.create_terminal_writer(cls._config)
             tw.line()
             tw.sep(">", "PDB set_trace (IO-capturing turned off)")
-            self._pluginmanager.hook.pytest_enter_pdb(config=self._config)
-        self._pdb_cls().set_trace(frame)
+            cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config)
+        cls._pdb_cls().set_trace(frame)


 class PdbInvoke:

@@ -74,7 +73,7 @@ class PdbInvoke:

     def pytest_internalerror(self, excrepr, excinfo):
         for line in str(excrepr).split("\n"):
-            sys.stderr.write("INTERNALERROR> %s\n" %line)
+            sys.stderr.write("INTERNALERROR> %s\n" % line)
         sys.stderr.flush()
         tb = _postmortem_traceback(excinfo)
         post_mortem(tb)
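A small configuration sketch of what the decoupled options allow; the debugger class is the one named in the help text above::

    # pytest.ini: safe now that --pdbcls no longer implies --pdb
    [pytest]
    addopts = --pdbcls=IPython.terminal.debugger:TerminalPdb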
@@ -5,7 +5,7 @@ that is planned to be removed in the next pytest release.
 Keeping it in a central location makes it easy to track what is deprecated and should
 be removed when the time comes.
 """
+from __future__ import absolute_import, division, print_function

 MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \
     'pass a list of arguments instead.'
@@ -1,5 +1,5 @@
 """ discover and run doctests in modules and test files."""
-from __future__ import absolute_import
+from __future__ import absolute_import, division, print_function

 import traceback

@@ -25,6 +25,7 @@ DOCTEST_REPORT_CHOICES = (
 def pytest_addoption(parser):
     parser.addini('doctest_optionflags', 'option flags for doctests',
                   type="args", default=["ELLIPSIS"])
+    parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8")
     group = parser.getgroup("collect")
     group.addoption("--doctest-modules",
                     action="store_true", default=False,

@@ -162,7 +163,6 @@ def get_optionflags(parent):
         flag_acc |= flag_lookup_table[flag]
     return flag_acc

 class DoctestTextfile(pytest.Module):
     obj = None

@@ -171,7 +171,8 @@ class DoctestTextfile(pytest.Module):

         # inspired by doctest.testfile; ideally we would use it directly,
         # but it doesn't support passing a custom checker
-        text = self.fspath.read()
+        encoding = self.config.getini("doctest_encoding")
+        text = self.fspath.read_text(encoding)
         filename = str(self.fspath)
         name = self.fspath.basename
         globs = {'__name__': '__main__'}
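A minimal configuration sketch for the new ini option, assuming doctest files saved as Latin-1::

    # pytest.ini
    [pytest]
    doctest_encoding = latin1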
@@ -1,9 +1,9 @@
+from __future__ import absolute_import, division, print_function
 import sys

 from py._code.code import FormattedExcinfo

 import py
-import pytest
 import warnings
 import inspect

@@ -16,8 +16,16 @@ from _pytest.compat import (
     getlocation, getfuncargnames,
     safe_getattr,
 )
+from _pytest.runner import fail
+from _pytest.compat import FuncargnamesCompatAttr


 def pytest_sessionstart(session):
+    import _pytest.python
+    scopename2class.update({
+        'class': _pytest.python.Class,
+        'module': _pytest.python.Module,
+        'function': _pytest.main.Item,
+    })
     session._fixturemanager = FixtureManager(session)

@@ -44,19 +52,6 @@ def scopeproperty(name=None, doc=None):
     return decoratescope


-def pytest_namespace():
-    scopename2class.update({
-        'class': pytest.Class,
-        'module': pytest.Module,
-        'function': pytest.Item,
-    })
-    return {
-        'fixture': fixture,
-        'yield_fixture': yield_fixture,
-        'collect': {'_fillfuncargs': fillfixtures}
-    }
-
 def get_scope_node(node, scope):
     cls = scopename2class.get(scope)
     if cls is None:

@@ -104,7 +99,7 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
     if scope != "function":
         node = get_scope_node(collector, scope)
         if node is None:
-            assert scope == "class" and isinstance(collector, pytest.Module)
+            assert scope == "class" and isinstance(collector, _pytest.python.Module)
             # use module-level collector for class-scope (for now)
             node = collector
     if node and argname in node._name2pseudofixturedef:

@@ -220,17 +215,6 @@ def slice_items(items, ignore, scoped_argkeys_cache):
     return items, None, None, None


-class FuncargnamesCompatAttr:
-    """ helper class so that Metafunc, Function and FixtureRequest
-    don't need to each define the "funcargnames" compatibility attribute.
-    """
-    @property
-    def funcargnames(self):
-        """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
-        return self.fixturenames
-
 def fillfixtures(function):
     """ fill missing funcargs for a test function. """
     try:

@@ -326,7 +310,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
     @scopeproperty("class")
     def cls(self):
         """ class (can be None) where the test function was collected. """
-        clscol = self._pyfuncitem.getparent(pytest.Class)
+        clscol = self._pyfuncitem.getparent(_pytest.python.Class)
         if clscol:
             return clscol.obj

@@ -344,7 +328,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
     @scopeproperty()
     def module(self):
         """ python module object where the test function was collected. """
-        return self._pyfuncitem.getparent(pytest.Module).obj
+        return self._pyfuncitem.getparent(_pytest.python.Module).obj

     @scopeproperty()
     def fspath(self):

@@ -507,7 +491,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
                     source_lineno,
                 )
             )
-            pytest.fail(msg)
+            fail(msg)
         else:
             # indices might not be set if old-style metafunc.addcall() was used
             param_index = funcitem.callspec.indices.get(argname, 0)

@@ -540,10 +524,10 @@ class FixtureRequest(FuncargnamesCompatAttr):
         if scopemismatch(invoking_scope, requested_scope):
             # try to report something helpful
             lines = self._factorytraceback()
-            pytest.fail("ScopeMismatch: You tried to access the %r scoped "
-                        "fixture %r with a %r scoped request object, "
-                        "involved factories\n%s" %(
-                            (requested_scope, argname, invoking_scope, "\n".join(lines))),
+            fail("ScopeMismatch: You tried to access the %r scoped "
+                 "fixture %r with a %r scoped request object, "
+                 "involved factories\n%s" % (
+                     (requested_scope, argname, invoking_scope, "\n".join(lines))),
                 pytrace=False)

     def _factorytraceback(self):

@@ -553,7 +537,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
             fs, lineno = getfslineno(factory)
             p = self._pyfuncitem.session.fspath.bestrelpath(fs)
             args = _format_args(factory)
-            lines.append("%s:%d: def %s%s" %(
+            lines.append("%s:%d: def %s%s" % (
                 p, lineno, factory.__name__, args))
         return lines

@@ -698,8 +682,9 @@ def fail_fixturefunc(fixturefunc, msg):
     fs, lineno = getfslineno(fixturefunc)
     location = "%s:%s" % (fs, lineno+1)
     source = _pytest._code.Source(fixturefunc)
-    pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
-                pytrace=False)
+    fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
+         pytrace=False)


 def call_fixture_func(fixturefunc, request, kwargs):
     yieldctx = is_generator(fixturefunc)

@@ -1080,7 +1065,7 @@ class FixtureManager:
                 continue
             marker = defaultfuncargprefixmarker
             from _pytest import deprecated
-            self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name))
+            self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid)
             name = name[len(self._argprefix):]
         elif not isinstance(marker, FixtureFunctionMarker):
             # magic globals with __getattr__ might have got us a wrong
@@ -2,9 +2,8 @@
 Provides a function to report all internal modules for using freezing tools
 pytest
 """
+from __future__ import absolute_import, division, print_function

-def pytest_namespace():
-    return {'freeze_includes': freeze_includes}


 def freeze_includes():

@@ -42,4 +41,4 @@ def _iter_all_modules(package, prefix=''):
             for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
                 yield prefix + m
         else:
             yield prefix + name
@@ -1,4 +1,6 @@
 """ version info, help messages, tracing configuration. """
+from __future__ import absolute_import, division, print_function
+
 import py
 import pytest
 import os, sys
@@ -16,7 +16,9 @@ def pytest_addhooks(pluginmanager):

 @hookspec(historic=True)
 def pytest_namespace():
-    """return dict of name->object to be made globally available in
+    """
+    DEPRECATED: this hook causes direct monkeypatching on pytest, its use is strongly discouraged
+    return dict of name->object to be made globally available in
     the pytest namespace. This hook is called at plugin registration
     time.
     """

@@ -157,9 +159,10 @@ def pytest_generate_tests(metafunc):
     """ generate (multiple) parametrized calls to a test function."""

 @hookspec(firstresult=True)
-def pytest_make_parametrize_id(config, val):
+def pytest_make_parametrize_id(config, val, argname):
     """Return a user-friendly string representation of the given ``val`` that will be used
     by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
+    The parameter name is available as ``argname``, if required.
     """

 # -------------------------------------------------------------------------
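A minimal conftest.py sketch of the extended hook; the ``Money`` class is invented for illustration::

    # conftest.py
    class Money(object):
        def __init__(self, amount):
            self.amount = amount


    def pytest_make_parametrize_id(config, val, argname):
        # build ids like "price=10" instead of the default "val0", "val1"
        if isinstance(val, Money):
            return "{0}={1}".format(argname, val.amount)
        return None  # fall back to pytest's default id generation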
@@ -4,9 +4,11 @@


 Based on initial code from Ross Lawley.
+
+Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
+src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
 """
-# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
-# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+from __future__ import absolute_import, division, print_function

 import functools
 import py

@@ -105,6 +107,8 @@ class _NodeReporter(object):
         }
         if testreport.location[1] is not None:
             attrs["line"] = testreport.location[1]
+        if hasattr(testreport, "url"):
+            attrs["url"] = testreport.url
         self.attrs = attrs

     def to_xml(self):

@@ -222,13 +226,14 @@ def pytest_addoption(parser):
         metavar="str",
         default=None,
         help="prepend prefix to classnames in junit-xml output")
+    parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest")


 def pytest_configure(config):
     xmlpath = config.option.xmlpath
     # prevent opening xmllog on slave nodes (xdist)
     if xmlpath and not hasattr(config, 'slaveinput'):
-        config._xml = LogXML(xmlpath, config.option.junitprefix)
+        config._xml = LogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name"))
         config.pluginmanager.register(config._xml)

@@ -255,10 +260,11 @@ def mangle_test_address(address):


 class LogXML(object):
-    def __init__(self, logfile, prefix):
+    def __init__(self, logfile, prefix, suite_name="pytest"):
         logfile = os.path.expanduser(os.path.expandvars(logfile))
         self.logfile = os.path.normpath(os.path.abspath(logfile))
         self.prefix = prefix
+        self.suite_name = suite_name
         self.stats = dict.fromkeys([
             'error',
             'passed',

@@ -268,6 +274,9 @@ class LogXML(object):
         self.node_reporters = {}  # nodeid -> _NodeReporter
         self.node_reporters_ordered = []
         self.global_properties = []
+        # List of reports that failed on call but teardown is pending.
+        self.open_reports = []
+        self.cnt_double_fail_tests = 0

     def finalize(self, report):
         nodeid = getattr(report, 'nodeid', report)

@@ -327,14 +336,33 @@ class LogXML(object):
             -> teardown node2
             -> teardown node1
         """
+        close_report = None
         if report.passed:
             if report.when == "call":  # ignore setup/teardown
                 reporter = self._opentestcase(report)
                 reporter.append_pass(report)
         elif report.failed:
+            if report.when == "teardown":
+                # The following vars are needed when xdist plugin is used
+                report_wid = getattr(report, "worker_id", None)
+                report_ii = getattr(report, "item_index", None)
+                close_report = next(
+                    (rep for rep in self.open_reports
+                     if (rep.nodeid == report.nodeid and
+                         getattr(rep, "item_index", None) == report_ii and
+                         getattr(rep, "worker_id", None) == report_wid
+                         )
+                     ), None)
+                if close_report:
+                    # We need to open new testcase in case we have failure in
+                    # call and error in teardown in order to follow junit
+                    # schema
+                    self.finalize(close_report)
+                    self.cnt_double_fail_tests += 1
             reporter = self._opentestcase(report)
             if report.when == "call":
                 reporter.append_failure(report)
+                self.open_reports.append(report)
             else:
                 reporter.append_error(report)
         elif report.skipped:

@@ -345,6 +373,17 @@ class LogXML(object):
             reporter = self._opentestcase(report)
             reporter.write_captured_output(report)
         self.finalize(report)
+        report_wid = getattr(report, "worker_id", None)
+        report_ii = getattr(report, "item_index", None)
+        close_report = next(
+            (rep for rep in self.open_reports
+             if (rep.nodeid == report.nodeid and
+                 getattr(rep, "item_index", None) == report_ii and
+                 getattr(rep, "worker_id", None) == report_wid
+                 )
+             ), None)
+        if close_report:
+            self.open_reports.remove(close_report)

     def update_testcase_duration(self, report):
         """accumulates total duration for nodeid from given report and updates

@@ -377,14 +416,15 @@ class LogXML(object):
         suite_stop_time = time.time()
         suite_time_delta = suite_stop_time - self.suite_start_time

-        numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error']
-
+        numtests = (self.stats['passed'] + self.stats['failure'] +
+                    self.stats['skipped'] + self.stats['error'] -
+                    self.cnt_double_fail_tests)
         logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+
         logfile.write(Junit.testsuite(
             self._get_global_properties_node(),
             [x.to_xml() for x in self.node_reporters_ordered],
-            name="pytest",
+            name=self.suite_name,
             errors=self.stats['error'],
             failures=self.stats['failure'],
             skips=self.stats['skipped'],
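A minimal configuration sketch for the new suite-name option::

    # pytest.ini
    [pytest]
    junit_suite_name = my_suite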
@ -1,4 +1,6 @@
|
|||
""" core implementation of testing process: init, session, runtest loop. """
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import functools
|
||||
import os
|
||||
import sys
|
||||
|
@ -6,14 +8,13 @@ import sys
|
|||
import _pytest
|
||||
import _pytest._code
|
||||
import py
|
||||
import pytest
|
||||
try:
|
||||
from collections import MutableMapping as MappingMixin
|
||||
except ImportError:
|
||||
from UserDict import DictMixin as MappingMixin
|
||||
|
||||
from _pytest.config import directory_arg
|
||||
from _pytest.runner import collect_one_node
|
||||
from _pytest.config import directory_arg, UsageError, hookimpl
|
||||
from _pytest.runner import collect_one_node, exit
|
||||
|
||||
tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
|
||||
|
||||
|
@ -25,9 +26,10 @@ EXIT_INTERNALERROR = 3
|
|||
EXIT_USAGEERROR = 4
|
||||
EXIT_NOTESTSCOLLECTED = 5
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
|
||||
type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg'])
|
||||
type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'])
|
||||
parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
|
||||
type="args", default=[])
|
||||
#parser.addini("dirpatterns",
|
||||
|
@ -75,13 +77,18 @@ def pytest_addoption(parser):
|
|||
help="base temporary directory for this test run.")
|
||||
|
||||
|
||||
|
||||
def pytest_namespace():
|
||||
collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
|
||||
return dict(collect=collect)
|
||||
"""keeping this one works around a deeper startup issue in pytest
|
||||
|
||||
i tried to find it for a while but the amount of time turned unsustainable,
|
||||
so i put a hack in to revisit later
|
||||
"""
|
||||
return {}
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
pytest.config = config # compatibility
|
||||
__import__('pytest').config = config # compatibiltiy
|
||||
|
||||
|
||||
def wrap_session(config, doit):
|
||||
|
@ -96,12 +103,11 @@ def wrap_session(config, doit):
|
|||
config.hook.pytest_sessionstart(session=session)
|
||||
initstate = 2
|
||||
session.exitstatus = doit(config, session) or 0
|
||||
except pytest.UsageError:
|
||||
except UsageError:
|
||||
raise
|
||||
except KeyboardInterrupt:
|
||||
excinfo = _pytest._code.ExceptionInfo()
|
||||
if initstate < 2 and isinstance(
|
||||
excinfo.value, pytest.exit.Exception):
|
||||
if initstate < 2 and isinstance(excinfo.value, exit.Exception):
|
||||
sys.stderr.write('{0}: {1}\n'.format(
|
||||
excinfo.typename, excinfo.value.msg))
|
||||
config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
|
||||
|
@ -123,9 +129,11 @@ def wrap_session(config, doit):
|
|||
config._ensure_unconfigure()
|
||||
return session.exitstatus
|
||||
|
||||
|
||||
def pytest_cmdline_main(config):
|
||||
return wrap_session(config, _main)
|
||||
|
||||
|
||||
def _main(config, session):
|
||||
""" default command line protocol for initialization, session,
|
||||
running tests and reporting. """
|
||||
|
@ -137,9 +145,11 @@ def _main(config, session):
|
|||
elif session.testscollected == 0:
|
||||
return EXIT_NOTESTSCOLLECTED
|
||||
|
||||
|
||||
def pytest_collection(session):
|
||||
return session.perform_collect()
|
||||
|
||||
|
||||
def pytest_runtestloop(session):
|
||||
if (session.testsfailed and
|
||||
not session.config.option.continue_on_collection_errors):
|
||||
|
@ -156,6 +166,7 @@ def pytest_runtestloop(session):
|
|||
raise session.Interrupted(session.shouldstop)
|
||||
return True
|
||||
|
||||
|
||||
def pytest_ignore_collect(path, config):
|
||||
p = path.dirpath()
|
||||
ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
|
||||
|
@ -203,7 +214,7 @@ class _CompatProperty(object):
|
|||
# "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format(
|
||||
# name=self.name, owner=type(owner).__name__),
|
||||
# PendingDeprecationWarning, stacklevel=2)
|
||||
return getattr(pytest, self.name)
|
||||
return getattr(__import__('pytest'), self.name)
|
||||
|
||||
|
||||
|
||||
|
@ -287,7 +298,7 @@ class Node(object):
|
|||
def _getcustomclass(self, name):
|
||||
maybe_compatprop = getattr(type(self), name)
|
||||
if isinstance(maybe_compatprop, _CompatProperty):
|
||||
return getattr(pytest, name)
|
||||
return getattr(__import__('pytest'), name)
|
||||
else:
|
||||
cls = getattr(self, name)
|
||||
# TODO: reenable in the features branch
|
||||
|
@ -307,9 +318,6 @@ class Node(object):
|
|||
fslocation = getattr(self, "location", None)
|
||||
if fslocation is None:
|
||||
fslocation = getattr(self, "fspath", None)
|
||||
else:
|
||||
fslocation = "%s:%s" % (fslocation[0], fslocation[1] + 1)
|
||||
|
||||
self.ihook.pytest_logwarning.call_historic(kwargs=dict(
|
||||
code=code, message=message,
|
||||
nodeid=self.nodeid, fslocation=fslocation))
|
||||
|
@ -370,9 +378,9 @@ class Node(object):
|
|||
|
||||
``marker`` can be a string or pytest.mark.* instance.
|
||||
"""
|
||||
from _pytest.mark import MarkDecorator
|
||||
from _pytest.mark import MarkDecorator, MARK_GEN
|
||||
if isinstance(marker, py.builtin._basestring):
|
||||
marker = MarkDecorator(marker)
|
||||
marker = getattr(MARK_GEN, marker)
|
||||
elif not isinstance(marker, MarkDecorator):
|
||||
raise ValueError("is not a string or pytest.mark.* Marker")
|
||||
self.keywords[marker.name] = marker
|
||||
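With the hunk above, string markers passed to ``Node.add_marker`` are resolved
through the module-level ``MARK_GEN`` generator instead of constructing a bare
``MarkDecorator``. A minimal sketch of how a plugin might exercise this; the
hook body and the ``slow`` marker name are illustrative, not part of this
commit::

    # conftest.py -- hedged sketch; any collected item works the same way
    def pytest_collection_modifyitems(items):
        for item in items:
            if 'slow' in item.nodeid:
                # string form resolves via MARK_GEN, equivalent to
                # item.add_marker(pytest.mark.slow)
                item.add_marker('slow')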
@@ -471,10 +479,6 @@ class Collector(Node):
             return str(exc.args[0])
         return self._repr_failure_py(excinfo, style="short")

-    def _memocollect(self):
-        """ internal helper method to cache results of calling collect(). """
-        return self._memoizedcall('_collected', lambda: list(self.collect()))
-
     def _prunetraceback(self, excinfo):
         if hasattr(self, 'fspath'):
             traceback = excinfo.traceback
@@ -562,12 +566,12 @@ class Session(FSCollector):
     def _makeid(self):
         return ""

-    @pytest.hookimpl(tryfirst=True)
+    @hookimpl(tryfirst=True)
     def pytest_collectstart(self):
         if self.shouldstop:
             raise self.Interrupted(self.shouldstop)

-    @pytest.hookimpl(tryfirst=True)
+    @hookimpl(tryfirst=True)
     def pytest_runtest_logreport(self, report):
         if report.failed and not hasattr(report, 'wasxfail'):
             self.testsfailed += 1
@@ -598,6 +602,7 @@ class Session(FSCollector):
         hook = self.config.hook
         try:
             items = self._perform_collect(args, genitems)
+            self.config.pluginmanager.check_pending()
             hook.pytest_collection_modifyitems(session=self,
                 config=self.config, items=items)
         finally:
@@ -626,8 +631,8 @@ class Session(FSCollector):
             for arg, exc in self._notfound:
                 line = "(no name %r in any of %r)" % (arg, exc.args[0])
                 errors.append("not found: %s\n%s" % (arg, line))
-                #XXX: test this
-            raise pytest.UsageError(*errors)
+                # XXX: test this
+            raise UsageError(*errors)
         if not genitems:
             return rep.result
         else:
@@ -655,7 +660,7 @@ class Session(FSCollector):
         names = self._parsearg(arg)
         path = names.pop(0)
         if path.check(dir=1):
-            assert not names, "invalid arg %r" %(arg,)
+            assert not names, "invalid arg %r" % (arg,)
             for path in path.visit(fil=lambda x: x.check(file=1),
                                    rec=self._recurse, bf=True, sort=True):
                 for x in self._collectfile(path):
@@ -714,9 +719,11 @@ class Session(FSCollector):
         path = self.config.invocation_dir.join(relpath, abs=True)
         if not path.check():
             if self.config.option.pyargs:
-                raise pytest.UsageError("file or package not found: " + arg + " (missing __init__.py?)")
+                raise UsageError(
+                    "file or package not found: " + arg +
+                    " (missing __init__.py?)")
             else:
-                raise pytest.UsageError("file not found: " + arg)
+                raise UsageError("file not found: " + arg)
         parts[0] = path
         return parts
@@ -739,11 +746,11 @@ class Session(FSCollector):
         nextnames = names[1:]
         resultnodes = []
         for node in matching:
-            if isinstance(node, pytest.Item):
+            if isinstance(node, Item):
                 if not names:
                     resultnodes.append(node)
                 continue
-            assert isinstance(node, pytest.Collector)
+            assert isinstance(node, Collector)
             rep = collect_one_node(node)
             if rep.passed:
                 has_matched = False
@@ -761,11 +768,11 @@ class Session(FSCollector):

     def genitems(self, node):
         self.trace("genitems", node)
-        if isinstance(node, pytest.Item):
+        if isinstance(node, Item):
             node.ihook.pytest_itemcollected(item=node)
             yield node
         else:
-            assert isinstance(node, pytest.Collector)
+            assert isinstance(node, Collector)
             rep = collect_one_node(node)
             if rep.passed:
                 for subnode in rep.result:
_pytest/mark.py (167 changed lines)
@@ -1,5 +1,64 @@
 """ generic mechanism for marking and selecting python functions. """
+from __future__ import absolute_import, division, print_function
+
 import inspect
+from collections import namedtuple
+from operator import attrgetter
+from .compat import imap
+
+
+def alias(name):
+    return property(attrgetter(name), doc='alias for ' + name)
+
+
+class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')):
+    @classmethod
+    def param(cls, *values, **kw):
+        marks = kw.pop('marks', ())
+        if isinstance(marks, MarkDecorator):
+            marks = marks,
+        else:
+            assert isinstance(marks, (tuple, list, set))
+
+        def param_extract_id(id=None):
+            return id
+
+        id = param_extract_id(**kw)
+        return cls(values, marks, id)
+
+    @classmethod
+    def extract_from(cls, parameterset, legacy_force_tuple=False):
+        """
+        :param parameterset:
+            a legacy style parameterset that may or may not be a tuple,
+            and may or may not be wrapped into a mess of mark objects
+
+        :param legacy_force_tuple:
+            enforce tuple wrapping so single argument tuple values
+            don't get decomposed and break tests
+
+        """
+
+        if isinstance(parameterset, cls):
+            return parameterset
+        if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple:
+            return cls.param(parameterset)
+
+        newmarks = []
+        argval = parameterset
+        while isinstance(argval, MarkDecorator):
+            newmarks.append(MarkDecorator(Mark(
+                argval.markname, argval.args[:-1], argval.kwargs)))
+            argval = argval.args[-1]
+        assert not isinstance(argval, ParameterSet)
+        if legacy_force_tuple:
+            argval = argval,
+
+        return cls(argval, marks=newmarks, id=None)
+
+    @property
+    def deprecated_arg_dict(self):
+        return dict((mark.name, mark) for mark in self.marks)
+
+
 class MarkerError(Exception):
@@ -7,8 +66,8 @@ class MarkerError(Exception):
     """Error in use of a pytest marker/attribute."""


-def pytest_namespace():
-    return {'mark': MarkGenerator()}
+def param(*values, **kw):
+    return ParameterSet.param(*values, **kw)


 def pytest_addoption(parser):
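``ParameterSet.param`` above is what the new module-level ``param`` helper,
exposed as ``pytest.param`` per the 3.1 changelog, delegates to. A short usage
sketch of declaring parameter sets with marks and ids::

    import pytest

    @pytest.mark.parametrize('n, expected', [
        (1, 2),
        pytest.param(2, 3, id='two'),
        pytest.param(0, 1, marks=pytest.mark.xfail(reason='known bug')),
    ])
    def test_increment(n, expected):
        assert n + 1 == expected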
@@ -162,9 +221,13 @@ def matchkeyword(colitem, keywordexpr):


 def pytest_configure(config):
-    import pytest
+    config._old_mark_config = MARK_GEN._config
     if config.option.strict:
-        pytest.mark._config = config
+        MARK_GEN._config = config
+
+
+def pytest_unconfigure(config):
+    MARK_GEN._config = getattr(config, '_old_mark_config', None)


 class MarkGenerator:
@@ -178,13 +241,15 @@ class MarkGenerator:

     will set a 'slowtest' :class:`MarkInfo` object
     on the ``test_function`` object. """
+    _config = None

     def __getattr__(self, name):
         if name[0] == "_":
             raise AttributeError("Marker name must NOT start with underscore")
-        if hasattr(self, '_config'):
+        if self._config is not None:
             self._check(name)
-        return MarkDecorator(name)
+        return MarkDecorator(Mark(name, (), {}))

     def _check(self, name):
         try:
@@ -200,6 +265,7 @@ class MarkGenerator:
         if name not in self._markers:
             raise AttributeError("%r not a registered marker" % (name,))


 def istestfunc(func):
     return hasattr(func, "__call__") and \
         getattr(func, "__name__", "<lambda>") != "<lambda>"
@@ -237,19 +303,23 @@ class MarkDecorator:
     additional keyword or positional arguments.

     """
-    def __init__(self, name, args=None, kwargs=None):
-        self.name = name
-        self.args = args or ()
-        self.kwargs = kwargs or {}
+    def __init__(self, mark):
+        assert isinstance(mark, Mark), repr(mark)
+        self.mark = mark
+
+    name = alias('mark.name')
+    args = alias('mark.args')
+    kwargs = alias('mark.kwargs')

     @property
     def markname(self):
         return self.name # for backward-compat (2.4.1 had this attr)

+    def __eq__(self, other):
+        return self.mark == other.mark
+
     def __repr__(self):
-        d = self.__dict__.copy()
-        name = d.pop('name')
-        return "<MarkDecorator %r %r>" % (name, d)
+        return "<MarkDecorator %r>" % (self.mark,)

     def __call__(self, *args, **kwargs):
         """ if passed a single callable argument: decorate it with mark info.
@@ -272,57 +342,50 @@ class MarkDecorator:
         else:
             holder = getattr(func, self.name, None)
             if holder is None:
-                holder = MarkInfo(
-                    self.name, self.args, self.kwargs
-                )
+                holder = MarkInfo(self.mark)
                 setattr(func, self.name, holder)
             else:
-                holder.add(self.args, self.kwargs)
+                holder.add_mark(self.mark)
             return func
-        kw = self.kwargs.copy()
-        kw.update(kwargs)
-        args = self.args + args
-        return self.__class__(self.name, args=args, kwargs=kw)
-
-
-def extract_argvalue(maybe_marked_args):
-    # TODO: incorrect mark data, the old code wanst able to collect lists
-    # individual parametrized argument sets can be wrapped in a series
-    # of markers in which case we unwrap the values and apply the mark
-    # at Function init
-    newmarks = {}
-    argval = maybe_marked_args
-    while isinstance(argval, MarkDecorator):
-        newmark = MarkDecorator(argval.markname,
-                                argval.args[:-1], argval.kwargs)
-        newmarks[newmark.markname] = newmark
-        argval = argval.args[-1]
-    return argval, newmarks
-
-
-class MarkInfo:
+        mark = Mark(self.name, args, kwargs)
+        return self.__class__(self.mark.combined_with(mark))
+
+
+class Mark(namedtuple('Mark', 'name, args, kwargs')):
+
+    def combined_with(self, other):
+        assert self.name == other.name
+        return Mark(
+            self.name, self.args + other.args,
+            dict(self.kwargs, **other.kwargs))
+
+
+class MarkInfo(object):
     """ Marking object created by :class:`MarkDecorator` instances. """
-    def __init__(self, name, args, kwargs):
-        #: name of attribute
-        self.name = name
-        #: positional argument list, empty if none specified
-        self.args = args
-        #: keyword argument dictionary, empty if nothing specified
-        self.kwargs = kwargs.copy()
-        self._arglist = [(args, kwargs.copy())]
+    def __init__(self, mark):
+        assert isinstance(mark, Mark), repr(mark)
+        self.combined = mark
+        self._marks = [mark]
+
+    name = alias('combined.name')
+    args = alias('combined.args')
+    kwargs = alias('combined.kwargs')

     def __repr__(self):
-        return "<MarkInfo %r args=%r kwargs=%r>" % (
-            self.name, self.args, self.kwargs
-        )
+        return "<MarkInfo {0!r}>".format(self.combined)

-    def add(self, args, kwargs):
+    def add_mark(self, mark):
         """ add a MarkInfo with the given args and kwargs. """
-        self._arglist.append((args, kwargs))
-        self.args += args
-        self.kwargs.update(kwargs)
+        self._marks.append(mark)
+        self.combined = self.combined.combined_with(mark)

     def __iter__(self):
         """ yield MarkInfo objects each relating to a marking-call. """
-        for args, kwargs in self._arglist:
-            yield MarkInfo(self.name, args, kwargs)
+        return imap(MarkInfo, self._marks)


+MARK_GEN = MarkGenerator()
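Because ``MarkDecorator.__call__`` now merges repeated applications through
``Mark.combined_with``, stacking the same marker accumulates args and kwargs on
one ``MarkInfo``, while ``__iter__`` still yields one ``MarkInfo`` per
marking-call. A small illustration derived from the code above; the ``slow``
marker name is arbitrary::

    import pytest

    @pytest.mark.slow(timeout=30)
    @pytest.mark.slow('db')
    def test_heavy():
        pass

    info = test_heavy.slow
    assert info.args == ('db',)          # from the first (innermost) marking
    assert info.kwargs == {'timeout': 30}
    assert len(list(info)) == 2          # one MarkInfo per marking-call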
_pytest/monkeypatch.py
@@ -1,16 +1,17 @@
 """ monkeypatching and mocking functionality. """
+from __future__ import absolute_import, division, print_function

-import os, sys
+import os
+import sys
 import re

 from py.builtin import _basestring

-import pytest
+from _pytest.fixtures import fixture

 RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")


-@pytest.fixture
+@fixture
 def monkeypatch():
     """The returned ``monkeypatch`` fixture provides these
     helper methods to modify objects, dictionaries or os.environ::
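The fixture itself is unchanged by this hunk; only its registration moves from
the public ``pytest.fixture`` to ``_pytest.fixtures.fixture``, avoiding the
import cycle on the ``pytest`` namespace. For reference, a typical use of the
fixture's documented helpers (the values are made up)::

    def test_config_home(monkeypatch):
        monkeypatch.setenv('HOME', '/tmp/fake-home')       # undone after the test
        monkeypatch.setattr('os.getcwd', lambda: '/somewhere')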
_pytest/nose.py
@@ -1,10 +1,11 @@
 """ run test suites written for nose. """
+from __future__ import absolute_import, division, print_function

 import sys

 import py
-import pytest
-from _pytest import unittest
+from _pytest import unittest, runner, python
+from _pytest.config import hookimpl


 def get_skip_exceptions():
@@ -19,19 +20,19 @@ def get_skip_exceptions():
 def pytest_runtest_makereport(item, call):
     if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
         # let's substitute the excinfo with a pytest.skip one
-        call2 = call.__class__(lambda:
-                    pytest.skip(str(call.excinfo.value)), call.when)
+        call2 = call.__class__(
+            lambda: runner.skip(str(call.excinfo.value)), call.when)
         call.excinfo = call2.excinfo


-@pytest.hookimpl(trylast=True)
+@hookimpl(trylast=True)
 def pytest_runtest_setup(item):
     if is_potential_nosetest(item):
-        if isinstance(item.parent, pytest.Generator):
+        if isinstance(item.parent, python.Generator):
             gen = item.parent
             if not hasattr(gen, '_nosegensetup'):
                 call_optional(gen.obj, 'setup')
-                if isinstance(gen.parent, pytest.Instance):
+                if isinstance(gen.parent, python.Instance):
                     call_optional(gen.parent.obj, 'setup')
                 gen._nosegensetup = True
         if not call_optional(item.obj, 'setup'):
@@ -50,14 +51,14 @@ def teardown_nose(item):


 def pytest_make_collect_report(collector):
-    if isinstance(collector, pytest.Generator):
+    if isinstance(collector, python.Generator):
         call_optional(collector.obj, 'setup')


 def is_potential_nosetest(item):
     # extra check needed since we do not do nose style setup/teardown
     # on direct unittest style classes
-    return isinstance(item, pytest.Function) and \
+    return isinstance(item, python.Function) and \
         not isinstance(item, unittest.TestCaseFunction)
_pytest/pastebin.py
@@ -1,4 +1,6 @@
 """ submit failure or test session information to a pastebin service. """
+from __future__ import absolute_import, division, print_function
+
 import pytest
 import sys
 import tempfile
_pytest/pytester.py
@@ -1,4 +1,6 @@
 """ (disabled by default) support for testing pytest and pytest plugins. """
+from __future__ import absolute_import, division, print_function
+
 import codecs
 import gc
 import os
@@ -10,8 +12,9 @@ import time
 import traceback
 from fnmatch import fnmatch

-from py.builtin import print_
+from weakref import WeakKeyDictionary

+from _pytest.capture import MultiCapture, SysCapture
 from _pytest._code import Source
 import py
 import pytest
@@ -85,7 +88,7 @@ class LsofFdLeakChecker(object):
             return True

     @pytest.hookimpl(hookwrapper=True, tryfirst=True)
-    def pytest_runtest_item(self, item):
+    def pytest_runtest_protocol(self, item):
         lines1 = self.get_open_files()
         yield
         if hasattr(sys, "pypy_version_info"):
@@ -104,7 +107,8 @@ class LsofFdLeakChecker(object):
             error.extend([str(f) for f in lines2])
             error.append(error[0])
             error.append("*** function %s:%s: %s " % item.location)
-            pytest.fail("\n".join(error), pytrace=False)
+            error.append("See issue #2366")
+            item.warn('', "\n".join(error))


 # XXX copied from execnet's conftest.py - needs to be merged
@@ -226,15 +230,15 @@ class HookRecorder:
             name, check = entries.pop(0)
             for ind, call in enumerate(self.calls[i:]):
                 if call._name == name:
-                    print_("NAMEMATCH", name, call)
+                    print("NAMEMATCH", name, call)
                     if eval(check, backlocals, call.__dict__):
-                        print_("CHECKERMATCH", repr(check), "->", call)
+                        print("CHECKERMATCH", repr(check), "->", call)
                     else:
-                        print_("NOCHECKERMATCH", repr(check), "-", call)
+                        print("NOCHECKERMATCH", repr(check), "-", call)
                         continue
                     i += ind + 1
                     break
-                print_("NONAMEMATCH", name, "with", call)
+                print("NONAMEMATCH", name, "with", call)
             else:
                 pytest.fail("could not find %r check %r" % (name, check))
@@ -402,6 +406,7 @@ class Testdir:

     def __init__(self, request, tmpdir_factory):
         self.request = request
+        self._mod_collections = WeakKeyDictionary()
         # XXX remove duplication with tmpdir plugin
         basetmp = tmpdir_factory.ensuretemp("testdir")
         name = request.function.__name__
@@ -470,7 +475,7 @@ class Testdir:
         if not hasattr(self, '_olddir'):
             self._olddir = old

-    def _makefile(self, ext, args, kwargs):
+    def _makefile(self, ext, args, kwargs, encoding="utf-8"):
         items = list(kwargs.items())
         if args:
             source = py.builtin._totext("\n").join(
@@ -490,7 +495,7 @@ class Testdir:

         source_unicode = "\n".join([my_totext(line) for line in source.lines])
         source = py.builtin._totext(source_unicode)
-        content = source.strip().encode("utf-8") # + "\n"
+        content = source.strip().encode(encoding) # + "\n"
         #content = content.rstrip() + "\n"
         p.write(content, "wb")
         if ret is None:
@@ -735,7 +740,8 @@ class Testdir:
         if kwargs.get("syspathinsert"):
             self.syspathinsert()
         now = time.time()
-        capture = py.io.StdCapture()
+        capture = MultiCapture(Capture=SysCapture)
+        capture.start_capturing()
         try:
             try:
                 reprec = self.inline_run(*args, **kwargs)
@@ -750,7 +756,8 @@ class Testdir:
                 class reprec:
                     ret = 3
         finally:
-            out, err = capture.reset()
+            out, err = capture.readouterr()
+            capture.stop_capturing()
             sys.stdout.write(out)
             sys.stderr.write(err)
@@ -867,6 +874,7 @@ class Testdir:
         self.makepyfile(__init__ = "#")
         self.config = config = self.parseconfigure(path, *configargs)
         node = self.getnode(config, path)
+
         return node

     def collect_by_name(self, modcol, name):
@@ -881,7 +889,9 @@ class Testdir:
         :param name: The name of the node to return.

         """
-        for colitem in modcol._memocollect():
+        if modcol not in self._mod_collections:
+            self._mod_collections[modcol] = list(modcol.collect())
+        for colitem in self._mod_collections[modcol]:
             if colitem.name == name:
                 return colitem
@@ -916,8 +926,8 @@ class Testdir:
         cmdargs = [str(x) for x in cmdargs]
         p1 = self.tmpdir.join("stdout")
         p2 = self.tmpdir.join("stderr")
-        print_("running:", ' '.join(cmdargs))
-        print_("     in:", str(py.path.local()))
+        print("running:", ' '.join(cmdargs))
+        print("     in:", str(py.path.local()))
         f1 = codecs.open(str(p1), "w", encoding="utf8")
         f2 = codecs.open(str(p2), "w", encoding="utf8")
         try:
@@ -943,7 +953,7 @@ class Testdir:
     def _dump_lines(self, lines, fp):
         try:
             for line in lines:
-                py.builtin.print_(line, file=fp)
+                print(line, file=fp)
         except UnicodeEncodeError:
             print("couldn't print to %s because of encoding" % (fp,))
@@ -1000,7 +1010,7 @@ class Testdir:
         The pexpect child is returned.

         """
-        basetemp = self.tmpdir.mkdir("pexpect")
+        basetemp = self.tmpdir.mkdir("temp-pexpect")
         invoke = " ".join(map(str, self._getpytestargs()))
         cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
         return self.spawn(cmd, expect_timeout=expect_timeout)
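Most of the pytester changes above swap ``py.io.StdCapture`` and ``print_`` for
pytest's own capture machinery and plain ``print``, and cache module
collections in a ``WeakKeyDictionary``. They are exercised through the
documented ``testdir`` fixture; a minimal sketch::

    # requires the pytester plugin, e.g. pytest_plugins = 'pytester' in conftest.py
    def test_collects_one(testdir):
        testdir.makepyfile('def test_ok(): pass')
        result = testdir.runpytest()
        result.assert_outcomes(passed=1)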
_pytest/python.py
@@ -1,4 +1,5 @@
 """ Python test discovery, setup and run of test functions. """
+from __future__ import absolute_import, division, print_function

 import fnmatch
 import inspect
@@ -9,19 +10,20 @@ import math
 from itertools import count

 import py
-import pytest
 from _pytest.mark import MarkerError

+from _pytest.config import hookimpl

 import _pytest
 import _pytest._pluggy as pluggy
 from _pytest import fixtures
+from _pytest import main
 from _pytest.compat import (
     isclass, isfunction, is_generator, _escape_strings,
     REGEX_TYPE, STRING_TYPES, NoneType, NOTSET,
     get_real_func, getfslineno, safe_getattr,
     safe_str, getlocation, enum,
 )
+from _pytest.runner import fail

 cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
 cutdir2 = py.path.local(_pytest.__file__).dirpath()
@@ -49,7 +51,7 @@ def filter_traceback(entry):

 def pyobj_property(name):
     def get(self):
-        node = self.getparent(getattr(pytest, name))
+        node = self.getparent(getattr(__import__('pytest'), name))
         if node is not None:
             return node.obj
     doc = "python %s object this node was collected from (can be None)." % (
@@ -126,23 +128,8 @@ def pytest_configure(config):
         "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
     )

-@pytest.hookimpl(trylast=True)
-def pytest_namespace():
-    raises.Exception = pytest.fail.Exception
-    return {
-        'raises': raises,
-        'approx': approx,
-        'collect': {
-            'Module': Module,
-            'Class': Class,
-            'Instance': Instance,
-            'Function': Function,
-            'Generator': Generator,
-        }
-    }
-

-@pytest.hookimpl(trylast=True)
+@hookimpl(trylast=True)
 def pytest_pyfunc_call(pyfuncitem):
     testfunction = pyfuncitem.obj
     if pyfuncitem._isyieldedfunction():
@@ -155,6 +142,7 @@ def pytest_pyfunc_call(pyfuncitem):
             testfunction(**testargs)
     return True

+
 def pytest_collect_file(path, parent):
     ext = path.ext
     if ext == ".py":
@@ -170,7 +158,7 @@ def pytest_collect_file(path, parent):
 def pytest_pycollect_makemodule(path, parent):
     return Module(path, parent)

-@pytest.hookimpl(hookwrapper=True)
+@hookimpl(hookwrapper=True)
 def pytest_pycollect_makeitem(collector, name, obj):
     outcome = yield
     res = outcome.get_result()
@@ -198,7 +186,7 @@ def pytest_pycollect_makeitem(collector, name, obj):
             res = list(collector._genfunctions(name, obj))
             outcome.force_result(res)

-def pytest_make_parametrize_id(config, val):
+def pytest_make_parametrize_id(config, val, argname=None):
     return None

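``pytest_make_parametrize_id`` gains an ``argname`` argument (kept optional in
the default implementation above for backward compatibility). A conftest
sketch using the extended signature; the formatting rule is illustrative::

    # conftest.py
    def pytest_make_parametrize_id(config, val, argname):
        if isinstance(val, int):
            return '{0}={1}'.format(argname, val)
        return None   # let pytest fall back to its default ids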
@@ -265,7 +253,7 @@ class PyobjMixin(PyobjContext):
         assert isinstance(lineno, int)
         return fspath, lineno, modpath

-class PyCollector(PyobjMixin, pytest.Collector):
+class PyCollector(PyobjMixin, main.Collector):

     def funcnamefilter(self, name):
         return self._matches_prefix_or_glob_option('python_functions', name)
@@ -402,10 +390,12 @@ def transfer_markers(funcobj, cls, mod):
         if not _marked(funcobj, pytestmark):
             pytestmark(funcobj)

-class Module(pytest.File, PyCollector):
+
+class Module(main.File, PyCollector):
     """ Collector for test classes and functions. """
+
     def _getobj(self):
-        return self._memoizedcall('_obj', self._importtestmodule)
+        return self._importtestmodule()

     def collect(self):
         self.session._fixturemanager.parsefactories(self)
@@ -502,6 +492,8 @@ def _get_xunit_func(obj, name):
 class Class(PyCollector):
     """ Collector for test methods. """
     def collect(self):
+        if not safe_getattr(self.obj, "__test__", True):
+            return []
         if hasinit(self.obj):
             self.warn("C1", "cannot collect test class %r because it has a "
                       "__init__ constructor" % self.obj.__name__)
@@ -586,7 +578,7 @@ class FunctionMixin(PyobjMixin):
             entry.set_repr_style('short')

     def _repr_failure_py(self, excinfo, style="long"):
-        if excinfo.errisinstance(pytest.fail.Exception):
+        if excinfo.errisinstance(fail.Exception):
             if not excinfo.value.pytrace:
                 return py._builtin._totext(excinfo.value)
         return super(FunctionMixin, self)._repr_failure_py(excinfo,
@@ -784,36 +776,34 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
         to set a dynamic scope using test context or configuration.
         """
         from _pytest.fixtures import scope2index
-        from _pytest.mark import extract_argvalue
+        from _pytest.mark import MARK_GEN, ParameterSet
         from py.io import saferepr

-        unwrapped_argvalues = []
-        newkeywords = []
-        for maybe_marked_args in argvalues:
-            argval, newmarks = extract_argvalue(maybe_marked_args)
-            unwrapped_argvalues.append(argval)
-            newkeywords.append(newmarks)
-        argvalues = unwrapped_argvalues
-
         if not isinstance(argnames, (tuple, list)):
             argnames = [x.strip() for x in argnames.split(",") if x.strip()]
-        if len(argnames) == 1:
-            argvalues = [(val,) for val in argvalues]
-        if not argvalues:
-            argvalues = [(NOTSET,) * len(argnames)]
-            # we passed a empty list to parameterize, skip that test
-            #
+            force_tuple = len(argnames) == 1
+        else:
+            force_tuple = False
+        parameters = [
+            ParameterSet.extract_from(x, legacy_force_tuple=force_tuple)
+            for x in argvalues]
+        del argvalues
+
+        if not parameters:
             fs, lineno = getfslineno(self.function)
-            newmark = pytest.mark.skip(
-                reason="got empty parameter set %r, function %s at %s:%d" % (
-                    argnames, self.function.__name__, fs, lineno))
-            newkeywords = [{newmark.markname: newmark}]
+            reason = "got empty parameter set %r, function %s at %s:%d" % (
+                argnames, self.function.__name__, fs, lineno)
+            mark = MARK_GEN.skip(reason=reason)
+            parameters.append(ParameterSet(
+                values=(NOTSET,) * len(argnames),
+                marks=[mark],
+                id=None,
+            ))

         if scope is None:
             scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)

-        scopenum = scope2index(
-            scope, descr='call to {0}'.format(self.parametrize))
+        scopenum = scope2index(scope, descr='call to {0}'.format(self.parametrize))
         valtypes = {}
         for arg in argnames:
             if arg not in self.fixturenames:
@@ -841,26 +831,26 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
             idfn = ids
             ids = None
         if ids:
-            if len(ids) != len(argvalues):
-                raise ValueError('%d tests specified with %d ids' %(
-                    len(argvalues), len(ids)))
+            if len(ids) != len(parameters):
+                raise ValueError('%d tests specified with %d ids' % (
+                    len(parameters), len(ids)))
             for id_value in ids:
                 if id_value is not None and not isinstance(id_value, py.builtin._basestring):
                     msg = 'ids must be list of strings, found: %s (type: %s)'
                     raise ValueError(msg % (saferepr(id_value), type(id_value).__name__))
-        ids = idmaker(argnames, argvalues, idfn, ids, self.config)
+        ids = idmaker(argnames, parameters, idfn, ids, self.config)
         newcalls = []
         for callspec in self._calls or [CallSpec2(self)]:
-            elements = zip(ids, argvalues, newkeywords, count())
-            for a_id, valset, keywords, param_index in elements:
-                if len(valset) != len(argnames):
+            elements = zip(ids, parameters, count())
+            for a_id, param, param_index in elements:
+                if len(param.values) != len(argnames):
                     raise ValueError(
                         'In "parametrize" the number of values ({0}) must be '
                         'equal to the number of names ({1})'.format(
-                            valset, argnames))
+                            param.values, argnames))
                 newcallspec = callspec.copy(self)
-                newcallspec.setmulti(valtypes, argnames, valset, a_id,
-                                     keywords, scopenum, param_index)
+                newcallspec.setmulti(valtypes, argnames, param.values, a_id,
+                                     param.deprecated_arg_dict, scopenum, param_index)
                 newcalls.append(newcallspec)
         self._calls = newcalls
@@ -884,7 +874,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
         if funcargs is not None:
             for name in funcargs:
                 if name not in self.fixturenames:
-                    pytest.fail("funcarg %r not used in this function." % name)
+                    fail("funcarg %r not used in this function." % name)
         else:
             funcargs = {}
         if id is None:
@@ -929,15 +919,21 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect):

 def _idval(val, argname, idx, idfn, config=None):
     if idfn:
+        s = None
         try:
             s = idfn(val)
-            if s:
-                return _escape_strings(s)
         except Exception:
-            pass
+            # See issue https://github.com/pytest-dev/pytest/issues/2169
+            import warnings
+            msg = "Raised while trying to determine id of parameter %s at position %d." % (argname, idx)
+            msg += '\nUpdate your code as this will raise an error in pytest-4.0.'
+            warnings.warn(msg, DeprecationWarning)
+        if s:
+            return _escape_strings(s)

     if config:
-        hook_id = config.hook.pytest_make_parametrize_id(config=config, val=val)
+        hook_id = config.hook.pytest_make_parametrize_id(
+            config=config, val=val, argname=argname)
         if hook_id:
             return hook_id
@@ -953,17 +949,21 @@ def _idval(val, argname, idx, idfn, config=None):
         return val.__name__
     return str(argname)+str(idx)

-def _idvalset(idx, valset, argnames, idfn, ids, config=None):
+
+def _idvalset(idx, parameterset, argnames, idfn, ids, config=None):
+    if parameterset.id is not None:
+        return parameterset.id
     if ids is None or (idx >= len(ids) or ids[idx] is None):
         this_id = [_idval(val, argname, idx, idfn, config)
-                   for val, argname in zip(valset, argnames)]
+                   for val, argname in zip(parameterset.values, argnames)]
         return "-".join(this_id)
     else:
         return _escape_strings(ids[idx])

-def idmaker(argnames, argvalues, idfn=None, ids=None, config=None):
-    ids = [_idvalset(valindex, valset, argnames, idfn, ids, config)
-           for valindex, valset in enumerate(argvalues)]
+
+def idmaker(argnames, parametersets, idfn=None, ids=None, config=None):
+    ids = [_idvalset(valindex, parameterset, argnames, idfn, ids, config)
+           for valindex, parameterset in enumerate(parametersets)]
     if len(set(ids)) != len(ids):
         # The ids are not unique
         duplicates = [testid for testid in ids if ids.count(testid) > 1]
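With the ``_idval`` change above, an ``ids`` callable that raises no longer
aborts collection: the exception is swallowed, a ``DeprecationWarning``
announces the future pytest-4.0 error, and the default id is used instead.
Sketch (the ``.name`` lookup deliberately fails for plain ints)::

    import pytest

    def idfn(val):
        return 'case-{0}'.format(val.name)   # raises AttributeError for ints

    @pytest.mark.parametrize('val', [1, 2], ids=idfn)
    def test_tolerates_raising_idfn(val):
        # ids fall back to 'val0' / 'val1'; a DeprecationWarning is emitted
        assert val in (1, 2)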
@@ -1039,6 +1039,7 @@ def showfixtures(config):
     from _pytest.main import wrap_session
     return wrap_session(config, _showfixtures_main)

+
 def _showfixtures_main(config, session):
     import _pytest.config
     session.perform_collect()
@@ -1129,7 +1130,7 @@ def raises(expected_exception, *args, **kwargs):
         >>> with raises(ValueError) as exc_info:
         ...     if value > 10:
         ...         raise ValueError("value must be <= 10")
-        ...     assert str(exc_info.value) == "value must be <= 10"  # this will not execute
+        ...     assert exc_info.type == ValueError  # this will not execute

     Instead, the following approach must be taken (note the difference in
     scope)::
@@ -1138,7 +1139,16 @@ def raises(expected_exception, *args, **kwargs):
         ...     if value > 10:
         ...         raise ValueError("value must be <= 10")
         ...
-        >>> assert str(exc_info.value) == "value must be <= 10"
+        >>> assert exc_info.type == ValueError

+    Or you can use the keyword argument ``match`` to assert that the
+    exception matches a text or regex::
+
+        >>> with raises(ValueError, match='must be 0 or None'):
+        ...     raise ValueError("value must be 0 or None")
+
+        >>> with raises(ValueError, match=r'must be \d+$'):
+        ...     raise ValueError("value must be 42")
+
     Or you can specify a callable by passing a to-be-called lambda::
@@ -1179,12 +1189,6 @@ def raises(expected_exception, *args, **kwargs):

     """
     __tracebackhide__ = True
-    if expected_exception is AssertionError:
-        # we want to catch a AssertionError
-        # replace our subclass with the builtin one
-        # see https://github.com/pytest-dev/pytest/issues/176
-        from _pytest.assertion.util import BuiltinAssertionError \
-            as expected_exception
     msg = ("exceptions must be old-style classes or"
            " derived from BaseException, not %s")
     if isinstance(expected_exception, tuple):
@@ -1195,11 +1199,15 @@ def raises(expected_exception, *args, **kwargs):
         raise TypeError(msg % type(expected_exception))

     message = "DID NOT RAISE {0}".format(expected_exception)
+    match_expr = None

     if not args:
         if "message" in kwargs:
             message = kwargs.pop("message")
-        return RaisesContext(expected_exception, message)
+        if "match" in kwargs:
+            match_expr = kwargs.pop("match")
+            message += " matching '{0}'".format(match_expr)
+        return RaisesContext(expected_exception, message, match_expr)
     elif isinstance(args[0], str):
         code, = args
         assert isinstance(code, str)
@@ -1220,12 +1228,17 @@ def raises(expected_exception, *args, **kwargs):
             func(*args[1:], **kwargs)
         except expected_exception:
             return _pytest._code.ExceptionInfo()
-    pytest.fail(message)
+    fail(message)

+
+raises.Exception = fail.Exception
+

 class RaisesContext(object):
-    def __init__(self, expected_exception, message):
+    def __init__(self, expected_exception, message, match_expr):
         self.expected_exception = expected_exception
         self.message = message
+        self.match_expr = match_expr
         self.excinfo = None

     def __enter__(self):
@@ -1235,7 +1248,7 @@ class RaisesContext(object):
     def __exit__(self, *tp):
         __tracebackhide__ = True
         if tp[0] is None:
-            pytest.fail(self.message)
+            fail(self.message)
         if sys.version_info < (2, 7):
             # py26: on __exit__() exc_value often does not contain the
             # exception value.
@@ -1247,6 +1260,8 @@ class RaisesContext(object):
         suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
         if sys.version_info[0] == 2 and suppress_exception:
             sys.exc_clear()
+        if self.match_expr:
+            self.excinfo.match(self.match_expr)
         return suppress_exception
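The ``match_expr`` plumbing above ends in ``ExceptionInfo.match()`` on context
exit, so the new keyword accepts anything ``re.search`` does. A minimal usage
sketch of the feature added in this commit::

    import pytest

    with pytest.raises(ValueError, match=r'must be \d+'):
        raise ValueError('value must be 42')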
@@ -1504,7 +1519,7 @@ class ApproxNonIterable(object):
 # the basic pytest Function item
 #

-class Function(FunctionMixin, pytest.Item, fixtures.FuncargnamesCompatAttr):
+class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr):
     """ a Function Item is responsible for setting up and executing a
     Python test function.
     """
_pytest/recwarn.py
@@ -1,4 +1,5 @@
 """ recording warnings during test function execution. """
+from __future__ import absolute_import, division, print_function

 import inspect
@@ -6,11 +7,11 @@ import _pytest._code
 import py
 import sys
 import warnings
-import pytest
+from _pytest.fixtures import yield_fixture


-@pytest.yield_fixture
-def recwarn(request):
+@yield_fixture
+def recwarn():
     """Return a WarningsRecorder instance that provides these methods:

     * ``pop(category=None)``: return last warning matching the category.
@@ -25,11 +26,6 @@ def recwarn(request):
     yield wrec


-def pytest_namespace():
-    return {'deprecated_call': deprecated_call,
-            'warns': warns}
-
-
 def deprecated_call(func=None, *args, **kwargs):
     """ assert that calling ``func(*args, **kwargs)`` triggers a
     ``DeprecationWarning`` or ``PendingDeprecationWarning``.
@@ -55,14 +51,12 @@ def deprecated_call(func=None, *args, **kwargs):

     def warn_explicit(message, category, *args, **kwargs):
         categories.append(category)
         old_warn_explicit(message, category, *args, **kwargs)

     def warn(message, category=None, *args, **kwargs):
-        if isinstance(message, Warning):
-            categories.append(message.__class__)
-        else:
-            categories.append(category)
+        categories.append(category)
         old_warn(message, category, *args, **kwargs)

     old_warn = warnings.warn
     old_warn_explicit = warnings.warn_explicit
@@ -115,24 +109,14 @@ def warns(expected_warning, *args, **kwargs):
         return func(*args[1:], **kwargs)


-class RecordedWarning(object):
-    def __init__(self, message, category, filename, lineno, file, line):
-        self.message = message
-        self.category = category
-        self.filename = filename
-        self.lineno = lineno
-        self.file = file
-        self.line = line
-
-
-class WarningsRecorder(object):
+class WarningsRecorder(warnings.catch_warnings):
     """A context manager to record raised warnings.

     Adapted from `warnings.catch_warnings`.
     """

-    def __init__(self, module=None):
-        self._module = sys.modules['warnings'] if module is None else module
+    def __init__(self):
+        super(WarningsRecorder, self).__init__(record=True)
         self._entered = False
         self._list = []
@@ -169,38 +153,20 @@ class WarningsRecorder(object):
         if self._entered:
             __tracebackhide__ = True
             raise RuntimeError("Cannot enter %r twice" % self)
-        self._entered = True
-        self._filters = self._module.filters
-        self._module.filters = self._filters[:]
-        self._showwarning = self._module.showwarning
-
-        def showwarning(message, category, filename, lineno,
-                        file=None, line=None):
-            self._list.append(RecordedWarning(
-                message, category, filename, lineno, file, line))
-
-            # still perform old showwarning functionality
-            self._showwarning(
-                message, category, filename, lineno, file=file, line=line)
-
-        self._module.showwarning = showwarning
-
-        # allow the same warning to be raised more than once
-
-        self._module.simplefilter('always')
+        self._list = super(WarningsRecorder, self).__enter__()
+        warnings.simplefilter('always')
         return self

     def __exit__(self, *exc_info):
         if not self._entered:
             __tracebackhide__ = True
             raise RuntimeError("Cannot exit %r without entering first" % self)
-        self._module.filters = self._filters
-        self._module.showwarning = self._showwarning
+        super(WarningsRecorder, self).__exit__(*exc_info)


 class WarningsChecker(WarningsRecorder):
-    def __init__(self, expected_warning=None, module=None):
-        super(WarningsChecker, self).__init__(module=module)
+    def __init__(self, expected_warning=None):
+        super(WarningsChecker, self).__init__()

         msg = ("exceptions must be old-style classes or "
                "derived from Warning, not %s")
@@ -221,9 +187,11 @@ class WarningsChecker(WarningsRecorder):
         # only check if we're not currently handling an exception
         if all(a is None for a in exc_info):
             if self.expected_warning is not None:
-                if not any(r.category in self.expected_warning for r in self):
+                if not any(issubclass(r.category, self.expected_warning)
+                           for r in self):
                     __tracebackhide__ = True
-                    pytest.fail("DID NOT WARN. No warnings of type {0} was emitted. "
-                                "The list of emitted warnings is: {1}.".format(
-                                    self.expected_warning,
-                                    [each.message for each in self]))
+                    from _pytest.runner import fail
+                    fail("DID NOT WARN. No warnings of type {0} was emitted. "
+                         "The list of emitted warnings is: {1}.".format(
+                             self.expected_warning,
+                             [each.message for each in self]))
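``WarningsChecker`` now compares categories with ``issubclass`` rather than
membership, matching the 3.1 changelog entry for ``pytest.warns``, so a
subclass of the expected category passes. Sketch::

    import warnings
    import pytest

    class MyWarning(UserWarning):
        pass

    def test_subclass_matches():
        with pytest.warns(UserWarning):        # MyWarning is a UserWarning
            warnings.warn('something', MyWarning)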
_pytest/resultlog.py
@@ -1,6 +1,7 @@
 """ log machine-parseable test session result information in a plain
 text file.
 """
+from __future__ import absolute_import, division, print_function

 import py
 import os
@@ -61,9 +62,9 @@ class ResultLog(object):
         self.logfile = logfile # preferably line buffered

     def write_log_entry(self, testpath, lettercode, longrepr):
-        py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
+        print("%s %s" % (lettercode, testpath), file=self.logfile)
         for line in longrepr.splitlines():
-            py.builtin.print_(" %s" % line, file=self.logfile)
+            print(" %s" % line, file=self.logfile)

     def log_outcome(self, report, lettercode, longrepr):
         testpath = getattr(report, 'nodeid', None)
_pytest/runner.py
@@ -1,20 +1,14 @@
 """ basic collect and runtest protocol implementations """
+from __future__ import absolute_import, division, print_function

 import bdb
 import sys
 from time import time

 import py
-import pytest
 from _pytest._code.code import TerminalRepr, ExceptionInfo


-def pytest_namespace():
-    return {
-        'fail'          : fail,
-        'skip'          : skip,
-        'importorskip'  : importorskip,
-        'exit'          : exit,
-    }
-
 #
 # pytest plugin hooks
@@ -262,7 +256,7 @@ def pytest_runtest_makereport(item, call):
         if not isinstance(excinfo, ExceptionInfo):
             outcome = "failed"
             longrepr = excinfo
-        elif excinfo.errisinstance(pytest.skip.Exception):
+        elif excinfo.errisinstance(skip.Exception):
             outcome = "skipped"
             r = excinfo._getreprcrash()
             longrepr = (str(r.path), r.lineno, r.message)
@@ -330,7 +324,9 @@ class TeardownErrorReport(BaseReport):
         self.__dict__.update(extra)

 def pytest_make_collect_report(collector):
-    call = CallInfo(collector._memocollect, "memocollect")
+    call = CallInfo(
+        lambda: list(collector.collect()),
+        'collect')
     longrepr = None
     if not call.excinfo:
         outcome = "passed"
@@ -550,14 +546,21 @@ def importorskip(modname, minversion=None):
     __version__ attribute. If no minversion is specified the a skip
     is only triggered if the module can not be imported.
     """
+    import warnings
     __tracebackhide__ = True
     compile(modname, '', 'eval') # to catch syntaxerrors
     should_skip = False
-    try:
-        __import__(modname)
-    except ImportError:
-        # Do not raise chained exception here(#1485)
-        should_skip = True
+
+    with warnings.catch_warnings():
+        # make sure to ignore ImportWarnings that might happen because
+        # of existing directories with the same name we're trying to
+        # import but without a __init__.py file
+        warnings.simplefilter('ignore')
+        try:
+            __import__(modname)
+        except ImportError:
+            # Do not raise chained exception here(#1485)
+            should_skip = True
     if should_skip:
         raise Skipped("could not import %r" %(modname,), allow_module_level=True)
     mod = sys.modules[modname]
@@ -575,4 +578,3 @@ def importorskip(modname, minversion=None):
         raise Skipped("module %r has __version__ %r, required is: %r" %(
                       modname, verattr, minversion), allow_module_level=True)
     return mod
-
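``importorskip`` now performs the probe import under
``warnings.catch_warnings()`` with ``simplefilter('ignore')`` so stray
``ImportWarning``s from same-named directories without an ``__init__.py`` do
not leak into the session. Usage is unchanged::

    import pytest

    # skips the calling module if numpy is missing or older than 1.11
    np = pytest.importorskip('numpy', minversion='1.11')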
@@ -1,3 +1,5 @@
+from __future__ import absolute_import, division, print_function
+
 import pytest
 import sys

@@ -1,3 +1,5 @@
+from __future__ import absolute_import, division, print_function
+
 import pytest

_pytest/skipping.py
@@ -1,12 +1,14 @@
 """ support for skip/xfail functions and markers. """
+from __future__ import absolute_import, division, print_function

 import os
 import sys
 import traceback

 import py
-import pytest
+from _pytest.config import hookimpl
 from _pytest.mark import MarkInfo, MarkDecorator
+from _pytest.runner import fail, skip

 def pytest_addoption(parser):
     group = parser.getgroup("general")
@@ -23,6 +25,8 @@ def pytest_addoption(parser):

 def pytest_configure(config):
     if config.option.runxfail:
+        # yay a hack
+        import pytest
         old = pytest.xfail
         config._cleanup.append(lambda: setattr(pytest, "xfail", old))
@@ -55,11 +59,7 @@ def pytest_configure(config):
     )


-def pytest_namespace():
-    return dict(xfail=xfail)
-
-
-class XFailed(pytest.fail.Exception):
+class XFailed(fail.Exception):
     """ raised from an explicit call to pytest.xfail() """
@@ -100,15 +100,15 @@ class MarkEvaluator:
         except Exception:
             self.exc = sys.exc_info()
             if isinstance(self.exc[1], SyntaxError):
-                msg = [" " * (self.exc[1].offset + 4) + "^",]
+                msg = [" " * (self.exc[1].offset + 4) + "^", ]
                 msg.append("SyntaxError: invalid syntax")
             else:
                 msg = traceback.format_exception_only(*self.exc[:2])
-            pytest.fail("Error evaluating %r expression\n"
-                        "    %s\n"
-                        "%s"
-                        %(self.name, self.expr, "\n".join(msg)),
-                        pytrace=False)
+            fail("Error evaluating %r expression\n"
+                 "    %s\n"
+                 "%s"
+                 % (self.name, self.expr, "\n".join(msg)),
+                 pytrace=False)

     def _getglobals(self):
         d = {'os': os, 'sys': sys, 'config': self.item.config}
@@ -125,11 +125,9 @@ class MarkEvaluator:
         # "holder" might be a MarkInfo or a MarkDecorator; only
         # MarkInfo keeps track of all parameters it received in an
         # _arglist attribute
-        if hasattr(self.holder, '_arglist'):
-            arglist = self.holder._arglist
-        else:
-            arglist = [(self.holder.args, self.holder.kwargs)]
-        for args, kwargs in arglist:
+        marks = getattr(self.holder, '_marks', None) \
+            or [self.holder.mark]
+        for _, args, kwargs in marks:
             if 'condition' in kwargs:
                 args = (kwargs['condition'],)
             for expr in args:
@@ -142,7 +140,7 @@ class MarkEvaluator:
                         # XXX better be checked at collection time
                         msg = "you need to specify reason=STRING " \
                               "when using booleans as conditions."
-                        pytest.fail(msg)
+                        fail(msg)
                     result = bool(expr)
                 if result:
                     self.result = True
@@ -166,7 +164,7 @@ class MarkEvaluator:
         return expl


-@pytest.hookimpl(tryfirst=True)
+@hookimpl(tryfirst=True)
 def pytest_runtest_setup(item):
     # Check if skip or skipif are specified as pytest marks
@@ -175,23 +173,23 @@ def pytest_runtest_setup(item):
     eval_skipif = MarkEvaluator(item, 'skipif')
     if eval_skipif.istrue():
         item._evalskip = eval_skipif
-        pytest.skip(eval_skipif.getexplanation())
+        skip(eval_skipif.getexplanation())

     skip_info = item.keywords.get('skip')
     if isinstance(skip_info, (MarkInfo, MarkDecorator)):
         item._evalskip = True
         if 'reason' in skip_info.kwargs:
-            pytest.skip(skip_info.kwargs['reason'])
+            skip(skip_info.kwargs['reason'])
         elif skip_info.args:
-            pytest.skip(skip_info.args[0])
+            skip(skip_info.args[0])
         else:
-            pytest.skip("unconditional skip")
+            skip("unconditional skip")

     item._evalxfail = MarkEvaluator(item, 'xfail')
     check_xfail_no_run(item)


-@pytest.mark.hookwrapper
+@hookimpl(hookwrapper=True)
 def pytest_pyfunc_call(pyfuncitem):
     check_xfail_no_run(pyfuncitem)
     outcome = yield
@@ -206,7 +204,7 @@ def check_xfail_no_run(item):
     evalxfail = item._evalxfail
     if evalxfail.istrue():
         if not evalxfail.get('run', True):
-            pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
+            xfail("[NOTRUN] " + evalxfail.getexplanation())


 def check_strict_xfail(pyfuncitem):
@@ -218,10 +216,10 @@ def check_strict_xfail(pyfuncitem):
     if is_strict_xfail:
         del pyfuncitem._evalxfail
         explanation = evalxfail.getexplanation()
-        pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+        fail('[XPASS(strict)] ' + explanation, pytrace=False)


-@pytest.hookimpl(hookwrapper=True)
+@hookimpl(hookwrapper=True)
 def pytest_runtest_makereport(item, call):
     outcome = yield
     rep = outcome.get_result()
@@ -241,7 +239,7 @@ def pytest_runtest_makereport(item, call):
         rep.wasxfail = rep.longrepr
     elif item.config.option.runxfail:
         pass   # don't interefere
-    elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
+    elif call.excinfo and call.excinfo.errisinstance(xfail.Exception):
         rep.wasxfail = "reason: " + call.excinfo.value.msg
         rep.outcome = "skipped"
     elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
@@ -309,12 +307,14 @@ def pytest_terminal_summary(terminalreporter):
         for line in lines:
             tr._tw.line(line)


 def show_simple(terminalreporter, lines, stat, format):
     failed = terminalreporter.stats.get(stat)
     if failed:
         for rep in failed:
             pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
-            lines.append(format %(pos,))
+            lines.append(format % (pos,))


 def show_xfailed(terminalreporter, lines):
     xfailed = terminalreporter.stats.get("xfailed")
@@ -326,13 +326,15 @@ def show_xfailed(terminalreporter, lines):
             if reason:
                 lines.append("  " + str(reason))


 def show_xpassed(terminalreporter, lines):
     xpassed = terminalreporter.stats.get("xpassed")
     if xpassed:
         for rep in xpassed:
             pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
             reason = rep.wasxfail
-            lines.append("XPASS %s %s" %(pos, reason))
+            lines.append("XPASS %s %s" % (pos, reason))


 def cached_eval(config, expr, d):
     if not hasattr(config, '_evalcache'):
@@ -357,6 +359,7 @@ def folded_skips(skipped):
         l.append((len(events),) + key)
     return l


 def show_skipped(terminalreporter, lines):
     tr = terminalreporter
     skipped = tr.stats.get('skipped', [])
@@ -372,5 +375,6 @@ def show_skipped(terminalreporter, lines):
             for num, fspath, lineno, reason in fskips:
                 if reason.startswith("Skipped: "):
                     reason = reason[9:]
-                lines.append("SKIP [%d] %s:%d: %s" %
+                lines.append(
+                    "SKIP [%d] %s:%d: %s" %
                     (num, fspath, lineno, reason))
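``check_xfail_no_run`` above now raises through the directly imported ``xfail``
function instead of ``pytest.xfail``; the user-facing behaviour is unchanged.
For reference, the documented marker form it serves::

    import pytest

    @pytest.mark.xfail(run=False, reason='would hang the interpreter')
    def test_not_executed():
        raise SystemExit   # never reached; reported as xfail [NOTRUN]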
_pytest/terminal.py
@@ -2,6 +2,9 @@

 This is a good source for looking at the various reporting hooks.
 """
+from __future__ import absolute_import, division, print_function
+
+import itertools
 from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
     EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
 import pytest
@@ -24,11 +27,11 @@ def pytest_addoption(parser):
              help="show extra test summary info as specified by chars (f)ailed, "
                   "(E)error, (s)skipped, (x)failed, (X)passed, "
                   "(p)passed, (P)passed with output, (a)all except pP. "
-                  "The pytest warnings are displayed at all times except when "
-                  "--disable-pytest-warnings is set")
-    group._addoption('--disable-pytest-warnings', default=False,
-                     dest='disablepytestwarnings', action='store_true',
-                     help='disable warnings summary, overrides -r w flag')
+                  "Warnings are displayed at all times except when "
+                  "--disable-warnings is set")
+    group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False,
+                     dest='disable_warnings', action='store_true',
+                     help='disable warnings summary')
     group._addoption('-l', '--showlocals',
                      action="store_true", dest="showlocals", default=False,
                      help="show locals in tracebacks (disabled by default).")
@@ -57,9 +60,9 @@ def pytest_configure(config):
 def getreportopt(config):
     reportopts = ""
     reportchars = config.option.reportchars
-    if not config.option.disablepytestwarnings and 'w' not in reportchars:
+    if not config.option.disable_warnings and 'w' not in reportchars:
         reportchars += 'w'
-    elif config.option.disablepytestwarnings and 'w' in reportchars:
+    elif config.option.disable_warnings and 'w' in reportchars:
         reportchars = reportchars.replace('w', '')
     if reportchars:
         for char in reportchars:
@@ -80,13 +83,40 @@ def pytest_report_teststatus(report):
         letter = "f"
     return report.outcome, letter, report.outcome.upper()


 class WarningReport:
+    """
+    Simple structure to hold warnings information captured by ``pytest_logwarning``.
+    """
     def __init__(self, code, message, nodeid=None, fslocation=None):
+        """
+        :param code: unused
+        :param str message: user friendly message about the warning
+        :param str|None nodeid: node id that generated the warning (see ``get_location``).
+        :param tuple|py.path.local fslocation:
+            file system location of the source of the warning (see ``get_location``).
+        """
         self.code = code
         self.message = message
         self.nodeid = nodeid
         self.fslocation = fslocation

+    def get_location(self, config):
+        """
+        Returns the more user-friendly information about the location
+        of a warning, or None.
+        """
+        if self.nodeid:
+            return self.nodeid
+        if self.fslocation:
+            if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2:
+                filename, linenum = self.fslocation[:2]
+                relpath = py.path.local(filename).relto(config.invocation_dir)
+                return '%s:%s' % (relpath, linenum)
+            else:
+                return str(self.fslocation)
+        return None


 class TerminalReporter:
     def __init__(self, config, file=None):
@@ -166,8 +196,6 @@ class TerminalReporter:

     def pytest_logwarning(self, code, fslocation, message, nodeid):
         warnings = self.stats.setdefault("warnings", [])
-        if isinstance(fslocation, tuple):
-            fslocation = "%s:%d" % fslocation
         warning = WarningReport(code=code, fslocation=fslocation,
                                 message=message, nodeid=nodeid)
         warnings.append(warning)
@@ -438,13 +466,21 @@ class TerminalReporter:

     def summary_warnings(self):
         if self.hasopt("w"):
-            warnings = self.stats.get("warnings")
-            if not warnings:
+            all_warnings = self.stats.get("warnings")
+            if not all_warnings:
                 return
-            self.write_sep("=", "pytest-warning summary")
-            for w in warnings:
-                self._tw.line("W%s %s %s" % (w.code,
-                              w.fslocation, w.message))
+
+            grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config))
+
+            self.write_sep("=", "warnings summary", yellow=True, bold=False)
+            for location, warnings in grouped:
+                self._tw.line(str(location) or '<undetermined location>')
+                for w in warnings:
+                    lines = w.message.splitlines()
+                    indented = '\n'.join('  ' + x for x in lines)
+                    self._tw.line(indented)
+                self._tw.line()
+            self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html')

     def summary_passes(self):
         if self.config.option.tbstyle != "no":
@@ -546,8 +582,7 @@ def flatten(l):

 def build_summary_stats_line(stats):
     keys = ("failed passed skipped deselected "
-           "xfailed xpassed warnings error").split()
-    key_translation = {'warnings': 'pytest-warnings'}
+            "xfailed xpassed warnings error").split()
     unknown_key_seen = False
     for key in stats.keys():
         if key not in keys:
@@ -558,8 +593,7 @@ def build_summary_stats_line(stats):
     for key in keys:
         val = stats.get(key, None)
         if val:
-            key_name = key_translation.get(key, key)
-            parts.append("%d %s" % (len(val), key_name))
+            parts.append("%d %s" % (len(val), key))

     if parts:
         line = ", ".join(parts)
@ -1,4 +1,6 @@
|
|||
""" support for providing temporary directories to test functions. """
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import re
|
||||
|
||||
import pytest
|
||||
|
|
|
@ -1,14 +1,15 @@
|
|||
""" discovery and running of std-library "unittest" style tests. """
|
||||
from __future__ import absolute_import
|
||||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import sys
|
||||
import traceback
|
||||
|
||||
import pytest
|
||||
# for transferring markers
|
||||
import _pytest._code
|
||||
from _pytest.python import transfer_markers
|
||||
from _pytest.skipping import MarkEvaluator
|
||||
from _pytest.config import hookimpl
|
||||
from _pytest.runner import fail, skip
|
||||
from _pytest.python import transfer_markers, Class, Module, Function
|
||||
from _pytest.skipping import MarkEvaluator, xfail
|
||||
|
||||
|
||||
def pytest_pycollect_makeitem(collector, name, obj):
|
||||
|
@ -22,11 +23,11 @@ def pytest_pycollect_makeitem(collector, name, obj):
|
|||
return UnitTestCase(name, parent=collector)
|
||||
|
||||
|
||||
class UnitTestCase(pytest.Class):
|
||||
class UnitTestCase(Class):
|
||||
# marker for fixturemanager.getfixtureinfo()
|
||||
# to declare that our children do not support funcargs
|
||||
nofuncargs = True
|
||||
|
||||
|
||||
def setup(self):
|
||||
cls = self.obj
|
||||
if getattr(cls, '__unittest_skip__', False):
|
||||
|
@ -46,7 +47,7 @@ class UnitTestCase(pytest.Class):
|
|||
return
|
||||
self.session._fixturemanager.parsefactories(self, unittest=True)
|
||||
loader = TestLoader()
|
||||
module = self.getparent(pytest.Module).obj
|
||||
module = self.getparent(Module).obj
|
||||
foundsomething = False
|
||||
for name in loader.getTestCaseNames(self.obj):
|
||||
x = getattr(self.obj, name)
|
||||
|
@ -65,7 +66,7 @@ class UnitTestCase(pytest.Class):
|
|||
yield TestCaseFunction('runTest', parent=self)
|
||||
|
||||
|
||||
class TestCaseFunction(pytest.Function):
|
||||
class TestCaseFunction(Function):
|
||||
_excinfo = None
|
||||
|
||||
def setup(self):
|
||||
|
@ -110,36 +111,37 @@ class TestCaseFunction(pytest.Function):
|
|||
try:
|
||||
l = traceback.format_exception(*rawexcinfo)
|
||||
l.insert(0, "NOTE: Incompatible Exception Representation, "
|
||||
"displaying natively:\n\n")
|
||||
pytest.fail("".join(l), pytrace=False)
|
||||
except (pytest.fail.Exception, KeyboardInterrupt):
|
||||
"displaying natively:\n\n")
|
||||
fail("".join(l), pytrace=False)
|
||||
except (fail.Exception, KeyboardInterrupt):
|
||||
raise
|
||||
except:
|
||||
pytest.fail("ERROR: Unknown Incompatible Exception "
|
||||
"representation:\n%r" %(rawexcinfo,), pytrace=False)
|
||||
fail("ERROR: Unknown Incompatible Exception "
|
||||
"representation:\n%r" % (rawexcinfo,), pytrace=False)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except pytest.fail.Exception:
|
||||
except fail.Exception:
|
||||
excinfo = _pytest._code.ExceptionInfo()
|
||||
self.__dict__.setdefault('_excinfo', []).append(excinfo)
|
||||
|
||||
def addError(self, testcase, rawexcinfo):
|
||||
self._addexcinfo(rawexcinfo)
|
||||
|
||||
def addFailure(self, testcase, rawexcinfo):
|
||||
self._addexcinfo(rawexcinfo)
|
||||
|
||||
def addSkip(self, testcase, reason):
|
||||
try:
|
||||
pytest.skip(reason)
|
||||
except pytest.skip.Exception:
|
||||
skip(reason)
|
||||
except skip.Exception:
|
||||
self._evalskip = MarkEvaluator(self, 'SkipTest')
|
||||
self._evalskip.result = True
|
||||
self._addexcinfo(sys.exc_info())
|
||||
|
||||
def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
|
||||
try:
|
||||
pytest.xfail(str(reason))
|
||||
except pytest.xfail.Exception:
|
||||
xfail(str(reason))
|
||||
except xfail.Exception:
|
||||
self._addexcinfo(sys.exc_info())
|
||||
|
||||
def addUnexpectedSuccess(self, testcase, reason=""):
|
||||
|
@ -179,13 +181,14 @@ class TestCaseFunction(pytest.Function):
|
|||
self._testcase.debug()
|
||||
|
||||
def _prunetraceback(self, excinfo):
|
||||
pytest.Function._prunetraceback(self, excinfo)
|
||||
Function._prunetraceback(self, excinfo)
|
||||
traceback = excinfo.traceback.filter(
|
||||
lambda x:not x.frame.f_globals.get('__unittest'))
|
||||
lambda x: not x.frame.f_globals.get('__unittest'))
|
||||
if traceback:
|
||||
excinfo.traceback = traceback
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
|
||||
|
||||
@hookimpl(tryfirst=True)
|
||||
def pytest_runtest_makereport(item, call):
|
||||
if isinstance(item, TestCaseFunction):
|
||||
if item._excinfo:
|
||||
|
@ -197,7 +200,8 @@ def pytest_runtest_makereport(item, call):
|
|||
|
||||
# twisted trial support
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
|
||||
@hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_protocol(item):
|
||||
if isinstance(item, TestCaseFunction) and \
|
||||
'twisted.trial.unittest' in sys.modules:
|
||||
|
|
|
@ -0,0 +1,73 @@
|
|||
from __future__ import absolute_import, division, print_function
|
||||
|
||||
import warnings
|
||||
from contextlib import contextmanager
|
||||
|
||||
import pytest
|
||||
|
||||
|
||||
def _setoption(wmod, arg):
|
||||
"""
|
||||
Copy of the warnings._setoption function, but does not escape arguments.
|
||||
"""
|
||||
parts = arg.split(':')
|
||||
if len(parts) > 5:
|
||||
raise wmod._OptionError("too many fields (max 5): %r" % (arg,))
|
||||
while len(parts) < 5:
|
||||
parts.append('')
|
||||
action, message, category, module, lineno = [s.strip()
|
||||
for s in parts]
|
||||
action = wmod._getaction(action)
|
||||
category = wmod._getcategory(category)
|
||||
if lineno:
|
||||
try:
|
||||
lineno = int(lineno)
|
||||
if lineno < 0:
|
||||
raise ValueError
|
||||
except (ValueError, OverflowError):
|
||||
raise wmod._OptionError("invalid lineno %r" % (lineno,))
|
||||
else:
|
||||
lineno = 0
|
||||
wmod.filterwarnings(action, message, category, module, lineno)
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("pytest-warnings")
|
||||
group.addoption(
|
||||
'-W', '--pythonwarnings', action='append',
|
||||
help="set which warnings to report, see -W option of python itself.")
|
||||
parser.addini("filterwarnings", type="linelist",
|
||||
help="Each line specifies warning filter pattern which would be passed"
|
||||
"to warnings.filterwarnings. Process after -W and --pythonwarnings.")
|
||||
|
||||
|
||||
@contextmanager
|
||||
def catch_warnings_for_item(item):
|
||||
"""
|
||||
Catches the warnings generated during setup/call/teardown execution
|
||||
of the given item and, after it is done, posts them as warnings to this
|
||||
item.
|
||||
"""
|
||||
args = item.config.getoption('pythonwarnings') or []
|
||||
inifilters = item.config.getini("filterwarnings")
|
||||
with warnings.catch_warnings(record=True) as log:
|
||||
warnings.simplefilter('once')
|
||||
for arg in args:
|
||||
warnings._setoption(arg)
|
||||
|
||||
for arg in inifilters:
|
||||
_setoption(warnings, arg)
|
||||
|
||||
yield
|
||||
|
||||
for warning in log:
|
||||
msg = warnings.formatwarning(
|
||||
warning.message, warning.category,
|
||||
warning.filename, warning.lineno, warning.line)
|
||||
item.warn("unused", msg)
|
||||
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_runtest_protocol(item):
|
||||
with catch_warnings_for_item(item):
|
||||
yield
|
|
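For reference, the ini values handled by ``_setoption`` above follow the stdlib ``-W`` syntax of up to five colon-separated fields, ``action:message:category:module:lineno``, with trailing fields optional. A minimal standalone sketch of the splitting step (the helper name is hypothetical, and it skips the action/category lookups)::

    def split_filter(arg):
        parts = arg.split(':')
        if len(parts) > 5:
            raise ValueError("too many fields (max 5): %r" % (arg,))
        while len(parts) < 5:
            parts.append('')
        return [s.strip() for s in parts]

    print(split_filter("ignore::DeprecationWarning"))
    # ['ignore', '', 'DeprecationWarning', '', '']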
@ -17,7 +17,12 @@ REGENDOC_ARGS := \
|
|||
--normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \
|
||||
--normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \
|
||||
--normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \
|
||||
|
||||
--normalize "@pytest-(\d+)\\.[^ ,]+@pytest-\1.x.y@" \
|
||||
--normalize "@(This is pytest version )(\d+)\\.[^ ,]+@\1\2.x.y@" \
|
||||
--normalize "@py-(\d+)\\.[^ ,]+@py-\1.x.y@" \
|
||||
--normalize "@pluggy-(\d+)\\.[.\d,]+@pluggy-\1.x.y@" \
|
||||
--normalize "@hypothesis-(\d+)\\.[.\d,]+@hypothesis-\1.x.y@" \
|
||||
--normalize "@Python (\d+)\\.[^ ,]+@Python \1.x.y@"
|
||||
|
||||
.PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest
|
||||
|
||||
|
@ -36,7 +41,7 @@ clean:
|
|||
-rm -rf $(BUILDDIR)/*
|
||||
|
||||
regen:
|
||||
PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS}
|
||||
PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPT=-pno:hypothesis COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS}
|
||||
|
||||
html:
|
||||
$(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html
|
||||
|
|
|
@ -6,6 +6,7 @@ Release announcements
|
|||
:maxdepth: 2
|
||||
|
||||
|
||||
release-3.1.0
|
||||
release-3.0.7
|
||||
release-3.0.6
|
||||
release-3.0.5
|
||||
|
|
|
@ -0,0 +1,61 @@
|
|||
pytest-3.1.0
|
||||
=======================================
|
||||
|
||||
The pytest team is proud to announce the 3.1.0 release!
|
||||
|
||||
pytest is a mature Python testing tool with more than 1600 tests
|
||||
against itself, passing on many different interpreters and platforms.
|
||||
|
||||
This release contains many bug fixes and improvements, so users are encouraged
|
||||
to take a look at the CHANGELOG:
|
||||
|
||||
http://doc.pytest.org/en/latest/changelog.html
|
||||
|
||||
For complete documentation, please visit:
|
||||
|
||||
http://docs.pytest.org
|
||||
|
||||
As usual, you can upgrade from PyPI via:
|
||||
|
||||
pip install -U pytest
|
||||
|
||||
Thanks to all who contributed to this release, among them:
|
||||
|
||||
* Anthony Sottile
|
||||
* Ben Lloyd
|
||||
* Bruno Oliveira
|
||||
* David Giese
|
||||
* David Szotten
|
||||
* Dmitri Pribysh
|
||||
* Florian Bruhin
|
||||
* Florian Schulze
|
||||
* Floris Bruynooghe
|
||||
* John Towler
|
||||
* Jonas Obrist
|
||||
* Katerina Koukiou
|
||||
* Kodi Arfer
|
||||
* Krzysztof Szularz
|
||||
* Lev Maximov
|
||||
* Loïc Estève
|
||||
* Luke Murphy
|
||||
* Manuel Krebber
|
||||
* Matthew Duck
|
||||
* Matthias Bussonnier
|
||||
* Michael Howitz
|
||||
* Michal Wajszczuk
|
||||
* Paweł Adamczak
|
||||
* Rafael Bertoldi
|
||||
* Ravi Chandra
|
||||
* Ronny Pfannschmidt
|
||||
* Skylar Downes
|
||||
* Thomas Kriechbaumer
|
||||
* Vitaly Lashmanov
|
||||
* Vlad Dragos
|
||||
* Wheerd
|
||||
* Xander Johnson
|
||||
* mandeep
|
||||
* reut
|
||||
|
||||
|
||||
Happy testing,
|
||||
The Pytest Development Team
|
|
@ -26,7 +26,7 @@ you will see the return value of the function call::
|
|||
|
||||
$ pytest test_assert1.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 items
|
||||
|
||||
|
@ -170,7 +170,7 @@ if you run this module::
|
|||
|
||||
$ pytest test_assert2.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 items
|
||||
|
||||
|
@ -223,7 +223,7 @@ provides an alternative explanation for ``Foo`` objects::
|
|||
now, given this test module::
|
||||
|
||||
# content of test_foocompare.py
|
||||
class Foo:
|
||||
class Foo(object):
|
||||
def __init__(self, val):
|
||||
self.val = val
|
||||
|
||||
|
|
|
@ -80,7 +80,7 @@ If you then run it with ``--lf``::
|
|||
|
||||
$ pytest --lf
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
run-last-failure: rerun last 2 failures
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 50 items
|
||||
|
@ -122,7 +122,7 @@ of ``FF`` and dots)::
|
|||
|
||||
$ pytest --ff
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
run-last-failure: rerun last 2 failures first
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 50 items
|
||||
|
@ -227,14 +227,14 @@ You can always peek at the content of the cache using the
|
|||
|
||||
$ py.test --cache-show
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
cachedir: $REGENDOC_TMPDIR/.cache
|
||||
------------------------------- cache values -------------------------------
|
||||
cache/lastfailed contains:
|
||||
{'test_caching.py::test_function': True}
|
||||
example/value contains:
|
||||
42
|
||||
cache/lastfailed contains:
|
||||
{'test_caching.py::test_function': True}
|
||||
|
||||
======= no tests ran in 0.12 seconds ========
|
||||
|
||||
|
|
|
@ -64,7 +64,7 @@ of the failing function and hide the other one::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 2 items
|
||||
|
||||
|
|
|
@ -19,7 +19,7 @@ Full pytest documentation
|
|||
monkeypatch
|
||||
tmpdir
|
||||
capture
|
||||
recwarn
|
||||
warnings
|
||||
doctest
|
||||
mark
|
||||
skipping
|
||||
|
|
|
@ -160,7 +160,7 @@ Builtin configuration file options
|
|||
[seq] matches any character in seq
|
||||
[!seq] matches any char not in seq
|
||||
|
||||
Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg'``.
|
||||
Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'``.
|
||||
Setting a ``norecursedirs`` replaces the default. Here is an example of
|
||||
how to avoid certain directories:
|
||||
|
||||
|
@ -242,3 +242,23 @@ Builtin configuration file options
|
|||
By default, pytest will stop searching for ``conftest.py`` files upwards
|
||||
from ``pytest.ini``/``tox.ini``/``setup.cfg`` of the project if any,
|
||||
or up to the file-system root.
|
||||
|
||||
|
||||
.. confval:: filterwarnings
|
||||
|
||||
.. versionadded:: 3.1
|
||||
|
||||
Sets a list of filters and actions that should be taken for matched
|
||||
warnings. By default, all warnings emitted during the test session
|
||||
are displayed in a summary at the end of the session.
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
# content of pytest.ini
|
||||
[pytest]
|
||||
filterwarnings =
|
||||
error
|
||||
ignore::DeprecationWarning
|
||||
|
||||
This tells pytest to ignore deprecation warnings and turn all other warnings
|
||||
into errors. For more information please refer to :ref:`warnings`.
|
||||
|
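For instance, under the configuration above a test along these lines (names hypothetical) would pass, because the ``DeprecationWarning`` is ignored while any other warning is raised as an error::

    import warnings
    import pytest

    def test_filters():
        # silently dropped by the ignore::DeprecationWarning line
        warnings.warn("old api", DeprecationWarning)
        # any other warning is turned into an error by the `error` line
        with pytest.raises(UserWarning):
            warnings.warn("boom", UserWarning)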
|
|
@ -11,6 +11,19 @@ can change the pattern by issuing::
|
|||
on the command line. Since version ``2.9``, ``--doctest-glob``
|
||||
can be given multiple times on the command line.
|
||||
|
||||
.. versionadded:: 3.1
|
||||
|
||||
You can specify the encoding that will be used for those doctest files
|
||||
using the ``doctest_encoding`` ini option:
|
||||
|
||||
.. code-block:: ini
|
||||
|
||||
# content of pytest.ini
|
||||
[pytest]
|
||||
doctest_encoding = latin1
|
||||
|
||||
The default encoding is UTF-8.
|
||||
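As a quick illustration (file name hypothetical), a latin1-encoded doctest file can be generated like this; without the ini option above, pytest would likely fail to decode it as UTF-8::

    # writes a doctest file containing a latin1-only byte (0xe9)
    text = u'>>> print("caf\xe9")\ncaf\xe9\n'
    with open("test_doc.txt", "wb") as f:
        f.write(text.encode("latin1"))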
|
||||
You can also trigger running of doctests
|
||||
from docstrings in all python modules (including regular
|
||||
python test modules)::
|
||||
|
@ -49,7 +62,7 @@ then you can just invoke ``pytest`` without command line options::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
collected 1 items
|
||||
|
||||
|
|
|
@ -128,7 +128,7 @@ def test_attribute_multiple():
|
|||
def globf(x):
|
||||
return x+1
|
||||
|
||||
class TestRaises:
|
||||
class TestRaises(object):
|
||||
def test_raises(self):
|
||||
s = 'qwe'
|
||||
raises(TypeError, "int(s)")
|
||||
|
@ -167,7 +167,7 @@ def test_dynamic_compile_shows_nicely():
|
|||
|
||||
|
||||
|
||||
class TestMoreErrors:
|
||||
class TestMoreErrors(object):
|
||||
def test_complex_error(self):
|
||||
def f():
|
||||
return 44
|
||||
|
@ -213,23 +213,23 @@ class TestMoreErrors:
|
|||
x = 0
|
||||
|
||||
|
||||
class TestCustomAssertMsg:
|
||||
class TestCustomAssertMsg(object):
|
||||
|
||||
def test_single_line(self):
|
||||
class A:
|
||||
class A(object):
|
||||
a = 1
|
||||
b = 2
|
||||
assert A.a == b, "A.a appears not to be b"
|
||||
|
||||
def test_multiline(self):
|
||||
class A:
|
||||
class A(object):
|
||||
a = 1
|
||||
b = 2
|
||||
assert A.a == b, "A.a appears not to be b\n" \
|
||||
"or does not appear to be b\none of those"
|
||||
|
||||
def test_custom_repr(self):
|
||||
class JSON:
|
||||
class JSON(object):
|
||||
a = 1
|
||||
def __repr__(self):
|
||||
return "This is JSON\n{\n 'foo': 'bar'\n}"
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
def setup_module(module):
|
||||
module.TestStateFullThing.classcount = 0
|
||||
|
||||
class TestStateFullThing:
|
||||
class TestStateFullThing(object):
|
||||
def setup_class(cls):
|
||||
cls.classcount += 1
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ example: specifying and selecting acceptance tests
|
|||
def pytest_funcarg__accept(request):
|
||||
return AcceptFixture(request)
|
||||
|
||||
class AcceptFixture:
|
||||
class AcceptFixture(object):
|
||||
def __init__(self, request):
|
||||
if not request.config.option.acceptance:
|
||||
pytest.skip("specify -A to run acceptance tests")
|
||||
|
@ -61,7 +61,7 @@ extend the `accept example`_ by putting this in our test module:
|
|||
arg.tmpdir.mkdir("special")
|
||||
return arg
|
||||
|
||||
class TestSpecialAcceptance:
|
||||
class TestSpecialAcceptance(object):
|
||||
def test_sometest(self, accept):
|
||||
assert accept.tmpdir.join("special").check()
|
||||
|
||||
|
|
|
@ -7,7 +7,7 @@ def setup(request):
|
|||
yield setup
|
||||
setup.finalize()
|
||||
|
||||
class CostlySetup:
|
||||
class CostlySetup(object):
|
||||
def __init__(self):
|
||||
import time
|
||||
print ("performing costly setup")
|
||||
|
|
|
@ -21,7 +21,7 @@ You can "mark" a test function with custom metadata like this::
|
|||
pass
|
||||
def test_another():
|
||||
pass
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_method(self):
|
||||
pass
|
||||
|
||||
|
@ -31,7 +31,7 @@ You can then restrict a test run to only run tests marked with ``webtest``::
|
|||
|
||||
$ pytest -v -m webtest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 4 items
|
||||
|
@ -45,7 +45,7 @@ Or the inverse, running all tests except the webtest ones::
|
|||
|
||||
$ pytest -v -m "not webtest"
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 4 items
|
||||
|
@ -66,7 +66,7 @@ tests based on their module, class, method, or function name::
|
|||
|
||||
$ pytest -v test_server.py::TestClass::test_method
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 5 items
|
||||
|
@ -79,7 +79,7 @@ You can also select on the class::
|
|||
|
||||
$ pytest -v test_server.py::TestClass
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 4 items
|
||||
|
@ -92,7 +92,7 @@ Or select multiple nodes::
|
|||
|
||||
$ pytest -v test_server.py::TestClass test_server.py::test_send_http
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 8 items
|
||||
|
@ -130,7 +130,7 @@ select tests based on their names::
|
|||
|
||||
$ pytest -v -k http # running with the above defined example module
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 4 items
|
||||
|
@ -144,7 +144,7 @@ And you can also run all tests except the ones that match the keyword::
|
|||
|
||||
$ pytest -k "not send_http" -v
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 4 items
|
||||
|
@ -160,7 +160,7 @@ Or to select "http" and "quick" tests::
|
|||
|
||||
$ pytest -k "http or quick" -v
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 4 items
|
||||
|
@ -242,7 +242,7 @@ its test methods::
|
|||
# content of test_mark_classlevel.py
|
||||
import pytest
|
||||
@pytest.mark.webtest
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_startup(self):
|
||||
pass
|
||||
def test_startup_and_more(self):
|
||||
|
@ -256,14 +256,14 @@ To remain backward-compatible with Python 2.4 you can also set a
|
|||
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
pytestmark = pytest.mark.webtest
|
||||
|
||||
or if you need to use multiple markers you can use a list::
|
||||
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]
|
||||
|
||||
You can also set a module level marker::
|
||||
|
@ -352,7 +352,7 @@ the test needs::
|
|||
|
||||
$ pytest -E stage2
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 items
|
||||
|
||||
|
@ -364,7 +364,7 @@ and here is one that specifies exactly the environment needed::
|
|||
|
||||
$ pytest -E stage1
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 items
|
||||
|
||||
|
@ -407,7 +407,7 @@ code you can read over all such settings. Example::
|
|||
pytestmark = pytest.mark.glob("module", x=1)
|
||||
|
||||
@pytest.mark.glob("class", x=2)
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
@pytest.mark.glob("function", x=3)
|
||||
def test_something(self):
|
||||
pass
|
||||
|
@ -485,7 +485,7 @@ then you will see two tests skipped and two executed tests as expected::
|
|||
|
||||
$ pytest -rs # this option reports skip reasons
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 4 items
|
||||
|
||||
|
@ -499,7 +499,7 @@ Note that if you specify a platform via the marker-command line option like this
|
|||
|
||||
$ pytest -m linux
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 4 items
|
||||
|
||||
|
@ -551,7 +551,7 @@ We can now use the ``-m option`` to select one set::
|
|||
|
||||
$ pytest -m interface --tb=short
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 4 items
|
||||
|
||||
|
@ -573,7 +573,7 @@ or to select both "event" and "interface" tests::
|
|||
|
||||
$ pytest -m "interface or event" --tb=short
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 4 items
|
||||
|
||||
|
|
|
@ -16,7 +16,7 @@ def python1(request, tmpdir):
|
|||
def python2(request, python1):
|
||||
return Python(request.param, python1.picklefile)
|
||||
|
||||
class Python:
|
||||
class Python(object):
|
||||
def __init__(self, version, picklefile):
|
||||
self.pythonpath = py.path.local.sysfind(version)
|
||||
if not self.pythonpath:
|
||||
|
|
|
@ -27,7 +27,7 @@ now execute the test specification::
|
|||
|
||||
nonpython $ pytest test_simple.yml
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
|
||||
collected 2 items
|
||||
|
||||
|
@ -59,7 +59,7 @@ consulted when reporting in ``verbose`` mode::
|
|||
|
||||
nonpython $ pytest -v
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
|
||||
collecting ... collected 2 items
|
||||
|
@ -81,7 +81,7 @@ interesting to just look at the collection tree::
|
|||
|
||||
nonpython $ pytest --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
|
||||
collected 2 items
|
||||
<YamlFile 'test_simple.yml'>
|
||||
|
|
|
@ -130,7 +130,7 @@ objects, they are still using the default pytest representation::
|
|||
|
||||
$ pytest test_time.py --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 6 items
|
||||
<Module 'test_time.py'>
|
||||
|
@ -168,7 +168,7 @@ only have to work a bit to construct the correct arguments for pytest's
|
|||
scenario1 = ('basic', {'attribute': 'value'})
|
||||
scenario2 = ('advanced', {'attribute': 'value2'})
|
||||
|
||||
class TestSampleWithScenarios:
|
||||
class TestSampleWithScenarios(object):
|
||||
scenarios = [scenario1, scenario2]
|
||||
|
||||
def test_demo1(self, attribute):
|
||||
|
@ -181,7 +181,7 @@ this is a fully self-contained example which you can run with::
|
|||
|
||||
$ pytest test_scenarios.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 4 items
|
||||
|
||||
|
@ -194,7 +194,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
|
|||
|
||||
$ pytest --collect-only test_scenarios.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 4 items
|
||||
<Module 'test_scenarios.py'>
|
||||
|
@ -241,9 +241,9 @@ creates a database object for the actual test invocations::
|
|||
if 'db' in metafunc.fixturenames:
|
||||
metafunc.parametrize("db", ['d1', 'd2'], indirect=True)
|
||||
|
||||
class DB1:
|
||||
class DB1(object):
|
||||
"one database object"
|
||||
class DB2:
|
||||
class DB2(object):
|
||||
"alternative database object"
|
||||
|
||||
@pytest.fixture
|
||||
|
@ -259,7 +259,7 @@ Let's first see how it looks like at collection time::
|
|||
|
||||
$ pytest test_backends.py --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 2 items
|
||||
<Module 'test_backends.py'>
|
||||
|
@ -320,7 +320,7 @@ The result of this test will be successful::
|
|||
|
||||
$ pytest test_indirect_list.py --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 items
|
||||
<Module 'test_indirect_list.py'>
|
||||
|
@ -350,7 +350,7 @@ parametrizer`_ but in a lot less code::
|
|||
metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
|
||||
for funcargs in funcarglist])
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
# a map specifying multiple argument sets for a test method
|
||||
params = {
|
||||
'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
|
||||
|
@ -447,7 +447,7 @@ If you run this with reporting for skips enabled::
|
|||
|
||||
$ pytest -rs test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 2 items
|
||||
|
||||
|
|
|
@ -4,7 +4,7 @@
|
|||
def test_function():
|
||||
pass
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_method(self):
|
||||
pass
|
||||
def test_anothermethod(self):
|
||||
|
|
|
@ -107,7 +107,7 @@ This would make ``pytest`` look for tests in files that match the ``check_*
|
|||
that match ``*_check``. For example, if we have::
|
||||
|
||||
# content of check_myapp.py
|
||||
class CheckMyApp:
|
||||
class CheckMyApp(object):
|
||||
def simple_check(self):
|
||||
pass
|
||||
def complex_check(self):
|
||||
|
@ -117,7 +117,7 @@ then the test collection looks like this::
|
|||
|
||||
$ pytest --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
collected 2 items
|
||||
<Module 'check_myapp.py'>
|
||||
|
@ -163,7 +163,7 @@ You can always peek at the collection tree without running tests like this::
|
|||
|
||||
. $ pytest --collect-only pythoncollection.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
collected 3 items
|
||||
<Module 'CWD/pythoncollection.py'>
|
||||
|
@ -230,7 +230,7 @@ will be left out::
|
|||
|
||||
$ pytest --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
collected 0 items
|
||||
|
||||
|
|
|
@ -11,7 +11,7 @@ get on the terminal - we are working on that)::
|
|||
|
||||
assertion $ pytest failure_demo.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR/assertion, inifile:
|
||||
collected 42 items
|
||||
|
||||
|
@ -144,13 +144,9 @@ get on the terminal - we are working on that)::
|
|||
E 1
|
||||
E 1
|
||||
E 1
|
||||
E 1
|
||||
E - a2
|
||||
E + b2
|
||||
E 2
|
||||
E 2
|
||||
E 2
|
||||
E 2
|
||||
E 1...
|
||||
E
|
||||
E ...Full output truncated (7 lines hidden), use '-vv' to show
|
||||
|
||||
failure_demo.py:59: AssertionError
|
||||
_______ TestSpecialisedExplanations.test_eq_list ________
|
||||
|
@ -184,14 +180,15 @@ get on the terminal - we are working on that)::
|
|||
def test_eq_dict(self):
|
||||
> assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
|
||||
E AssertionError: assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
|
||||
E Omitting 1 identical items, use -v to show
|
||||
E Omitting 1 identical items, use -vv to show
|
||||
E Differing items:
|
||||
E {'b': 1} != {'b': 2}
|
||||
E Left contains more items:
|
||||
E {'c': 0}
|
||||
E Right contains more items:
|
||||
E {'d': 0}
|
||||
E Use -v to get the full diff
|
||||
E {'d': 0}...
|
||||
E
|
||||
E ...Full output truncated (2 lines hidden), use '-vv' to show
|
||||
|
||||
failure_demo.py:70: AssertionError
|
||||
_______ TestSpecialisedExplanations.test_eq_set ________
|
||||
|
@ -200,15 +197,16 @@ get on the terminal - we are working on that)::
|
|||
|
||||
def test_eq_set(self):
|
||||
> assert set([0, 10, 11, 12]) == set([0, 20, 21])
|
||||
E assert {0, 10, 11, 12} == {0, 20, 21}
|
||||
E AssertionError: assert {0, 10, 11, 12} == {0, 20, 21}
|
||||
E Extra items in the left set:
|
||||
E 10
|
||||
E 11
|
||||
E 12
|
||||
E Extra items in the right set:
|
||||
E 20
|
||||
E 21
|
||||
E Use -v to get the full diff
|
||||
E 21...
|
||||
E
|
||||
E ...Full output truncated (2 lines hidden), use '-vv' to show
|
||||
|
||||
failure_demo.py:73: AssertionError
|
||||
_______ TestSpecialisedExplanations.test_eq_longer_list ________
|
||||
|
@ -245,8 +243,9 @@ get on the terminal - we are working on that)::
|
|||
E which
|
||||
E includes foo
|
||||
E ? +++
|
||||
E and a
|
||||
E tail
|
||||
E and a...
|
||||
E
|
||||
E ...Full output truncated (2 lines hidden), use '-vv' to show
|
||||
|
||||
failure_demo.py:83: AssertionError
|
||||
_______ TestSpecialisedExplanations.test_not_in_text_single ________
|
||||
|
@ -359,7 +358,7 @@ get on the terminal - we are working on that)::
|
|||
> int(s)
|
||||
E ValueError: invalid literal for int() with base 10: 'qwe'
|
||||
|
||||
<0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python.py:1207>:1: ValueError
|
||||
<0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python.py:1219>:1: ValueError
|
||||
_______ TestRaises.test_raises_doesnt ________
|
||||
|
||||
self = <failure_demo.TestRaises object at 0xdeadbeef>
|
||||
|
@ -550,7 +549,7 @@ get on the terminal - we are working on that)::
|
|||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
|
||||
def test_single_line(self):
|
||||
class A:
|
||||
class A(object):
|
||||
a = 1
|
||||
b = 2
|
||||
> assert A.a == b, "A.a appears not to be b"
|
||||
|
@ -564,7 +563,7 @@ get on the terminal - we are working on that)::
|
|||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
|
||||
def test_multiline(self):
|
||||
class A:
|
||||
class A(object):
|
||||
a = 1
|
||||
b = 2
|
||||
> assert A.a == b, "A.a appears not to be b\n" \
|
||||
|
@ -581,7 +580,7 @@ get on the terminal - we are working on that)::
|
|||
self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>
|
||||
|
||||
def test_custom_repr(self):
|
||||
class JSON:
|
||||
class JSON(object):
|
||||
a = 1
|
||||
def __repr__(self):
|
||||
return "This is JSON\n{\n 'foo': 'bar'\n}"
|
||||
|
|
|
@ -113,7 +113,7 @@ directory with the above conftest.py::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 0 items
|
||||
|
||||
|
@ -164,7 +164,7 @@ and when running it will see a skipped "slow" test::
|
|||
|
||||
$ pytest -rs # "-rs" means report details on the little 's'
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 2 items
|
||||
|
||||
|
@ -178,7 +178,7 @@ Or run it including the ``slow`` marked test::
|
|||
|
||||
$ pytest --runslow
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 2 items
|
||||
|
||||
|
@ -303,7 +303,7 @@ which will add the string to the test header accordingly::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
project deps: mylib-1.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 0 items
|
||||
|
@ -328,7 +328,7 @@ which will add info only when run with "--v"::
|
|||
|
||||
$ pytest -v
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
info1: did you know that ...
|
||||
did you?
|
||||
|
@ -341,7 +341,7 @@ and nothing when run plainly::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 0 items
|
||||
|
||||
|
@ -375,7 +375,7 @@ Now we can profile which test functions execute the slowest::
|
|||
|
||||
$ pytest --durations=3
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 3 items
|
||||
|
||||
|
@ -426,7 +426,7 @@ tests in a class. Here is a test module example:
|
|||
import pytest
|
||||
|
||||
@pytest.mark.incremental
|
||||
class TestUserHandling:
|
||||
class TestUserHandling(object):
|
||||
def test_login(self):
|
||||
pass
|
||||
def test_modification(self):
|
||||
|
@ -441,7 +441,7 @@ If we run this::
|
|||
|
||||
$ pytest -rx
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 4 items
|
||||
|
||||
|
@ -484,7 +484,7 @@ Here is an example for making a ``db`` fixture available in a directory:
|
|||
# content of a/conftest.py
|
||||
import pytest
|
||||
|
||||
class DB:
|
||||
class DB(object):
|
||||
pass
|
||||
|
||||
@pytest.fixture(scope="session")
|
||||
|
@ -520,7 +520,7 @@ We can run this::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 7 items
|
||||
|
||||
|
@ -628,7 +628,7 @@ and run them::
|
|||
|
||||
$ pytest test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 2 items
|
||||
|
||||
|
@ -722,7 +722,7 @@ and run it::
|
|||
|
||||
$ pytest -s test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 3 items
|
||||
|
||||
|
|
|
@ -28,7 +28,7 @@ will be called ahead of running any tests::
|
|||
|
||||
# content of test_module.py
|
||||
|
||||
class TestHello:
|
||||
class TestHello(object):
|
||||
@classmethod
|
||||
def callme(cls):
|
||||
print ("callme called!")
|
||||
|
@ -39,7 +39,7 @@ will be called ahead of running any tests::
|
|||
def test_method2(self):
|
||||
print ("test_method1 called")
|
||||
|
||||
class TestOther:
|
||||
class TestOther(object):
|
||||
@classmethod
|
||||
def callme(cls):
|
||||
print ("callme other called")
|
||||
|
|
|
@ -70,7 +70,7 @@ marked ``smtp`` fixture function. Running the test looks like this::
|
|||
|
||||
$ pytest test_smtpsimple.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 items
|
||||
|
||||
|
@ -188,7 +188,7 @@ inspect what is going on and can now run the tests::
|
|||
|
||||
$ pytest test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 2 items
|
||||
|
||||
|
@ -523,7 +523,7 @@ Running the above tests results in the following test IDs being used::
|
|||
|
||||
$ pytest --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 10 items
|
||||
<Module 'test_anothersmtp.py'>
|
||||
|
@ -558,7 +558,7 @@ and instantiate an object ``app`` where we stick the already defined
|
|||
|
||||
import pytest
|
||||
|
||||
class App:
|
||||
class App(object):
|
||||
def __init__(self, smtp):
|
||||
self.smtp = smtp
|
||||
|
||||
|
@ -574,7 +574,7 @@ Here we declare an ``app`` fixture which receives the previously defined
|
|||
|
||||
$ pytest -v test_appsetup.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 2 items
|
||||
|
@ -643,7 +643,7 @@ Let's run the tests in verbose mode and with looking at the print-output::
|
|||
|
||||
$ pytest -v -s test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collecting ... collected 8 items
|
||||
|
@ -729,7 +729,7 @@ and declare its use in a test module via a ``usefixtures`` marker::
|
|||
import pytest
|
||||
|
||||
@pytest.mark.usefixtures("cleandir")
|
||||
class TestDirectoryInit:
|
||||
class TestDirectoryInit(object):
|
||||
def test_cwd_starts_empty(self):
|
||||
assert os.listdir(os.getcwd()) == []
|
||||
with open("myfile", "w") as f:
|
||||
|
@ -792,7 +792,7 @@ self-contained implementation of this idea::
|
|||
|
||||
import pytest
|
||||
|
||||
class DB:
|
||||
class DB(object):
|
||||
def __init__(self):
|
||||
self.intransaction = []
|
||||
def begin(self, name):
|
||||
|
@ -804,7 +804,7 @@ self-contained implementation of this idea::
|
|||
def db():
|
||||
return DB()
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
@pytest.fixture(autouse=True)
|
||||
def transact(self, request, db):
|
||||
db.begin(request.function.__name__)
|
||||
|
@ -862,7 +862,7 @@ into a conftest.py file **without** using ``autouse``::
|
|||
and then e.g. have a TestClass using it by declaring the need::
|
||||
|
||||
@pytest.mark.usefixtures("transact")
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_method1(self):
|
||||
...
|
||||
|
||||
|
|
|
@ -24,7 +24,7 @@ resources. Here is a basic example how we could implement
|
|||
a per-session Database object::
|
||||
|
||||
# content of conftest.py
|
||||
class Database:
|
||||
class Database(object):
|
||||
def __init__(self):
|
||||
print ("database instance created")
|
||||
def destroy(self):
|
||||
|
|
|
@ -1,7 +1,7 @@
|
|||
import textwrap
|
||||
import inspect
|
||||
|
||||
class Writer:
|
||||
class Writer(object):
|
||||
def __init__(self, clsname):
|
||||
self.clsname = clsname
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ Installation::
|
|||
To check your installation has installed the correct version::
|
||||
|
||||
$ pytest --version
|
||||
This is pytest version 3.0.7, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py
|
||||
This is pytest version 3.x.y, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py
|
||||
|
||||
.. _`simpletest`:
|
||||
|
||||
|
@ -46,20 +46,20 @@ That's it. You can execute the test function now::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 items
|
||||
|
||||
|
||||
test_sample.py F
|
||||
|
||||
|
||||
======= FAILURES ========
|
||||
_______ test_answer ________
|
||||
|
||||
|
||||
def test_answer():
|
||||
> assert func(3) == 5
|
||||
E assert 4 == 5
|
||||
E + where 4 = func(3)
|
||||
|
||||
|
||||
test_sample.py:5: AssertionError
|
||||
======= 1 failed in 0.12 seconds ========
|
||||
|
||||
|
@ -111,7 +111,7 @@ to group tests logically, in classes and modules. Let's write a class
|
|||
containing two tests::
|
||||
|
||||
# content of test_class.py
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_one(self):
|
||||
x = "this"
|
||||
assert 'h' in x
|
||||
|
@ -128,15 +128,15 @@ run the module by passing its filename::
|
|||
.F
|
||||
======= FAILURES ========
|
||||
_______ TestClass.test_two ________
|
||||
|
||||
|
||||
self = <test_class.TestClass object at 0xdeadbeef>
|
||||
|
||||
|
||||
def test_two(self):
|
||||
x = "hello"
|
||||
> assert hasattr(x, 'check')
|
||||
E AssertionError: assert False
|
||||
E + where False = hasattr('hello', 'check')
|
||||
|
||||
|
||||
test_class.py:8: AssertionError
|
||||
1 failed, 1 passed in 0.12 seconds
|
||||
|
||||
|
@ -165,14 +165,14 @@ before performing the test function call. Let's just run it::
|
|||
F
|
||||
======= FAILURES ========
|
||||
_______ test_needsfiles ________
|
||||
|
||||
|
||||
tmpdir = local('PYTEST_TMPDIR/test_needsfiles0')
|
||||
|
||||
|
||||
def test_needsfiles(tmpdir):
|
||||
print (tmpdir)
|
||||
> assert 0
|
||||
E assert 0
|
||||
|
||||
|
||||
test_tmpdir.py:3: AssertionError
|
||||
--------------------------- Captured stdout call ---------------------------
|
||||
PYTEST_TMPDIR/test_needsfiles0
|
||||
|
|
|
@ -25,7 +25,7 @@ To execute it::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 1 items
|
||||
|
||||
|
|
|
@ -54,7 +54,7 @@ them in turn::
|
|||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 3 items
|
||||
|
||||
|
@ -93,16 +93,37 @@ for example with the builtin ``mark.xfail``::
|
|||
@pytest.mark.parametrize("test_input,expected", [
|
||||
("3+5", 8),
|
||||
("2+4", 6),
|
||||
pytest.mark.xfail(("6*9", 42)),
|
||||
pytest.param("6*9", 42,
|
||||
marks=pytest.mark.xfail),
|
||||
])
|
||||
def test_eval(test_input, expected):
|
||||
assert eval(test_input) == expected
|
||||
|
||||
.. note::
|
||||
|
||||
Prior to version 3.1, the supported mechanism for marking values
|
||||
used the syntax::
|
||||
|
||||
import pytest
|
||||
@pytest.mark.parametrize("test_input,expected", [
|
||||
("3+5", 8),
|
||||
("2+4", 6),
|
||||
pytest.mark.xfail(("6*9", 42),),
|
||||
])
|
||||
def test_eval(test_input, expected):
|
||||
assert eval(test_input) == expected
|
||||
|
||||
|
||||
This was an initial hack to support the feature, but it soon proved incomplete:
|
||||
it was broken for passing functions and for applying multiple marks with the same name but different parameters.
|
||||
The old syntax will be removed in pytest-4.0.
|
||||
|
||||
|
||||
Let's run this::
|
||||
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
|
||||
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
collected 3 items
|
||||
|
||||
|
|
|
@ -1,139 +1,3 @@
|
|||
.. _`asserting warnings`:
|
||||
:orphan:
|
||||
|
||||
.. _assertwarnings:
|
||||
|
||||
Asserting Warnings
|
||||
=====================================================
|
||||
|
||||
.. _`asserting warnings with the warns function`:
|
||||
|
||||
.. _warns:
|
||||
|
||||
Asserting warnings with the warns function
|
||||
-----------------------------------------------
|
||||
|
||||
.. versionadded:: 2.8
|
||||
|
||||
You can check that code raises a particular warning using ``pytest.warns``,
|
||||
which works in a similar manner to :ref:`raises <assertraises>`::
|
||||
|
||||
import warnings
|
||||
import pytest
|
||||
|
||||
def test_warning():
|
||||
with pytest.warns(UserWarning):
|
||||
warnings.warn("my warning", UserWarning)
|
||||
|
||||
The test will fail if the warning in question is not raised.
|
||||
|
||||
You can also call ``pytest.warns`` on a function or code string::
|
||||
|
||||
pytest.warns(expected_warning, func, *args, **kwargs)
|
||||
pytest.warns(expected_warning, "func(*args, **kwargs)")
|
||||
|
||||
The function also returns a list of all raised warnings (as
|
||||
``warnings.WarningMessage`` objects), which you can query for
|
||||
additional information::
|
||||
|
||||
with pytest.warns(RuntimeWarning) as record:
|
||||
warnings.warn("another warning", RuntimeWarning)
|
||||
|
||||
# check that only one warning was raised
|
||||
+This page has been moved, please see :ref:`assertwarnings`.
@@ -98,7 +98,7 @@ You can use the ``skipif`` decorator (and any other marker) on classes::

    @pytest.mark.skipif(sys.platform == 'win32',
                        reason="does not run on windows")
-    class TestPosixCalls:
+    class TestPosixCalls(object):

        def test_function(self):
            "will not be setup or run under 'win32' platform"
@@ -224,7 +224,7 @@ Running it with the report-on-xfail option gives this output::

    example $ pytest -rx xfail_demo.py
    ======= test session starts ========
-    platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
+    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR/example, inifile:
    collected 7 items
@@ -110,7 +110,7 @@ If you want to disable a complete test class you
can set the class-level attribute ``disabled``.
For example, in order to avoid running some tests on Win32::

-    class TestPosixOnly:
+    class TestPosixOnly(object):
        disabled = sys.platform == 'win32'

        def test_xxx(self):
@@ -29,7 +29,7 @@ Running this would result in a passed test except for the last

    $ pytest test_tmpdir.py
    ======= test session starts ========
-    platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
+    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 items
@@ -71,7 +71,7 @@ it from a unittest-style test::

    @pytest.fixture(scope="class")
    def db_class(request):
-        class DummyDB:
+        class DummyDB(object):
            pass
        # set a class attribute on the invoking test context
        request.cls.db = DummyDB()
@@ -108,7 +108,7 @@ the ``self.db`` values in the traceback::

    $ pytest test_unittest_db.py
    ======= test session starts ========
-    platform linux -- Python 3.5.2, pytest-3.0.7, py-1.4.32, pluggy-0.4.0
+    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 2 items
@@ -171,7 +171,8 @@ creation of a per-test temporary directory::
            tmpdir.join("samplefile.ini").write("# testdata")

        def test_method(self):
-            s = open("samplefile.ini").read()
+            with open("samplefile.ini") as f:
+                s = f.read()
            assert "testdata" in s

Due to the ``autouse`` flag the ``initdir`` fixture function will be
@@ -177,6 +177,15 @@ integration servers, use this invocation::

to create an XML file at ``path``.

+.. versionadded:: 3.1
+
+To set the name of the root test suite xml item, you can configure the ``junit_suite_name`` option in your config file:
+
+.. code-block:: ini
+
+    [pytest]
+    junit_suite_name = my_suite
+
record_xml_property
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
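(Illustrative aside, not part of the diff: a minimal sketch of how the effect of ``junit_suite_name`` could be verified; ``report.xml`` and the use of ``subprocess`` here are assumptions, not pytest API.)::

    # sketch: confirm the configured root suite name in the generated XML
    import subprocess
    import xml.etree.ElementTree as ET

    subprocess.call(['pytest', '--junitxml=report.xml'])
    root = ET.parse('report.xml').getroot()  # the root <testsuite> element
    assert root.get('name') == 'my_suite'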
@@ -238,7 +247,7 @@ to all testcases you can use ``LogXML.add_global_properties``
    def start_and_prepare_env():
        pass

-    class TestMe:
+    class TestMe(object):
        def test_foo(self):
            assert True
@@ -326,7 +335,7 @@ You can specify additional plugins to ``pytest.main``::

    # content of myinvoke.py
    import pytest
-    class MyPlugin:
+    class MyPlugin(object):
        def pytest_sessionfinish(self):
            print("*** test run reporting finishing")
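(Illustrative aside, not part of the diff: a plugin instance like the one above is passed to ``pytest.main`` through its documented ``plugins`` argument.)::

    # sketch: run an in-process pytest session with the extra plugin
    pytest.main(["-qq"], plugins=[MyPlugin()])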
@@ -0,0 +1,224 @@
+.. _`warnings`:
+
+Warnings Capture
+================
+
+.. versionadded:: 3.1
+
+Starting from version ``3.1``, pytest now automatically catches all warnings during test execution
+and displays them at the end of the session::
+
+    # content of test_show_warnings.py
+    import warnings
+
+    def deprecated_function():
+        warnings.warn("this function is deprecated, use another_function()", DeprecationWarning)
+        return 1
+
+    def test_one():
+        assert deprecated_function() == 1
+
+Running pytest now produces this output::
+
+    $ pytest test_show_warnings.py
+    ======= test session starts ========
+    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
+    rootdir: $REGENDOC_TMPDIR, inifile:
+    collected 1 items
+
+    test_show_warnings.py .
+
+    ======= warnings summary ========
+    test_show_warnings.py::test_one
+      $REGENDOC_TMPDIR/test_show_warnings.py:4: DeprecationWarning: this function is deprecated, use another_function()
+        warnings.warn("this function is deprecated, use another_function()", DeprecationWarning)
+
+    -- Docs: http://doc.pytest.org/en/latest/warnings.html
+    ======= 1 passed, 1 warnings in 0.12 seconds ========
+
+The ``-W`` flag can be passed to control which warnings will be displayed or even turn
+them into errors::
+
+    $ pytest -q test_show_warnings.py -W error::DeprecationWarning
+    F
+    ======= FAILURES ========
+    _______ test_one ________
+
+    def test_one():
+    >   assert deprecated_function() == 1
+
+    test_show_warnings.py:8:
+    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+    def deprecated_function():
+    >   warnings.warn("this function is deprecated, use another_function()", DeprecationWarning)
+    E   DeprecationWarning: this function is deprecated, use another_function()
+
+    test_show_warnings.py:4: DeprecationWarning
+    1 failed in 0.12 seconds
+
+The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option.
+For example, the configuration below will ignore all deprecation warnings, but will transform
+all other warnings into errors.
+
+.. code-block:: ini
+
+    [pytest]
+    filterwarnings =
+        error
+        ignore::DeprecationWarning
+
+When a warning matches more than one option in the list, the action for the last matching option
+is performed.
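(Illustrative aside, not part of the added file: a minimal sketch of how the configuration above plays out; both test names are assumptions.)::

    # under the ini configuration above:
    import warnings

    def test_deprecation_is_ignored():
        # matches both ``error`` and ``ignore::DeprecationWarning``;
        # the last matching option wins, so the warning is ignored
        warnings.warn("old api", DeprecationWarning)

    def test_other_warning_is_error():
        # only matches ``error``, so the warning fails the test
        warnings.warn("boom", UserWarning)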
+
+Both ``-W`` command-line option and ``filterwarnings`` ini option are based on Python's own
+`-W option`_ and `warnings.simplefilter`_, so please refer to those sections in the Python
+documentation for other examples and advanced usage.
+
+*Credits go to Florian Schulze for the reference implementation in the* `pytest-warnings`_
+*plugin.*
+
+.. _`-W option`: https://docs.python.org/3/using/cmdline.html#cmdoption-W
+.. _warnings.simplefilter: https://docs.python.org/3/library/warnings.html#warnings.simplefilter
+.. _`pytest-warnings`: https://github.com/fschulze/pytest-warnings
+
+.. _`asserting warnings`:
+
+.. _assertwarnings:
+
+.. _`asserting warnings with the warns function`:
+
+.. _warns:
+
+Asserting warnings with the warns function
+-----------------------------------------------
+
+.. versionadded:: 2.8
+
+You can check that code raises a particular warning using ``pytest.warns``,
+which works in a similar manner to :ref:`raises <assertraises>`::
+
+    import warnings
+    import pytest
+
+    def test_warning():
+        with pytest.warns(UserWarning):
+            warnings.warn("my warning", UserWarning)
+
+The test will fail if the warning in question is not raised.
+
+You can also call ``pytest.warns`` on a function or code string::
+
+    pytest.warns(expected_warning, func, *args, **kwargs)
+    pytest.warns(expected_warning, "func(*args, **kwargs)")
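(Illustrative aside, not part of the added file: concrete instances of the two call forms above, assuming ``warnings`` is imported in the calling module.)::

    pytest.warns(UserWarning, warnings.warn, "w", UserWarning)
    pytest.warns(UserWarning, "warnings.warn('w', UserWarning)")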
+
+The function also returns a list of all raised warnings (as
+``warnings.WarningMessage`` objects), which you can query for
+additional information::
+
+    with pytest.warns(RuntimeWarning) as record:
+        warnings.warn("another warning", RuntimeWarning)
+
+    # check that only one warning was raised
+    assert len(record) == 1
+    # check that the message matches
+    assert record[0].message.args[0] == "another warning"
+
+Alternatively, you can examine raised warnings in detail using the
+:ref:`recwarn <recwarn>` fixture (see below).
+
+.. note::
+    ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
+    differently; see :ref:`ensuring_function_triggers`.
+
+.. _`recording warnings`:
+
+.. _recwarn:
+
+Recording warnings
+------------------------
+
+You can record raised warnings either using ``pytest.warns`` or with
+the ``recwarn`` fixture.
+
+To record with ``pytest.warns`` without asserting anything about the warnings,
+pass ``None`` as the expected warning type::
+
+    with pytest.warns(None) as record:
+        warnings.warn("user", UserWarning)
+        warnings.warn("runtime", RuntimeWarning)
+
+    assert len(record) == 2
+    assert str(record[0].message) == "user"
+    assert str(record[1].message) == "runtime"
+
+The ``recwarn`` fixture will record warnings for the whole function::
+
+    import warnings
+
+    def test_hello(recwarn):
+        warnings.warn("hello", UserWarning)
+        assert len(recwarn) == 1
+        w = recwarn.pop(UserWarning)
+        assert issubclass(w.category, UserWarning)
+        assert str(w.message) == "hello"
+        assert w.filename
+        assert w.lineno
+
+Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded
+warnings: a WarningsRecorder instance. To view the recorded warnings, you can
+iterate over this instance, call ``len`` on it to get the number of recorded
+warnings, or index into it to get a particular recorded warning. It also
+provides these methods:
+
+.. autoclass:: _pytest.recwarn.WarningsRecorder()
+    :members:
+
+Each recorded warning has the attributes ``message``, ``category``,
+``filename``, ``lineno``, ``file``, and ``line``. The ``category`` is the
+class of the warning. The ``message`` is the warning itself; calling
+``str(message)`` will return the actual message of the warning.
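(Illustrative aside, not part of the added file: a small sketch exercising the recorder interface just described.)::

    with pytest.warns(None) as record:
        warnings.warn("one", UserWarning)
        warnings.warn("two", RuntimeWarning)

    assert len(record) == 2                    # ``len`` gives the count
    assert record[0].category is UserWarning   # indexing returns one warning
    assert str(record[0].message) == "one"
    for w in record:                           # the recorder is iterable
        print(w.filename, w.lineno)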
+
+.. note::
+    :class:`RecordedWarning` was changed from a plain class to a namedtuple in pytest 3.1
+
+.. note::
+    ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated
+    differently; see :ref:`ensuring_function_triggers`.
+
+.. _`ensuring a function triggers a deprecation warning`:
+
+.. _ensuring_function_triggers:
+
+Ensuring a function triggers a deprecation warning
+-------------------------------------------------------
+
+You can also call a global helper for checking
+that a certain function call triggers a ``DeprecationWarning`` or
+``PendingDeprecationWarning``::
+
+    import pytest
+
+    def test_global():
+        pytest.deprecated_call(myfunction, 17)
+
+By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be
+caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide
+them. If you wish to record them in your own code, use the
+command ``warnings.simplefilter('always')``::
+
+    import warnings
+    import pytest
+
+    def test_deprecation(recwarn):
+        warnings.simplefilter('always')
+        warnings.warn("deprecated", DeprecationWarning)
+        assert len(recwarn) == 1
+        assert recwarn.pop(DeprecationWarning)
+
+You can also use it as a context manager::
+
+    def test_global():
+        with pytest.deprecated_call():
+            myobject.deprecated_method()
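(Illustrative aside, not part of the added file: ``myfunction`` in the example above is a placeholder; a matching definition could look like this.)::

    import warnings

    def myfunction(x):
        # hypothetical deprecated function matching the example call above
        warnings.warn("myfunction is deprecated", DeprecationWarning)
        return x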
@@ -517,7 +517,6 @@ Initialization, command line and configuration hooks
.. autofunction:: pytest_load_initial_conftests
.. autofunction:: pytest_cmdline_preparse
.. autofunction:: pytest_cmdline_parse
-.. autofunction:: pytest_namespace
.. autofunction:: pytest_addoption
.. autofunction:: pytest_cmdline_main
.. autofunction:: pytest_configure
pytest.py
@@ -2,19 +2,7 @@
"""
pytest: unit and functional testing with Python.
"""
-__all__ = [
-    'main',
-    'UsageError',
-    'cmdline',
-    'hookspec',
-    'hookimpl',
-    '__version__',
-]
-
-if __name__ == '__main__': # if run as a script or by 'python -m pytest'
-    # we trigger the below "else" condition by the following import
-    import pytest
-    raise SystemExit(pytest.main())

# else we are imported
@@ -22,7 +10,69 @@ from _pytest.config import (
    main, UsageError, _preloadplugins, cmdline,
    hookspec, hookimpl
)
from _pytest.fixtures import fixture, yield_fixture
from _pytest.assertion import register_assert_rewrite
from _pytest.freeze_support import freeze_includes
from _pytest import __version__
from _pytest.debugging import pytestPDB as __pytestPDB
from _pytest.recwarn import warns, deprecated_call
from _pytest.runner import fail, skip, importorskip, exit
from _pytest.mark import MARK_GEN as mark, param
from _pytest.skipping import xfail
from _pytest.main import Item, Collector, File, Session
from _pytest.fixtures import fillfixtures as _fillfuncargs
from _pytest.python import (
    raises, approx,
    Module, Class, Instance, Function, Generator,
)

-_preloadplugins() # to populate pytest.* namespace so help(pytest) works
set_trace = __pytestPDB.set_trace

+__all__ = [
+    'main',
+    'UsageError',
+    'cmdline',
+    'hookspec',
+    'hookimpl',
+    '__version__',
+    'register_assert_rewrite',
+    'freeze_includes',
+    'set_trace',
+    'warns',
+    'deprecated_call',
+    'fixture',
+    'yield_fixture',
+    'fail',
+    'skip',
+    'xfail',
+    'importorskip',
+    'exit',
+    'mark',
+    'param',
+    'approx',
+    '_fillfuncargs',
+
+    'Item',
+    'File',
+    'Collector',
+    'Session',
+    'Module',
+    'Class',
+    'Instance',
+    'Function',
+    'Generator',
+    'raises',
+]
+
+if __name__ == '__main__':
+    # if run as a script or by 'python -m pytest'
+    # we trigger the below "else" condition by the following import
+    import pytest
+    raise SystemExit(pytest.main())
+else:
+
+    from _pytest.compat import _setup_collect_fakemodule
+    _preloadplugins() # to populate pytest.* namespace so help(pytest) works
+    _setup_collect_fakemodule()
@@ -18,4 +18,3 @@ if os.path.isdir('.git'):
else:
    print('No .git directory found, skipping checking the manifest file')
    sys.exit(0)
@@ -9,6 +9,10 @@ upload-dir = doc/en/build/html
[bdist_wheel]
universal = 1

+[check-manifest]
+ignore =
+    _pytest/_version.py
+
[metadata]
license_file = LICENSE
setup.py
@@ -1,32 +1,27 @@
-import os, sys
+import os
+import sys
import setuptools
import pkg_resources
from setuptools import setup, Command

-classifiers = ['Development Status :: 6 - Mature',
-               'Intended Audience :: Developers',
-               'License :: OSI Approved :: MIT License',
-               'Operating System :: POSIX',
-               'Operating System :: Microsoft :: Windows',
-               'Operating System :: MacOS :: MacOS X',
-               'Topic :: Software Development :: Testing',
-               'Topic :: Software Development :: Libraries',
-               'Topic :: Utilities'] + [
-    ('Programming Language :: Python :: %s' % x) for x in
-    '2 2.6 2.7 3 3.3 3.4 3.5 3.6'.split()]
+classifiers = [
+    'Development Status :: 6 - Mature',
+    'Intended Audience :: Developers',
+    'License :: OSI Approved :: MIT License',
+    'Operating System :: POSIX',
+    'Operating System :: Microsoft :: Windows',
+    'Operating System :: MacOS :: MacOS X',
+    'Topic :: Software Development :: Testing',
+    'Topic :: Software Development :: Libraries',
+    'Topic :: Utilities',
+] + [
+    ('Programming Language :: Python :: %s' % x)
+    for x in '2 2.6 2.7 3 3.3 3.4 3.5 3.6'.split()
+]

with open('README.rst') as fd:
    long_description = fd.read()

-def get_version():
-    p = os.path.join(os.path.dirname(
-        os.path.abspath(__file__)), "_pytest", "__init__.py")
-    with open(p) as f:
-        for line in f.readlines():
-            if "__version__" in line:
-                return line.strip().split("=")[-1].strip(" '")
-    raise ValueError("could not read version")
-

def has_environment_marker_support():
    """
@@ -63,7 +58,9 @@ def main():
        name='pytest',
        description='pytest: simple powerful testing with Python',
        long_description=long_description,
-        version=get_version(),
+        use_scm_version={
+            'write_to': '_pytest/_version.py',
+        },
        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -74,6 +71,7 @@ def main():
        keywords="test unittest",
        cmdclass={'test': PyTest},
        # the following should be enabled for release
+        setup_requires=['setuptools-scm'],
        install_requires=install_requires,
        extras_require=extras_require,
        packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.vendored_packages'],
@@ -0,0 +1,9 @@
+"""
+Invoke tasks to help with pytest development and release process.
+"""
+
+import invoke
+
+from . import generate
+
+ns = invoke.Collection(generate)
@@ -0,0 +1,112 @@
+import os
+from pathlib import Path
+from subprocess import check_output, check_call
+
+import invoke
+
+
+@invoke.task(help={
+    'version': 'version being released',
+})
+def announce(ctx, version):
+    """Generates a new release announcement entry in the docs."""
+    # Get our list of authors
+    stdout = check_output(["git", "describe", "--abbrev=0", '--tags'])
+    stdout = stdout.decode('utf-8')
+    last_version = stdout.strip()
+
+    stdout = check_output(["git", "log", "{}..HEAD".format(last_version), "--format=%aN"])
+    stdout = stdout.decode('utf-8')
+
+    contributors = set(stdout.splitlines())
+
+    template_name = 'release.minor.rst' if version.endswith('.0') else 'release.patch.rst'
+    template_text = Path(__file__).parent.joinpath(template_name).read_text(encoding='UTF-8')
+
+    contributors_text = '\n'.join('* {}'.format(name) for name in sorted(contributors)) + '\n'
+    text = template_text.format(version=version, contributors=contributors_text)
+
+    target = Path(__file__).parent.joinpath('../doc/en/announce/release-{}.rst'.format(version))
+    target.write_text(text, encoding='UTF-8')
+    print("[generate.announce] Generated {}".format(target.name))
+
+    # Update index with the new release entry
+    index_path = Path(__file__).parent.joinpath('../doc/en/announce/index.rst')
+    lines = index_path.read_text(encoding='UTF-8').splitlines()
+    indent = ' '
+    for index, line in enumerate(lines):
+        if line.startswith('{}release-'.format(indent)):
+            new_line = indent + target.stem
+            if line != new_line:
+                lines.insert(index, new_line)
+                index_path.write_text('\n'.join(lines) + '\n', encoding='UTF-8')
+                print("[generate.announce] Updated {}".format(index_path.name))
+            else:
+                print("[generate.announce] Skip {} (already contains release)".format(index_path.name))
+            break
+
+    check_call(['git', 'add', str(target)])
+
+
+@invoke.task()
+def regen(ctx):
+    """Call regendoc tool to update examples and pytest output in the docs."""
+    print("[generate.regen] Updating docs")
+    check_call(['tox', '-e', 'regen'])
+
+
+@invoke.task()
+def make_tag(ctx, version):
+    """Create a new (local) tag for the release, only if the repository is clean."""
+    from git import Repo
+
+    repo = Repo('.')
+    if repo.is_dirty():
+        print('Current repository is dirty. Please commit any changes and try again.')
+        raise invoke.Exit(code=2)
+
+    tag_names = [x.name for x in repo.tags]
+    if version in tag_names:
+        print("[generate.make_tag] Delete existing tag {}".format(version))
+        repo.delete_tag(version)
+
+    print("[generate.make_tag] Create tag {}".format(version))
+    repo.create_tag(version)
+
+
+@invoke.task()
+def devpi_upload(ctx, version, user, password=None):
+    """Creates and uploads a package to devpi for testing."""
+    if password:
+        print("[generate.devpi_upload] devpi login {}".format(user))
+        check_call(['devpi', 'login', user, '--password', password])
+
+    check_call(['devpi', 'use', 'https://devpi.net/{}/dev'.format(user)])
+
+    env = os.environ.copy()
+    env['SETUPTOOLS_SCM_PRETEND_VERSION'] = version
+    check_call(['devpi', 'upload', '--formats', 'sdist,bdist_wheel'], env=env)
+    print("[generate.devpi_upload] package uploaded")
+
+
+@invoke.task(help={
+    'version': 'version being released',
+    'user': 'name of the user on devpi to stage the generated package',
+    'password': 'user password on devpi to stage the generated package '
+                '(if not given assumed logged in)',
+})
+def pre_release(ctx, version, user, password=None):
+    """Generates new docs, release announcements and uploads a new release to devpi for testing."""
+    announce(ctx, version)
+    regen(ctx)
+
+    msg = 'Preparing release version {}'.format(version)
+    check_call(['git', 'commit', '-a', '-m', msg])
+
+    make_tag(ctx, version)
+
+    devpi_upload(ctx, version=version, user=user, password=password)
+
+    print()
+    print('[generate.pre_release] Please push your branch and open a PR.')
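(Illustrative aside, not part of the diff: assuming these tasks are exposed through the ``ns`` collection above, they would be driven from the repository root roughly like this; invoke normally maps ``pre_release`` to ``pre-release``, and exact flag syntax depends on the invoke version.)::

    $ invoke generate.announce --version=3.1.1
    $ invoke generate.pre-release --version=3.1.1 --user=myuser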
@@ -0,0 +1,27 @@
+pytest-{version}
+=======================================
+
+The pytest team is proud to announce the {version} release!
+
+pytest is a mature Python testing tool with more than 1600 tests
+against itself, passing on many different interpreters and platforms.
+
+This release contains bug fixes and improvements, so users are encouraged
+to take a look at the CHANGELOG:
+
+    http://doc.pytest.org/en/latest/changelog.html
+
+For complete documentation, please visit:
+
+    http://docs.pytest.org
+
+As usual, you can upgrade from PyPI via:
+
+    pip install -U pytest
+
+Thanks to all who contributed to this release, among them:
+
+{contributors}
+
+Happy testing,
+The Pytest Development Team
@@ -0,0 +1,17 @@
+pytest-{version}
+=======================================
+
+pytest {version} has just been released to PyPI.
+
+This is a bug-fix release, being a drop-in replacement. To upgrade::
+
+  pip install --upgrade pytest
+
+The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.
+
+Thanks to all who contributed to this release, among them:
+
+{contributors}
+
+Happy testing,
+The pytest Development Team
@@ -0,0 +1,3 @@
+invoke
+tox
+gitpython
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+from __future__ import absolute_import, division, print_function
import os
import sys

@@ -8,7 +9,7 @@ import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR


-class TestGeneralUsage:
+class TestGeneralUsage(object):
    def test_config_error(self, testdir):
        testdir.makeconftest("""
            def pytest_configure(config):

@@ -338,10 +339,16 @@ class TestGeneralUsage:
            "*ERROR*test_b.py::b*",
        ])

+    @pytest.mark.usefixtures('recwarn')
    def test_namespace_import_doesnt_confuse_import_hook(self, testdir):
-        # Ref #383. Python 3.3's namespace package messed with our import hooks
-        # Importing a module that didn't exist, even if the ImportError was
-        # gracefully handled, would make our test crash.
+        """
+        Ref #383. Python 3.3's namespace package messed with our import hooks
+        Importing a module that didn't exist, even if the ImportError was
+        gracefully handled, would make our test crash.
+
+        Use recwarn here to silence this warning in Python 2.6 and 2.7:
+        ImportWarning: Not importing directory '...\not_a_package': missing __init__.py
+        """
        testdir.mkdir('not_a_package')
        p = testdir.makepyfile("""
            try:

@@ -410,7 +417,7 @@ class TestGeneralUsage:
        ])


-class TestInvocationVariants:
+class TestInvocationVariants(object):
    def test_earlyinit(self, testdir):
        p = testdir.makepyfile("""
            import pytest

@@ -502,7 +509,7 @@ class TestInvocationVariants:
        out, err = capsys.readouterr()

    def test_invoke_plugin_api(self, testdir, capsys):
-        class MyPlugin:
+        class MyPlugin(object):
            def pytest_addoption(self, parser):
                parser.addoption("--myopt")

@@ -523,6 +530,7 @@ class TestInvocationVariants:
        ])

    def test_cmdline_python_package(self, testdir, monkeypatch):
+        import warnings
        monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False)
        path = testdir.mkpydir("tpkg")
        path.join("test_hello.py").write("def test_hello(): pass")

@@ -545,7 +553,11 @@ class TestInvocationVariants:
            return what
        empty_package = testdir.mkpydir("empty_package")
        monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package))
-        result = testdir.runpytest("--pyargs", ".")
+        # the path which is not a package raises a warning on pypy;
+        # no idea why only pypy and not normal python warn about it here
+        with warnings.catch_warnings():
+            warnings.simplefilter('ignore', ImportWarning)
+            result = testdir.runpytest("--pyargs", ".")
        assert result.ret == 0
        result.stdout.fnmatch_lines([
            "*2 passed*"

@@ -670,7 +682,7 @@ class TestInvocationVariants:
        assert request.config.pluginmanager.hasplugin('python')


-class TestDurations:
+class TestDurations(object):
    source = """
        import time
        frag = 0.002

@@ -741,7 +753,7 @@ class TestDurations:
        assert result.ret == 0


-class TestDurationWithFixture:
+class TestDurationWithFixture(object):
    source = """
        import time
        frag = 0.001

@@ -781,3 +793,45 @@ def test_zipimport_hook(testdir, tmpdir):
    assert result.ret == 0
    result.stderr.fnmatch_lines(['*not found*foo*'])
    assert 'INTERNALERROR>' not in result.stdout.str()
+
+
+def test_import_plugin_unicode_name(testdir):
+    testdir.makepyfile(
+        myplugin='',
+    )
+    testdir.makepyfile("""
+        def test(): pass
+    """)
+    testdir.makeconftest("""
+        pytest_plugins = [u'myplugin']
+    """)
+    r = testdir.runpytest()
+    assert r.ret == 0
+
+
+def test_deferred_hook_checking(testdir):
+    """
+    Check hooks as late as possible (#1821).
+    """
+    testdir.syspathinsert()
+    testdir.makepyfile(**{
+        'plugin.py': """
+            class Hooks:
+                def pytest_my_hook(self, config):
+                    pass
+
+            def pytest_configure(config):
+                config.pluginmanager.add_hookspecs(Hooks)
+        """,
+        'conftest.py': """
+            pytest_plugins = ['plugin']
+            def pytest_my_hook(config):
+                return 40
+        """,
+        'test_foo.py': """
+            def test(request):
+                assert request.config.hook.pytest_my_hook(config=request.config) == [40]
+        """
+    })
+    result = testdir.runpytest()
+    result.stdout.fnmatch_lines(['* 1 passed *'])
@@ -1,3 +1,4 @@
+from __future__ import absolute_import, division, print_function
import sys

import _pytest._code

@@ -20,7 +21,7 @@ def test_code_gives_back_name_for_not_existing_file():
    assert code.fullsource is None

def test_code_with_class():
-    class A:
+    class A(object):
        pass
    pytest.raises(TypeError, "_pytest._code.Code(A)")

@@ -136,7 +137,7 @@ def test_frame_getargs():
        ('z', {'c': 'd'})]


-class TestExceptionInfo:
+class TestExceptionInfo(object):

    def test_bad_getsource(self):
        try:

@@ -147,7 +148,7 @@ class TestExceptionInfo:
        assert exci.getrepr()


-class TestTracebackEntry:
+class TestTracebackEntry(object):

    def test_getsource(self):
        try:
@@ -1,4 +1,5 @@
# -*- coding: utf-8 -*-
+from __future__ import absolute_import, division, print_function

import operator
import _pytest

@@ -25,7 +26,7 @@ else:
    import pytest
    pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3]))

-class TWMock:
+class TWMock(object):
    WRITE = object()

    def __init__(self):

@@ -89,7 +90,7 @@ def h():
    g()
#

-class TestTraceback_f_g_h:
+class TestTraceback_f_g_h(object):
    def setup_method(self, method):
        try:
            h()

@@ -369,7 +370,7 @@ def test_codepath_Queue_example():

def test_match_succeeds():
    with pytest.raises(ZeroDivisionError) as excinfo:
-        0 / 0
+        0 // 0
    excinfo.match(r'.*zero.*')

def test_match_raises_error(testdir):

@@ -386,7 +387,7 @@ def test_match_raises_error(testdir):
        "*AssertionError*Pattern*[123]*not found*",
    ])

-class TestFormattedExcinfo:
+class TestFormattedExcinfo(object):

    @pytest.fixture
    def importasmod(self, request):

@@ -472,7 +473,7 @@ raise ValueError()
    pr = FormattedExcinfo()

    class FakeCode(object):
-        class raw:
+        class raw(object):
            co_filename = '?'

        path = '?'
@@ -1,6 +1,7 @@
# flake8: noqa
# disable flake check on this file because some constructs are strange
# or redundant on purpose and can't be disable on a line-by-line basis
+from __future__ import absolute_import, division, print_function
import sys

import _pytest._code

@@ -49,7 +50,7 @@ def test_source_from_function():
    assert str(source).startswith('def test_source_str_function():')

def test_source_from_method():
-    class TestClass:
+    class TestClass(object):
        def test_method(self):
            pass
    source = _pytest._code.Source(TestClass().test_method)

@@ -119,7 +120,7 @@ def test_isparseable():
    assert not Source(" \nif 1:\npass").isparseable()
    assert not Source(chr(0)).isparseable()

-class TestAccesses:
+class TestAccesses(object):
    source = Source("""\
        def f(x):
            pass

@@ -143,7 +144,7 @@ class TestAccesses:
        l = [x for x in self.source]
        assert len(l) == 4

-class TestSourceParsingAndCompiling:
+class TestSourceParsingAndCompiling(object):
    source = Source("""\
        def f(x):
            assert (x ==

@@ -307,7 +308,7 @@ class TestSourceParsingAndCompiling:
        pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval')

def test_getstartingblock_singleline():
-    class A:
+    class A(object):
        def __init__(self, *args):
            frame = sys._getframe(1)
            self.source = _pytest._code.Frame(frame).statement

@@ -318,7 +319,7 @@ def test_getstartingblock_singleline():
    assert len(l) == 1

def test_getstartingblock_multiline():
-    class A:
+    class A(object):
        def __init__(self, *args):
            frame = sys._getframe(1)
            self.source = _pytest._code.Frame(frame).statement

@@ -461,16 +462,16 @@ def test_getfslineno():
    assert lineno == A_lineno

    assert getfslineno(3) == ("", -1)
-    class B:
+    class B(object):
        pass
    B.__name__ = "B2"
    assert getfslineno(B)[1] == -1

def test_code_of_object_instance_with_call():
-    class A:
+    class A(object):
        pass
    pytest.raises(TypeError, lambda: _pytest._code.Source(A()))
-    class WithCall:
+    class WithCall(object):
        def __call__(self):
            pass

@@ -559,7 +560,7 @@ x = 3
""")
    assert str(source) == "raise ValueError(\n 23\n)"

-class TestTry:
+class TestTry(object):
    pytestmark = astonly
    source = """\
try:

@@ -586,7 +587,7 @@ else:
    source = getstatement(5, self.source)
    assert str(source) == " raise KeyError()"

-class TestTryFinally:
+class TestTryFinally(object):
    source = """\
try:
    raise ValueError

@@ -604,7 +605,7 @@ finally:



-class TestIf:
+class TestIf(object):
    pytestmark = astonly
    source = """\
if 1:
@@ -1,3 +1,4 @@
+from __future__ import absolute_import, division, print_function
import pytest


@@ -26,7 +27,7 @@ def test_funcarg_prefix_deprecation(testdir):
    """)
    result = testdir.runpytest('-ra')
    result.stdout.fnmatch_lines([
-        ('WC1 None pytest_funcarg__value: '
+        ('*pytest_funcarg__value: '
         'declaring fixtures using "pytest_funcarg__" prefix is deprecated '
         'and scheduled to be removed in pytest 4.0. '
         'Please remove the prefix and use the @pytest.fixture decorator instead.'),

@@ -48,7 +49,7 @@ def test_str_args_deprecated(tmpdir, testdir):
    from _pytest.main import EXIT_NOTESTSCOLLECTED
    warnings = []

-    class Collect:
+    class Collect(object):
        def pytest_logwarning(self, message):
            warnings.append(message)
@@ -20,7 +20,7 @@ class MyDocTestRunner(doctest.DocTestRunner):
            example.source.strip(), got.strip(), example.want.strip()))


-class TestApprox:
+class TestApprox(object):

    def test_repr_string(self):
        # for some reason in Python 2.6 it is not displaying the tolerance representation correctly
@@ -12,7 +12,7 @@ from _pytest.main import (
)


-class TestModule:
+class TestModule(object):
    def test_failing_import(self, testdir):
        modcol = testdir.getmodulecol("import alksdjalskdjalkjals")
        pytest.raises(Collector.CollectError, modcol.collect)

@@ -104,7 +104,6 @@ class TestModule:
        else:
            assert name not in stdout

-
    def test_show_traceback_import_error_unicode(self, testdir):
        """Check test modules collected which raise ImportError with unicode messages
        are handled properly (#2336).

@@ -122,17 +121,17 @@ class TestModule:
        assert result.ret == 2


-class TestClass:
+class TestClass(object):
    def test_class_with_init_warning(self, testdir):
        testdir.makepyfile("""
-            class TestClass1:
+            class TestClass1(object):
                def __init__(self):
                    pass
        """)
        result = testdir.runpytest("-rw")
-        result.stdout.fnmatch_lines_random("""
-            WC1*test_class_with_init_warning.py*__init__*
-        """)
+        result.stdout.fnmatch_lines([
+            "*cannot collect test class 'TestClass1' because it has a __init__ constructor",
+        ])

    def test_class_subclassobject(self, testdir):
        testdir.getmodulecol("""

@@ -146,7 +145,7 @@ class TestClass:

    def test_setup_teardown_class_as_classmethod(self, testdir):
        testdir.makepyfile(test_mod1="""
-            class TestClassMethod:
+            class TestClassMethod(object):
                @classmethod
                def setup_class(cls):
                    pass

@@ -194,7 +193,7 @@ class TestClass:
        assert result.ret == EXIT_NOTESTSCOLLECTED


-class TestGenerator:
+class TestGenerator(object):
    def test_generative_functions(self, testdir):
        modcol = testdir.getmodulecol("""
            def func1(arg, arg2):

@@ -219,7 +218,7 @@ class TestGenerator:
        modcol = testdir.getmodulecol("""
            def func1(arg, arg2):
                assert arg == arg2
-            class TestGenMethods:
+            class TestGenMethods(object):
                def test_gen(self):
                    yield func1, 17, 3*5
                    yield func1, 42, 6*7

@@ -273,7 +272,7 @@ class TestGenerator:
        modcol = testdir.getmodulecol("""
            def func1(arg, arg2):
                assert arg == arg2
-            class TestGenMethods:
+            class TestGenMethods(object):
                def test_gen(self):
                    yield "m1", func1, 17, 3*5
                    yield "m2", func1, 42, 6*7

@@ -291,6 +290,7 @@ class TestGenerator:

    def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir):
        o = testdir.makepyfile("""
+            from __future__ import print_function
            def test_generative_order_of_execution():
                import py, pytest
                test_list = []

@@ -300,8 +300,8 @@ class TestGenerator:
                    test_list.append(item)

                def assert_order_of_execution():
-                    py.builtin.print_('expected order', expected_list)
-                    py.builtin.print_('but got ', test_list)
+                    print('expected order', expected_list)
+                    print('but got ', test_list)
                    assert test_list == expected_list

                for i in expected_list:

@@ -315,6 +315,7 @@ class TestGenerator:

    def test_order_of_execution_generator_different_codeline(self, testdir):
        o = testdir.makepyfile("""
+            from __future__ import print_function
            def test_generative_tests_different_codeline():
                import py, pytest
                test_list = []

@@ -330,8 +331,8 @@ class TestGenerator:
                    test_list.append(0)

                def assert_order_of_execution():
-                    py.builtin.print_('expected order', expected_list)
-                    py.builtin.print_('but got ', test_list)
+                    print('expected order', expected_list)
+                    print('but got ', test_list)
                    assert test_list == expected_list

                yield list_append_0

@@ -353,7 +354,7 @@ class TestGenerator:
        # has been used during collection.
        o = testdir.makepyfile("""
            setuplist = []
-            class TestClass:
+            class TestClass(object):
                def setup_method(self, func):
                    #print "setup_method", self, func
                    setuplist.append(self)

@@ -387,7 +388,7 @@ class TestGenerator:
        assert not skipped and not failed


-class TestFunction:
+class TestFunction(object):
    def test_getmodulecollector(self, testdir):
        item = testdir.getitem("def test_func(): pass")
        modcol = item.getparent(pytest.Module)

@@ -396,7 +397,7 @@ class TestFunction:

    def test_function_as_object_instance_ignored(self, testdir):
        testdir.makepyfile("""
-            class A:
+            class A(object):
                def __call__(self, tmpdir):
                    0/0

@@ -447,7 +448,7 @@ class TestFunction:
    def test_issue213_parametrize_value_no_equal(self, testdir):
        testdir.makepyfile("""
            import pytest
-            class A:
+            class A(object):
                def __eq__(self, other):
                    raise ValueError("not possible")
            @pytest.mark.parametrize('arg', [A()])

@@ -578,11 +579,11 @@ class TestFunction:
        item = testdir.getitem("def test_func(): raise ValueError")
        config = item.config

-        class MyPlugin1:
+        class MyPlugin1(object):
            def pytest_pyfunc_call(self, pyfuncitem):
                raise ValueError

-        class MyPlugin2:
+        class MyPlugin2(object):
            def pytest_pyfunc_call(self, pyfuncitem):
                return True

@@ -710,7 +711,7 @@ class TestFunction:
        assert [x.originalname for x in items] == ['test_func', 'test_func']


-class TestSorting:
+class TestSorting(object):
    def test_check_equality(self, testdir):
        modcol = testdir.getmodulecol("""
            def test_pass(): pass

@@ -760,7 +761,7 @@ class TestSorting:
        assert [item.name for item in colitems] == ['test_b', 'test_a']


-class TestConftestCustomization:
+class TestConftestCustomization(object):
    def test_pytest_pycollect_module(self, testdir):
        testdir.makeconftest("""
            import pytest

@@ -902,7 +903,7 @@ def test_modulecol_roundtrip(testdir):
    assert modcol.name == newcol.name


-class TestTracebackCutting:
+class TestTracebackCutting(object):
    def test_skip_simple(self):
        excinfo = pytest.raises(pytest.skip.Exception, 'pytest.skip("xxx")')
        assert excinfo.traceback[-1].frame.code.name == "skip"

@@ -1028,7 +1029,7 @@ class TestTracebackCutting:
        assert filter_traceback(tb[-1])


-class TestReportInfo:
+class TestReportInfo(object):
    def test_itemreport_reportinfo(self, testdir, linecomp):
        testdir.makeconftest("""
            import pytest

@@ -1053,7 +1054,7 @@ class TestReportInfo:
    def test_class_reportinfo(self, testdir):
        modcol = testdir.getmodulecol("""
            # lineno 0
-            class TestClass:
+            class TestClass(object):
                def test_hello(self): pass
        """)
        classcol = testdir.collect_by_name(modcol, "TestClass")

@@ -1088,7 +1089,7 @@ class TestReportInfo:
            def check(x):
                pass
            yield check, 3
-            class TestClass:
+            class TestClass(object):
                def test_method(self):
                    pass
        """

@@ -1097,7 +1098,7 @@ class TestReportInfo:
        # https://github.com/pytest-dev/pytest/issues/1204
        modcol = testdir.getmodulecol("""
            # lineno 0
-            class TestClass:
+            class TestClass(object):
                def __getattr__(self, name):
                    return "this is not an int"

@@ -1119,7 +1120,7 @@ def test_customized_python_discovery(testdir):
    p = testdir.makepyfile("""
        def check_simple():
            pass
-        class CheckMyApp:
+        class CheckMyApp(object):
            def check_meth(self):
                pass
    """)

@@ -1194,7 +1195,7 @@ def test_customize_through_attributes(testdir):
            return MyClass(name, parent=collector)
    """)
    testdir.makepyfile("""
-        class MyTestClass:
+        class MyTestClass(object):
            def test_hello(self):
                pass
    """)

@@ -1208,11 +1209,11 @@ def test_customize_through_attributes(testdir):

def test_unorderable_types(testdir):
    testdir.makepyfile("""
-        class TestJoinEmpty:
+        class TestJoinEmpty(object):
            pass

        def make_test():
-            class Test:
+            class Test(object):
                pass
            Test.__name__ = "TestFoo"
            return Test

@@ -1286,8 +1287,8 @@ def test_dont_collect_non_function_callable(testdir):
    result = testdir.runpytest('-rw')
    result.stdout.fnmatch_lines([
        '*collected 1 item*',
-        'WC2 *',
-        '*1 passed, 1 pytest-warnings in *',
+        "*cannot collect 'test_a' because it is not a function*",
+        '*1 passed, 1 warnings in *',
    ])
@ -20,7 +20,7 @@ def test_getfuncargnames():
|
|||
def h(arg1, arg2, arg3="hello"): pass
|
||||
assert fixtures.getfuncargnames(h) == ('arg1', 'arg2')
|
||||
|
||||
class A:
|
||||
class A(object):
|
||||
def f(self, arg1, arg2="hello"):
|
||||
pass
|
||||
|
||||
|
@ -28,7 +28,7 @@ def test_getfuncargnames():
|
|||
if sys.version_info < (3,0):
|
||||
assert fixtures.getfuncargnames(A.f) == ('arg1',)
|
||||
|
||||
class TestFillFixtures:
|
||||
class TestFillFixtures(object):
|
||||
def test_fillfuncargs_exposed(self):
|
||||
# used by oejskit, kept for compatibility
|
||||
assert pytest._fillfuncargs == fixtures.fillfixtures
|
||||
|
@ -79,7 +79,7 @@ class TestFillFixtures:
|
|||
def something(request):
|
||||
return request.function.__name__
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_method(self, something):
|
||||
assert something == "test_method"
|
||||
def test_func(something):
|
||||
|
@ -91,7 +91,7 @@ class TestFillFixtures:
|
|||
def test_funcarg_lookup_classlevel(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
import pytest
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
|
||||
@pytest.fixture
|
||||
def something(self, request):
|
||||
|
@ -134,7 +134,7 @@ class TestFillFixtures:
|
|||
def spam():
|
||||
return 'spam'
|
||||
|
||||
class TestSpam:
|
||||
class TestSpam(object):
|
||||
|
||||
@pytest.fixture
|
||||
def spam(self, spam):
|
||||
|
@ -463,7 +463,7 @@ class TestFillFixtures:
|
|||
assert result.ret == 0
|
||||
|
||||
|
||||
class TestRequestBasic:
|
||||
class TestRequestBasic(object):
|
||||
def test_request_attributes(self, testdir):
|
||||
item = testdir.getitem("""
|
||||
import pytest
|
||||
|
@ -484,7 +484,7 @@ class TestRequestBasic:
|
|||
def test_request_attributes_method(self, testdir):
|
||||
item, = testdir.getitems("""
|
||||
import pytest
|
||||
class TestB:
|
||||
class TestB(object):
|
||||
|
||||
@pytest.fixture
|
||||
def something(self, request):
|
||||
|
@ -502,7 +502,7 @@ class TestRequestBasic:
|
|||
@pytest.fixture
|
||||
def something(request):
|
||||
pass
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_method(self, something):
|
||||
pass
|
||||
""")
|
||||
|
@ -545,22 +545,33 @@ class TestRequestBasic:
|
|||
return l.pop()
|
||||
def test_func(something): pass
|
||||
""")
|
||||
import contextlib
|
||||
if getfixmethod == 'getfuncargvalue':
|
||||
warning_expectation = pytest.warns(DeprecationWarning)
|
||||
else:
|
||||
# see #1830 for a cleaner way to accomplish this
|
||||
@contextlib.contextmanager
|
||||
def expecting_no_warning(): yield
|
||||
|
||||
warning_expectation = expecting_no_warning()
|
||||
|
||||
req = item._request
|
||||
fixture_fetcher = getattr(req, getfixmethod)
|
||||
pytest.raises(FixtureLookupError, fixture_fetcher, "notexists")
|
||||
val = fixture_fetcher("something")
|
||||
assert val == 1
|
||||
val = fixture_fetcher("something")
|
||||
assert val == 1
|
||||
val2 = fixture_fetcher("other")
|
||||
assert val2 == 2
|
||||
val2 = fixture_fetcher("other") # see about caching
|
||||
assert val2 == 2
|
||||
pytest._fillfuncargs(item)
|
||||
assert item.funcargs["something"] == 1
|
||||
assert len(get_public_names(item.funcargs)) == 2
|
||||
assert "request" in item.funcargs
|
||||
#assert item.funcargs == {'something': 1, "other": 2}
|
||||
with warning_expectation:
|
||||
fixture_fetcher = getattr(req, getfixmethod)
|
||||
with pytest.raises(FixtureLookupError):
|
||||
fixture_fetcher("notexists")
|
||||
val = fixture_fetcher("something")
|
||||
assert val == 1
|
||||
val = fixture_fetcher("something")
|
||||
assert val == 1
|
||||
val2 = fixture_fetcher("other")
|
||||
assert val2 == 2
|
||||
val2 = fixture_fetcher("other") # see about caching
|
||||
assert val2 == 2
|
||||
pytest._fillfuncargs(item)
|
||||
assert item.funcargs["something"] == 1
|
||||
assert len(get_public_names(item.funcargs)) == 2
|
||||
assert "request" in item.funcargs
|
||||
|
||||
def test_request_addfinalizer(self, testdir):
|
||||
item = testdir.getitem("""
|
||||
|
@ -704,7 +715,7 @@ class TestRequestBasic:
|
|||
def test_func():
|
||||
pass
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
@pytest.fixture(scope="class", autouse=True)
|
||||
def setup_class(self):
|
||||
l.append("class")
|
||||
|
@ -771,7 +782,7 @@ class TestRequestBasic:
|
|||
reprec = testdir.inline_run()
|
||||
reprec.assertoutcome(passed=2)
|
||||
|
||||
class TestRequestMarking:
|
||||
class TestRequestMarking(object):
|
||||
def test_applymarker(self, testdir):
|
||||
item1,item2 = testdir.getitems("""
|
||||
import pytest
|
||||
|
@ -779,7 +790,7 @@ class TestRequestMarking:
|
|||
@pytest.fixture
|
||||
def something(request):
|
||||
pass
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_func1(self, something):
|
||||
pass
|
||||
def test_func2(self, something):
|
||||
|
@ -831,7 +842,7 @@ class TestRequestMarking:
|
|||
reprec = testdir.inline_run()
|
||||
reprec.assertoutcome(passed=2)
|
||||
|
||||
class TestRequestCachedSetup:
|
||||
class TestRequestCachedSetup(object):
|
||||
def test_request_cachedsetup_defaultmodule(self, testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
mysetup = ["hello",].pop
|
||||
|
@ -844,7 +855,7 @@ class TestRequestCachedSetup:
|
|||
|
||||
def test_func1(something):
|
||||
assert something == "hello"
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_func1a(self, something):
|
||||
assert something == "hello"
|
||||
""")
|
||||
|
@ -862,7 +873,7 @@ class TestRequestCachedSetup:
|
|||
assert something == "hello3"
|
||||
def test_func2(something):
|
||||
assert something == "hello2"
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_func1a(self, something):
|
||||
assert something == "hello"
|
||||
def test_func2b(self, something):
|
||||
|
@ -996,7 +1007,7 @@ class TestRequestCachedSetup:
|
|||
"*ZeroDivisionError*",
|
||||
])
|
||||
|
||||
class TestFixtureUsages:
|
||||
class TestFixtureUsages(object):
|
||||
def test_noargfixturedec(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
|
@ -1138,7 +1149,7 @@ class TestFixtureUsages:
|
|||
def test_factory_setup_as_classes_fails(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
class arg1:
|
||||
class arg1(object):
|
||||
def __init__(self, request):
|
||||
self.x = 1
|
||||
arg1 = pytest.fixture()(arg1)
|
||||
|
@ -1172,7 +1183,7 @@ class TestFixtureUsages:
|
|||
request.cls.hello = "world"
|
||||
l.append(1)
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_one(self):
|
||||
assert self.hello == "world"
|
||||
assert len(l) == 1
|
||||
|
@ -1198,7 +1209,7 @@ class TestFixtureUsages:
|
|||
|
||||
""")
|
||||
testdir.makepyfile("""
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_one(self):
|
||||
assert self.hello == "world"
|
||||
def test_two(self):
|
||||
|
@ -1217,7 +1228,7 @@ class TestFixtureUsages:
|
|||
testdir.makepyfile("""
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
@pytest.fixture
|
||||
def setup1(self, request):
|
||||
assert self == request.instance
|
||||
|
@ -1256,7 +1267,7 @@ class TestFixtureUsages:
|
|||
assert l == [1,2, 10,20]
|
||||
|
||||
|
||||
class TestFixtureManagerParseFactories:
|
||||
class TestFixtureManagerParseFactories(object):
|
||||
|
||||
@pytest.fixture
|
||||
def testdir(self, request):
|
||||
|
@ -1280,7 +1291,7 @@ class TestFixtureManagerParseFactories:
|
|||
|
||||
def test_parsefactories_evil_objects_issue214(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
class A:
|
||||
class A(object):
|
||||
def __call__(self):
|
||||
pass
|
||||
def __getattr__(self, name):
|
||||
|
@ -1311,7 +1322,7 @@ class TestFixtureManagerParseFactories:
|
|||
@pytest.fixture
|
||||
def hello(request):
|
||||
return "module"
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
@pytest.fixture
|
||||
def hello(self, request):
|
||||
return "class"
|
||||
|
@ -1360,7 +1371,7 @@ class TestFixtureManagerParseFactories:
|
|||
reprec.assertoutcome(passed=2)
|
||||
|
||||
|
||||
class TestAutouseDiscovery:
|
||||
class TestAutouseDiscovery(object):
|
||||
|
||||
@pytest.fixture
|
||||
def testdir(self, testdir):
|
||||
|
@ -1402,14 +1413,14 @@ class TestAutouseDiscovery:
|
|||
def test_two_classes_separated_autouse(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
class TestA:
|
||||
class TestA(object):
|
||||
l = []
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup1(self):
|
||||
self.l.append(1)
|
||||
def test_setup1(self):
|
||||
assert self.l == [1]
|
||||
class TestB:
|
||||
class TestB(object):
|
||||
l = []
|
||||
@pytest.fixture(autouse=True)
|
||||
def setup2(self):
|
||||
|
@ -1423,7 +1434,7 @@ class TestAutouseDiscovery:
|
|||
def test_setup_at_classlevel(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
@pytest.fixture(autouse=True)
|
||||
def permethod(self, request):
|
||||
request.instance.funcname = request.function.__name__
|
||||
|
@ -1505,13 +1516,13 @@ class TestAutouseDiscovery:
|
|||
def test_x():
|
||||
assert l == ["module"]
|
||||
|
||||
class TestA:
|
||||
class TestA(object):
|
||||
@pytest.fixture(autouse=True)
|
||||
def append2(self):
|
||||
l.append("A")
|
||||
def test_hello(self):
|
||||
assert l == ["module", "module", "A"], l
|
||||
class TestA2:
|
||||
class TestA2(object):
|
||||
def test_world(self):
|
||||
assert l == ["module", "module", "A", "module"], l
|
||||
""")
|
||||
|
@ -1519,7 +1530,7 @@ class TestAutouseDiscovery:
|
|||
reprec.assertoutcome(passed=3)
|
||||
|
||||
|
||||
class TestAutouseManagement:
|
||||
class TestAutouseManagement(object):
|
||||
def test_autouse_conftest_mid_directory(self, testdir):
|
||||
pkgdir = testdir.mkpydir("xyz123")
|
||||
pkgdir.join("conftest.py").write(_pytest._code.Source("""
|
||||
|
@ -1654,10 +1665,10 @@ class TestAutouseManagement:
|
|||
testdir.makepyfile("""
|
||||
import pytest
|
||||
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_1(self):
|
||||
pass
|
||||
class TestClass2:
|
||||
class TestClass2(object):
|
||||
def test_2(self):
|
||||
pass
|
||||
""")
|
||||
|
@ -1682,7 +1693,7 @@ class TestAutouseManagement:
|
|||
def mappend():
|
||||
l.append(1)
|
||||
|
||||
class TestHallo:
|
||||
class TestHallo(object):
|
||||
def test_method(self):
|
||||
assert l == [1,3,2]
|
||||
""")
|
||||
|
@ -1696,7 +1707,7 @@ class TestAutouseManagement:
|
|||
def pytest_generate_tests(metafunc):
|
||||
if metafunc.cls is not None:
|
||||
metafunc.parametrize("item", [1,2], scope="class")
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
@pytest.fixture(scope="class", autouse=True)
|
||||
def addteardown(self, item, request):
|
||||
l.append("setup-%d" % item)
|
||||
|
@ -1756,7 +1767,7 @@ class TestAutouseManagement:
|
|||
reprec.assertoutcome(passed=2)
|
||||
|
||||
|
||||
class TestFixtureMarker:
|
||||
class TestFixtureMarker(object):
|
||||
def test_parametrize(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
|
@ -1822,7 +1833,7 @@ class TestFixtureMarker:
|
|||
def test_2(arg):
|
||||
assert arg == 1
|
||||
assert len(l) == 1
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test3(self, arg):
|
||||
assert arg == 1
|
||||
assert len(l) == 1
|
||||
|
@ -1916,7 +1927,7 @@ class TestFixtureMarker:
|
|||
def test_2(arg):
|
||||
assert arg == 1
|
||||
assert len(l) == 1
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test3(self, arg):
|
||||
assert arg == 1
|
||||
assert len(l) == 1
|
||||
|
@ -2135,12 +2146,12 @@ class TestFixtureMarker:
|
|||
testdir.makepyfile("""
|
||||
import pytest
|
||||
|
||||
class TestClass2:
|
||||
class TestClass2(object):
|
||||
def test_1(self):
|
||||
pass
|
||||
def test_2(self):
|
||||
pass
|
||||
class TestClass:
|
||||
class TestClass(object):
|
||||
def test_3(self):
|
||||
pass
|
||||
""")
|
||||
|
@@ -2213,7 +2224,7 @@ class TestFixtureMarker:

             l = []

-            class TestClass:
+            class TestClass(object):
                 @classmethod
                 @pytest.fixture(scope="class", autouse=True)
                 def setup1(self, request, param1):
@@ -2273,7 +2284,7 @@ class TestFixtureMarker:
         testpath = testdir.makepyfile("""
             import pytest

-            class Box:
+            class Box(object):
                 value = 0

             @pytest.fixture(scope='class')
@@ -2284,11 +2295,11 @@ class TestFixtureMarker:
             def test_a(a):
                 assert a == 1

-            class Test1:
+            class Test1(object):
                 def test_b(self, a):
                     assert a == 2

-            class Test2:
+            class Test2(object):
                 def test_c(self, a):
                     assert a == 3""")
         reprec = testdir.inline_run(testpath)
@@ -2402,11 +2413,11 @@ class TestFixtureMarker:
                 request.addfinalizer(lambda: l.append("fin %s" % request.param))
                 return request.param

-            class TestGreetings:
+            class TestGreetings(object):
                 def test_hello(self, human):
                     l.append("test_hello")

-            class TestMetrics:
+            class TestMetrics(object):
                 def test_name(self, human):
                     l.append("test_name")

@@ -2499,7 +2510,7 @@ class TestFixtureMarker:
             '*test_foo*beta*'])


-class TestRequestScopeAccess:
+class TestRequestScopeAccess(object):
     pytestmark = pytest.mark.parametrize(("scope", "ok", "error"),[
         ["session", "", "fspath class function module"],
         ["module", "module fspath", "cls function"],
@@ -2543,7 +2554,7 @@ class TestRequestScopeAccess:
         reprec = testdir.inline_run()
         reprec.assertoutcome(passed=1)

-class TestErrors:
+class TestErrors(object):
     def test_subfactory_missing_funcarg(self, testdir):
         testdir.makepyfile("""
             import pytest
@@ -2607,7 +2618,7 @@ class TestErrors:
             "*1 error*",
         ])

-class TestShowFixtures:
+class TestShowFixtures(object):
     def test_funcarg_compat(self, testdir):
         config = testdir.parseconfigure("--funcargs")
         assert config.option.showfixtures
@@ -2770,7 +2781,7 @@ class TestShowFixtures:


 @pytest.mark.parametrize('flavor', ['fixture', 'yield_fixture'])
-class TestContextManagerFixtureFuncs:
+class TestContextManagerFixtureFuncs(object):

     def test_simple(self, testdir, flavor):
         testdir.makepyfile("""
@@ -2877,7 +2888,7 @@ class TestContextManagerFixtureFuncs:
         result = testdir.runpytest("-s")
         result.stdout.fnmatch_lines("*mew*")

-class TestParameterizedSubRequest:
+class TestParameterizedSubRequest(object):
     def test_call_from_fixture(self, testdir):
         testfile = testdir.makepyfile("""
             import pytest
@@ -3,7 +3,7 @@ from _pytest import python
 from _pytest import runner


-class TestOEJSKITSpecials:
+class TestOEJSKITSpecials(object):
     def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage
         testdir.makeconftest("""
             import pytest
@@ -19,7 +19,7 @@ class TestOEJSKITSpecials:
             @pytest.fixture
             def arg1(request):
                 return 42
-            class MyClass:
+            class MyClass(object):
                 pass
         """)
         # this hook finds funcarg factories
@@ -48,7 +48,7 @@ class TestOEJSKITSpecials:
             @pytest.fixture
             def arg1(request):
                 return 42
-            class MyClass:
+            class MyClass(object):
                 pass
         """)
         # this hook finds funcarg factories
@@ -76,7 +76,7 @@ def test_wrapped_getfslineno():
     fs2, lineno2 = python.getfslineno(wrap)
     assert lineno > lineno2, "getfslineno does not unwrap correctly"

-class TestMockDecoration:
+class TestMockDecoration(object):
     def test_wrapped_getfuncargnames(self):
         from _pytest.compat import getfuncargnames

@@ -207,7 +207,7 @@ class TestMockDecoration:
             @patch('os.getcwd')
             @patch('os.path')
             @mark.slow
-            class TestSimple:
+            class TestSimple(object):
                 def test_simple_thing(self, mock_path, mock_getcwd):
                     pass
         """)
@@ -215,7 +215,7 @@ class TestMockDecoration:
         reprec.assertoutcome(passed=1)


-class TestReRunTests:
+class TestReRunTests(object):
     def test_rerun(self, testdir):
         testdir.makeconftest("""
             from _pytest.runner import runtestprotocol
@@ -251,7 +251,7 @@ def test_pytestconfig_is_session_scoped():
     assert pytestconfig._pytestfixturefunction.scope == "session"


-class TestNoselikeTestAttribute:
+class TestNoselikeTestAttribute(object):
     def test_module_with_global_test(self, testdir):
         testdir.makepyfile("""
             __test__ = False
@@ -270,7 +270,7 @@ class TestNoselikeTestAttribute:
                 pass
             test_func.__test__ = False

-            class TestSome:
+            class TestSome(object):
                 __test__ = False
                 def test_method(self):
                     pass
@@ -328,7 +328,7 @@ class TestNoselikeTestAttribute:


 @pytest.mark.issue351
-class TestParameterize:
+class TestParameterize(object):

     def test_idfn_marker(self, testdir):
         testdir.makepyfile("""
Some files were not shown because too many files have changed in this diff