Merge remote-tracking branch 'upstream/features'
commit 4678cbeb91
@@ -2,6 +2,3 @@
 omit =
     # standlonetemplate is read dynamically and tested by test_genscript
     *standalonetemplate.py
-    # oldinterpret could be removed, as it is no longer used in py26+
-    *oldinterpret.py
-    vendored_packages
@@ -19,20 +19,18 @@ env:
   - TOXENV=py27-xdist
   - TOXENV=py27-trial
   - TOXENV=py27-numpy
   - TOXENV=py27-pluggymaster
   - TOXENV=py36-pexpect
   - TOXENV=py36-xdist
   - TOXENV=py36-trial
   - TOXENV=py36-numpy
   - TOXENV=py36-pluggymaster
   - TOXENV=py27-nobyte
   - TOXENV=doctesting
   - TOXENV=docs

 matrix:
   include:
-    - env: TOXENV=py26
-      python: '2.6'
-    - env: TOXENV=py33
-      python: '3.3'
     - env: TOXENV=pypy
       python: 'pypy-5.4'
     - env: TOXENV=py35
AUTHORS (8 changed lines)
@@ -30,6 +30,7 @@ Brianna Laugher
 Bruno Oliveira
 Cal Leeming
 Carl Friedrich Bolz
 Ceridwen
 Charles Cloud
 Charnjit SiNGH (CCSJ)
 Chris Lamb

@@ -65,6 +66,7 @@ Feng Ma
 Florian Bruhin
 Floris Bruynooghe
 Gabriel Reis
 George Kussumoto
 Georgy Dyuldin
 Graham Horler
 Greg Price

@@ -72,6 +74,7 @@ Grig Gheorghiu
 Grigorii Eremeev (budulianin)
 Guido Wesdorp
 Harald Armin Massa
 Hugo van Kemenade
 Hui Wang (coldnight)
 Ian Bicking
 Jaap Broekhuizen

@@ -81,6 +84,7 @@ Jason R. Coombs
 Javier Domingo Cansino
 Javier Romero
 Jeff Widman
 John Eddie Ayson
 John Towler
 Jon Sonesen
 Jonas Obrist

@@ -118,6 +122,7 @@ Matt Bachmann
 Matt Duck
 Matt Williams
 Matthias Hafner
 Maxim Filipenko
 mbyt
 Michael Aquilina
 Michael Birtwell

@@ -152,6 +157,7 @@ Ronny Pfannschmidt
 Ross Lawley
 Russel Winder
 Ryan Wooden
 Samuel Dion-Girardeau
 Samuele Pedroni
 Segev Finer
 Simon Gomizelj

@@ -162,9 +168,11 @@ Stefan Zimmermann
 Stefano Taschini
 Steffen Allner
 Stephan Obermann
 Tarcisio Fischer
 Tareq Alayan
 Ted Xiao
 Thomas Grainger
 Thomas Hisch
 Tom Dalton
 Tom Viner
 Trevor Bekolay
CHANGELOG.rst (171 changed lines)
@@ -8,6 +8,175 @@
 .. towncrier release notes start

+Pytest 3.3.0 (2017-11-23)
+=========================
+
+Deprecations and Removals
+-------------------------
+
+- Pytest no longer supports Python **2.6** and **3.3**. Those Python versions
+  have been EOL for some time now and incur maintenance and compatibility costs on
+  the pytest core team; following up with the rest of the community, we
+  decided that they will no longer be supported starting with this version. Users
+  who still require those versions should pin pytest to ``<3.3``. (`#2812
+  <https://github.com/pytest-dev/pytest/issues/2812>`_)
+
+- Remove the internal ``_preloadplugins()`` function. This removal is part of the
+  ``pytest_namespace()`` hook deprecation. (`#2236
+  <https://github.com/pytest-dev/pytest/issues/2236>`_)
+
+- Internally change ``CallSpec2`` to have a list of marks instead of a broken
+  mapping of keywords. This removes the ``keywords`` attribute of the internal
+  ``CallSpec2`` class. (`#2672
+  <https://github.com/pytest-dev/pytest/issues/2672>`_)
+
+- Remove ``ParameterSet.deprecated_arg_dict`` - it is not a public API and the lack
+  of the underscore was a naming error. (`#2675
+  <https://github.com/pytest-dev/pytest/issues/2675>`_)
+
+- Remove the internal multi-typed attribute ``Node._evalskip`` and replace it
+  with the boolean ``Node._skipped_by_mark``. (`#2767
+  <https://github.com/pytest-dev/pytest/issues/2767>`_)
+
+
+Features
+--------
+
+- The ``pytest_fixture_post_finalizer`` hook can now receive a ``request``
+  argument. (`#2124 <https://github.com/pytest-dev/pytest/issues/2124>`_)
+
+- Replace the old introspection code in compat.py that determines the available
+  arguments of fixtures with ``inspect.signature`` on Python 3 and
+  ``funcsigs.signature`` on Python 2. This should respect ``__signature__``
+  declarations on functions. (`#2267
+  <https://github.com/pytest-dev/pytest/issues/2267>`_)
+
+- Report tests with a global ``pytestmark`` variable only once. (`#2549
+  <https://github.com/pytest-dev/pytest/issues/2549>`_)
+
+- Pytest now displays the total progress percentage while running tests. The
+  previous output style can be restored by setting the ``console_output_style``
+  option to ``classic``. (`#2657 <https://github.com/pytest-dev/pytest/issues/2657>`_)
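For orientation, reverting to the old style would look like the snippet below; a minimal sketch, assuming an existing ``pytest.ini`` (the surrounding file content is hypothetical)::

    # pytest.ini
    [pytest]
    # opt out of the new percentage progress output introduced in 3.3
    console_output_style = classic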
+- Match the ``warns`` signature to ``raises`` by adding a ``match`` keyword. (`#2708
+  <https://github.com/pytest-dev/pytest/issues/2708>`_)
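A minimal sketch of the new keyword, mirroring how ``pytest.raises(..., match=...)`` already behaved (the test body is hypothetical)::

    import warnings

    import pytest

    def test_warning_message_matches():
        # `match` is applied as a regular expression search on the warning message
        with pytest.warns(UserWarning, match=r'must be \d+'):
            warnings.warn('value must be 42', UserWarning)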
+- Pytest now captures and displays output from the standard ``logging`` module.
+  The user can control the logging level to be captured by specifying options
+  in ``pytest.ini``, on the command line, and also during individual tests using
+  markers. A ``caplog`` fixture is also available that enables users to inspect
+  the captured log records during specific tests (similar to ``capsys``, for example).
+  For more information, please see the `logging docs
+  <https://docs.pytest.org/en/latest/logging.html>`_. This feature was
+  introduced by merging the popular `pytest-catchlog
+  <https://pypi.org/project/pytest-catchlog/>`_ plugin, thanks to `Thomas Hisch
+  <https://github.com/thisch>`_. Be advised that during the merge the
+  backward-compatibility interface with the defunct ``pytest-capturelog``
+  plugin was dropped. (`#2794 <https://github.com/pytest-dev/pytest/issues/2794>`_)
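A minimal sketch of the merged functionality (the logger name and test body are hypothetical; ``caplog.records`` and ``caplog.text`` come from the merged pytest-catchlog API)::

    import logging

    logger = logging.getLogger(__name__)

    def do_work():
        logger.warning('disk is almost full')

    def test_warning_is_logged(caplog):
        do_work()
        # every captured logging.LogRecord is exposed on caplog.records
        assert any(rec.levelno == logging.WARNING for rec in caplog.records)
        assert 'disk is almost full' in caplog.text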
+- Add an ``allow_module_level`` kwarg to ``pytest.skip()``, which allows skipping the
+  whole module. (`#2808 <https://github.com/pytest-dev/pytest/issues/2808>`_)
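For illustration, a module-level skip using the new kwarg (the platform condition is hypothetical)::

    import sys

    import pytest

    # skip collecting everything in this module, not just a single test
    if sys.platform.startswith('win'):
        pytest.skip('these tests are POSIX-only', allow_module_level=True)

    def test_posix_behaviour():
        assert True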
+- Allow setting ``file_or_dir``, ``-c``, and ``-o`` in ``PYTEST_ADDOPTS``. (`#2824
+  <https://github.com/pytest-dev/pytest/issues/2824>`_)
+
+- Return stdout/stderr capture results as a ``namedtuple``, so ``out`` and
+  ``err`` can be accessed by attribute. (`#2879
+  <https://github.com/pytest-dev/pytest/issues/2879>`_)
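Both access styles now work; a minimal sketch (test body is hypothetical)::

    def test_print_is_captured(capsys):
        print('hello')
        captured = capsys.readouterr()
        # attribute access, new in this release...
        assert captured.out == 'hello\n'
        # ...while tuple unpacking keeps working; note the buffer was
        # drained by the first readouterr() call
        out, err = capsys.readouterr()
        assert (out, err) == ('', '')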
+- Add ``capfdbinary``, a version of ``capfd`` which returns bytes from
+  ``readouterr()``. (`#2923
+  <https://github.com/pytest-dev/pytest/issues/2923>`_)
+
+- Add ``capsysbinary``, a version of ``capsys`` which returns bytes from
+  ``readouterr()``. (`#2934
+  <https://github.com/pytest-dev/pytest/issues/2934>`_)
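A minimal sketch of the file-descriptor variant (the payload is arbitrary; ``capsysbinary`` is Python 3 only, per the fixture's own check)::

    import os

    def test_binary_output(capfdbinary):
        # bytes written to file descriptor 1 come back as bytes, not text
        os.write(1, b'\xc3\xa9\n')
        captured = capfdbinary.readouterr()
        assert captured.out == b'\xc3\xa9\n'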
+- Implement a feature to skip ``setup.py`` files when run with
+  ``--doctest-modules``. (`#502
+  <https://github.com/pytest-dev/pytest/issues/502>`_)
+
+
+Bug Fixes
+---------
+
+- Resume output capturing after the ``capsys/capfd.disabled()`` context manager
+  exits. (`#1993 <https://github.com/pytest-dev/pytest/issues/1993>`_)
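The fixed behaviour, sketched (printed strings are hypothetical)::

    def test_temporarily_disabled_capture(capsys):
        print('captured')
        with capsys.disabled():
            print('goes straight to the real stdout')
        # the fix: capturing resumes once the context manager exits
        print('captured again')
        out, _ = capsys.readouterr()
        assert out == 'captured\ncaptured again\n'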
+- The ``pytest_fixture_setup`` and ``pytest_fixture_post_finalizer`` hooks are now
+  called for all ``conftest.py`` files. (`#2124
+  <https://github.com/pytest-dev/pytest/issues/2124>`_)
+
+- If an exception happens while loading a plugin, pytest no longer hides the
+  original traceback. On Python 2 it will show the original traceback with a new
+  message that explains in which plugin the error occurred. On Python 3 it will show
+  two chained exceptions: the original exception raised while loading the plugin,
+  in addition to the exception that pytest throws about loading a plugin. (`#2491
+  <https://github.com/pytest-dev/pytest/issues/2491>`_)
+
+- ``capsys`` and ``capfd`` can now be used by other fixtures. (`#2709
+  <https://github.com/pytest-dev/pytest/issues/2709>`_)
+
+- The internal ``pytester`` plugin properly encodes ``bytes`` arguments to
+  ``utf-8``. (`#2738 <https://github.com/pytest-dev/pytest/issues/2738>`_)
+
+- ``testdir`` now uses the same method used by ``tmpdir`` to create its
+  temporary directory. This changes the final structure of the ``testdir``
+  directory slightly, but should not affect usage in normal scenarios and
+  avoids a number of potential problems. (`#2751
+  <https://github.com/pytest-dev/pytest/issues/2751>`_)
+
+- Pytest no longer complains about warnings with unicode messages being
+  non-ascii compatible even for ascii-compatible messages. As a result,
+  warnings with unicode messages are now converted first to an ascii representation
+  for safety. (`#2809 <https://github.com/pytest-dev/pytest/issues/2809>`_)
+
+- Change the return value of the pytest command when ``--maxfail`` is reached from
+  ``2`` (interrupted) to ``1`` (failed). (`#2845
+  <https://github.com/pytest-dev/pytest/issues/2845>`_)
+
+- Fix an issue in assertion rewriting which could lead it to rewrite modules which
+  should not be rewritten. (`#2939
+  <https://github.com/pytest-dev/pytest/issues/2939>`_)
+
+- Handle marks without descriptions in ``pytest.ini``. (`#2942
+  <https://github.com/pytest-dev/pytest/issues/2942>`_)
+
+
+Trivial/Internal Changes
+------------------------
+
+- pytest now depends on `attrs <https://pypi.org/project/attrs/>`_ for internal
+  structures to ease code maintainability. (`#2641
+  <https://github.com/pytest-dev/pytest/issues/2641>`_)
+
+- Refactored internal Python 2/3 compatibility code to use ``six``. (`#2642
+  <https://github.com/pytest-dev/pytest/issues/2642>`_)
+
+- Stop vendoring ``pluggy`` - we're missing out on its latest changes for not
+  much benefit. (`#2719 <https://github.com/pytest-dev/pytest/issues/2719>`_)
+
+- Internal refactor: simplify ascii string escaping by using the
+  backslashreplace error handler in newer Python 3 versions. (`#2734
+  <https://github.com/pytest-dev/pytest/issues/2734>`_)
+
+- Remove an unnecessary mark evaluator in the unittest plugin. (`#2767
+  <https://github.com/pytest-dev/pytest/issues/2767>`_)
+
+- Calls to ``Metafunc.addcall`` now emit a deprecation warning. This function
+  is scheduled to be removed in ``pytest-4.0``. (`#2876
+  <https://github.com/pytest-dev/pytest/issues/2876>`_)
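For plugin and conftest authors, a hedged migration sketch inside a ``pytest_generate_tests`` hook (the fixture name is hypothetical)::

    def pytest_generate_tests(metafunc):
        if 'value' in metafunc.fixturenames:
            # deprecated, scheduled for removal in pytest 4.0:
            #   metafunc.addcall(funcargs=dict(value=1))
            # replacement:
            metafunc.parametrize('value', [1, 2, 3])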
+- Internal move of the parameterset extraction to a more maintainable place.
+  (`#2877 <https://github.com/pytest-dev/pytest/issues/2877>`_)
+
+- Internal refactoring to simplify scope node lookup. (`#2910
+  <https://github.com/pytest-dev/pytest/issues/2910>`_)
+
+- Configure ``pytest`` to prevent pip from installing pytest on unsupported
+  Python versions. (`#2922
+  <https://github.com/pytest-dev/pytest/issues/2922>`_)
+
+
 Pytest 3.2.5 (2017-11-15)
 =========================
@@ -192,7 +361,7 @@ Deprecations and Removals
 -------------------------

 - ``pytest.approx`` no longer supports ``>``, ``>=``, ``<`` and ``<=``
-  operators to avoid surprising/inconsistent behavior. See `the docs
+  operators to avoid surprising/inconsistent behavior. See `the approx docs
   <https://docs.pytest.org/en/latest/builtin.html#pytest.approx>`_ for more
   information. (`#2003 <https://github.com/pytest-dev/pytest/issues/2003>`_)
@@ -76,7 +76,7 @@ Features
 - Can run `unittest <http://docs.pytest.org/en/latest/unittest.html>`_ (or trial),
   `nose <http://docs.pytest.org/en/latest/nose.html>`_ test suites out of the box;

-- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested);
+- Python 2.7, Python 3.4+, PyPy 2.3, Jython 2.5 (untested);

 - Rich plugin architecture, with over 315+ `external plugins <http://plugincompat.herokuapp.com>`_ and thriving community;
@@ -4,9 +4,6 @@ needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
 to find the magic string, so _ARGCOMPLETE env. var is never set, and
 this does not need special code.

-argcomplete does not support python 2.5 (although the changes for that
-are minor).
-
 Function try_argcomplete(parser) should be called directly before
 the call to ArgumentParser.parse_args().
@@ -8,8 +8,6 @@ from _pytest.compat import _PY2, _PY3, PY35, safe_str
 import py
 builtin_repr = repr

-reprlib = py.builtin._tryimport('repr', 'reprlib')
-
 if _PY3:
     from traceback import format_exception_only
 else:

@@ -235,7 +233,7 @@ class TracebackEntry(object):
         except KeyError:
             return False

-        if py.builtin.callable(tbh):
+        if callable(tbh):
             return tbh(None if self._excinfo is None else self._excinfo())
         else:
             return tbh
@@ -2,6 +2,7 @@ from __future__ import absolute_import, division, generators, print_function

 from bisect import bisect_right
 import sys
+import six
 import inspect
 import tokenize
 import py

@@ -32,7 +33,7 @@ class Source(object):
             partlines = part.lines
         elif isinstance(part, (tuple, list)):
             partlines = [x.rstrip("\n") for x in part]
-        elif isinstance(part, py.builtin._basestring):
+        elif isinstance(part, six.string_types):
             partlines = part.split('\n')
             if rstrip:
                 while partlines:

@@ -341,8 +342,6 @@ def get_statement_startend2(lineno, node):
 def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
     if astnode is None:
         content = str(source)
-        if sys.version_info < (2, 7):
-            content += "\n"
         try:
             astnode = compile(content, "source", "exec", 1024)  # 1024 for AST
         except ValueError:
@@ -1,11 +0,0 @@
-"""
-imports symbols from vendored "pluggy" if available, otherwise
-falls back to importing "pluggy" from the default namespace.
-"""
-from __future__ import absolute_import, division, print_function
-try:
-    from _pytest.vendored_packages.pluggy import *  # noqa
-    from _pytest.vendored_packages.pluggy import __version__  # noqa
-except ImportError:
-    from pluggy import *  # noqa
-    from pluggy import __version__  # noqa
@@ -2,8 +2,8 @@
 support for presenting detailed information in failing assertions.
 """
 from __future__ import absolute_import, division, print_function
-import py
 import sys
+import six

 from _pytest.assertion import util
 from _pytest.assertion import rewrite

@@ -67,10 +67,8 @@ class AssertionState:

 def install_importhook(config):
     """Try to install the rewrite hook, raise SystemError if it fails."""
-    # Both Jython and CPython 2.6.0 have AST bugs that make the
-    # assertion rewriting hook malfunction.
-    if (sys.platform.startswith('java') or
-            sys.version_info[:3] == (2, 6, 0)):
+    # Jython has an AST bug that make the assertion rewriting hook malfunction.
+    if (sys.platform.startswith('java')):
         raise SystemError('rewrite not supported')

     config._assertstate = AssertionState(config, 'rewrite')

@@ -126,7 +124,7 @@ def pytest_runtest_setup(item):
         if new_expl:
             new_expl = truncate.truncate_if_required(new_expl, item)
             new_expl = [line.replace("\n", "\\n") for line in new_expl]
-            res = py.builtin._totext("\n~").join(new_expl)
+            res = six.text_type("\n~").join(new_expl)
             if item.config.getvalue("assertmode") == "rewrite":
                 res = res.replace("%", "%%")
         return res
@@ -8,6 +8,7 @@ import imp
 import marshal
 import os
 import re
+import six
 import struct
 import sys
 import types

@@ -33,7 +34,6 @@ else:
 PYC_EXT = ".py" + (__debug__ and "c" or "o")
 PYC_TAIL = "." + PYTEST_TAG + PYC_EXT

-REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
 ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3

 if sys.version_info >= (3, 5):

@@ -320,10 +320,6 @@ def _rewrite_test(config, fn):
         return None, None
     finally:
         del state._indecode
-    # On Python versions which are not 2.7 and less than or equal to 3.1, the
-    # parser expects *nix newlines.
-    if REWRITE_NEWLINES:
-        source = source.replace(RN, N) + N
     try:
         tree = ast.parse(source)
     except SyntaxError:

@@ -405,10 +401,10 @@ def _saferepr(obj):

     """
     repr = py.io.saferepr(obj)
-    if py.builtin._istext(repr):
-        t = py.builtin.text
+    if isinstance(repr, six.text_type):
+        t = six.text_type
     else:
-        t = py.builtin.bytes
+        t = six.binary_type
     return repr.replace(t("\n"), t("\\n"))

@@ -427,16 +423,16 @@ def _format_assertmsg(obj):
     # contains a newline it gets escaped, however if an object has a
     # .__repr__() which contains newlines it does not get escaped.
     # However in either case we want to preserve the newline.
-    if py.builtin._istext(obj) or py.builtin._isbytes(obj):
+    if isinstance(obj, six.text_type) or isinstance(obj, six.binary_type):
         s = obj
         is_repr = False
     else:
         s = py.io.saferepr(obj)
         is_repr = True
-    if py.builtin._istext(s):
-        t = py.builtin.text
+    if isinstance(s, six.text_type):
+        t = six.text_type
     else:
-        t = py.builtin.bytes
+        t = six.binary_type
     s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
     if is_repr:
         s = s.replace(t("\\n"), t("\n~"))

@@ -444,15 +440,15 @@ def _format_assertmsg(obj):


 def _should_repr_global_name(obj):
-    return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
+    return not hasattr(obj, "__name__") and not callable(obj)


 def _format_boolop(explanations, is_or):
     explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
-    if py.builtin._istext(explanation):
-        t = py.builtin.text
+    if isinstance(explanation, six.text_type):
+        t = six.text_type
     else:
-        t = py.builtin.bytes
+        t = six.binary_type
     return explanation.replace(t('%'), t('%%'))
@@ -7,7 +7,7 @@ Current default behaviour is to truncate assertion explanations at
 from __future__ import absolute_import, division, print_function
 import os

-import py
+import six


 DEFAULT_MAX_LINES = 8

@@ -74,8 +74,8 @@ def _truncate_explanation(input_lines, max_lines=None, max_chars=None):
     msg += ' ({0} lines hidden)'.format(truncated_line_count)
     msg += ", {0}".format(USAGE_MSG)
     truncated_explanation.extend([
-        py.builtin._totext(""),
-        py.builtin._totext(msg),
+        six.text_type(""),
+        six.text_type(msg),
     ])
     return truncated_explanation
@@ -4,13 +4,14 @@ import pprint

 import _pytest._code
 import py
+import six
 try:
     from collections import Sequence
 except ImportError:
     Sequence = list


-u = py.builtin._totext
+u = six.text_type

 # The _reprcompare attribute on the util module is used by the new assertion
 # interpretation code and assertion rewriter to detect this plugin was

@@ -174,9 +175,9 @@ def _diff_text(left, right, verbose=False):
     """
     from difflib import ndiff
     explanation = []
-    if isinstance(left, py.builtin.bytes):
+    if isinstance(left, six.binary_type):
         left = u(repr(left)[1:-1]).replace(r'\n', '\n')
-    if isinstance(right, py.builtin.bytes):
+    if isinstance(right, six.binary_type):
         right = u(repr(right)[1:-1]).replace(r'\n', '\n')
     if not verbose:
         i = 0  # just in case left or right has zero length
@@ -4,6 +4,7 @@ per-test stdout/stderr capturing mechanism.
 """
 from __future__ import absolute_import, division, print_function

+import collections
 import contextlib
 import sys
 import os

@@ -11,11 +12,10 @@ import io
 from io import UnsupportedOperation
 from tempfile import TemporaryFile

-import py
+import six
 import pytest
 from _pytest.compat import CaptureIO

-unicode = py.builtin.text
-
 patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}

@@ -44,7 +44,7 @@ def pytest_load_initial_conftests(early_config, parser, args):
     pluginmanager.register(capman, "capturemanager")

     # make sure that capturemanager is properly reset at final shutdown
-    early_config.add_cleanup(capman.reset_capturings)
+    early_config.add_cleanup(capman.stop_global_capturing)

     # make sure logging does not raise exceptions at the end
     def silence_logging_at_shutdown():
@@ -53,17 +53,30 @@ def pytest_load_initial_conftests(early_config, parser, args):
     early_config.add_cleanup(silence_logging_at_shutdown)

     # finally trigger conftest loading but while capturing (issue93)
-    capman.init_capturings()
+    capman.start_global_capturing()
     outcome = yield
-    out, err = capman.suspendcapture()
+    out, err = capman.suspend_global_capture()
     if outcome.excinfo is not None:
         sys.stdout.write(out)
         sys.stderr.write(err)


 class CaptureManager:
+    """
+    Capture plugin, manages that the appropriate capture method is enabled/disabled during collection and each
+    test phase (setup, call, teardown). After each of those points, the captured output is obtained and
+    attached to the collection/runtest report.
+
+    There are two levels of capture:
+    * global: enabled by default and can be suppressed by the ``-s`` option. This is always enabled/disabled
+      during collection and each test phase.
+    * fixture: when a test function or one of its fixtures depends on the ``capsys`` or ``capfd`` fixtures. In this
+      case special handling is needed to ensure the fixtures take precedence over the global capture.
+    """
+
     def __init__(self, method):
         self._method = method
+        self._global_capturing = None

     def _getcapture(self, method):
         if method == "fd":
@@ -75,23 +88,24 @@ class CaptureManager:
         else:
             raise ValueError("unknown capturing method: %r" % method)

-    def init_capturings(self):
-        assert not hasattr(self, "_capturing")
-        self._capturing = self._getcapture(self._method)
-        self._capturing.start_capturing()
+    def start_global_capturing(self):
+        assert self._global_capturing is None
+        self._global_capturing = self._getcapture(self._method)
+        self._global_capturing.start_capturing()

-    def reset_capturings(self):
-        cap = self.__dict__.pop("_capturing", None)
-        if cap is not None:
-            cap.pop_outerr_to_orig()
-            cap.stop_capturing()
+    def stop_global_capturing(self):
+        if self._global_capturing is not None:
+            self._global_capturing.pop_outerr_to_orig()
+            self._global_capturing.stop_capturing()
+            self._global_capturing = None

-    def resumecapture(self):
-        self._capturing.resume_capturing()
+    def resume_global_capture(self):
+        self._global_capturing.resume_capturing()

-    def suspendcapture(self, in_=False):
-        self.deactivate_funcargs()
-        cap = getattr(self, "_capturing", None)
+    def suspend_global_capture(self, item=None, in_=False):
+        if item is not None:
+            self.deactivate_fixture(item)
+        cap = getattr(self, "_global_capturing", None)
         if cap is not None:
             try:
                 outerr = cap.readouterr()
@@ -99,23 +113,26 @@ class CaptureManager:
                 cap.suspend_capturing(in_=in_)
             return outerr

-    def activate_funcargs(self, pyfuncitem):
-        capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None)
-        if capfuncarg is not None:
-            capfuncarg._start()
-            self._capfuncarg = capfuncarg
+    def activate_fixture(self, item):
+        """If the current item is using ``capsys`` or ``capfd``, activate them so they take precedence over
+        the global capture.
+        """
+        fixture = getattr(item, "_capture_fixture", None)
+        if fixture is not None:
+            fixture._start()

-    def deactivate_funcargs(self):
-        capfuncarg = self.__dict__.pop("_capfuncarg", None)
-        if capfuncarg is not None:
-            capfuncarg.close()
+    def deactivate_fixture(self, item):
+        """Deactivates the ``capsys`` or ``capfd`` fixture of this item, if any."""
+        fixture = getattr(item, "_capture_fixture", None)
+        if fixture is not None:
+            fixture.close()

     @pytest.hookimpl(hookwrapper=True)
     def pytest_make_collect_report(self, collector):
         if isinstance(collector, pytest.File):
-            self.resumecapture()
+            self.resume_global_capture()
             outcome = yield
-            out, err = self.suspendcapture()
+            out, err = self.suspend_global_capture()
             rep = outcome.get_result()
             if out:
                 rep.sections.append(("Captured stdout", out))
@@ -126,65 +143,132 @@ class CaptureManager:

     @pytest.hookimpl(hookwrapper=True)
     def pytest_runtest_setup(self, item):
-        self.resumecapture()
+        self.resume_global_capture()
+        # no need to activate a capture fixture because they activate themselves during creation; this
+        # only makes sense when a fixture uses a capture fixture, otherwise the capture fixture will
+        # be activated during pytest_runtest_call
         yield
-        self.suspendcapture_item(item, "setup")
+        self.suspend_capture_item(item, "setup")

     @pytest.hookimpl(hookwrapper=True)
     def pytest_runtest_call(self, item):
-        self.resumecapture()
-        self.activate_funcargs(item)
+        self.resume_global_capture()
+        # it is important to activate this fixture during the call phase so it overwrites the "global"
+        # capture
+        self.activate_fixture(item)
         yield
-        # self.deactivate_funcargs() called from suspendcapture()
-        self.suspendcapture_item(item, "call")
+        self.suspend_capture_item(item, "call")

     @pytest.hookimpl(hookwrapper=True)
     def pytest_runtest_teardown(self, item):
-        self.resumecapture()
+        self.resume_global_capture()
+        self.activate_fixture(item)
         yield
-        self.suspendcapture_item(item, "teardown")
+        self.suspend_capture_item(item, "teardown")

     @pytest.hookimpl(tryfirst=True)
     def pytest_keyboard_interrupt(self, excinfo):
-        self.reset_capturings()
+        self.stop_global_capturing()

     @pytest.hookimpl(tryfirst=True)
     def pytest_internalerror(self, excinfo):
-        self.reset_capturings()
+        self.stop_global_capturing()

-    def suspendcapture_item(self, item, when, in_=False):
-        out, err = self.suspendcapture(in_=in_)
+    def suspend_capture_item(self, item, when, in_=False):
+        out, err = self.suspend_global_capture(item, in_=in_)
         item.add_report_section(when, "stdout", out)
         item.add_report_section(when, "stderr", err)


-error_capsysfderror = "cannot use capsys and capfd at the same time"
+capture_fixtures = {'capfd', 'capfdbinary', 'capsys', 'capsysbinary'}
+
+
+def _ensure_only_one_capture_fixture(request, name):
+    fixtures = set(request.fixturenames) & capture_fixtures - set((name,))
+    if fixtures:
+        fixtures = sorted(fixtures)
+        fixtures = fixtures[0] if len(fixtures) == 1 else fixtures
+        raise request.raiseerror(
+            "cannot use {0} and {1} at the same time".format(
+                fixtures, name,
+            ),
+        )


 @pytest.fixture
 def capsys(request):
     """Enable capturing of writes to sys.stdout/sys.stderr and make
     captured output available via ``capsys.readouterr()`` method calls
-    which return a ``(out, err)`` tuple.
+    which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text``
+    objects.
     """
-    if "capfd" in request.fixturenames:
-        raise request.raiseerror(error_capsysfderror)
-    request.node._capfuncarg = c = CaptureFixture(SysCapture, request)
-    return c
+    _ensure_only_one_capture_fixture(request, 'capsys')
+    with _install_capture_fixture_on_item(request, SysCapture) as fixture:
+        yield fixture
+
+
+@pytest.fixture
+def capsysbinary(request):
+    """Enable capturing of writes to sys.stdout/sys.stderr and make
+    captured output available via ``capsys.readouterr()`` method calls
+    which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``bytes``
+    objects.
+    """
+    _ensure_only_one_capture_fixture(request, 'capsysbinary')
+    # Currently, the implementation uses the python3 specific `.buffer`
+    # property of CaptureIO.
+    if sys.version_info < (3,):
+        raise request.raiseerror('capsysbinary is only supported on python 3')
+    with _install_capture_fixture_on_item(request, SysCaptureBinary) as fixture:
+        yield fixture


 @pytest.fixture
 def capfd(request):
     """Enable capturing of writes to file descriptors 1 and 2 and make
     captured output available via ``capfd.readouterr()`` method calls
-    which return a ``(out, err)`` tuple.
+    which return a ``(out, err)`` tuple. ``out`` and ``err`` will be ``text``
+    objects.
     """
-    if "capsys" in request.fixturenames:
-        request.raiseerror(error_capsysfderror)
+    _ensure_only_one_capture_fixture(request, 'capfd')
     if not hasattr(os, 'dup'):
-        pytest.skip("capfd funcarg needs os.dup")
-    request.node._capfuncarg = c = CaptureFixture(FDCapture, request)
-    return c
+        pytest.skip("capfd fixture needs os.dup function which is not available in this system")
+    with _install_capture_fixture_on_item(request, FDCapture) as fixture:
+        yield fixture
+
+
+@pytest.fixture
+def capfdbinary(request):
+    """Enable capturing of writes to file descriptors 1 and 2 and make
+    captured output available via ``capfdbinary.readouterr`` method calls
+    which return a ``(out, err)`` tuple. ``out`` and ``err`` will be
+    ``bytes`` objects.
+    """
+    _ensure_only_one_capture_fixture(request, 'capfdbinary')
+    if not hasattr(os, 'dup'):
+        pytest.skip("capfdbinary fixture needs os.dup function which is not available in this system")
+    with _install_capture_fixture_on_item(request, FDCaptureBinary) as fixture:
+        yield fixture
+
+
+@contextlib.contextmanager
+def _install_capture_fixture_on_item(request, capture_class):
+    """
+    Context manager which creates a ``CaptureFixture`` instance and "installs" it on
+    the item/node of the given request. Used by ``capsys`` and ``capfd``.
+
+    The CaptureFixture is added as an attribute of the item because it needs to be
+    accessed by ``CaptureManager`` during its ``pytest_runtest_*`` hooks.
+    """
+    request.node._capture_fixture = fixture = CaptureFixture(capture_class, request)
+    capmanager = request.config.pluginmanager.getplugin('capturemanager')
+    # need to activate this fixture right away in case it is being used by another fixture (setup phase)
+    # if this fixture is being used only by a test function (call phase), then we wouldn't need this
+    # activation, but it doesn't hurt
+    capmanager.activate_fixture(request.node)
+    yield fixture
+    fixture.close()
+    del request.node._capture_fixture


 class CaptureFixture:
@@ -211,12 +295,14 @@ class CaptureFixture:

     @contextlib.contextmanager
     def disabled(self):
+        self._capture.suspend_capturing()
         capmanager = self.request.config.pluginmanager.getplugin('capturemanager')
-        capmanager.suspendcapture_item(self.request.node, "call", in_=True)
+        capmanager.suspend_global_capture(item=None, in_=False)
         try:
             yield
         finally:
-            capmanager.resumecapture()
+            capmanager.resume_global_capture()
+            self._capture.resume_capturing()


 def safe_text_dupfile(f, mode, default_encoding="UTF8"):

@@ -246,7 +332,7 @@ class EncodedFile(object):
         self.encoding = encoding

     def write(self, obj):
-        if isinstance(obj, unicode):
+        if isinstance(obj, six.text_type):
             obj = obj.encode(self.encoding, "replace")
         self.buffer.write(obj)
@@ -263,6 +349,9 @@ class EncodedFile(object):
         return getattr(object.__getattribute__(self, "buffer"), name)


+CaptureResult = collections.namedtuple("CaptureResult", ["out", "err"])
+
+
 class MultiCapture(object):
     out = err = in_ = None
@@ -323,16 +412,19 @@ class MultiCapture(object):

     def readouterr(self):
         """ return snapshot unicode value of stdout/stderr capturings. """
-        return (self.out.snap() if self.out is not None else "",
-                self.err.snap() if self.err is not None else "")
+        return CaptureResult(self.out.snap() if self.out is not None else "",
+                             self.err.snap() if self.err is not None else "")


 class NoCapture:
     __init__ = start = done = suspend = resume = lambda *args: None


-class FDCapture:
-    """ Capture IO to/from a given os-level filedescriptor. """
+class FDCaptureBinary:
+    """Capture IO to/from a given os-level filedescriptor.
+
+    snap() produces `bytes`
+    """

     def __init__(self, targetfd, tmpfile=None):
         self.targetfd = targetfd
@@ -371,17 +463,11 @@ class FDCapture:
         self.syscapture.start()

     def snap(self):
-        f = self.tmpfile
-        f.seek(0)
-        res = f.read()
-        if res:
-            enc = getattr(f, "encoding", None)
-            if enc and isinstance(res, bytes):
-                res = py.builtin._totext(res, enc, "replace")
-            f.truncate(0)
-            f.seek(0)
-            return res
-        return ''
+        self.tmpfile.seek(0)
+        res = self.tmpfile.read()
+        self.tmpfile.seek(0)
+        self.tmpfile.truncate()
+        return res

     def done(self):
         """ stop capturing, restore streams, return original capture file,
@@ -402,11 +488,24 @@ class FDCapture:

     def writeorg(self, data):
         """ write to original file descriptor. """
-        if py.builtin._istext(data):
+        if isinstance(data, six.text_type):
             data = data.encode("utf8")  # XXX use encoding of original stream
         os.write(self.targetfd_save, data)


+class FDCapture(FDCaptureBinary):
+    """Capture IO to/from a given os-level filedescriptor.
+
+    snap() produces text
+    """
+    def snap(self):
+        res = FDCaptureBinary.snap(self)
+        enc = getattr(self.tmpfile, "encoding", None)
+        if enc and isinstance(res, bytes):
+            res = six.text_type(res, enc, "replace")
+        return res
+
+
 class SysCapture:
     def __init__(self, fd, tmpfile=None):
         name = patchsysdict[fd]
@@ -423,10 +522,9 @@ class SysCapture:
         setattr(sys, self.name, self.tmpfile)

     def snap(self):
-        f = self.tmpfile
-        res = f.getvalue()
-        f.truncate(0)
-        f.seek(0)
+        res = self.tmpfile.getvalue()
+        self.tmpfile.seek(0)
+        self.tmpfile.truncate()
         return res

     def done(self):
@@ -445,6 +543,14 @@ class SysCapture:
         self._old.flush()


+class SysCaptureBinary(SysCapture):
+    def snap(self):
+        res = self.tmpfile.buffer.getvalue()
+        self.tmpfile.seek(0)
+        self.tmpfile.truncate()
+        return res
+
+
 class DontReadFromInput:
     """Temporary stub class. Ideally when stdin is accessed, the
     capturing should be turned off, with possibly all data captured
@@ -2,18 +2,18 @@
 python version compatibility code
 """
 from __future__ import absolute_import, division, print_function
-import sys
-import inspect
-import types
-import re
-
+
+import codecs
+import functools
+import inspect
+import re
+import sys

 import py

 import _pytest
 from _pytest.outcomes import TEST_OUTCOME

 try:
     import enum
 except ImportError:  # pragma: no cover
@@ -25,6 +25,12 @@ _PY3 = sys.version_info > (3, 0)
 _PY2 = not _PY3


+if _PY3:
+    from inspect import signature, Parameter as Parameter
+else:
+    from funcsigs import signature, Parameter as Parameter
+
+
 NoneType = type(None)
 NOTSET = object()
@@ -32,12 +38,10 @@ PY35 = sys.version_info[:2] >= (3, 5)
 PY36 = sys.version_info[:2] >= (3, 6)
 MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError'

-if hasattr(inspect, 'signature'):
-    def _format_args(func):
-        return str(inspect.signature(func))
-else:
-    def _format_args(func):
-        return inspect.formatargspec(*inspect.getargspec(func))
+
+def _format_args(func):
+    return str(signature(func))


 isfunction = inspect.isfunction
 isclass = inspect.isclass
@@ -63,7 +67,6 @@ def iscoroutinefunction(func):


 def getlocation(function, curdir):
-    import inspect
     fn = py.path.local(inspect.getfile(function))
     lineno = py.builtin._getcode(function).co_firstlineno
     if fn.relto(curdir):
@@ -83,60 +86,64 @@ def num_mock_patch_args(function):
     return len(patchings)


-def getfuncargnames(function, startindex=None, cls=None):
-    """
-    @RonnyPfannschmidt: This function should be refactored when we revisit fixtures. The
-    fixture mechanism should ask the node for the fixture names, and not try to obtain
-    directly from the function object well after collection has occurred.
-    """
-    if startindex is None and cls is not None:
-        is_staticmethod = isinstance(cls.__dict__.get(function.__name__, None), staticmethod)
-        startindex = 0 if is_staticmethod else 1
-    # XXX merge with main.py's varnames
-    # assert not isclass(function)
-    realfunction = function
-    while hasattr(realfunction, "__wrapped__"):
-        realfunction = realfunction.__wrapped__
-    if startindex is None:
-        startindex = inspect.ismethod(function) and 1 or 0
-    if realfunction != function:
-        startindex += num_mock_patch_args(function)
-        function = realfunction
-    if isinstance(function, functools.partial):
-        argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
-        partial = function
-        argnames = argnames[len(partial.args):]
-        if partial.keywords:
-            for kw in partial.keywords:
-                argnames.remove(kw)
-    else:
-        argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
-    defaults = getattr(function, 'func_defaults',
-                       getattr(function, '__defaults__', None)) or ()
-    numdefaults = len(defaults)
-    if numdefaults:
-        return tuple(argnames[startindex:-numdefaults])
-    return tuple(argnames[startindex:])
+def getfuncargnames(function, is_method=False, cls=None):
+    """Returns the names of a function's mandatory arguments.
+
+    This should return the names of all function arguments that:
+    * Aren't bound to an instance or type as in instance or class methods.
+    * Don't have default values.
+    * Aren't bound with functools.partial.
+    * Aren't replaced with mocks.
-    if sys.version_info[:2] == (2, 6):
-        def isclass(object):
-            """ Return true if the object is a class. Overrides inspect.isclass for
-            python 2.6 because it will return True for objects which always return
-            something on __getattr__ calls (see #1035).
-            Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
-            """
-            return isinstance(object, (type, types.ClassType))
+    The is_method and cls arguments indicate that the function should
+    be treated as a bound method even though it's not, unless, only in
+    the case of cls, the function is a static method.
+
+    @RonnyPfannschmidt: This function should be refactored when we
+    revisit fixtures. The fixture mechanism should ask the node for
+    the fixture names, and not try to obtain directly from the
+    function object well after collection has occurred.
+
+    """
+    # The parameters attribute of a Signature object contains an
+    # ordered mapping of parameter names to Parameter instances. This
+    # creates a tuple of the names of the parameters that don't have
+    # defaults.
+    arg_names = tuple(p.name for p in signature(function).parameters.values()
+                      if (p.kind is Parameter.POSITIONAL_OR_KEYWORD or
+                          p.kind is Parameter.KEYWORD_ONLY) and
+                      p.default is Parameter.empty)
+    # If this function should be treated as a bound method even though
+    # it's passed as an unbound method or function, remove the first
+    # parameter name.
+    if (is_method or
+            (cls and not isinstance(cls.__dict__.get(function.__name__, None),
+                                    staticmethod))):
+        arg_names = arg_names[1:]
+    # Remove any names that will be replaced with mocks.
+    if hasattr(function, "__wrapped__"):
+        arg_names = arg_names[num_mock_patch_args(function):]
+    return arg_names


 if _PY3:
-    import codecs
     imap = map
     izip = zip
     STRING_TYPES = bytes, str
     UNICODE_TYPES = str,

-    def _ascii_escaped(val):
+    if PY35:
+        def _bytes_to_ascii(val):
+            return val.decode('ascii', 'backslashreplace')
+    else:
+        def _bytes_to_ascii(val):
+            if val:
+                # source: http://goo.gl/bGsnwC
+                encoded_bytes, _ = codecs.escape_encode(val)
+                return encoded_bytes.decode('ascii')
+            else:
+                # empty bytes crashes codecs.escape_encode (#1087)
+                return ''
+
+    def ascii_escaped(val):
         """If val is pure ascii, returns it as a str(). Otherwise, escapes
         bytes objects into a sequence of escaped bytes:
@@ -155,22 +162,14 @@ if _PY3:

         """
         if isinstance(val, bytes):
-            if val:
-                # source: http://goo.gl/bGsnwC
-                encoded_bytes, _ = codecs.escape_encode(val)
-                return encoded_bytes.decode('ascii')
-            else:
-                # empty bytes crashes codecs.escape_encode (#1087)
-                return ''
+            return _bytes_to_ascii(val)
         else:
             return val.encode('unicode_escape').decode('ascii')
 else:
     STRING_TYPES = bytes, str, unicode
     UNICODE_TYPES = unicode,

     from itertools import imap, izip  # NOQA

-    def _ascii_escaped(val):
+    def ascii_escaped(val):
         """In py2 bytes and str are the same type, so return if it's a bytes
         object, return it unchanged if it is a full ascii string,
         otherwise escape it into its binary form.
@@ -223,10 +222,7 @@ def getimfunc(func):
     try:
         return func.__func__
     except AttributeError:
-        try:
-            return func.im_func
-        except AttributeError:
-            return func
+        return func


 def safe_getattr(object, name, default):
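The signature-based extraction used by the new ``getfuncargnames`` can be illustrated standalone; a minimal sketch, assuming Python 3 (no ``funcsigs`` fallback, no method or mock handling)::

    from inspect import Parameter, signature

    def mandatory_arg_names(function):
        # keep positional-or-keyword and keyword-only parameters without defaults,
        # mirroring the filter in the new getfuncargnames()
        return tuple(
            p.name for p in signature(function).parameters.values()
            if p.kind in (Parameter.POSITIONAL_OR_KEYWORD, Parameter.KEYWORD_ONLY)
            and p.default is Parameter.empty
        )

    def fixture_like(tmpdir, count=3):
        pass

    assert mandatory_arg_names(fixture_like) == ('tmpdir',)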
@@ -6,6 +6,7 @@ import traceback
 import types
 import warnings

+import six
 import py
 # DON't import pytest here because it causes import cycle troubles
 import sys

@@ -13,7 +14,7 @@ import os
 import _pytest._code
 import _pytest.hookspec  # the extension point definitions
 import _pytest.assertion
-from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
+from pluggy import PluginManager, HookimplMarker, HookspecMarker
 from _pytest.compat import safe_str

 hookimpl = HookimplMarker("pytest")
@@ -100,27 +101,18 @@ def directory_arg(path, optname):
     return path


-_preinit = []
-
 default_plugins = (
     "mark main terminal runner python fixtures debugging unittest capture skipping "
     "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion "
     "junitxml resultlog doctest cacheprovider freeze_support "
-    "setuponly setupplan warnings").split()
+    "setuponly setupplan warnings logging").split()


 builtin_plugins = set(default_plugins)
 builtin_plugins.add("pytester")


-def _preloadplugins():
-    assert not _preinit
-    _preinit.append(get_config())
-
-
 def get_config():
-    if _preinit:
-        return _preinit.pop(0)
     # subsequent calls to main will create a fresh instance
     pluginmanager = PytestPluginManager()
     config = Config(pluginmanager)
@@ -158,7 +150,7 @@ def _prepareconfig(args=None, plugins=None):
     try:
         if plugins:
             for plugin in plugins:
-                if isinstance(plugin, py.builtin._basestring):
+                if isinstance(plugin, six.string_types):
                     pluginmanager.consider_pluginarg(plugin)
                 else:
                     pluginmanager.register(plugin)

@@ -173,7 +165,7 @@ def _prepareconfig(args=None, plugins=None):

 class PytestPluginManager(PluginManager):
     """
-    Overwrites :py:class:`pluggy.PluginManager <_pytest.vendored_packages.pluggy.PluginManager>` to add pytest-specific
+    Overwrites :py:class:`pluggy.PluginManager <pluggy.PluginManager>` to add pytest-specific
     functionality:

     * loading plugins from the command line, ``PYTEST_PLUGIN`` env variable and
@@ -211,7 +203,7 @@ class PytestPluginManager(PluginManager):
         """
         .. deprecated:: 2.8

-        Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>`
+        Use :py:meth:`pluggy.PluginManager.add_hookspecs <PluginManager.add_hookspecs>`
         instead.
         """
         warning = dict(code="I2",

@@ -249,18 +241,11 @@ class PytestPluginManager(PluginManager):
                 "historic": hasattr(method, "historic")}
         return opts

-    def _verify_hook(self, hook, hookmethod):
-        super(PytestPluginManager, self)._verify_hook(hook, hookmethod)
-        if "__multicall__" in hookmethod.argnames:
-            fslineno = _pytest._code.getfslineno(hookmethod.function)
-            warning = dict(code="I1",
-                           fslocation=fslineno,
-                           nodeid=None,
-                           message="%r hook uses deprecated __multicall__ "
-                                   "argument" % (hook.name))
-            self._warn(warning)
-
     def register(self, plugin, name=None):
+        if name == 'pytest_catchlog':
+            self._warn('pytest-catchlog plugin has been merged into the core, '
+                       'please remove it from your requirements.')
+            return
         ret = super(PytestPluginManager, self).register(plugin, name)
         if ret:
             self.hook.pytest_plugin_registered.call_historic(
@@ -430,7 +415,7 @@ class PytestPluginManager(PluginManager):
         # "terminal" or "capture". Those plugins are registered under their
         # basename for historic purposes but must be imported with the
         # _pytest prefix.
-        assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname
+        assert isinstance(modname, (six.text_type, str)), "module name as text required, got %r" % modname
         modname = str(modname)
         if self.get_plugin(modname) is not None:
             return

@@ -442,12 +427,12 @@ class PytestPluginManager(PluginManager):
         try:
             __import__(importspec)
         except ImportError as e:
-            new_exc = ImportError('Error importing plugin "%s": %s' % (modname, safe_str(e.args[0])))
-            # copy over name and path attributes
-            for attr in ('name', 'path'):
-                if hasattr(e, attr):
-                    setattr(new_exc, attr, getattr(e, attr))
-            raise new_exc
+            new_exc_type = ImportError
+            new_exc_message = 'Error importing plugin "%s": %s' % (modname, safe_str(e.args[0]))
+            new_exc = new_exc_type(new_exc_message)
+
+            six.reraise(new_exc_type, new_exc, sys.exc_info()[2])
+
         except Exception as e:
             import pytest
             if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
@@ -643,7 +628,7 @@ class Argument:
             pass
         else:
             # this might raise a keyerror as well, don't want to catch that
-            if isinstance(typ, py.builtin._basestring):
+            if isinstance(typ, six.string_types):
                 if typ == 'choice':
                     warnings.warn(
                         'type argument to addoption() is a string %r.'

@@ -968,7 +953,7 @@ class Config(object):
         )
         res = self.hook.pytest_internalerror(excrepr=excrepr,
                                              excinfo=excinfo)
-        if not py.builtin.any(res):
+        if not any(res):
             for line in str(excrepr).split("\n"):
                 sys.stderr.write("INTERNALERROR> %s\n" % line)
             sys.stderr.flush()

@@ -1074,9 +1059,10 @@ class Config(object):
             "(are you using python -O?)\n")

     def _preparse(self, args, addopts=True):
-        self._initini(args)
         if addopts:
+            args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
+        self._initini(args)
+        if addopts:
             args[:] = self.getini("addopts") + args
         self._checkversion()
         self._consider_importhook(args)
@@ -54,7 +54,7 @@ class pytestPDB:
         if cls._pluginmanager is not None:
             capman = cls._pluginmanager.getplugin("capturemanager")
             if capman:
-                capman.suspendcapture(in_=True)
+                capman.suspend_global_capture(in_=True)
             tw = _pytest.config.create_terminal_writer(cls._config)
             tw.line()
             tw.sep(">", "PDB set_trace (IO-capturing turned off)")

@@ -66,7 +66,7 @@ class PdbInvoke:
     def pytest_exception_interact(self, node, call, report):
         capman = node.config.pluginmanager.getplugin("capturemanager")
         if capman:
-            out, err = capman.suspendcapture(in_=True)
+            out, err = capman.suspend_global_capture(in_=True)
             sys.stdout.write(out)
             sys.stdout.write(err)
         _enter_pdb(node, call.excinfo, report)
@@ -40,3 +40,13 @@ MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning(
     " please use pytest.param(..., marks=...) instead.\n"
     "For more details, see: https://docs.pytest.org/en/latest/parametrize.html"
 )
+
+COLLECTOR_MAKEITEM = RemovedInPytest4Warning(
+    "pycollector makeitem was removed "
+    "as it is an accidentially leaked internal api"
+)
+
+METAFUNC_ADD_CALL = (
+    "Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.\n"
+    "Please use Metafunc.parametrize instead."
+)
@@ -50,12 +50,19 @@ def pytest_addoption(parser):
 def pytest_collect_file(path, parent):
     config = parent.config
     if path.ext == ".py":
-        if config.option.doctestmodules:
+        if config.option.doctestmodules and not _is_setup_py(config, path, parent):
             return DoctestModule(path, parent)
         elif _is_doctest(config, path, parent):
             return DoctestTextfile(path, parent)


+def _is_setup_py(config, path, parent):
+    if path.basename != "setup.py":
+        return False
+    contents = path.read()
+    return 'setuptools' in contents or 'distutils' in contents
+
+
 def _is_doctest(config, path, parent):
     if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
         return True
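The effect of ``_is_setup_py``, sketched: a top-level script like the one below used to be imported (and therefore executed) during ``pytest --doctest-modules`` collection, and is now skipped because its contents mention ``setuptools``/``distutils``::

    # setup.py -- hypothetical packaging script
    from setuptools import setup

    setup(name='example', version='0.1')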
@@ -1,9 +1,12 @@
 from __future__ import absolute_import, division, print_function

+import functools
 import inspect
 import sys
 import warnings
+from collections import OrderedDict

+import attr
 import py
 from py._code.code import FormattedExcinfo

@@ -21,18 +24,14 @@ from _pytest.compat import (
 from _pytest.outcomes import fail, TEST_OUTCOME


-if sys.version_info[:2] == (2, 6):
-    from ordereddict import OrderedDict
-else:
-    from collections import OrderedDict
-
-
 def pytest_sessionstart(session):
     import _pytest.python

     scopename2class.update({
         'class': _pytest.python.Class,
         'module': _pytest.python.Module,
         'function': _pytest.main.Item,
         'session': _pytest.main.Session,
     })
     session._fixturemanager = FixtureManager(session)
@@ -64,8 +63,6 @@ def scopeproperty(name=None, doc=None):
 def get_scope_node(node, scope):
     cls = scopename2class.get(scope)
     if cls is None:
-        if scope == "session":
-            return node.session
         raise ValueError("unknown scope")
     return node.getparent(cls)

@@ -521,7 +518,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
             val = fixturedef.execute(request=subrequest)
         finally:
             # if fixture function failed it might have registered finalizers
-            self.session._setupstate.addfinalizer(fixturedef.finish,
+            self.session._setupstate.addfinalizer(functools.partial(fixturedef.finish, request=subrequest),
                                                   subrequest.node)
         return val
@@ -735,21 +732,20 @@ class FixtureDef:
             where=baseid
         )
         self.params = params
-        startindex = unittest and 1 or None
-        self.argnames = getfuncargnames(func, startindex=startindex)
+        self.argnames = getfuncargnames(func, is_method=unittest)
         self.unittest = unittest
         self.ids = ids
-        self._finalizer = []
+        self._finalizers = []

     def addfinalizer(self, finalizer):
-        self._finalizer.append(finalizer)
+        self._finalizers.append(finalizer)

-    def finish(self):
+    def finish(self, request):
         exceptions = []
         try:
-            while self._finalizer:
+            while self._finalizers:
                 try:
-                    func = self._finalizer.pop()
+                    func = self._finalizers.pop()
                     func()
                 except:  # noqa
                     exceptions.append(sys.exc_info())
@@ -759,12 +755,15 @@ class FixtureDef:
                 py.builtin._reraise(*e)

         finally:
-            ihook = self._fixturemanager.session.ihook
-            ihook.pytest_fixture_post_finalizer(fixturedef=self)
+            hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
+            hook.pytest_fixture_post_finalizer(fixturedef=self, request=request)
             # even if finalization fails, we invalidate
-            # the cached fixture value
+            # the cached fixture value and remove
+            # all finalizers because they may be bound methods which will
+            # keep instances alive
             if hasattr(self, "cached_result"):
                 del self.cached_result
+            self._finalizers = []

     def execute(self, request):
         # get required arguments and register our own finish()

@@ -772,7 +771,7 @@ class FixtureDef:
         for argname in self.argnames:
             fixturedef = request._get_active_fixturedef(argname)
             if argname != "request":
-                fixturedef.addfinalizer(self.finish)
+                fixturedef.addfinalizer(functools.partial(self.finish, request=request))

         my_cache_key = request.param_index
         cached_result = getattr(self, "cached_result", None)
@ -785,11 +784,11 @@ class FixtureDef:
|
|||
return result
|
||||
# we have a previous but differently parametrized fixture instance
|
||||
# so we need to tear it down before creating a new one
|
||||
self.finish()
|
||||
self.finish(request)
|
||||
assert not hasattr(self, "cached_result")
|
||||
|
||||
ihook = self._fixturemanager.session.ihook
|
||||
return ihook.pytest_fixture_setup(fixturedef=self, request=request)
|
||||
hook = self._fixturemanager.session.gethookproxy(request.node.fspath)
|
||||
return hook.pytest_fixture_setup(fixturedef=self, request=request)
|
||||
|
||||
def __repr__(self):
|
||||
return ("<FixtureDef name=%r scope=%r baseid=%r >" %
|
||||
|
@ -828,13 +827,21 @@ def pytest_fixture_setup(fixturedef, request):
|
|||
return result
|
||||
|
||||
|
||||
class FixtureFunctionMarker:
|
||||
def __init__(self, scope, params, autouse=False, ids=None, name=None):
|
||||
self.scope = scope
|
||||
self.params = params
|
||||
self.autouse = autouse
|
||||
self.ids = ids
|
||||
self.name = name
|
||||
def _ensure_immutable_ids(ids):
|
||||
if ids is None:
|
||||
return
|
||||
if callable(ids):
|
||||
return ids
|
||||
return tuple(ids)
|
||||
|
||||
|
||||
@attr.s(frozen=True)
|
||||
class FixtureFunctionMarker(object):
|
||||
scope = attr.ib()
|
||||
params = attr.ib(convert=attr.converters.optional(tuple))
|
||||
autouse = attr.ib(default=False)
|
||||
ids = attr.ib(default=None, convert=_ensure_immutable_ids)
|
||||
name = attr.ib(default=None)
|
||||
|
||||
def __call__(self, function):
|
||||
if isclass(function):
|
||||
|
|
|
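
The attrs conversion above also makes the fixture marker itself immutable. A minimal usage sketch against public pytest APIs only, showing what ``_ensure_immutable_ids`` buys::

    import pytest

    @pytest.fixture(params=[1, 2], ids=['one', 'two'])
    def number(request):
        # ids are stored as the tuple ('one', 'two'); mutating the
        # original list afterwards cannot change the generated test ids
        return request.param

    def test_number(number):
        assert number in (1, 2)
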
@ -1,6 +1,6 @@
""" hook specifications for pytest plugins, invoked from main.py and builtin plugins.  """

from _pytest._pluggy import HookspecMarker
from pluggy import HookspecMarker

hookspec = HookspecMarker("pytest")

@ -296,7 +296,7 @@ def pytest_fixture_setup(fixturedef, request):
    Stops at first non-None result, see :ref:`firstresult` """


def pytest_fixture_post_finalizer(fixturedef):
def pytest_fixture_post_finalizer(fixturedef, request):
    """ called after fixture teardown, but before the cache is cleared so
    the fixture result cache ``fixturedef.cached_result`` can
    still be accessed."""
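
With ``request`` now part of the hook signature, a plugin can see which request triggered the teardown. A hedged sketch of a conftest implementation (the print body is illustrative only)::

    # conftest.py
    def pytest_fixture_post_finalizer(fixturedef, request):
        # fixturedef.cached_result is still accessible at this point
        print('finalized %r for %s' % (fixturedef.argname, request.node.nodeid))
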
@ -0,0 +1,337 @@
from __future__ import absolute_import, division, print_function

import logging
from contextlib import closing, contextmanager
import sys
import six

import pytest
import py


DEFAULT_LOG_FORMAT = '%(filename)-25s %(lineno)4d %(levelname)-8s %(message)s'
DEFAULT_LOG_DATE_FORMAT = '%H:%M:%S'


def get_option_ini(config, *names):
    for name in names:
        ret = config.getoption(name)  # 'default' arg won't work as expected
        if ret is None:
            ret = config.getini(name)
        if ret:
            return ret


def pytest_addoption(parser):
    """Add options to control log capturing."""
    group = parser.getgroup('logging')

    def add_option_ini(option, dest, default=None, type=None, **kwargs):
        parser.addini(dest, default=default, type=type,
                      help='default value for ' + option)
        group.addoption(option, dest=dest, **kwargs)

    add_option_ini(
        '--no-print-logs',
        dest='log_print', action='store_const', const=False, default=True,
        type='bool',
        help='disable printing caught logs on failed tests.')
    add_option_ini(
        '--log-level',
        dest='log_level', default=None,
        help='logging level used by the logging module')
    add_option_ini(
        '--log-format',
        dest='log_format', default=DEFAULT_LOG_FORMAT,
        help='log format as used by the logging module.')
    add_option_ini(
        '--log-date-format',
        dest='log_date_format', default=DEFAULT_LOG_DATE_FORMAT,
        help='log date format as used by the logging module.')
    add_option_ini(
        '--log-cli-level',
        dest='log_cli_level', default=None,
        help='cli logging level.')
    add_option_ini(
        '--log-cli-format',
        dest='log_cli_format', default=None,
        help='log format as used by the logging module.')
    add_option_ini(
        '--log-cli-date-format',
        dest='log_cli_date_format', default=None,
        help='log date format as used by the logging module.')
    add_option_ini(
        '--log-file',
        dest='log_file', default=None,
        help='path to a file where logging will be written to.')
    add_option_ini(
        '--log-file-level',
        dest='log_file_level', default=None,
        help='log file logging level.')
    add_option_ini(
        '--log-file-format',
        dest='log_file_format', default=DEFAULT_LOG_FORMAT,
        help='log format as used by the logging module.')
    add_option_ini(
        '--log-file-date-format',
        dest='log_file_date_format', default=DEFAULT_LOG_DATE_FORMAT,
        help='log date format as used by the logging module.')


@contextmanager
def logging_using_handler(handler, logger=None):
    """Context manager that safely registers a given handler."""
    logger = logger or logging.getLogger(logger)

    if handler in logger.handlers:  # reentrancy
        # Adding the same handler twice would confuse logging system.
        # Just don't do that.
        yield
    else:
        logger.addHandler(handler)
        try:
            yield
        finally:
            logger.removeHandler(handler)


@contextmanager
def catching_logs(handler, formatter=None,
                  level=logging.NOTSET, logger=None):
    """Context manager that prepares the whole logging machinery properly."""
    logger = logger or logging.getLogger(logger)

    if formatter is not None:
        handler.setFormatter(formatter)
    handler.setLevel(level)

    with logging_using_handler(handler, logger):
        orig_level = logger.level
        logger.setLevel(min(orig_level, level))
        try:
            yield handler
        finally:
            logger.setLevel(orig_level)


class LogCaptureHandler(logging.StreamHandler):
    """A logging handler that stores log records and the log text."""

    def __init__(self):
        """Creates a new log handler."""
        logging.StreamHandler.__init__(self, py.io.TextIO())
        self.records = []

    def emit(self, record):
        """Keep the log records in a list in addition to the log text."""
        self.records.append(record)
        logging.StreamHandler.emit(self, record)


class LogCaptureFixture(object):
    """Provides access and control of log capturing."""

    def __init__(self, item):
        """Creates a new funcarg."""
        self._item = item

    @property
    def handler(self):
        return self._item.catch_log_handler

    @property
    def text(self):
        """Returns the log text."""
        return self.handler.stream.getvalue()

    @property
    def records(self):
        """Returns the list of log records."""
        return self.handler.records

    @property
    def record_tuples(self):
        """Returns a list of a stripped down version of log records intended
        for use in assertion comparison.

        The format of the tuple is:

            (logger_name, log_level, message)
        """
        return [(r.name, r.levelno, r.getMessage()) for r in self.records]

    def clear(self):
        """Reset the list of log records."""
        self.handler.records = []

    def set_level(self, level, logger=None):
        """Sets the level for capturing of logs.

        By default, the level is set on the handler used to capture
        logs. Specify a logger name to instead set the level of any
        logger.
        """
        if logger is None:
            logger = self.handler
        else:
            logger = logging.getLogger(logger)
        logger.setLevel(level)

    @contextmanager
    def at_level(self, level, logger=None):
        """Context manager that sets the level for capturing of logs.

        By default, the level is set on the handler used to capture
        logs. Specify a logger name to instead set the level of any
        logger.
        """
        if logger is None:
            logger = self.handler
        else:
            logger = logging.getLogger(logger)

        orig_level = logger.level
        logger.setLevel(level)
        try:
            yield
        finally:
            logger.setLevel(orig_level)


@pytest.fixture
def caplog(request):
    """Access and control log capturing.

    Captured logs are available through the following methods::

    * caplog.text()          -> string containing formatted log output
    * caplog.records()       -> list of logging.LogRecord instances
    * caplog.record_tuples() -> list of (logger_name, level, message) tuples
    """
    return LogCaptureFixture(request.node)


def get_actual_log_level(config, *setting_names):
    """Return the actual logging level."""

    for setting_name in setting_names:
        log_level = config.getoption(setting_name)
        if log_level is None:
            log_level = config.getini(setting_name)
        if log_level:
            break
    else:
        return

    if isinstance(log_level, six.string_types):
        log_level = log_level.upper()
    try:
        return int(getattr(logging, log_level, log_level))
    except ValueError:
        # Python logging does not recognise this as a logging level
        raise pytest.UsageError(
            "'{0}' is not recognized as a logging level name for "
            "'{1}'. Please consider passing the "
            "logging level num instead.".format(
                log_level,
                setting_name))


def pytest_configure(config):
    config.pluginmanager.register(LoggingPlugin(config),
                                  'logging-plugin')


class LoggingPlugin(object):
    """Attaches to the logging module and captures log messages for each test.
    """

    def __init__(self, config):
        """Creates a new plugin to capture log messages.

        The formatter can be safely shared across all handlers so
        create a single one for the entire test session here.
        """
        self.log_cli_level = get_actual_log_level(
            config, 'log_cli_level', 'log_level') or logging.WARNING

        self.print_logs = get_option_ini(config, 'log_print')
        self.formatter = logging.Formatter(
            get_option_ini(config, 'log_format'),
            get_option_ini(config, 'log_date_format'))

        log_cli_handler = logging.StreamHandler(sys.stderr)
        log_cli_format = get_option_ini(
            config, 'log_cli_format', 'log_format')
        log_cli_date_format = get_option_ini(
            config, 'log_cli_date_format', 'log_date_format')
        log_cli_formatter = logging.Formatter(
            log_cli_format,
            datefmt=log_cli_date_format)
        self.log_cli_handler = log_cli_handler  # needed for a single unittest
        self.live_logs = catching_logs(log_cli_handler,
                                       formatter=log_cli_formatter,
                                       level=self.log_cli_level)

        log_file = get_option_ini(config, 'log_file')
        if log_file:
            self.log_file_level = get_actual_log_level(
                config, 'log_file_level') or logging.WARNING

            log_file_format = get_option_ini(
                config, 'log_file_format', 'log_format')
            log_file_date_format = get_option_ini(
                config, 'log_file_date_format', 'log_date_format')
            self.log_file_handler = logging.FileHandler(
                log_file,
                # Each pytest runtests session will write to a clean logfile
                mode='w')
            log_file_formatter = logging.Formatter(
                log_file_format,
                datefmt=log_file_date_format)
            self.log_file_handler.setFormatter(log_file_formatter)
        else:
            self.log_file_handler = None

    @contextmanager
    def _runtest_for(self, item, when):
        """Implements the internals of pytest_runtest_xxx() hook."""
        with catching_logs(LogCaptureHandler(),
                           formatter=self.formatter) as log_handler:
            item.catch_log_handler = log_handler
            try:
                yield  # run test
            finally:
                del item.catch_log_handler

            if self.print_logs:
                # Add a captured log section to the report.
                log = log_handler.stream.getvalue().strip()
                item.add_report_section(when, 'log', log)

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_setup(self, item):
        with self._runtest_for(item, 'setup'):
            yield

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_call(self, item):
        with self._runtest_for(item, 'call'):
            yield

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_teardown(self, item):
        with self._runtest_for(item, 'teardown'):
            yield

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtestloop(self, session):
        """Runs all collected test items."""
        with self.live_logs:
            if self.log_file_handler is not None:
                with closing(self.log_file_handler):
                    with catching_logs(self.log_file_handler,
                                       level=self.log_file_level):
                        yield  # run all the tests
            else:
                yield  # run all the tests
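
Putting the new plugin together, an end-to-end sketch: the ``caplog`` fixture captures per-test records, while the ini options route formatted output to the console and a file::

    # test_logging_demo.py
    import logging

    def test_warning_is_captured(caplog):
        logging.getLogger(__name__).warning('boom')
        # text and record_tuples are properties on LogCaptureFixture
        assert 'boom' in caplog.text
        assert caplog.record_tuples[0][1] == logging.WARNING

    # pytest.ini
    # [pytest]
    # log_cli_level = INFO
    # log_file = pytest.log
    # log_format = %(asctime)s %(levelname)s %(message)s
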
@ -3,6 +3,7 @@ from __future__ import absolute_import, division, print_function
import functools
import os
import six
import sys

import _pytest

@ -84,15 +85,6 @@ def pytest_addoption(parser):
         help="base temporary directory for this test run.")


def pytest_namespace():
    """keeping this one works around a deeper startup issue in pytest

    i tried to find it for a while but the amount of time turned unsustainable,
    so i put a hack in to revisit later
    """
    return {}


def pytest_configure(config):
    __import__('pytest').config = config  # compatibility

@ -111,6 +103,8 @@ def wrap_session(config, doit):
        session.exitstatus = doit(config, session) or 0
    except UsageError:
        raise
    except Failed:
        session.exitstatus = EXIT_TESTSFAILED
    except KeyboardInterrupt:
        excinfo = _pytest._code.ExceptionInfo()
        if initstate < 2 and isinstance(excinfo.value, exit.Exception):

@ -168,6 +162,8 @@ def pytest_runtestloop(session):
    for i, item in enumerate(session.items):
        nextitem = session.items[i + 1] if i + 1 < len(session.items) else None
        item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
        if session.shouldfail:
            raise session.Failed(session.shouldfail)
        if session.shouldstop:
            raise session.Interrupted(session.shouldstop)
    return True

@ -364,24 +360,6 @@ class Node(object):
    def teardown(self):
        pass

    def _memoizedcall(self, attrname, function):
        exattrname = "_ex_" + attrname
        failure = getattr(self, exattrname, None)
        if failure is not None:
            py.builtin._reraise(failure[0], failure[1], failure[2])
        if hasattr(self, attrname):
            return getattr(self, attrname)
        try:
            res = function()
        except py.builtin._sysex:
            raise
        except:  # noqa
            failure = sys.exc_info()
            setattr(self, exattrname, failure)
            raise
        setattr(self, attrname, res)
        return res

    def listchain(self):
        """ return list of all parent collectors up to self,
        starting from root of collection tree. """

@ -399,7 +377,7 @@ class Node(object):
        ``marker`` can be a string or pytest.mark.* instance.
        """
        from _pytest.mark import MarkDecorator, MARK_GEN
        if isinstance(marker, py.builtin._basestring):
        if isinstance(marker, six.string_types):
            marker = getattr(MARK_GEN, marker)
        elif not isinstance(marker, MarkDecorator):
            raise ValueError("is not a string or pytest.mark.* Marker")

@ -599,8 +577,13 @@ class Interrupted(KeyboardInterrupt):
    __module__ = 'builtins'  # for py3


class Failed(Exception):
    """ signals a stop as failed test run. """


class Session(FSCollector):
    Interrupted = Interrupted
    Failed = Failed

    def __init__(self, config):
        FSCollector.__init__(self, config.rootdir, parent=None,

@ -608,6 +591,7 @@ class Session(FSCollector):
        self.testsfailed = 0
        self.testscollected = 0
        self.shouldstop = False
        self.shouldfail = False
        self.trace = config.trace.root.get("collection")
        self._norecursepatterns = config.getini("norecursedirs")
        self.startdir = py.path.local()

@ -618,6 +602,8 @@ class Session(FSCollector):

    @hookimpl(tryfirst=True)
    def pytest_collectstart(self):
        if self.shouldfail:
            raise self.Failed(self.shouldfail)
        if self.shouldstop:
            raise self.Interrupted(self.shouldstop)

@ -627,7 +613,7 @@ class Session(FSCollector):
            self.testsfailed += 1
            maxfail = self.config.getvalue("maxfail")
            if maxfail and self.testsfailed >= maxfail:
                self.shouldstop = "stopping after %d failures" % (
                self.shouldfail = "stopping after %d failures" % (
                    self.testsfailed)
    pytest_collectreport = pytest_runtest_logreport
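
The new ``shouldfail`` flag is what ``--maxfail`` now drives, while ``shouldstop`` keeps its explicit-interrupt semantics. A sketch of the observable behaviour::

    pytest --maxfail=2   # after the second failure the session raises
                         # Failed("stopping after 2 failures")
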
@ -3,10 +3,12 @@ from __future__ import absolute_import, division, print_function

import inspect
import warnings
import attr
from collections import namedtuple
from operator import attrgetter
from .compat import imap
from six.moves import map
from .deprecated import MARK_PARAMETERSET_UNPACKING
from .compat import NOTSET, getfslineno


def alias(name, warning=None):

@ -67,9 +69,29 @@ class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')):

        return cls(argval, marks=newmarks, id=None)

    @property
    def deprecated_arg_dict(self):
        return dict((mark.name, mark) for mark in self.marks)
    @classmethod
    def _for_parameterize(cls, argnames, argvalues, function):
        if not isinstance(argnames, (tuple, list)):
            argnames = [x.strip() for x in argnames.split(",") if x.strip()]
            force_tuple = len(argnames) == 1
        else:
            force_tuple = False
        parameters = [
            ParameterSet.extract_from(x, legacy_force_tuple=force_tuple)
            for x in argvalues]
        del argvalues

        if not parameters:
            fs, lineno = getfslineno(function)
            reason = "got empty parameter set %r, function %s at %s:%d" % (
                argnames, function.__name__, fs, lineno)
            mark = MARK_GEN.skip(reason=reason)
            parameters.append(ParameterSet(
                values=(NOTSET,) * len(argnames),
                marks=[mark],
                id=None,
            ))
        return argnames, parameters


class MarkerError(Exception):

@ -166,22 +188,26 @@ def pytest_collection_modifyitems(items, config):
    items[:] = remaining


class MarkMapping:
@attr.s
class MarkMapping(object):
    """Provides a local mapping for markers where item access
    resolves to True if the marker is present. """

    def __init__(self, keywords):
        mymarks = set()
    own_mark_names = attr.ib()

    @classmethod
    def from_keywords(cls, keywords):
        mark_names = set()
        for key, value in keywords.items():
            if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator):
                mymarks.add(key)
        self._mymarks = mymarks
                mark_names.add(key)
        return cls(mark_names)

    def __getitem__(self, name):
        return name in self._mymarks
        return name in self.own_mark_names


class KeywordMapping:
class KeywordMapping(object):
    """Provides a local mapping for keywords.
    Given a list of names, map any substring of one of these names to True.
    """

@ -198,7 +224,7 @@ class KeywordMapping:

def matchmark(colitem, markexpr):
    """Tries to match on any marker names, attached to the given colitem."""
    return eval(markexpr, {}, MarkMapping(colitem.keywords))
    return eval(markexpr, {}, MarkMapping.from_keywords(colitem.keywords))


def matchkeyword(colitem, keywordexpr):

@ -287,7 +313,21 @@ def istestfunc(func):
        getattr(func, "__name__", "<lambda>") != "<lambda>"


class MarkDecorator:
@attr.s(frozen=True)
class Mark(object):
    name = attr.ib()
    args = attr.ib()
    kwargs = attr.ib()

    def combined_with(self, other):
        assert self.name == other.name
        return Mark(
            self.name, self.args + other.args,
            dict(self.kwargs, **other.kwargs))


@attr.s
class MarkDecorator(object):
    """ A decorator for test functions and test classes.  When applied
    it will create :class:`MarkInfo` objects which may be
    :ref:`retrieved by hooks as item keywords <excontrolskip>`.

@ -321,9 +361,7 @@ class MarkDecorator:

    """

    def __init__(self, mark):
        assert isinstance(mark, Mark), repr(mark)
        self.mark = mark
    mark = attr.ib(validator=attr.validators.instance_of(Mark))

    name = alias('mark.name')
    args = alias('mark.args')

@ -403,15 +441,6 @@ def store_legacy_markinfo(func, mark):
    holder.add_mark(mark)


class Mark(namedtuple('Mark', 'name, args, kwargs')):

    def combined_with(self, other):
        assert self.name == other.name
        return Mark(
            self.name, self.args + other.args,
            dict(self.kwargs, **other.kwargs))


class MarkInfo(object):
    """ Marking object created by :class:`MarkDecorator` instances. """

@ -434,7 +463,7 @@ class MarkInfo(object):

    def __iter__(self):
        """ yield MarkInfo objects each relating to a marking-call. """
        return imap(MarkInfo, self._marks)
        return map(MarkInfo, self._marks)


MARK_GEN = MarkGenerator()
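
How the new ``Mark.combined_with`` merges repeated markers, sketched directly against the class above (ordinary test code would go through ``pytest.mark`` instead)::

    m1 = Mark('slow', (1,), {'reason': 'io'})
    m2 = Mark('slow', (2,), {'timeout': 30})
    merged = m1.combined_with(m2)
    # args concatenate, kwargs merge with the right-hand side winning
    assert merged.args == (1, 2)
    assert merged.kwargs == {'reason': 'io', 'timeout': 30}
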
@ -4,8 +4,7 @@ from __future__ import absolute_import, division, print_function
import os
import sys
import re

from py.builtin import _basestring
import six
from _pytest.fixtures import fixture

RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")

@ -79,7 +78,7 @@ def annotated_getattr(obj, name, ann):


def derive_importpath(import_path, raising):
    if not isinstance(import_path, _basestring) or "." not in import_path:
    if not isinstance(import_path, six.string_types) or "." not in import_path:
        raise TypeError("must be absolute import path string, not %r" %
                        (import_path,))
    module, attr = import_path.rsplit('.', 1)

@ -125,7 +124,7 @@ class MonkeyPatch:
        import inspect

        if value is notset:
            if not isinstance(target, _basestring):
            if not isinstance(target, six.string_types):
                raise TypeError("use setattr(target, name, value) or "
                                "setattr(target, value) with target being a dotted "
                                "import string")

@ -155,7 +154,7 @@ class MonkeyPatch:
        """
        __tracebackhide__ = True
        if name is notset:
            if not isinstance(target, _basestring):
            if not isinstance(target, six.string_types):
                raise TypeError("use delattr(target, name) or "
                                "delattr(target) with target being a dotted "
                                "import string")
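
The ``six.string_types`` check is what accepts dotted-string targets on both Python 2 and 3. Typical usage, for reference::

    def test_fake_cwd(monkeypatch):
        # target given as a dotted import string; derive_importpath
        # resolves it to the os module and the getcwd attribute
        monkeypatch.setattr('os.getcwd', lambda: '/abc')
        import os
        assert os.getcwd() == '/abc'
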
@ -3,7 +3,6 @@ from __future__ import absolute_import, division, print_function

import sys

import py
from _pytest import unittest, runner, python
from _pytest.config import hookimpl

@ -66,7 +65,7 @@ def is_potential_nosetest(item):
def call_optional(obj, name):
    method = getattr(obj, name, None)
    isfixture = hasattr(method, "_pytestfixturefunction")
    if method is not None and not isfixture and py.builtin.callable(method):
    if method is not None and not isfixture and callable(method):
        # If there are any problems, allow the exception to raise rather than
        # silently ignoring them
        method()
@ -62,14 +62,21 @@ def exit(msg):
exit.Exception = Exit


def skip(msg=""):
def skip(msg="", **kwargs):
    """ skip an executing test with the given message.  Note: it's usually
    better to use the pytest.mark.skipif marker to declare a test to be
    skipped under certain conditions like mismatching platforms or
    dependencies. See the pytest_skipping plugin for details.

    :kwarg bool allow_module_level: allows this function to be called at
        module level, skipping the rest of the module. Defaults to False.
    """
    __tracebackhide__ = True
    raise Skipped(msg=msg)
    allow_module_level = kwargs.pop('allow_module_level', False)
    if kwargs:
        keys = [k for k in kwargs.keys()]
        raise TypeError('unexpected keyword arguments: {0}'.format(keys))
    raise Skipped(msg=msg, allow_module_level=allow_module_level)


skip.Exception = Skipped
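
The new keyword makes module-level skipping explicit. A sketch of the intended call site::

    # at the top of a test module; without allow_module_level=True,
    # calling pytest.skip() outside of a test is rejected
    import sys
    import pytest

    if sys.platform.startswith('win'):
        pytest.skip('not supported on Windows', allow_module_level=True)
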
@ -2,6 +2,7 @@
from __future__ import absolute_import, division, print_function

import pytest
import six
import sys
import tempfile

@ -16,7 +17,6 @@ def pytest_addoption(parser):

@pytest.hookimpl(trylast=True)
def pytest_configure(config):
    import py
    if config.option.pastebin == "all":
        tr = config.pluginmanager.getplugin('terminalreporter')
        # if no terminal reporter plugin is present, nothing we can do here;

@ -29,7 +29,7 @@ def pytest_configure(config):

        def tee_write(s, **kwargs):
            oldwrite(s, **kwargs)
            if py.builtin._istext(s):
            if isinstance(s, six.text_type):
                s = s.encode('utf-8')
            config._pastebinfile.write(s)
@ -7,6 +7,7 @@ import os
import platform
import re
import subprocess
import six
import sys
import time
import traceback

@ -22,6 +23,9 @@ from _pytest.main import Session, EXIT_OK
from _pytest.assertion.rewrite import AssertionRewritingHook


PYTEST_FULLPATH = os.path.abspath(pytest.__file__.rstrip("oc")).replace("$py.class", ".py")


def pytest_addoption(parser):
    # group = parser.getgroup("pytester", "pytester (self-tests) options")
    parser.addoption('--lsof',

@ -35,14 +39,6 @@ def pytest_addoption(parser):


def pytest_configure(config):
    # This might be called multiple times. Only take the first.
    global _pytest_fullpath
    try:
        _pytest_fullpath
    except NameError:
        _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")

    if config.getvalue("lsof"):
        checker = LsofFdLeakChecker()
        if checker.matching_platform():

@ -114,12 +110,9 @@ class LsofFdLeakChecker(object):
# XXX copied from execnet's conftest.py - needs to be merged
winpymap = {
    'python2.7': r'C:\Python27\python.exe',
    'python2.6': r'C:\Python26\python.exe',
    'python3.1': r'C:\Python31\python.exe',
    'python3.2': r'C:\Python32\python.exe',
    'python3.3': r'C:\Python33\python.exe',
    'python3.4': r'C:\Python34\python.exe',
    'python3.5': r'C:\Python35\python.exe',
    'python3.6': r'C:\Python36\python.exe',
}

@ -145,8 +138,7 @@ def getexecutable(name, cache={}):
    return executable


@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
                        'pypy', 'pypy3'])
@pytest.fixture(params=['python2.7', 'python3.4', 'pypy', 'pypy3'])
def anypython(request):
    name = request.param
    executable = getexecutable(name)

@ -418,16 +410,8 @@ class Testdir:
    def __init__(self, request, tmpdir_factory):
        self.request = request
        self._mod_collections = WeakKeyDictionary()
        # XXX remove duplication with tmpdir plugin
        basetmp = tmpdir_factory.ensuretemp("testdir")
        name = request.function.__name__
        for i in range(100):
            try:
                tmpdir = basetmp.mkdir(name + str(i))
            except py.error.EEXIST:
                continue
            break
        self.tmpdir = tmpdir
        self.tmpdir = tmpdir_factory.mktemp(name, numbered=True)
        self.plugins = []
        self._savesyspath = (list(sys.path), list(sys.meta_path))
        self._savemodulekeys = set(sys.modules)

@ -486,29 +470,24 @@ class Testdir:
        if not hasattr(self, '_olddir'):
            self._olddir = old

    def _makefile(self, ext, args, kwargs, encoding="utf-8"):
    def _makefile(self, ext, args, kwargs, encoding='utf-8'):
        items = list(kwargs.items())

        def to_text(s):
            return s.decode(encoding) if isinstance(s, bytes) else six.text_type(s)

        if args:
            source = py.builtin._totext("\n").join(
                map(py.builtin._totext, args)) + py.builtin._totext("\n")
            source = u"\n".join(to_text(x) for x in args)
            basename = self.request.function.__name__
            items.insert(0, (basename, source))

        ret = None
        for name, value in items:
            p = self.tmpdir.join(name).new(ext=ext)
        for basename, value in items:
            p = self.tmpdir.join(basename).new(ext=ext)
            p.dirpath().ensure_dir()
            source = Source(value)

            def my_totext(s, encoding="utf-8"):
                if py.builtin._isbytes(s):
                    s = py.builtin._totext(s, encoding=encoding)
                return s

            source_unicode = "\n".join([my_totext(line) for line in source.lines])
            source = py.builtin._totext(source_unicode)
            content = source.strip().encode(encoding)  # + "\n"
            # content = content.rstrip() + "\n"
            p.write(content, "wb")
            source = u"\n".join(to_text(line) for line in source.lines)
            p.write(source.strip().encode(encoding), "wb")
            if ret is None:
                ret = p
        return ret

@ -975,7 +954,7 @@ class Testdir:
    def _getpytestargs(self):
        # we cannot use "(sys.executable,script)"
        # because on windows the script is e.g. a pytest.exe
        return (sys.executable, _pytest_fullpath,)  # noqa
        return (sys.executable, PYTEST_FULLPATH)  # noqa

    def runpython(self, script):
        """Run a python script using sys.executable as interpreter.

@ -1098,6 +1077,23 @@ class LineMatcher:
        return lines2

    def fnmatch_lines_random(self, lines2):
        """Check lines exist in the output using ``fnmatch.fnmatch``, in any order.

        The argument is a list of lines which have to occur in the
        output, in any order.
        """
        self._match_lines_random(lines2, fnmatch)

    def re_match_lines_random(self, lines2):
        """Check lines exist in the output using ``re.match``, in any order.

        The argument is a list of lines which have to occur in the
        output, in any order.

        """
        self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))

    def _match_lines_random(self, lines2, match_func):
        """Check lines exist in the output.

        The argument is a list of lines which have to occur in the

@ -1107,7 +1103,7 @@ class LineMatcher:
        lines2 = self._getlines(lines2)
        for line in lines2:
            for x in self.lines:
                if line == x or fnmatch(x, line):
                if line == x or match_func(x, line):
                    self._log("matched: ", repr(line))
                    break
            else:

@ -1132,13 +1128,37 @@ class LineMatcher:
        return '\n'.join(self._log_output)

    def fnmatch_lines(self, lines2):
        """Search the text for matching lines.
        """Search captured text for matching lines using ``fnmatch.fnmatch``.

        The argument is a list of lines which have to match and can
        use glob wildcards.  If they do not match an pytest.fail() is
        use glob wildcards.  If they do not match a pytest.fail() is
        called.  The matches and non-matches are also printed on
        stdout.

        """
        self._match_lines(lines2, fnmatch, 'fnmatch')

    def re_match_lines(self, lines2):
        """Search captured text for matching lines using ``re.match``.

        The argument is a list of lines which have to match using ``re.match``.
        If they do not match a pytest.fail() is called.

        The matches and non-matches are also printed on
        stdout.
        """
        self._match_lines(lines2, lambda name, pat: re.match(pat, name), 're.match')

    def _match_lines(self, lines2, match_func, match_nickname):
        """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.

        :param list[str] lines2: list of string patterns to match. The actual format depends on
            ``match_func``.
        :param match_func: a callable ``match_func(line, pattern)`` where line is the captured
            line from stdout/stderr and pattern is the matching pattern.

        :param str match_nickname: the nickname for the match function that will be logged
            to stdout when a match occurs.
        """
        lines2 = self._getlines(lines2)
        lines1 = self.lines[:]

@ -1152,8 +1172,8 @@ class LineMatcher:
                if line == nextline:
                    self._log("exact match:", repr(line))
                    break
                elif fnmatch(nextline, line):
                    self._log("fnmatch:", repr(line))
                elif match_func(nextline, line):
                    self._log("%s:" % match_nickname, repr(line))
                    self._log("   with:", repr(nextline))
                    break
            else:
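
``re_match_lines`` complements ``fnmatch_lines`` with anchored regular expressions. A hedged sketch using the ``testdir`` fixture (requires the pytester plugin, i.e. ``pytest_plugins = "pytester"``)::

    def test_output_matching(testdir):
        testdir.makepyfile('def test_ok(): pass')
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(['*1 passed*'])
        # re.match anchors at the start of each line, hence the leading .*
        result.stdout.re_match_lines([r'.*1 passed.*'])
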
@ -6,19 +6,23 @@ import inspect
import sys
import os
import collections
import warnings
from textwrap import dedent
from itertools import count

import py
import six
from _pytest.mark import MarkerError
from _pytest.config import hookimpl

import _pytest
import _pytest._pluggy as pluggy
import pluggy
from _pytest import fixtures
from _pytest import main
from _pytest import deprecated
from _pytest.compat import (
    isclass, isfunction, is_generator, _ascii_escaped,
    isclass, isfunction, is_generator, ascii_escaped,
    REGEX_TYPE, STRING_TYPES, NoneType, NOTSET,
    get_real_func, getfslineno, safe_getattr,
    safe_str, getlocation, enum,

@ -327,7 +331,7 @@ class PyCollector(PyobjMixin, main.Collector):
            if name in seen:
                continue
            seen[name] = True
            res = self.makeitem(name, obj)
            res = self._makeitem(name, obj)
            if res is None:
                continue
            if not isinstance(res, list):

@ -337,6 +341,10 @@ class PyCollector(PyobjMixin, main.Collector):
        return values

    def makeitem(self, name, obj):
        warnings.warn(deprecated.COLLECTOR_MAKEITEM, stacklevel=2)
        self._makeitem(name, obj)

    def _makeitem(self, name, obj):
        # assert self.ihook.fspath == self.fspath, self
        return self.ihook.pytest_pycollect_makeitem(
            collector=self, name=name, obj=obj)

@ -613,7 +621,7 @@ class Generator(FunctionMixin, PyCollector):
        if not isinstance(obj, (tuple, list)):
            obj = (obj,)
        # explicit naming
        if isinstance(obj[0], py.builtin._basestring):
        if isinstance(obj[0], six.string_types):
            name = obj[0]
            obj = obj[1:]
        else:

@ -644,14 +652,14 @@ class CallSpec2(object):
        self._globalid_args = set()
        self._globalparam = NOTSET
        self._arg2scopenum = {}  # used for sorting parametrized resources
        self.keywords = {}
        self.marks = []
        self.indices = {}

    def copy(self, metafunc):
        cs = CallSpec2(self.metafunc)
        cs.funcargs.update(self.funcargs)
        cs.params.update(self.params)
        cs.keywords.update(self.keywords)
        cs.marks.extend(self.marks)
        cs.indices.update(self.indices)
        cs._arg2scopenum.update(self._arg2scopenum)
        cs._idlist = list(self._idlist)

@ -676,8 +684,8 @@ class CallSpec2(object):
    def id(self):
        return "-".join(map(str, filter(None, self._idlist)))

    def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
                 param_index):
    def setmulti2(self, valtypes, argnames, valset, id, marks, scopenum,
                  param_index):
        for arg, val in zip(argnames, valset):
            self._checkargnotcontained(arg)
            valtype_for_arg = valtypes[arg]

@ -685,7 +693,7 @@ class CallSpec2(object):
            self.indices[arg] = param_index
            self._arg2scopenum[arg] = scopenum
        self._idlist.append(id)
        self.keywords.update(keywords)
        self.marks.extend(marks)

    def setall(self, funcargs, id, param):
        for x in funcargs:

@ -725,7 +733,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
        self.cls = cls

        self._calls = []
        self._ids = py.builtin.set()
        self._ids = set()
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs

    def parametrize(self, argnames, argvalues, indirect=False, ids=None,

@ -768,30 +776,12 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
            to set a dynamic scope using test context or configuration.
        """
        from _pytest.fixtures import scope2index
        from _pytest.mark import MARK_GEN, ParameterSet
        from _pytest.mark import ParameterSet
        from py.io import saferepr

        if not isinstance(argnames, (tuple, list)):
            argnames = [x.strip() for x in argnames.split(",") if x.strip()]
            force_tuple = len(argnames) == 1
        else:
            force_tuple = False
        parameters = [
            ParameterSet.extract_from(x, legacy_force_tuple=force_tuple)
            for x in argvalues]
        argnames, parameters = ParameterSet._for_parameterize(
            argnames, argvalues, self.function)
        del argvalues

        if not parameters:
            fs, lineno = getfslineno(self.function)
            reason = "got empty parameter set %r, function %s at %s:%d" % (
                argnames, self.function.__name__, fs, lineno)
            mark = MARK_GEN.skip(reason=reason)
            parameters.append(ParameterSet(
                values=(NOTSET,) * len(argnames),
                marks=[mark],
                id=None,
            ))

        if scope is None:
            scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect)

@ -827,7 +817,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
                raise ValueError('%d tests specified with %d ids' % (
                    len(parameters), len(ids)))
            for id_value in ids:
                if id_value is not None and not isinstance(id_value, py.builtin._basestring):
                if id_value is not None and not isinstance(id_value, six.string_types):
                    msg = 'ids must be list of strings, found: %s (type: %s)'
                    raise ValueError(msg % (saferepr(id_value), type(id_value).__name__))
        ids = idmaker(argnames, parameters, idfn, ids, self.config)

@ -841,15 +831,19 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
                    'equal to the number of names ({1})'.format(
                        param.values, argnames))
            newcallspec = callspec.copy(self)
            newcallspec.setmulti(valtypes, argnames, param.values, a_id,
                                 param.deprecated_arg_dict, scopenum, param_index)
            newcallspec.setmulti2(valtypes, argnames, param.values, a_id,
                                  param.marks, scopenum, param_index)
            newcalls.append(newcallspec)
        self._calls = newcalls

    def addcall(self, funcargs=None, id=NOTSET, param=NOTSET):
        """ (deprecated, use parametrize) Add a new call to the underlying
        test function during the collection phase of a test run.  Note that
        request.addcall() is called during the test collection phase prior and
        """ Add a new call to the underlying test function during the collection phase of a test run.

        .. deprecated:: 3.3

            Use :meth:`parametrize` instead.

        Note that request.addcall() is called during the test collection phase prior and
        independently to actual test execution.  You should only use addcall()
        if you need to specify multiple arguments of a test function.

@ -862,6 +856,8 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
        :arg param: a parameter which will be exposed to a later fixture function
            invocation through the ``request.param`` attribute.
        """
        if self.config:
            self.config.warn('C1', message=deprecated.METAFUNC_ADD_CALL, fslocation=None)
        assert funcargs is None or isinstance(funcargs, dict)
        if funcargs is not None:
            for name in funcargs:

@ -921,7 +917,7 @@ def _idval(val, argname, idx, idfn, config=None):
            msg += '\nUpdate your code as this will raise an error in pytest-4.0.'
            warnings.warn(msg, DeprecationWarning)
        if s:
            return _ascii_escaped(s)
            return ascii_escaped(s)

    if config:
        hook_id = config.hook.pytest_make_parametrize_id(

@ -930,11 +926,11 @@ def _idval(val, argname, idx, idfn, config=None):
        return hook_id

    if isinstance(val, STRING_TYPES):
        return _ascii_escaped(val)
        return ascii_escaped(val)
    elif isinstance(val, (float, int, bool, NoneType)):
        return str(val)
    elif isinstance(val, REGEX_TYPE):
        return _ascii_escaped(val.pattern)
        return ascii_escaped(val.pattern)
    elif enum is not None and isinstance(val, enum.Enum):
        return str(val)
    elif isclass(val) and hasattr(val, '__name__'):

@ -950,7 +946,7 @@ def _idvalset(idx, parameterset, argnames, idfn, ids, config=None):
                   for val, argname in zip(parameterset.values, argnames)]
        return "-".join(this_id)
    else:
        return _ascii_escaped(ids[idx])
        return ascii_escaped(ids[idx])


def idmaker(argnames, parametersets, idfn=None, ids=None, config=None):

@ -1112,7 +1108,13 @@ class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr):
            self.keywords.update(self.obj.__dict__)
        if callspec:
            self.callspec = callspec
            self.keywords.update(callspec.keywords)
            # this is total hostile and a mess
            # keywords are broken by design by now
            # this will be redeemed later
            for mark in callspec.marks:
                # feel free to cry, this was broken for years before
                # and keywords cant fix it per design
                self.keywords[mark.name] = mark
        if keywords:
            self.keywords.update(keywords)
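
With marks carried on ``CallSpec2`` instead of keyword dicts, per-parameter marks flow through ``pytest.param``. Standard usage, for reference::

    import pytest

    @pytest.mark.parametrize('n', [
        1,
        pytest.param(2, marks=pytest.mark.xfail(reason='known bad')),
    ])
    def test_n(n):
        assert n == 1
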
@ -2,8 +2,9 @@ import math
import sys

import py
from six.moves import zip

from _pytest.compat import isclass, izip
from _pytest.compat import isclass
from _pytest.outcomes import fail
import _pytest._code

@ -145,7 +146,7 @@ class ApproxSequence(ApproxBase):
        return ApproxBase.__eq__(self, actual)

    def _yield_comparisons(self, actual):
        return izip(actual, self.expected)
        return zip(actual, self.expected)


class ApproxScalar(ApproxBase):

@ -454,8 +455,7 @@ def raises(expected_exception, *args, **kwargs):

    This helper produces a ``ExceptionInfo()`` object (see below).

    If using Python 2.5 or above, you may use this function as a
    context manager::
    You may use this function as a context manager::

        >>> with raises(ZeroDivisionError):
        ...    1/0

@ -610,13 +610,6 @@ class RaisesContext(object):
        __tracebackhide__ = True
        if tp[0] is None:
            fail(self.message)
        if sys.version_info < (2, 7):
            # py26: on __exit__() exc_value often does not contain the
            # exception value.
            # http://bugs.python.org/issue7853
            if not isinstance(tp[1], BaseException):
                exc_type, value, traceback = tp
                tp = exc_type, exc_type(value), traceback
        self.excinfo.__init__(tp)
        suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
        if sys.version_info[0] == 2 and suppress_exception:
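
``zip`` pairs actual and expected elementwise, so ``approx`` keeps working on sequences under both Python 2 and 3::

    import pytest

    # each element is compared with the default relative tolerance
    assert [0.1 + 0.2, 0.2 + 0.4] == pytest.approx([0.3, 0.6])
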
@ -8,6 +8,8 @@ import py
import sys
import warnings

import re

from _pytest.fixtures import yield_fixture
from _pytest.outcomes import fail

@ -98,10 +100,28 @@ def warns(expected_warning, *args, **kwargs):

    >>> with warns(RuntimeWarning):
    ...    warnings.warn("my warning", RuntimeWarning)

    In the context manager form you may use the keyword argument ``match`` to assert
    that the warning matches a text or regex::

    >>> with warns(UserWarning, match='must be 0 or None'):
    ...     warnings.warn("value must be 0 or None", UserWarning)

    >>> with warns(UserWarning, match=r'must be \d+$'):
    ...     warnings.warn("value must be 42", UserWarning)

    >>> with warns(UserWarning, match=r'must be \d+$'):
    ...     warnings.warn("this is not here", UserWarning)
    Traceback (most recent call last):
      ...
    Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted...

    """
    wcheck = WarningsChecker(expected_warning)
    match_expr = None
    if not args:
        return wcheck
    if "match" in kwargs:
        match_expr = kwargs.pop("match")
        return WarningsChecker(expected_warning, match_expr=match_expr)
    elif isinstance(args[0], str):
        code, = args
        assert isinstance(code, str)

@ -109,12 +129,12 @@ def warns(expected_warning, *args, **kwargs):
        loc = frame.f_locals.copy()
        loc.update(kwargs)

        with wcheck:
        with WarningsChecker(expected_warning, match_expr=match_expr):
            code = _pytest._code.Source(code).compile()
            py.builtin.exec_(code, frame.f_globals, loc)
    else:
        func = args[0]
        with wcheck:
        with WarningsChecker(expected_warning, match_expr=match_expr):
            return func(*args[1:], **kwargs)

@ -174,7 +194,7 @@ class WarningsRecorder(warnings.catch_warnings):


class WarningsChecker(WarningsRecorder):
    def __init__(self, expected_warning=None):
    def __init__(self, expected_warning=None, match_expr=None):
        super(WarningsChecker, self).__init__()

        msg = ("exceptions must be old-style classes or "

@ -189,6 +209,7 @@ class WarningsChecker(WarningsRecorder):
            raise TypeError(msg % type(expected_warning))

        self.expected_warning = expected_warning
        self.match_expr = match_expr

    def __exit__(self, *exc_info):
        super(WarningsChecker, self).__exit__(*exc_info)

@ -203,3 +224,13 @@ class WarningsChecker(WarningsRecorder):
                     "The list of emitted warnings is: {1}.".format(
                         self.expected_warning,
                         [each.message for each in self]))
            elif self.match_expr is not None:
                for r in self:
                    if issubclass(r.category, self.expected_warning):
                        if re.compile(self.match_expr).search(str(r.message)):
                            break
                else:
                    fail("DID NOT WARN. No warnings of type {0} matching"
                         " ('{1}') was emitted. The list of emitted warnings"
                         " is: {2}.".format(self.expected_warning, self.match_expr,
                                            [each.message for each in self]))
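
The ``match`` keyword in action, per the docstring above::

    import warnings
    import pytest

    def test_warning_message():
        # match is applied with re.search against the warning message
        with pytest.warns(UserWarning, match=r'must be \d+'):
            warnings.warn('value must be 42', UserWarning)
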
@ -431,7 +431,7 @@ class SetupState(object):
        is called at the end of teardown_all().
        """
        assert colitem and not isinstance(colitem, tuple)
        assert py.builtin.callable(finalizer)
        assert callable(finalizer)
        # assert colitem in self.stack  # some unit tests don't setup stack :/
        self._finalizers.setdefault(colitem, []).append(finalizer)
@ -44,7 +44,7 @@ def _show_fixture_action(fixturedef, msg):
    config = fixturedef._fixturemanager.config
    capman = config.pluginmanager.getplugin('capturemanager')
    if capman:
        out, err = capman.suspendcapture()
        out, err = capman.suspend_global_capture()

    tw = config.get_terminal_writer()
    tw.line()

@ -63,7 +63,7 @@ def _show_fixture_action(fixturedef, msg):
        tw.write('[{0}]'.format(fixturedef.cached_param))

    if capman:
        capman.resumecapture()
        capman.resume_global_capture()
        sys.stdout.write(out)
        sys.stderr.write(err)
@ -2,10 +2,10 @@
from __future__ import absolute_import, division, print_function

import os
import six
import sys
import traceback

import py
from _pytest.config import hookimpl
from _pytest.mark import MarkInfo, MarkDecorator
from _pytest.outcomes import fail, skip, xfail, TEST_OUTCOME

@ -60,22 +60,31 @@ def pytest_configure(config):
    )


class MarkEvaluator:
class MarkEvaluator(object):
    def __init__(self, item, name):
        self.item = item
        self.name = name

    @property
    def holder(self):
        return self.item.keywords.get(self.name)
        self._marks = None
        self._mark = None
        self._mark_name = name

    def __bool__(self):
        return bool(self.holder)
        self._marks = self._get_marks()
        return bool(self._marks)
    __nonzero__ = __bool__

    def wasvalid(self):
        return not hasattr(self, 'exc')

    def _get_marks(self):

        keyword = self.item.keywords.get(self._mark_name)
        if isinstance(keyword, MarkDecorator):
            return [keyword.mark]
        elif isinstance(keyword, MarkInfo):
            return [x.combined for x in keyword]
        else:
            return []

    def invalidraise(self, exc):
        raises = self.get('raises')
        if not raises:

@ -95,7 +104,7 @@ class MarkEvaluator:
            fail("Error evaluating %r expression\n"
                 "    %s\n"
                 "%s"
                 % (self.name, self.expr, "\n".join(msg)),
                 % (self._mark_name, self.expr, "\n".join(msg)),
                 pytrace=False)

    def _getglobals(self):

@ -107,40 +116,45 @@ class MarkEvaluator:
    def _istrue(self):
        if hasattr(self, 'result'):
            return self.result
        if self.holder:
            if self.holder.args or 'condition' in self.holder.kwargs:
                self.result = False
                # "holder" might be a MarkInfo or a MarkDecorator; only
                # MarkInfo keeps track of all parameters it received in an
                # _arglist attribute
                marks = getattr(self.holder, '_marks', None) \
                    or [self.holder.mark]
                for _, args, kwargs in marks:
                    if 'condition' in kwargs:
                        args = (kwargs['condition'],)
                    for expr in args:
        self._marks = self._get_marks()

        if self._marks:
            self.result = False
            for mark in self._marks:
                self._mark = mark
                if 'condition' in mark.kwargs:
                    args = (mark.kwargs['condition'],)
                else:
                    args = mark.args

                for expr in args:
                    self.expr = expr
                    if isinstance(expr, six.string_types):
                        d = self._getglobals()
                        result = cached_eval(self.item.config, expr, d)
                    else:
                        if "reason" not in mark.kwargs:
                            # XXX better be checked at collection time
                            msg = "you need to specify reason=STRING " \
                                  "when using booleans as conditions."
                            fail(msg)
                        result = bool(expr)
                    if result:
                        self.result = True
                        self.reason = mark.kwargs.get('reason', None)
                        self.expr = expr
                        if isinstance(expr, py.builtin._basestring):
                            d = self._getglobals()
                            result = cached_eval(self.item.config, expr, d)
                        else:
                            if "reason" not in kwargs:
                                # XXX better be checked at collection time
                                msg = "you need to specify reason=STRING " \
                                      "when using booleans as conditions."
                                fail(msg)
                            result = bool(expr)
                        if result:
                            self.result = True
                            self.reason = kwargs.get('reason', None)
                            self.expr = expr
                            return self.result
            else:
                self.result = True
        return getattr(self, 'result', False)
                        return self.result

                if not args:
                    self.result = True
                    self.reason = mark.kwargs.get('reason', None)
                    return self.result
        return False

    def get(self, attr, default=None):
        return self.holder.kwargs.get(attr, default)
        if self._mark is None:
            return default
        return self._mark.kwargs.get(attr, default)

    def getexplanation(self):
        expl = getattr(self, 'reason', None) or self.get('reason', None)

@ -155,17 +169,17 @@ class MarkEvaluator:
@hookimpl(tryfirst=True)
def pytest_runtest_setup(item):
    # Check if skip or skipif are specified as pytest marks

    item._skipped_by_mark = False
    skipif_info = item.keywords.get('skipif')
    if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
        eval_skipif = MarkEvaluator(item, 'skipif')
        if eval_skipif.istrue():
            item._evalskip = eval_skipif
            item._skipped_by_mark = True
            skip(eval_skipif.getexplanation())

    skip_info = item.keywords.get('skip')
    if isinstance(skip_info, (MarkInfo, MarkDecorator)):
        item._evalskip = True
        item._skipped_by_mark = True
        if 'reason' in skip_info.kwargs:
            skip(skip_info.kwargs['reason'])
        elif skip_info.args:

@ -212,7 +226,6 @@ def pytest_runtest_makereport(item, call):
    outcome = yield
    rep = outcome.get_result()
    evalxfail = getattr(item, '_evalxfail', None)
    evalskip = getattr(item, '_evalskip', None)
    # unittest special case, see setting of _unexpectedsuccess
    if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
        from _pytest.compat import _is_unittest_unexpected_success_a_failure

@ -248,7 +261,7 @@ def pytest_runtest_makereport(item, call):
        else:
            rep.outcome = "passed"
            rep.wasxfail = explanation
    elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
    elif item._skipped_by_mark and rep.skipped and type(rep.longrepr) is tuple:
        # skipped by mark.skipif; change the location of the failure
        # to point to the item definition, otherwise it will display
        # the location of where the skip exception was raised within pytest

@ -345,6 +358,13 @@ def folded_skips(skipped):
    for event in skipped:
        key = event.longrepr
        assert len(key) == 3, (event, key)
        keywords = getattr(event, 'keywords', {})
        # folding reports with global pytestmark variable
        # this is a workaround, because for now we cannot identify the scope of a skip marker
        # TODO: revisit after marks scope would be fixed
        when = getattr(event, 'when', None)
        if when == 'setup' and 'skip' in keywords and 'pytestmark' not in keywords:
            key = (key[0], None, key[2], )
        d.setdefault(key, []).append(event)
    values = []
    for key, events in d.items():

@ -367,6 +387,11 @@ def show_skipped(terminalreporter, lines):
    for num, fspath, lineno, reason in fskips:
        if reason.startswith("Skipped: "):
            reason = reason[9:]
        lines.append(
            "SKIP [%d] %s:%d: %s" %
            (num, fspath, lineno + 1, reason))
        if lineno is not None:
            lines.append(
                "SKIP [%d] %s:%d: %s" %
                (num, fspath, lineno + 1, reason))
        else:
            lines.append(
                "SKIP [%d] %s: %s" %
                (num, fspath, reason))
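
The evaluator walks every ``skipif`` mark and its ``condition``/``reason`` kwargs, so the usual spellings keep working; note that boolean conditions still require an explicit ``reason``::

    import sys
    import pytest

    @pytest.mark.skipif(sys.version_info < (3, 5), reason='needs py35')
    def test_new_syntax():
        pass

    # string conditions are evaluated via cached_eval and carry their own text
    @pytest.mark.skipif('sys.platform == "win32"')
    def test_posix_only():
        pass
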
@@ -5,16 +5,18 @@ This is a good source for looking at the various reporting hooks.
from __future__ import absolute_import, division, print_function

import itertools
-from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
-    EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
-import pytest
-import py
+import platform
import sys
import time
-import platform

-import _pytest._pluggy as pluggy
+import pluggy
+import py
+import six
+
+import pytest
+from _pytest import nodes
+from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
+    EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED


def pytest_addoption(parser):

@@ -48,6 +50,10 @@ def pytest_addoption(parser):
             choices=['yes', 'no', 'auto'],
             help="color terminal output (yes/no/auto).")

+    parser.addini("console_output_style",
+                  help="console output: classic or with additional progress information (classic|progress).",
+                  default='progress')
+

def pytest_configure(config):
    config.option.verbose -= config.option.quiet
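
The ``console_output_style`` ini option registered above can be set per project; a minimal sketch, assuming the usual ``pytest.ini`` location::

    # pytest.ini -- opt back into the pre-3.3 output
    [pytest]
    console_output_style = classic
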
@@ -132,17 +138,20 @@ class TerminalReporter:
        self.showfspath = self.verbosity >= 0
        self.showlongtestinfo = self.verbosity > 0
        self._numcollected = 0
+        self._session = None

        self.stats = {}
        self.startdir = py.path.local()
        if file is None:
            file = sys.stdout
-        self._tw = self.writer = _pytest.config.create_terminal_writer(config,
-                                                                       file)
+        self._tw = _pytest.config.create_terminal_writer(config, file)
+        self._screen_width = self._tw.fullwidth
        self.currentfspath = None
        self.reportchars = getreportopt(config)
        self.hasmarkup = self._tw.hasmarkup
        self.isatty = file.isatty()
+        self._progress_items_reported = 0
+        self._show_progress_info = self.config.getini('console_output_style') == 'progress'

    def hasopt(self, char):
        char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)

@@ -151,6 +160,8 @@ class TerminalReporter:
    def write_fspath_result(self, nodeid, res):
        fspath = self.config.rootdir.join(nodeid.split("::")[0])
        if fspath != self.currentfspath:
+            if self.currentfspath is not None:
+                self._write_progress_information_filling_space()
            self.currentfspath = fspath
            fspath = self.startdir.bestrelpath(fspath)
            self._tw.line()

@@ -165,6 +176,7 @@ class TerminalReporter:
            if extra:
                self._tw.write(extra, **kwargs)
                self.currentfspath = -2
+                self._write_progress_information_filling_space()

    def ensure_newline(self):
        if self.currentfspath:

@@ -175,8 +187,8 @@ class TerminalReporter:
        self._tw.write(content, **markup)

    def write_line(self, line, **markup):
-        if not py.builtin._istext(line):
-            line = py.builtin.text(line, errors="replace")
+        if not isinstance(line, six.text_type):
+            line = six.text_type(line, errors="replace")
        self.ensure_newline()
        self._tw.line(line, **markup)

@@ -191,7 +203,7 @@ class TerminalReporter:
        """
        erase = markup.pop('erase', False)
        if erase:
-            fill_count = self._tw.fullwidth - len(line)
+            fill_count = self._tw.fullwidth - len(line) - 1
            fill = ' ' * fill_count
        else:
            fill = ''

@@ -209,7 +221,7 @@ class TerminalReporter:
        self._tw.line(msg, **kw)

    def pytest_internalerror(self, excrepr):
-        for line in py.builtin.text(excrepr).split("\n"):
+        for line in six.text_type(excrepr).split("\n"):
            self.write_line("INTERNALERROR> " + line)
        return 1

@@ -244,38 +256,73 @@ class TerminalReporter:
        rep = report
        res = self.config.hook.pytest_report_teststatus(report=rep)
        cat, letter, word = res
+        if isinstance(word, tuple):
+            word, markup = word
+        else:
+            markup = None
        self.stats.setdefault(cat, []).append(rep)
        self._tests_ran = True
        if not letter and not word:
            # probably passed setup/teardown
            return
+        running_xdist = hasattr(rep, 'node')
+        self._progress_items_reported += 1
        if self.verbosity <= 0:
-            if not hasattr(rep, 'node') and self.showfspath:
+            if not running_xdist and self.showfspath:
                self.write_fspath_result(rep.nodeid, letter)
            else:
                self._tw.write(letter)
+            self._write_progress_if_past_edge()
        else:
-            if isinstance(word, tuple):
-                word, markup = word
-            else:
+            if markup is None:
                if rep.passed:
                    markup = {'green': True}
                elif rep.failed:
                    markup = {'red': True}
                elif rep.skipped:
                    markup = {'yellow': True}
                else:
                    markup = {}
            line = self._locationline(rep.nodeid, *rep.location)
-            if not hasattr(rep, 'node'):
+            if not running_xdist:
                self.write_ensure_prefix(line, word, **markup)
                # self._tw.write(word, **markup)
            else:
                self.ensure_newline()
-                self._tw.write("[%s] " % rep.node.gateway.id)
+                self._tw.write("[%s]" % rep.node.gateway.id)
+                if self._show_progress_info:
+                    self._tw.write(self._get_progress_information_message() + " ", cyan=True)
+                else:
+                    self._tw.write(' ')
                self._tw.write(word, **markup)
                self._tw.write(" " + line)
                self.currentfspath = -2

+    def _write_progress_if_past_edge(self):
+        if not self._show_progress_info:
+            return
+        last_item = self._progress_items_reported == self._session.testscollected
+        if last_item:
+            self._write_progress_information_filling_space()
+            return
+
+        past_edge = self._tw.chars_on_current_line + self._PROGRESS_LENGTH + 1 >= self._screen_width
+        if past_edge:
+            msg = self._get_progress_information_message()
+            self._tw.write(msg + '\n', cyan=True)
+
+    _PROGRESS_LENGTH = len(' [100%]')
+
+    def _get_progress_information_message(self):
+        progress = self._progress_items_reported * 100 // self._session.testscollected
+        return ' [{:3d}%]'.format(progress)
+
+    def _write_progress_information_filling_space(self):
+        if not self._show_progress_info:
+            return
+        msg = self._get_progress_information_message()
+        fill = ' ' * (self._tw.fullwidth - self._tw.chars_on_current_line - len(msg) - 1)
+        self.write(fill + msg, cyan=True)
+
    def pytest_collection(self):
        if not self.isatty and self.config.option.verbose >= 1:
            self.write("collecting ... ", bold=True)

@@ -318,6 +365,7 @@ class TerminalReporter:

    @pytest.hookimpl(trylast=True)
    def pytest_sessionstart(self, session):
+        self._session = session
        self._sessionstarttime = time.time()
        if not self.showheader:
            return

@@ -494,9 +542,9 @@ class TerminalReporter:
        grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config))

        self.write_sep("=", "warnings summary", yellow=True, bold=False)
-        for location, warnings in grouped:
+        for location, warning_records in grouped:
            self._tw.line(str(location) or '<undetermined location>')
-            for w in warnings:
+            for w in warning_records:
                lines = w.message.splitlines()
                indented = '\n'.join(' ' + x for x in lines)
                self._tw.line(indented)
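
With the default ``progress`` style, the methods added above append a right-aligned percentage to each output line: ``_get_progress_information_message`` computes ``reported * 100 // collected``, so 25 of 50 reported items renders as ``[ 50%]`` and a finished file looks like::

    test_module.py ........                                              [100%]

while ``classic`` keeps the pre-3.3 output without the percentage column.
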
@@ -9,7 +9,6 @@ import _pytest._code
from _pytest.config import hookimpl
from _pytest.outcomes import fail, skip, xfail
from _pytest.python import transfer_markers, Class, Module, Function
-from _pytest.skipping import MarkEvaluator


def pytest_pycollect_makeitem(collector, name, obj):

@@ -134,8 +133,7 @@ class TestCaseFunction(Function):
        try:
            skip(reason)
        except skip.Exception:
-            self._evalskip = MarkEvaluator(self, 'SkipTest')
-            self._evalskip.result = True
+            self._skipped_by_mark = True
            self._addexcinfo(sys.exc_info())

    def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
@@ -1,13 +0,0 @@
This directory vendors the `pluggy` module.

For a more detailed discussion for the reasons to vendoring this
package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944).

To update the current version, execute:

```
$ pip install -U pluggy==<version> --no-compile --target=_pytest/vendored_packages
```

And commit the modified files. The `pluggy-<version>.dist-info` directory
created by `pip` should be added as well.

@@ -1,11 +0,0 @@

Plugin registration and hook calling for Python
===============================================

This is the plugin manager as used by pytest but stripped
of pytest specific details.

During the 0.x series this plugin does not have much documentation
except extensive docstrings in the pluggy.py module.


@@ -1 +0,0 @@
pip

@@ -1,22 +0,0 @@
The MIT License (MIT)

Copyright (c) 2015 holger krekel (rather uses bitbucket/hpk42)

Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.

@@ -1,40 +0,0 @@
Metadata-Version: 2.0
Name: pluggy
Version: 0.4.0
Summary: plugin and hook calling mechanisms for python
Home-page: https://github.com/pytest-dev/pluggy
Author: Holger Krekel
Author-email: holger at merlinux.eu
License: MIT license
Platform: unix
Platform: linux
Platform: osx
Platform: win32
Classifier: Development Status :: 4 - Beta
Classifier: Intended Audience :: Developers
Classifier: License :: OSI Approved :: MIT License
Classifier: Operating System :: POSIX
Classifier: Operating System :: Microsoft :: Windows
Classifier: Operating System :: MacOS :: MacOS X
Classifier: Topic :: Software Development :: Testing
Classifier: Topic :: Software Development :: Libraries
Classifier: Topic :: Utilities
Classifier: Programming Language :: Python :: 2
Classifier: Programming Language :: Python :: 2.6
Classifier: Programming Language :: Python :: 2.7
Classifier: Programming Language :: Python :: 3
Classifier: Programming Language :: Python :: 3.3
Classifier: Programming Language :: Python :: 3.4
Classifier: Programming Language :: Python :: 3.5


Plugin registration and hook calling for Python
===============================================

This is the plugin manager as used by pytest but stripped
of pytest specific details.

During the 0.x series this plugin does not have much documentation
except extensive docstrings in the pluggy.py module.


@@ -1,9 +0,0 @@
pluggy.py,sha256=u0oG9cv-oLOkNvEBlwnnu8pp1AyxpoERgUO00S3rvpQ,31543
pluggy-0.4.0.dist-info/DESCRIPTION.rst,sha256=ltvjkFd40LW_xShthp6RRVM6OB_uACYDFR3kTpKw7o4,307
pluggy-0.4.0.dist-info/LICENSE.txt,sha256=ruwhUOyV1HgE9F35JVL9BCZ9vMSALx369I4xq9rhpkM,1134
pluggy-0.4.0.dist-info/METADATA,sha256=pe2hbsqKFaLHC6wAQPpFPn0KlpcPfLBe_BnS4O70bfk,1364
pluggy-0.4.0.dist-info/RECORD,,
pluggy-0.4.0.dist-info/WHEEL,sha256=9Z5Xm-eel1bTS7e6ogYiKz0zmPEqDwIypurdHN1hR40,116
pluggy-0.4.0.dist-info/metadata.json,sha256=T3go5L2qOa_-H-HpCZi3EoVKb8sZ3R-fOssbkWo2nvM,1119
pluggy-0.4.0.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7
pluggy-0.4.0.dist-info/INSTALLER,sha256=zuuue4knoyJ-UwPPXg8fezS7VCrXJQrAP7zeNuwvFQg,4

@@ -1,6 +0,0 @@
Wheel-Version: 1.0
Generator: bdist_wheel (0.29.0)
Root-Is-Purelib: true
Tag: py2-none-any
Tag: py3-none-any


@@ -1 +0,0 @@
{"classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"], "extensions": {"python.details": {"contacts": [{"email": "holger at merlinux.eu", "name": "Holger Krekel", "role": "author"}], "document_names": {"description": "DESCRIPTION.rst", "license": "LICENSE.txt"}, "project_urls": {"Home": "https://github.com/pytest-dev/pluggy"}}}, "generator": "bdist_wheel (0.29.0)", "license": "MIT license", "metadata_version": "2.0", "name": "pluggy", "platform": "unix", "summary": "plugin and hook calling mechanisms for python", "version": "0.4.0"}

@@ -1 +0,0 @@
pluggy
@@ -1,802 +0,0 @@
"""
PluginManager, basic initialization and tracing.

pluggy is the cristallized core of plugin management as used
by some 150 plugins for pytest.

Pluggy uses semantic versioning. Breaking changes are only foreseen for
Major releases (incremented X in "X.Y.Z"). If you want to use pluggy in
your project you should thus use a dependency restriction like
"pluggy>=0.1.0,<1.0" to avoid surprises.

pluggy is concerned with hook specification, hook implementations and hook
calling. For any given hook specification a hook call invokes up to N implementations.
A hook implementation can influence its position and type of execution:
if attributed "tryfirst" or "trylast" it will be tried to execute
first or last. However, if attributed "hookwrapper" an implementation
can wrap all calls to non-hookwrapper implementations. A hookwrapper
can thus execute some code ahead and after the execution of other hooks.

Hook specification is done by way of a regular python function where
both the function name and the names of all its arguments are significant.
Each hook implementation function is verified against the original specification
function, including the names of all its arguments. To allow for hook specifications
to evolve over the livetime of a project, hook implementations can
accept less arguments. One can thus add new arguments and semantics to
a hook specification by adding another argument typically without breaking
existing hook implementations.

The chosen approach is meant to let a hook designer think carefuly about
which objects are needed by an extension writer. By contrast, subclass-based
extension mechanisms often expose a lot more state and behaviour than needed,
thus restricting future developments.

Pluggy currently consists of functionality for:

- a way to register new hook specifications. Without a hook
  specification no hook calling can be performed.

- a registry of plugins which contain hook implementation functions. It
  is possible to register plugins for which a hook specification is not yet
  known and validate all hooks when the system is in a more referentially
  consistent state. Setting an "optionalhook" attribution to a hook
  implementation will avoid PluginValidationError's if a specification
  is missing. This allows to have optional integration between plugins.

- a "hook" relay object from which you can launch 1:N calls to
  registered hook implementation functions

- a mechanism for ordering hook implementation functions

- mechanisms for two different type of 1:N calls: "firstresult" for when
  the call should stop when the first implementation returns a non-None result.
  And the other (default) way of guaranteeing that all hook implementations
  will be called and their non-None result collected.

- mechanisms for "historic" extension points such that all newly
  registered functions will receive all hook calls that happened
  before their registration.

- a mechanism for discovering plugin objects which are based on
  setuptools based entry points.

- a simple tracing mechanism, including tracing of plugin calls and
  their arguments.

"""
import sys
import inspect

__version__ = '0.4.0'

__all__ = ["PluginManager", "PluginValidationError", "HookCallError",
           "HookspecMarker", "HookimplMarker"]

_py3 = sys.version_info > (3, 0)


class HookspecMarker:
    """ Decorator helper class for marking functions as hook specifications.

    You can instantiate it with a project_name to get a decorator.
    Calling PluginManager.add_hookspecs later will discover all marked functions
    if the PluginManager uses the same project_name.
    """

    def __init__(self, project_name):
        self.project_name = project_name

    def __call__(self, function=None, firstresult=False, historic=False):
        """ if passed a function, directly sets attributes on the function
        which will make it discoverable to add_hookspecs().  If passed no
        function, returns a decorator which can be applied to a function
        later using the attributes supplied.

        If firstresult is True the 1:N hook call (N being the number of registered
        hook implementation functions) will stop at I<=N when the I'th function
        returns a non-None result.

        If historic is True calls to a hook will be memorized and replayed
        on later registered plugins.

        """
        def setattr_hookspec_opts(func):
            if historic and firstresult:
                raise ValueError("cannot have a historic firstresult hook")
            setattr(func, self.project_name + "_spec",
                    dict(firstresult=firstresult, historic=historic))
            return func

        if function is not None:
            return setattr_hookspec_opts(function)
        else:
            return setattr_hookspec_opts


class HookimplMarker:
    """ Decorator helper class for marking functions as hook implementations.

    You can instantiate with a project_name to get a decorator.
    Calling PluginManager.register later will discover all marked functions
    if the PluginManager uses the same project_name.
    """
    def __init__(self, project_name):
        self.project_name = project_name

    def __call__(self, function=None, hookwrapper=False, optionalhook=False,
                 tryfirst=False, trylast=False):

        """ if passed a function, directly sets attributes on the function
        which will make it discoverable to register().  If passed no function,
        returns a decorator which can be applied to a function later using
        the attributes supplied.

        If optionalhook is True a missing matching hook specification will not result
        in an error (by default it is an error if no matching spec is found).

        If tryfirst is True this hook implementation will run as early as possible
        in the chain of N hook implementations for a specfication.

        If trylast is True this hook implementation will run as late as possible
        in the chain of N hook implementations.

        If hookwrapper is True the hook implementations needs to execute exactly
        one "yield".  The code before the yield is run early before any non-hookwrapper
        function is run.  The code after the yield is run after all non-hookwrapper
        function have run.  The yield receives an ``_CallOutcome`` object representing
        the exception or result outcome of the inner calls (including other hookwrapper
        calls).

        """
        def setattr_hookimpl_opts(func):
            setattr(func, self.project_name + "_impl",
                    dict(hookwrapper=hookwrapper, optionalhook=optionalhook,
                         tryfirst=tryfirst, trylast=trylast))
            return func

        if function is None:
            return setattr_hookimpl_opts
        else:
            return setattr_hookimpl_opts(function)


def normalize_hookimpl_opts(opts):
    opts.setdefault("tryfirst", False)
    opts.setdefault("trylast", False)
    opts.setdefault("hookwrapper", False)
    opts.setdefault("optionalhook", False)


class _TagTracer:
    def __init__(self):
        self._tag2proc = {}
        self.writer = None
        self.indent = 0

    def get(self, name):
        return _TagTracerSub(self, (name,))

    def format_message(self, tags, args):
        if isinstance(args[-1], dict):
            extra = args[-1]
            args = args[:-1]
        else:
            extra = {}

        content = " ".join(map(str, args))
        indent = "  " * self.indent

        lines = [
            "%s%s [%s]\n" % (indent, content, ":".join(tags))
        ]

        for name, value in extra.items():
            lines.append("%s    %s: %s\n" % (indent, name, value))
        return lines

    def processmessage(self, tags, args):
        if self.writer is not None and args:
            lines = self.format_message(tags, args)
            self.writer(''.join(lines))
        try:
            self._tag2proc[tags](tags, args)
        except KeyError:
            pass

    def setwriter(self, writer):
        self.writer = writer

    def setprocessor(self, tags, processor):
        if isinstance(tags, str):
            tags = tuple(tags.split(":"))
        else:
            assert isinstance(tags, tuple)
        self._tag2proc[tags] = processor


class _TagTracerSub:
    def __init__(self, root, tags):
        self.root = root
        self.tags = tags

    def __call__(self, *args):
        self.root.processmessage(self.tags, args)

    def setmyprocessor(self, processor):
        self.root.setprocessor(self.tags, processor)

    def get(self, name):
        return self.__class__(self.root, self.tags + (name,))


def _raise_wrapfail(wrap_controller, msg):
    co = wrap_controller.gi_code
    raise RuntimeError("wrap_controller at %r %s:%d %s" %
                       (co.co_name, co.co_filename, co.co_firstlineno, msg))


def _wrapped_call(wrap_controller, func):
    """ Wrap calling to a function with a generator which needs to yield
    exactly once.  The yield point will trigger calling the wrapped function
    and return its _CallOutcome to the yield point.  The generator then needs
    to finish (raise StopIteration) in order for the wrapped call to complete.
    """
    try:
        next(wrap_controller)   # first yield
    except StopIteration:
        _raise_wrapfail(wrap_controller, "did not yield")
    call_outcome = _CallOutcome(func)
    try:
        wrap_controller.send(call_outcome)
        _raise_wrapfail(wrap_controller, "has second yield")
    except StopIteration:
        pass
    return call_outcome.get_result()


class _CallOutcome:
    """ Outcome of a function call, either an exception or a proper result.
    Calling the ``get_result`` method will return the result or reraise
    the exception raised when the function was called. """
    excinfo = None

    def __init__(self, func):
        try:
            self.result = func()
        except BaseException:
            self.excinfo = sys.exc_info()

    def force_result(self, result):
        self.result = result
        self.excinfo = None

    def get_result(self):
        if self.excinfo is None:
            return self.result
        else:
            ex = self.excinfo
            if _py3:
                raise ex[1].with_traceback(ex[2])
            _reraise(*ex)  # noqa

if not _py3:
    exec("""
def _reraise(cls, val, tb):
    raise cls, val, tb
""")


class _TracedHookExecution:
    def __init__(self, pluginmanager, before, after):
        self.pluginmanager = pluginmanager
        self.before = before
        self.after = after
        self.oldcall = pluginmanager._inner_hookexec
        assert not isinstance(self.oldcall, _TracedHookExecution)
        self.pluginmanager._inner_hookexec = self

    def __call__(self, hook, hook_impls, kwargs):
        self.before(hook.name, hook_impls, kwargs)
        outcome = _CallOutcome(lambda: self.oldcall(hook, hook_impls, kwargs))
        self.after(outcome, hook.name, hook_impls, kwargs)
        return outcome.get_result()

    def undo(self):
        self.pluginmanager._inner_hookexec = self.oldcall


class PluginManager(object):
    """ Core Pluginmanager class which manages registration
    of plugin objects and 1:N hook calling.

    You can register new hooks by calling ``add_hookspec(module_or_class)``.
    You can register plugin objects (which contain hooks) by calling
    ``register(plugin)``.  The Pluginmanager is initialized with a
    prefix that is searched for in the names of the dict of registered
    plugin objects.  An optional excludefunc allows to blacklist names which
    are not considered as hooks despite a matching prefix.

    For debugging purposes you can call ``enable_tracing()``
    which will subsequently send debug information to the trace helper.
    """

    def __init__(self, project_name, implprefix=None):
        """ if implprefix is given implementation functions
        will be recognized if their name matches the implprefix. """
        self.project_name = project_name
        self._name2plugin = {}
        self._plugin2hookcallers = {}
        self._plugin_distinfo = []
        self.trace = _TagTracer().get("pluginmanage")
        self.hook = _HookRelay(self.trace.root.get("hook"))
        self._implprefix = implprefix
        self._inner_hookexec = lambda hook, methods, kwargs: \
            _MultiCall(methods, kwargs, hook.spec_opts).execute()

    def _hookexec(self, hook, methods, kwargs):
        # called from all hookcaller instances.
        # enable_tracing will set its own wrapping function at self._inner_hookexec
        return self._inner_hookexec(hook, methods, kwargs)

    def register(self, plugin, name=None):
        """ Register a plugin and return its canonical name or None if the name
        is blocked from registering.  Raise a ValueError if the plugin is already
        registered. """
        plugin_name = name or self.get_canonical_name(plugin)

        if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
            if self._name2plugin.get(plugin_name, -1) is None:
                return  # blocked plugin, return None to indicate no registration
            raise ValueError("Plugin already registered: %s=%s\n%s" %
                             (plugin_name, plugin, self._name2plugin))

        # XXX if an error happens we should make sure no state has been
        # changed at point of return
        self._name2plugin[plugin_name] = plugin

        # register matching hook implementations of the plugin
        self._plugin2hookcallers[plugin] = hookcallers = []
        for name in dir(plugin):
            hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
            if hookimpl_opts is not None:
                normalize_hookimpl_opts(hookimpl_opts)
                method = getattr(plugin, name)
                hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
                hook = getattr(self.hook, name, None)
                if hook is None:
                    hook = _HookCaller(name, self._hookexec)
                    setattr(self.hook, name, hook)
                elif hook.has_spec():
                    self._verify_hook(hook, hookimpl)
                    hook._maybe_apply_history(hookimpl)
                hook._add_hookimpl(hookimpl)
                hookcallers.append(hook)
        return plugin_name

    def parse_hookimpl_opts(self, plugin, name):
        method = getattr(plugin, name)
        try:
            res = getattr(method, self.project_name + "_impl", None)
        except Exception:
            res = {}
        if res is not None and not isinstance(res, dict):
            # false positive
            res = None
        elif res is None and self._implprefix and name.startswith(self._implprefix):
            res = {}
        return res

    def unregister(self, plugin=None, name=None):
        """ unregister a plugin object and all its contained hook implementations
        from internal data structures. """
        if name is None:
            assert plugin is not None, "one of name or plugin needs to be specified"
            name = self.get_name(plugin)

        if plugin is None:
            plugin = self.get_plugin(name)

        # if self._name2plugin[name] == None registration was blocked: ignore
        if self._name2plugin.get(name):
            del self._name2plugin[name]

        for hookcaller in self._plugin2hookcallers.pop(plugin, []):
            hookcaller._remove_plugin(plugin)

        return plugin

    def set_blocked(self, name):
        """ block registrations of the given name, unregister if already registered. """
        self.unregister(name=name)
        self._name2plugin[name] = None

    def is_blocked(self, name):
        """ return True if the name blogs registering plugins of that name. """
        return name in self._name2plugin and self._name2plugin[name] is None

    def add_hookspecs(self, module_or_class):
        """ add new hook specifications defined in the given module_or_class.
        Functions are recognized if they have been decorated accordingly. """
        names = []
        for name in dir(module_or_class):
            spec_opts = self.parse_hookspec_opts(module_or_class, name)
            if spec_opts is not None:
                hc = getattr(self.hook, name, None)
                if hc is None:
                    hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
                    setattr(self.hook, name, hc)
                else:
                    # plugins registered this hook without knowing the spec
                    hc.set_specification(module_or_class, spec_opts)
                    for hookfunction in (hc._wrappers + hc._nonwrappers):
                        self._verify_hook(hc, hookfunction)
                names.append(name)

        if not names:
            raise ValueError("did not find any %r hooks in %r" %
                             (self.project_name, module_or_class))

    def parse_hookspec_opts(self, module_or_class, name):
        method = getattr(module_or_class, name)
        return getattr(method, self.project_name + "_spec", None)

    def get_plugins(self):
        """ return the set of registered plugins. """
        return set(self._plugin2hookcallers)

    def is_registered(self, plugin):
        """ Return True if the plugin is already registered. """
        return plugin in self._plugin2hookcallers

    def get_canonical_name(self, plugin):
        """ Return canonical name for a plugin object. Note that a plugin
        may be registered under a different name which was specified
        by the caller of register(plugin, name). To obtain the name
        of an registered plugin use ``get_name(plugin)`` instead."""
        return getattr(plugin, "__name__", None) or str(id(plugin))

    def get_plugin(self, name):
        """ Return a plugin or None for the given name. """
        return self._name2plugin.get(name)

    def has_plugin(self, name):
        """ Return True if a plugin with the given name is registered. """
        return self.get_plugin(name) is not None

    def get_name(self, plugin):
        """ Return name for registered plugin or None if not registered. """
        for name, val in self._name2plugin.items():
            if plugin == val:
                return name

    def _verify_hook(self, hook, hookimpl):
        if hook.is_historic() and hookimpl.hookwrapper:
            raise PluginValidationError(
                "Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" %
                (hookimpl.plugin_name, hook.name))

        for arg in hookimpl.argnames:
            if arg not in hook.argnames:
                raise PluginValidationError(
                    "Plugin %r\nhook %r\nargument %r not available\n"
                    "plugin definition: %s\n"
                    "available hookargs: %s" %
                    (hookimpl.plugin_name, hook.name, arg,
                     _formatdef(hookimpl.function), ", ".join(hook.argnames)))

    def check_pending(self):
        """ Verify that all hooks which have not been verified against
        a hook specification are optional, otherwise raise PluginValidationError"""
        for name in self.hook.__dict__:
            if name[0] != "_":
                hook = getattr(self.hook, name)
                if not hook.has_spec():
                    for hookimpl in (hook._wrappers + hook._nonwrappers):
                        if not hookimpl.optionalhook:
                            raise PluginValidationError(
                                "unknown hook %r in plugin %r" %
                                (name, hookimpl.plugin))

    def load_setuptools_entrypoints(self, entrypoint_name):
        """ Load modules from querying the specified setuptools entrypoint name.
        Return the number of loaded plugins. """
        from pkg_resources import (iter_entry_points, DistributionNotFound,
                                   VersionConflict)
        for ep in iter_entry_points(entrypoint_name):
            # is the plugin registered or blocked?
            if self.get_plugin(ep.name) or self.is_blocked(ep.name):
                continue
            try:
                plugin = ep.load()
            except DistributionNotFound:
                continue
            except VersionConflict as e:
                raise PluginValidationError(
                    "Plugin %r could not be loaded: %s!" % (ep.name, e))
            self.register(plugin, name=ep.name)
            self._plugin_distinfo.append((plugin, ep.dist))
        return len(self._plugin_distinfo)

    def list_plugin_distinfo(self):
        """ return list of distinfo/plugin tuples for all setuptools registered
        plugins. """
        return list(self._plugin_distinfo)

    def list_name_plugin(self):
        """ return list of name/plugin pairs. """
        return list(self._name2plugin.items())

    def get_hookcallers(self, plugin):
        """ get all hook callers for the specified plugin. """
        return self._plugin2hookcallers.get(plugin)

    def add_hookcall_monitoring(self, before, after):
        """ add before/after tracing functions for all hooks
        and return an undo function which, when called,
        will remove the added tracers.

        ``before(hook_name, hook_impls, kwargs)`` will be called ahead
        of all hook calls and receive a hookcaller instance, a list
        of HookImpl instances and the keyword arguments for the hook call.

        ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
        same arguments as ``before`` but also a :py:class:`_CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` object
        which represents the result of the overall hook call.
        """
        return _TracedHookExecution(self, before, after).undo

    def enable_tracing(self):
        """ enable tracing of hook calls and return an undo function. """
        hooktrace = self.hook._trace

        def before(hook_name, methods, kwargs):
            hooktrace.root.indent += 1
            hooktrace(hook_name, kwargs)

        def after(outcome, hook_name, methods, kwargs):
            if outcome.excinfo is None:
                hooktrace("finish", hook_name, "-->", outcome.result)
            hooktrace.root.indent -= 1

        return self.add_hookcall_monitoring(before, after)

    def subset_hook_caller(self, name, remove_plugins):
        """ Return a new _HookCaller instance for the named method
        which manages calls to all registered plugins except the
        ones from remove_plugins. """
        orig = getattr(self.hook, name)
        plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
        if plugins_to_remove:
            hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class,
                             orig.spec_opts)
            for hookimpl in (orig._wrappers + orig._nonwrappers):
                plugin = hookimpl.plugin
                if plugin not in plugins_to_remove:
                    hc._add_hookimpl(hookimpl)
                    # we also keep track of this hook caller so it
                    # gets properly removed on plugin unregistration
                    self._plugin2hookcallers.setdefault(plugin, []).append(hc)
            return hc
        return orig


class _MultiCall:
    """ execute a call into multiple python functions/methods. """

    # XXX note that the __multicall__ argument is supported only
    # for pytest compatibility reasons.  It was never officially
    # supported there and is explicitely deprecated since 2.8
    # so we can remove it soon, allowing to avoid the below recursion
    # in execute() and simplify/speed up the execute loop.

    def __init__(self, hook_impls, kwargs, specopts={}):
        self.hook_impls = hook_impls
        self.kwargs = kwargs
        self.kwargs["__multicall__"] = self
        self.specopts = specopts

    def execute(self):
        all_kwargs = self.kwargs
        self.results = results = []
        firstresult = self.specopts.get("firstresult")

        while self.hook_impls:
            hook_impl = self.hook_impls.pop()
            try:
                args = [all_kwargs[argname] for argname in hook_impl.argnames]
            except KeyError:
                for argname in hook_impl.argnames:
                    if argname not in all_kwargs:
                        raise HookCallError(
                            "hook call must provide argument %r" % (argname,))
            if hook_impl.hookwrapper:
                return _wrapped_call(hook_impl.function(*args), self.execute)
            res = hook_impl.function(*args)
            if res is not None:
                if firstresult:
                    return res
                results.append(res)

        if not firstresult:
            return results

    def __repr__(self):
        status = "%d meths" % (len(self.hook_impls),)
        if hasattr(self, "results"):
            status = ("%d results, " % len(self.results)) + status
        return "<_MultiCall %s, kwargs=%r>" % (status, self.kwargs)


def varnames(func, startindex=None):
    """ return argument name tuple for a function, method, class or callable.

    In case of a class, its "__init__" method is considered.
    For methods the "self" parameter is not included unless you are passing
    an unbound method with Python3 (which has no supports for unbound methods)
    """
    cache = getattr(func, "__dict__", {})
    try:
        return cache["_varnames"]
    except KeyError:
        pass
    if inspect.isclass(func):
        try:
            func = func.__init__
        except AttributeError:
            return ()
        startindex = 1
    else:
        if not inspect.isfunction(func) and not inspect.ismethod(func):
            try:
                func = getattr(func, '__call__', func)
            except Exception:
                return ()
        if startindex is None:
            startindex = int(inspect.ismethod(func))

    try:
        rawcode = func.__code__
    except AttributeError:
        return ()
    try:
        x = rawcode.co_varnames[startindex:rawcode.co_argcount]
    except AttributeError:
        x = ()
    else:
        defaults = func.__defaults__
        if defaults:
            x = x[:-len(defaults)]
    try:
        cache["_varnames"] = x
    except TypeError:
        pass
    return x


class _HookRelay:
    """ hook holder object for performing 1:N hook calls where N is the number
    of registered plugins.

    """

    def __init__(self, trace):
        self._trace = trace


class _HookCaller(object):
    def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None):
        self.name = name
        self._wrappers = []
        self._nonwrappers = []
        self._hookexec = hook_execute
        if specmodule_or_class is not None:
            assert spec_opts is not None
            self.set_specification(specmodule_or_class, spec_opts)

    def has_spec(self):
        return hasattr(self, "_specmodule_or_class")

    def set_specification(self, specmodule_or_class, spec_opts):
        assert not self.has_spec()
        self._specmodule_or_class = specmodule_or_class
        specfunc = getattr(specmodule_or_class, self.name)
        argnames = varnames(specfunc, startindex=inspect.isclass(specmodule_or_class))
        assert "self" not in argnames  # sanity check
        self.argnames = ["__multicall__"] + list(argnames)
        self.spec_opts = spec_opts
        if spec_opts.get("historic"):
            self._call_history = []

    def is_historic(self):
        return hasattr(self, "_call_history")

    def _remove_plugin(self, plugin):
        def remove(wrappers):
            for i, method in enumerate(wrappers):
                if method.plugin == plugin:
                    del wrappers[i]
                    return True
        if remove(self._wrappers) is None:
            if remove(self._nonwrappers) is None:
                raise ValueError("plugin %r not found" % (plugin,))

    def _add_hookimpl(self, hookimpl):
        if hookimpl.hookwrapper:
            methods = self._wrappers
        else:
            methods = self._nonwrappers

        if hookimpl.trylast:
            methods.insert(0, hookimpl)
        elif hookimpl.tryfirst:
            methods.append(hookimpl)
        else:
            # find last non-tryfirst method
            i = len(methods) - 1
            while i >= 0 and methods[i].tryfirst:
                i -= 1
            methods.insert(i + 1, hookimpl)

    def __repr__(self):
        return "<_HookCaller %r>" % (self.name,)

    def __call__(self, **kwargs):
        assert not self.is_historic()
        return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)

    def call_historic(self, proc=None, kwargs=None):
        self._call_history.append((kwargs or {}, proc))
        # historizing hooks don't return results
        self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)

    def call_extra(self, methods, kwargs):
        """ Call the hook with some additional temporarily participating
        methods using the specified kwargs as call parameters. """
        old = list(self._nonwrappers), list(self._wrappers)
        for method in methods:
            opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
            hookimpl = HookImpl(None, "<temp>", method, opts)
            self._add_hookimpl(hookimpl)
        try:
            return self(**kwargs)
        finally:
            self._nonwrappers, self._wrappers = old

    def _maybe_apply_history(self, method):
        if self.is_historic():
            for kwargs, proc in self._call_history:
                res = self._hookexec(self, [method], kwargs)
                if res and proc is not None:
                    proc(res[0])


class HookImpl:
    def __init__(self, plugin, plugin_name, function, hook_impl_opts):
        self.function = function
        self.argnames = varnames(self.function)
        self.plugin = plugin
        self.opts = hook_impl_opts
        self.plugin_name = plugin_name
        self.__dict__.update(hook_impl_opts)


class PluginValidationError(Exception):
    """ plugin failed validation. """


class HookCallError(Exception):
    """ Hook was called wrongly. """


if hasattr(inspect, 'signature'):
    def _formatdef(func):
        return "%s%s" % (
            func.__name__,
            str(inspect.signature(func))
        )
else:
    def _formatdef(func):
        return "%s%s" % (
            func.__name__,
            inspect.formatargspec(*inspect.getargspec(func))
        )
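
The module removed above is now consumed from PyPI instead; the hookspec/hookimpl workflow its docstrings describe looks roughly like this (a sketch against standalone pluggy, with ``myproject``/``myhook`` as illustrative names)::

    from pluggy import HookimplMarker, HookspecMarker, PluginManager

    hookspec = HookspecMarker("myproject")
    hookimpl = HookimplMarker("myproject")

    class Spec(object):
        @hookspec
        def myhook(self, arg):
            "hook specification: the name and argument names are significant"

    class Plugin(object):
        @hookimpl
        def myhook(self, arg):
            return arg + 1

    pm = PluginManager("myproject")
    pm.add_hookspecs(Spec)
    pm.register(Plugin())
    # 1:N call: all non-None results are collected into a list
    print(pm.hook.myhook(arg=41))  # [42]
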
@@ -72,8 +72,8 @@ def catch_warnings_for_item(item):
        unicode_warning = False

        if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args):
-            new_args = [compat.safe_str(m) for m in warn_msg.args]
-            unicode_warning = warn_msg.args != new_args
+            new_args = [compat.ascii_escaped(m) for m in warn_msg.args]
+            unicode_warning = list(warn_msg.args) != new_args
            warn_msg.args = new_args

        msg = warnings.formatwarning(
@@ -10,9 +10,7 @@ environment:
    - TOXENV: "coveralls"
    # note: please use "tox --listenvs" to populate the build matrix below
    - TOXENV: "linting"
-    - TOXENV: "py26"
    - TOXENV: "py27"
-    - TOXENV: "py33"
    - TOXENV: "py34"
    - TOXENV: "py35"
    - TOXENV: "py36"

@@ -21,10 +19,12 @@ environment:
    - TOXENV: "py27-xdist"
    - TOXENV: "py27-trial"
    - TOXENV: "py27-numpy"
+    - TOXENV: "py27-pluggymaster"
    - TOXENV: "py36-pexpect"
    - TOXENV: "py36-xdist"
    - TOXENV: "py36-trial"
    - TOXENV: "py36-numpy"
+    - TOXENV: "py36-pluggymaster"
    - TOXENV: "py27-nobyte"
    - TOXENV: "doctesting"
    - TOXENV: "py35-freeze"
@@ -1 +0,0 @@
Fix issue in assertion rewriting which could lead it to rewrite modules which should not be rewritten.

@@ -1 +0,0 @@
Handle marks without description in ``pytest.ini``.
@@ -13,8 +13,6 @@ PAPEROPT_letter = -D latex_paper_size=letter
ALLSPHINXOPTS   = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) .

REGENDOC_ARGS := \
-	--normalize "/={8,} (.*) ={8,}/======= \1 ========/" \
-	--normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \
	--normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \
	--normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \
	--normalize "@pytest-(\d+)\\.[^ ,]+@pytest-\1.x.y@" \
@@ -6,6 +6,7 @@ Release announcements
   :maxdepth: 2


+   release-3.3.0
   release-3.2.5
   release-3.2.4
   release-3.2.3
@@ -0,0 +1,50 @@
pytest-3.3.0
=======================================

The pytest team is proud to announce the 3.3.0 release!

pytest is a mature Python testing tool with more than 1600 tests
against itself, passing on many different interpreters and platforms.

This release contains a number of bug fixes and improvements, so users are encouraged
to take a look at the CHANGELOG:

    http://doc.pytest.org/en/latest/changelog.html

For complete documentation, please visit:

    http://docs.pytest.org

As usual, you can upgrade from pypi via:

    pip install -U pytest

Thanks to all who contributed to this release, among them:

* Anthony Sottile
* Bruno Oliveira
* Ceridwen
* Daniel Hahler
* Dirk Thomas
* Dmitry Malinovsky
* Florian Bruhin
* George Y. Kussumoto
* Hugo
* Jesús Espino
* Joan Massich
* Ofir
* OfirOshir
* Ronny Pfannschmidt
* Samuel Dion-Girardeau
* Srinivas Reddy Thatiparthy
* Sviatoslav Abakumov
* Tarcisio Fischer
* Thomas Hisch
* Tyler Goodlet
* hugovk
* je
* prokaktus


Happy testing,
The Pytest Development Team
@@ -25,15 +25,15 @@ to assert that your function returns a certain value. If this assertion fails
you will see the return value of the function call::

    $ pytest test_assert1.py
-    ======= test session starts ========
+    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 item

-    test_assert1.py F
+    test_assert1.py F                                                    [100%]

-    ======= FAILURES ========
-    _______ test_function ________
+    ================================= FAILURES =================================
+    ______________________________ test_function _______________________________

        def test_function():
    >       assert f() == 4

@@ -41,7 +41,7 @@ you will see the return value of the function call::
    E        +  where 3 = f()

    test_assert1.py:5: AssertionError
-    ======= 1 failed in 0.12 seconds ========
+    ========================= 1 failed in 0.12 seconds =========================

``pytest`` has support for showing the values of the most common subexpressions
including calls, attributes, comparisons, and binary and unary

@@ -168,15 +168,15 @@ when it encounters comparisons. For example::
if you run this module::

    $ pytest test_assert2.py
-    ======= test session starts ========
+    =========================== test session starts ============================
    platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
    rootdir: $REGENDOC_TMPDIR, inifile:
    collected 1 item

-    test_assert2.py F
+    test_assert2.py F                                                    [100%]

-    ======= FAILURES ========
-    _______ test_set_comparison ________
+    ================================= FAILURES =================================
+    ___________________________ test_set_comparison ____________________________

        def test_set_comparison():
            set1 = set("1308")

@@ -190,7 +190,7 @@ if you run this module::
    E         Use -v to get the full diff

    test_assert2.py:5: AssertionError
-    ======= 1 failed in 0.12 seconds ========
+    ========================= 1 failed in 0.12 seconds =========================

Special comparisons are done for a number of cases:

@@ -238,9 +238,9 @@ you can run the test module and get the custom output defined in
the conftest file::

    $ pytest -q test_foocompare.py
-    F
-    ======= FAILURES ========
-    _______ test_compare ________
+    F                                                                    [100%]
+    ================================= FAILURES =================================
+    _______________________________ test_compare _______________________________

        def test_compare():
            f1 = Foo(1)
@@ -96,3 +96,10 @@ Past Releases
  were never documented and a leftover from a pre-virtualenv era. These entry
  points also created broken entry points in wheels, so removing them also
  removes a source of confusion for users.
+
+
+3.3
+^^^
+
+* Dropped support for EOL Python 2.6 and 3.3.
@@ -91,11 +91,23 @@ You can ask for available builtin or project-custom
    capsys
        Enable capturing of writes to sys.stdout/sys.stderr and make
        captured output available via ``capsys.readouterr()`` method calls
-        which return a ``(out, err)`` tuple.
+        which return a ``(out, err)`` tuple.  ``out`` and ``err`` will be ``text``
+        objects.
+    capsysbinary
+        Enable capturing of writes to sys.stdout/sys.stderr and make
+        captured output available via ``capsys.readouterr()`` method calls
+        which return a ``(out, err)`` tuple.  ``out`` and ``err`` will be ``bytes``
+        objects.
    capfd
        Enable capturing of writes to file descriptors 1 and 2 and make
        captured output available via ``capfd.readouterr()`` method calls
-        which return a ``(out, err)`` tuple.
+        which return a ``(out, err)`` tuple.  ``out`` and ``err`` will be ``text``
+        objects.
+    capfdbinary
+        Enable capturing of write to file descriptors 1 and 2 and make
+        captured output available via ``capfdbinary.readouterr`` method calls
+        which return a ``(out, err)`` tuple.  ``out`` and ``err`` will be
+        ``bytes`` objects.
    doctest_namespace
        Inject names into the doctest namespace.
    pytestconfig

@@ -104,6 +116,14 @@ You can ask for available builtin or project-custom
        Add extra xml properties to the tag for the calling test.
        The fixture is callable with ``(name, value)``, with value being automatically
        xml-encoded.
+    caplog
+        Access and control log capturing.
+
+        Captured logs are available through the following methods::
+
+        * caplog.text()          -> string containing formatted log output
+        * caplog.records()       -> list of logging.LogRecord instances
+        * caplog.record_tuples() -> list of (logger_name, level, message) tuples
    monkeypatch
        The returned ``monkeypatch`` fixture provides these
        helper methods to modify objects, dictionaries or os.environ::
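
A minimal sketch of the new ``caplog`` fixture documented above (the help text renders the accessors as calls; later releases expose ``text``/``records``/``record_tuples`` as properties, which is the form assumed here)::

    import logging

    def test_warning_is_recorded(caplog):
        logging.getLogger().warning("heads up")
        # each tuple is (logger_name, level, message)
        assert ("root", logging.WARNING, "heads up") in caplog.record_tuples
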
@ -46,9 +46,9 @@ First, let's create 50 test invocations of which only 2 fail::

If you run this for the first time you will see two failures::

$ pytest -q
.................F.......F........................
======= FAILURES ========
_______ test_num[17] ________
.................F.......F........................ [100%]
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________

i = 17

@ -59,7 +59,7 @@ If you run this for the first time you will see two failures::

E Failed: bad luck

test_50.py:6: Failed
_______ test_num[25] ________
_______________________________ test_num[25] _______________________________

i = 25

@ -75,16 +75,16 @@ If you run this for the first time you will see two failures::

If you then run it with ``--lf``::

$ pytest --lf
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 50 items
run-last-failure: rerun previous 2 failures

test_50.py FF
test_50.py FF [100%]

======= FAILURES ========
_______ test_num[17] ________
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________

i = 17

@ -95,7 +95,7 @@ If you then run it with ``--lf``::

E Failed: bad luck

test_50.py:6: Failed
_______ test_num[25] ________
_______________________________ test_num[25] _______________________________

i = 25

@ -106,8 +106,8 @@ If you then run it with ``--lf``::

E Failed: bad luck

test_50.py:6: Failed
======= 48 tests deselected ========
======= 2 failed, 48 deselected in 0.12 seconds ========
=========================== 48 tests deselected ============================
================= 2 failed, 48 deselected in 0.12 seconds ==================

You have run only the two failing tests from the last run, while 48 tests have
not been run ("deselected").

@ -117,16 +117,16 @@ previous failures will be executed first (as can be seen from the series

of ``FF`` and dots)::

$ pytest --ff
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 50 items
run-last-failure: rerun previous 2 failures first

test_50.py FF................................................
test_50.py FF................................................ [100%]

======= FAILURES ========
_______ test_num[17] ________
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________

i = 17

@ -137,7 +137,7 @@ of ``FF`` and dots)::

E Failed: bad luck

test_50.py:6: Failed
_______ test_num[25] ________
_______________________________ test_num[25] _______________________________

i = 25

@ -148,7 +148,7 @@ of ``FF`` and dots)::

E Failed: bad luck

test_50.py:6: Failed
======= 2 failed, 48 passed in 0.12 seconds ========
=================== 2 failed, 48 passed in 0.12 seconds ====================

.. _`config.cache`:

@ -182,9 +182,9 @@ If you run this command once, it will take a while because

of the sleep::

$ pytest -q
F
======= FAILURES ========
_______ test_function ________
F [100%]
================================= FAILURES =================================
______________________________ test_function _______________________________

mydata = 42

@ -199,9 +199,9 @@ If you run it a second time the value will be retrieved from

the cache and this will be quick::

$ pytest -q
F
======= FAILURES ========
_______ test_function ________
F [100%]
================================= FAILURES =================================
______________________________ test_function _______________________________

mydata = 42
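The pattern behind these two runs is roughly the following; this is a
sketch, assuming an expensive ``compute_expensive_data()`` helper, and the
final assert deliberately fails just as in the output above:

.. code-block:: python

    # content of test_caching.py -- hypothetical sketch
    import time

    import pytest


    def compute_expensive_data():
        time.sleep(5)  # stands in for a genuinely slow computation
        return 42


    @pytest.fixture
    def mydata(request):
        # cache.get returns the default when the key was never set
        val = request.config.cache.get("example/value", None)
        if val is None:
            val = compute_expensive_data()
            # persist the value for the next pytest invocation
            request.config.cache.set("example/value", val)
        return val


    def test_function(mydata):
        assert mydata == 23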
@ -222,7 +222,7 @@ You can always peek at the content of the cache using the

``--cache-show`` command line option::

$ py.test --cache-show
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
cachedir: $REGENDOC_TMPDIR/.cache

@ -232,7 +232,7 @@ You can always peek at the content of the cache using the

example/value contains:
42

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

Clearing Cache content
-------------------------------

@ -63,15 +63,15 @@ and running this module will show you precisely the output

of the failing function and hide the other one::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

test_module.py .F
test_module.py .F [100%]

======= FAILURES ========
_______ test_func2 ________
================================= FAILURES =================================
________________________________ test_func2 ________________________________

def test_func2():
> assert False

@ -80,14 +80,14 @@ of the failing function and hide the other one::

test_module.py:9: AssertionError
-------------------------- Captured stdout setup ---------------------------
setting up <function test_func2 at 0xdeadbeef>
======= 1 failed, 1 passed in 0.12 seconds ========
==================== 1 failed, 1 passed in 0.12 seconds ====================

Accessing captured output from a test function
---------------------------------------------------

The ``capsys`` and ``capfd`` fixtures allow to access stdout/stderr
output created during test execution. Here is an example test function
that performs some output related checks:
The ``capsys``, ``capsysbinary``, ``capfd``, and ``capfdbinary`` fixtures
allow access to stdout/stderr output created during test execution. Here is
an example test function that performs some output related checks:

.. code-block:: python
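
    # a sketch of such a check using the built-in capsys fixture;
    # the printed strings are arbitrary
    import sys


    def test_output(capsys):
        print("hello")
        sys.stderr.write("world\n")
        out, err = capsys.readouterr()  # snapshot and reset captured output
        assert out == "hello\n"
        assert err == "world\n"
        print("next")
        out, err = capsys.readouterr()  # only output since the previous call
        assert out == "next\n"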
@ -110,11 +110,26 @@ output streams and also interacts well with pytest's

own per-test capturing.

If you want to capture on filedescriptor level you can use
the ``capfd`` function argument which offers the exact
the ``capfd`` fixture which offers the exact
same interface but allows to also capture output from
libraries or subprocesses that directly write to operating
system level output streams (FD1 and FD2).

.. versionadded:: 3.3

If the code under test writes non-textual data, you can capture this using
the ``capsysbinary`` fixture which instead returns ``bytes`` from
the ``readouterr`` method. The ``capsysbinary`` fixture is currently only
available in Python 3.

.. versionadded:: 3.3

If the code under test writes non-textual data, you can capture this using
the ``capfdbinary`` fixture which instead returns ``bytes`` from
the ``readouterr`` method. The ``capfdbinary`` fixture operates on the
filedescriptor level.
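A sketch of binary capture with ``capfdbinary``; the payload bytes are
arbitrary and the test writes straight to file descriptor 1:

.. code-block:: python

    # content of test_binary_output.py -- hypothetical example
    import os


    def test_binary(capfdbinary):
        # write non-textual data directly to the stdout file descriptor
        os.write(1, b"\x00\xff\x10")
        out, err = capfdbinary.readouterr()
        assert out == b"\x00\xff\x10"
        assert err == b""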
.. versionadded:: 3.0

@ -30,6 +30,7 @@ Full pytest documentation

xunit_setup
plugins
writing_plugins
logging

goodpractices
pythonpath

@ -312,3 +312,22 @@ Builtin configuration file options

relative to :ref:`rootdir <rootdir>`. Additionally the path may contain environment
variables, which will be expanded. For more information about the cache plugin
please refer to :ref:`cache_provider`.

.. confval:: console_output_style

.. versionadded:: 3.3

Sets the console output style while running tests:

* ``classic``: classic pytest output.
* ``progress``: like classic pytest output, but with a progress indicator.

The default is ``progress``, but you can fall back to ``classic`` if you prefer or
the new mode is causing unexpected problems:

.. code-block:: ini

# content of pytest.ini
[pytest]
console_output_style = classic

@ -61,14 +61,14 @@ and another like this::

then you can just invoke ``pytest`` without command line options::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 1 item

mymodule.py .
mymodule.py . [100%]

======= 1 passed in 0.12 seconds ========
========================= 1 passed in 0.12 seconds =========================

It is possible to use fixtures using the ``getfixture`` helper::
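
    # a hypothetical sketch -- tmpdir is just an arbitrary built-in fixture
    # chosen for illustration; any declared fixture name works the same way
    >>> tmp = getfixture('tmpdir')
    >>> tmp.ensure('hello.txt').check()
    True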
@ -30,32 +30,32 @@ You can "mark" a test function with custom metadata like this::

You can then restrict a test run to only run tests marked with ``webtest``::

$ pytest -v -m webtest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items

test_server.py::test_send_http PASSED
test_server.py::test_send_http PASSED [100%]

======= 3 tests deselected ========
======= 1 passed, 3 deselected in 0.12 seconds ========
============================ 3 tests deselected ============================
================== 1 passed, 3 deselected in 0.12 seconds ==================
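The module driving these runs is presumably shaped like this; a sketch in
which only the names visible in the output above are taken from the source:

.. code-block:: python

    # content of test_server.py -- reconstructed for illustration
    import pytest


    @pytest.mark.webtest
    def test_send_http():
        pass  # perform some webtest test for your app


    def test_something_quick():
        pass


    def test_another():
        pass


    class TestClass(object):
        def test_method(self):
            pass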
Or the inverse, running all tests except the webtest ones::

$ pytest -v -m "not webtest"
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items

test_server.py::test_something_quick PASSED
test_server.py::test_another PASSED
test_server.py::TestClass::test_method PASSED
test_server.py::test_something_quick PASSED [ 33%]
test_server.py::test_another PASSED [ 66%]
test_server.py::TestClass::test_method PASSED [100%]

======= 1 tests deselected ========
======= 3 passed, 1 deselected in 0.12 seconds ========
============================ 1 tests deselected ============================
================== 3 passed, 1 deselected in 0.12 seconds ==================

Selecting tests based on their node ID
--------------------------------------

@ -65,42 +65,42 @@ arguments to select only specified tests. This makes it easy to select

tests based on their module, class, method, or function name::

$ pytest -v test_server.py::TestClass::test_method
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 1 item

test_server.py::TestClass::test_method PASSED
test_server.py::TestClass::test_method PASSED [100%]

======= 1 passed in 0.12 seconds ========
========================= 1 passed in 0.12 seconds =========================

You can also select on the class::

$ pytest -v test_server.py::TestClass
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 1 item

test_server.py::TestClass::test_method PASSED
test_server.py::TestClass::test_method PASSED [100%]

======= 1 passed in 0.12 seconds ========
========================= 1 passed in 0.12 seconds =========================

Or select multiple nodes::

$ pytest -v test_server.py::TestClass test_server.py::test_send_http
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 2 items

test_server.py::TestClass::test_method PASSED
test_server.py::test_send_http PASSED
test_server.py::TestClass::test_method PASSED [ 50%]
test_server.py::test_send_http PASSED [100%]

======= 2 passed in 0.12 seconds ========
========================= 2 passed in 0.12 seconds =========================

.. _node-id:

@ -129,47 +129,47 @@ exact match on markers that ``-m`` provides. This makes it easy to

select tests based on their names::

$ pytest -v -k http # running with the above defined example module
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items

test_server.py::test_send_http PASSED
test_server.py::test_send_http PASSED [100%]

======= 3 tests deselected ========
======= 1 passed, 3 deselected in 0.12 seconds ========
============================ 3 tests deselected ============================
================== 1 passed, 3 deselected in 0.12 seconds ==================

And you can also run all tests except the ones that match the keyword::

$ pytest -k "not send_http" -v
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items

test_server.py::test_something_quick PASSED
test_server.py::test_another PASSED
test_server.py::TestClass::test_method PASSED
test_server.py::test_something_quick PASSED [ 33%]
test_server.py::test_another PASSED [ 66%]
test_server.py::TestClass::test_method PASSED [100%]

======= 1 tests deselected ========
======= 3 passed, 1 deselected in 0.12 seconds ========
============================ 1 tests deselected ============================
================== 3 passed, 1 deselected in 0.12 seconds ==================

Or to select "http" and "quick" tests::

$ pytest -k "http or quick" -v
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 4 items

test_server.py::test_send_http PASSED
test_server.py::test_something_quick PASSED
test_server.py::test_send_http PASSED [ 50%]
test_server.py::test_something_quick PASSED [100%]

======= 2 tests deselected ========
======= 2 passed, 2 deselected in 0.12 seconds ========
============================ 2 tests deselected ============================
================== 2 passed, 2 deselected in 0.12 seconds ==================

.. note::

@ -354,26 +354,26 @@ and an example invocation specifying a different environment than what

the test needs::

$ pytest -E stage2
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

test_someenv.py s
test_someenv.py s [100%]

======= 1 skipped in 0.12 seconds ========
======================== 1 skipped in 0.12 seconds =========================

and here is one that specifies exactly the environment needed::

$ pytest -E stage1
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

test_someenv.py .
test_someenv.py . [100%]

======= 1 passed in 0.12 seconds ========
========================= 1 passed in 0.12 seconds =========================

The ``--markers`` option always gives you a list of available markers::

@ -432,7 +432,7 @@ The output is as follows::

$ pytest -q -s
Marker info name=my_marker args=(<function hello_world at 0xdeadbeef>,) kwars={}
.
. [100%]
1 passed in 0.12 seconds

We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``.
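As a sketch, the difference boils down to how a lone callable argument is
interpreted (``my_marker`` and ``hello_world`` as above):

.. code-block:: python

    # hypothetical illustration of the two call styles
    import pytest


    def hello_world():
        pass


    # with_args always extends the marker's argument set, even when the
    # single argument happens to be a callable
    marked_with_args = pytest.mark.my_marker.with_args(hello_world)

    # calling the marker directly with a lone callable instead decorates
    # hello_world via __call__, attaching the (argument-less) marker to it
    decorated = pytest.mark.my_marker(hello_world)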
@ -477,7 +477,7 @@ Let's run this without capturing output and see what we get::

glob args=('function',) kwargs={'x': 3}
glob args=('class',) kwargs={'x': 2}
glob args=('module',) kwargs={'x': 1}
.
. [100%]
1 passed in 0.12 seconds

marking platform specific tests with pytest

@ -530,29 +530,29 @@ Let's do a little test file to show how this looks::

then you will see two tests skipped and two executed tests as expected::

$ pytest -rs # this option reports skip reasons
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

test_plat.py s.s.
======= short test summary info ========
test_plat.py s.s. [100%]
========================= short test summary info ==========================
SKIP [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux

======= 2 passed, 2 skipped in 0.12 seconds ========
=================== 2 passed, 2 skipped in 0.12 seconds ====================

Note that if you specify a platform via the marker command line option like this::

$ pytest -m linux
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

test_plat.py .
test_plat.py . [100%]

======= 3 tests deselected ========
======= 1 passed, 3 deselected in 0.12 seconds ========
============================ 3 tests deselected ============================
================== 1 passed, 3 deselected in 0.12 seconds ==================

then the unmarked tests will not be run. It is thus a way to restrict the run to the specific tests.
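The conftest behind this behaviour can be sketched as a
``pytest_runtest_setup`` hook that compares each test's marks against
``sys.platform`` (reconstructed from the skip message above):

.. code-block:: python

    # content of conftest.py -- sketch
    import sys

    import pytest

    ALL = set("darwin linux win32".split())


    def pytest_runtest_setup(item):
        supported_platforms = ALL.intersection(item.keywords)
        plat = sys.platform
        if supported_platforms and plat not in supported_platforms:
            pytest.skip("cannot run on platform %s" % plat)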
@ -596,47 +596,47 @@ We want to dynamically define two markers and can do it in a

We can now use the ``-m`` option to select one set::

$ pytest -m interface --tb=short
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

test_module.py FF
test_module.py FF [100%]

======= FAILURES ========
_______ test_interface_simple ________
================================= FAILURES =================================
__________________________ test_interface_simple ___________________________
test_module.py:3: in test_interface_simple
assert 0
E assert 0
_______ test_interface_complex ________
__________________________ test_interface_complex __________________________
test_module.py:6: in test_interface_complex
assert 0
E assert 0
======= 2 tests deselected ========
======= 2 failed, 2 deselected in 0.12 seconds ========
============================ 2 tests deselected ============================
================== 2 failed, 2 deselected in 0.12 seconds ==================

or to select both "event" and "interface" tests::

$ pytest -m "interface or event" --tb=short
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

test_module.py FFF
test_module.py FFF [100%]

======= FAILURES ========
_______ test_interface_simple ________
================================= FAILURES =================================
__________________________ test_interface_simple ___________________________
test_module.py:3: in test_interface_simple
assert 0
E assert 0
_______ test_interface_complex ________
__________________________ test_interface_complex __________________________
test_module.py:6: in test_interface_complex
assert 0
E assert 0
_______ test_event_simple ________
____________________________ test_event_simple _____________________________
test_module.py:9: in test_event_simple
assert 0
E assert 0
======= 1 tests deselected ========
======= 3 failed, 1 deselected in 0.12 seconds ========
============================ 1 tests deselected ============================
================== 3 failed, 1 deselected in 0.12 seconds ==================

@ -6,7 +6,7 @@ import py

import pytest
import _pytest._code

pythonlist = ['python2.6', 'python2.7', 'python3.4', 'python3.5']
pythonlist = ['python2.7', 'python3.4', 'python3.5']
@pytest.fixture(params=pythonlist)
def python1(request, tmpdir):
picklefile = tmpdir.join("data.pickle")

@ -26,19 +26,19 @@ and if you installed `PyYAML`_ or a compatible YAML-parser you can

now execute the test specification::

nonpython $ pytest test_simple.yml
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collected 2 items

test_simple.yml F.
test_simple.yml F. [100%]

======= FAILURES ========
_______ usecase: hello ________
================================= FAILURES =================================
______________________________ usecase: hello ______________________________
usecase execution failed
spec failed: 'some': 'other'
no further details known at this point.
======= 1 failed, 1 passed in 0.12 seconds ========
==================== 1 failed, 1 passed in 0.12 seconds ====================

.. regendoc:wipe

@ -58,21 +58,21 @@ your own domain specific testing language this way.

consulted when reporting in ``verbose`` mode::

nonpython $ pytest -v
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collecting ... collected 2 items

test_simple.yml::hello FAILED
test_simple.yml::ok PASSED
test_simple.yml::hello FAILED [ 50%]
test_simple.yml::ok PASSED [100%]

======= FAILURES ========
_______ usecase: hello ________
================================= FAILURES =================================
______________________________ usecase: hello ______________________________
usecase execution failed
spec failed: 'some': 'other'
no further details known at this point.
======= 1 failed, 1 passed in 0.12 seconds ========
==================== 1 failed, 1 passed in 0.12 seconds ====================

.. regendoc:wipe

@ -80,7 +80,7 @@ While developing your custom test collection and execution it's also

interesting to just look at the collection tree::

nonpython $ pytest --collect-only
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
collected 2 items

@ -88,4 +88,4 @@ interesting to just look at the collection tree::

<YamlItem 'hello'>
<YamlItem 'ok'>

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

@ -45,16 +45,16 @@ Now we add a test configuration like this::

This means that we only run 2 tests if we do not pass ``--all``::

$ pytest -q test_compute.py
..
.. [100%]
2 passed in 0.12 seconds

We run only two computations, so we see two dots.
Let's run the full monty::

$ pytest -q --all
....F
======= FAILURES ========
_______ test_compute[4] ________
....F [100%]
================================= FAILURES =================================
_____________________________ test_compute[4] ______________________________

param1 = 4

@ -138,7 +138,7 @@ objects, they are still using the default pytest representation::

$ pytest test_time.py --collect-only
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 8 items

@ -152,7 +152,7 @@ objects, they are still using the default pytest representation::

<Function 'test_timedistance_v3[forward]'>
<Function 'test_timedistance_v3[backward]'>

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs
together with the actual data, instead of listing them separately.
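The parametrization producing those ``forward``/``backward`` IDs has
roughly this shape (a sketch; the concrete dates are illustrative):

.. code-block:: python

    # excerpt in the spirit of test_timedistance_v3
    from datetime import datetime, timedelta

    import pytest


    @pytest.mark.parametrize(
        "a,b,expected",
        [
            pytest.param(
                datetime(2001, 12, 12), datetime(2001, 12, 11),
                timedelta(1), id="forward"),
            pytest.param(
                datetime(2001, 12, 11), datetime(2001, 12, 12),
                timedelta(-1), id="backward"),
        ],
    )
    def test_timedistance_v3(a, b, expected):
        diff = a - b
        assert diff == expected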
@ -194,20 +194,20 @@ only have to work a bit to construct the correct arguments for pytest's

this is a fully self-contained example which you can run with::

$ pytest test_scenarios.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

test_scenarios.py ....
test_scenarios.py .... [100%]

======= 4 passed in 0.12 seconds ========
========================= 4 passed in 0.12 seconds =========================

If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::

$ pytest --collect-only test_scenarios.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

@ -219,7 +219,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia

<Function 'test_demo1[advanced]'>
<Function 'test_demo2[advanced]'>

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

Note that we told ``metafunc.parametrize()`` that your scenario values
should be considered class-scoped. With pytest-2.3 this leads to a
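The hook that produces those scenarios is, in sketch form, a
``pytest_generate_tests`` implementation reading a ``scenarios`` class
attribute:

.. code-block:: python

    # content of conftest.py -- sketched scenario support
    def pytest_generate_tests(metafunc):
        idlist = []
        argvalues = []
        for scenario in metafunc.cls.scenarios:
            idlist.append(scenario[0])
            items = scenario[1].items()
            argnames = [x[0] for x in items]
            argvalues.append([x[1] for x in items])
        # scope="class" finalizes class-scoped fixtures per scenario
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")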
@ -272,7 +272,7 @@ creates a database object for the actual test invocations::

Let's first see how it looks at collection time::

$ pytest test_backends.py --collect-only
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

@ -280,14 +280,14 @@ Let's first see how it looks at collection time::

<Function 'test_db_initialized[d1]'>
<Function 'test_db_initialized[d2]'>

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

And then when we run the test::

$ pytest -q test_backends.py
.F
======= FAILURES ========
_______ test_db_initialized[d2] ________
.F [100%]
================================= FAILURES =================================
_________________________ test_db_initialized[d2] __________________________

db = <conftest.DB2 object at 0xdeadbeef>

@ -333,14 +333,14 @@ will be passed to respective fixture function::

The result of this test will be successful::

$ pytest test_indirect_list.py --collect-only
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item
<Module 'test_indirect_list.py'>
<Function 'test_indirect[a-b]'>

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

.. regendoc:wipe

@ -381,9 +381,9 @@ Our test generator looks up a class-level definition which specifies which

argument sets to use for each test function. Let's run it::

$ pytest -q
F..
======= FAILURES ========
_______ TestClass.test_equals[1-2] ________
F.. [100%]
================================= FAILURES =================================
________________________ TestClass.test_equals[1-2] ________________________

self = <test_parametrize.TestClass object at 0xdeadbeef>, a = 1, b = 2
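A sketch of the generator and class layout implied by that run (three
tests, with ``test_equals[1-2]`` the failing one):

.. code-block:: python

    # content of test_parametrize.py -- sketch
    import pytest


    def pytest_generate_tests(metafunc):
        # look up a ``params`` class attribute keyed by test function name
        funcarglist = metafunc.cls.params[metafunc.function.__name__]
        argnames = sorted(funcarglist[0])
        metafunc.parametrize(
            argnames,
            [[funcargs[name] for name in argnames] for funcargs in funcarglist],
        )


    class TestClass(object):
        # a map specifying multiple argument sets for each test method
        params = {
            "test_equals": [dict(a=1, b=2), dict(a=3, b=3)],
            "test_zerodivision": [dict(a=1, b=0)],
        }

        def test_equals(self, a, b):
            assert a == b

        def test_zerodivision(self, a, b):
            with pytest.raises(ZeroDivisionError):
                a / b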
@ -411,10 +411,8 @@ is to be run with different sets of arguments for its three arguments:

Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (3 interpreters times 3 interpreters times 3 objects to serialize/deserialize)::

. $ pytest -rs -q multipython.py
sssssssssssssss.........sss.........sss.........
======= short test summary info ========
SKIP [21] $REGENDOC_TMPDIR/CWD/multipython.py:24: 'python2.6' not found
27 passed, 21 skipped in 0.12 seconds
........................... [100%]
27 passed in 0.12 seconds

Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------

@ -460,16 +458,16 @@ And finally a little test module::

If you run this with reporting for skips enabled::

$ pytest -rs test_module.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

test_module.py .s
======= short test summary info ========
test_module.py .s [100%]
========================= short test summary info ==========================
SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2'

======= 1 passed, 1 skipped in 0.12 seconds ========
=================== 1 passed, 1 skipped in 0.12 seconds ====================

You'll see that we don't have an ``opt2`` module and thus the second test run
of our ``test_func1`` was skipped. A few notes:
@ -116,7 +116,7 @@ that match ``*_check``. For example, if we have::

then the test collection looks like this::

$ pytest --collect-only
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 2 items

@ -126,7 +126,7 @@ then the test collection looks like this::

<Function 'simple_check'>
<Function 'complex_check'>

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

.. note::

@ -162,7 +162,7 @@ Finding out what is collected

You can always peek at the collection tree without running tests like this::

. $ pytest --collect-only pythoncollection.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 3 items

@ -173,7 +173,7 @@ You can always peek at the collection tree without running tests like this::

<Function 'test_method'>
<Function 'test_anothermethod'>

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

.. _customizing-test-collection:

@ -231,9 +231,9 @@ If you run with a Python 3 interpreter both the one test and the ``setup.py``

file will be left out::

$ pytest --collect-only
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
collected 0 items

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================
@ -10,15 +10,15 @@ not showing the nice colors here in the HTML that you

get on the terminal - we are working on that)::

assertion $ pytest failure_demo.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/assertion, inifile:
collected 42 items

failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%]

======= FAILURES ========
_______ test_generative[0] ________
================================= FAILURES =================================
____________________________ test_generative[0] ____________________________

param1 = 3, param2 = 6

@ -27,7 +27,7 @@ get on the terminal - we are working on that)::

E assert (3 * 2) < 6

failure_demo.py:16: AssertionError
_______ TestFailing.test_simple ________
_________________________ TestFailing.test_simple __________________________

self = <failure_demo.TestFailing object at 0xdeadbeef>

@ -43,7 +43,7 @@ get on the terminal - we are working on that)::

E + and 43 = <function TestFailing.test_simple.<locals>.g at 0xdeadbeef>()

failure_demo.py:29: AssertionError
_______ TestFailing.test_simple_multiline ________
____________________ TestFailing.test_simple_multiline _____________________

self = <failure_demo.TestFailing object at 0xdeadbeef>

@ -63,7 +63,7 @@ get on the terminal - we are working on that)::

E assert 42 == 54

failure_demo.py:12: AssertionError
_______ TestFailing.test_not ________
___________________________ TestFailing.test_not ___________________________

self = <failure_demo.TestFailing object at 0xdeadbeef>

@ -75,7 +75,7 @@ get on the terminal - we are working on that)::

E + where 42 = <function TestFailing.test_not.<locals>.f at 0xdeadbeef>()

failure_demo.py:39: AssertionError
_______ TestSpecialisedExplanations.test_eq_text ________
_________________ TestSpecialisedExplanations.test_eq_text _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -86,7 +86,7 @@ get on the terminal - we are working on that)::

E + eggs

failure_demo.py:43: AssertionError
_______ TestSpecialisedExplanations.test_eq_similar_text ________
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -99,7 +99,7 @@ get on the terminal - we are working on that)::

E ? ^

failure_demo.py:46: AssertionError
_______ TestSpecialisedExplanations.test_eq_multiline_text ________
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -112,7 +112,7 @@ get on the terminal - we are working on that)::

E bar

failure_demo.py:49: AssertionError
_______ TestSpecialisedExplanations.test_eq_long_text ________
______________ TestSpecialisedExplanations.test_eq_long_text _______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -129,7 +129,7 @@ get on the terminal - we are working on that)::

E ? ^

failure_demo.py:54: AssertionError
_______ TestSpecialisedExplanations.test_eq_long_text_multiline ________
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -149,7 +149,7 @@ get on the terminal - we are working on that)::

E ...Full output truncated (7 lines hidden), use '-vv' to show

failure_demo.py:59: AssertionError
_______ TestSpecialisedExplanations.test_eq_list ________
_________________ TestSpecialisedExplanations.test_eq_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -160,7 +160,7 @@ get on the terminal - we are working on that)::

E Use -v to get the full diff

failure_demo.py:62: AssertionError
_______ TestSpecialisedExplanations.test_eq_list_long ________
______________ TestSpecialisedExplanations.test_eq_list_long _______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -173,7 +173,7 @@ get on the terminal - we are working on that)::

E Use -v to get the full diff

failure_demo.py:67: AssertionError
_______ TestSpecialisedExplanations.test_eq_dict ________
_________________ TestSpecialisedExplanations.test_eq_dict _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -191,7 +191,7 @@ get on the terminal - we are working on that)::

E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:70: AssertionError
_______ TestSpecialisedExplanations.test_eq_set ________
_________________ TestSpecialisedExplanations.test_eq_set __________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -209,7 +209,7 @@ get on the terminal - we are working on that)::

E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:73: AssertionError
_______ TestSpecialisedExplanations.test_eq_longer_list ________
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -220,7 +220,7 @@ get on the terminal - we are working on that)::

E Use -v to get the full diff

failure_demo.py:76: AssertionError
_______ TestSpecialisedExplanations.test_in_list ________
_________________ TestSpecialisedExplanations.test_in_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -229,7 +229,7 @@ get on the terminal - we are working on that)::

E assert 1 in [0, 2, 3, 4, 5]

failure_demo.py:79: AssertionError
_______ TestSpecialisedExplanations.test_not_in_text_multiline ________
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -248,7 +248,7 @@ get on the terminal - we are working on that)::

E ...Full output truncated (2 lines hidden), use '-vv' to show

failure_demo.py:83: AssertionError
_______ TestSpecialisedExplanations.test_not_in_text_single ________
___________ TestSpecialisedExplanations.test_not_in_text_single ____________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -261,7 +261,7 @@ get on the terminal - we are working on that)::

E ? +++

failure_demo.py:87: AssertionError
_______ TestSpecialisedExplanations.test_not_in_text_single_long ________
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________

self = <failure_demo.TestSpecialisedExplanations object at 0xdeadbeef>

@ -287,7 +287,7 @@ get on the terminal - we are working on that)::

E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++

failure_demo.py:95: AssertionError
_______ test_attribute ________
______________________________ test_attribute ______________________________

def test_attribute():
class Foo(object):

@ -298,7 +298,7 @@ get on the terminal - we are working on that)::

E + where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0xdeadbeef>.b

failure_demo.py:102: AssertionError
_______ test_attribute_instance ________
_________________________ test_attribute_instance __________________________

def test_attribute_instance():
class Foo(object):

@ -309,7 +309,7 @@ get on the terminal - we are working on that)::

E + where <failure_demo.test_attribute_instance.<locals>.Foo object at 0xdeadbeef> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()

failure_demo.py:108: AssertionError
_______ test_attribute_failure ________
__________________________ test_attribute_failure __________________________

def test_attribute_failure():
class Foo(object):
@ -329,7 +329,7 @@ get on the terminal - we are working on that)::

E Exception: Failed to get attrib

failure_demo.py:114: Exception
_______ test_attribute_multiple ________
_________________________ test_attribute_multiple __________________________

def test_attribute_multiple():
class Foo(object):

@ -344,7 +344,7 @@ get on the terminal - we are working on that)::

E + where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0xdeadbeef> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()

failure_demo.py:125: AssertionError
_______ TestRaises.test_raises ________
__________________________ TestRaises.test_raises __________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

@ -359,7 +359,7 @@ get on the terminal - we are working on that)::

E ValueError: invalid literal for int() with base 10: 'qwe'

<0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:580>:1: ValueError
_______ TestRaises.test_raises_doesnt ________
______________________ TestRaises.test_raises_doesnt _______________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

@ -368,7 +368,7 @@ get on the terminal - we are working on that)::

E Failed: DID NOT RAISE <class 'OSError'>

failure_demo.py:137: Failed
_______ TestRaises.test_raise ________
__________________________ TestRaises.test_raise ___________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

@ -377,7 +377,7 @@ get on the terminal - we are working on that)::

E ValueError: demo error

failure_demo.py:140: ValueError
_______ TestRaises.test_tupleerror ________
________________________ TestRaises.test_tupleerror ________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

@ -399,7 +399,7 @@ get on the terminal - we are working on that)::

failure_demo.py:148: TypeError
--------------------------- Captured stdout call ---------------------------
l is [1, 2, 3]
_______ TestRaises.test_some_error ________
________________________ TestRaises.test_some_error ________________________

self = <failure_demo.TestRaises object at 0xdeadbeef>

@ -408,7 +408,7 @@ get on the terminal - we are working on that)::

E NameError: name 'namenotexi' is not defined

failure_demo.py:151: NameError
_______ test_dynamic_compile_shows_nicely ________
____________________ test_dynamic_compile_shows_nicely _____________________

def test_dynamic_compile_shows_nicely():
src = 'def foo():\n assert 1 == 0\n'

@ -427,7 +427,7 @@ get on the terminal - we are working on that)::

E AssertionError

<2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:163>:2: AssertionError
_______ TestMoreErrors.test_complex_error ________
____________________ TestMoreErrors.test_complex_error _____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -451,7 +451,7 @@ get on the terminal - we are working on that)::

E assert 44 == 43

failure_demo.py:6: AssertionError
_______ TestMoreErrors.test_z1_unpack_error ________
___________________ TestMoreErrors.test_z1_unpack_error ____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -461,7 +461,7 @@ get on the terminal - we are working on that)::

E ValueError: not enough values to unpack (expected 2, got 0)

failure_demo.py:180: ValueError
_______ TestMoreErrors.test_z2_type_error ________
____________________ TestMoreErrors.test_z2_type_error _____________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -471,7 +471,7 @@ get on the terminal - we are working on that)::

E TypeError: 'int' object is not iterable

failure_demo.py:184: TypeError
_______ TestMoreErrors.test_startswith ________
______________________ TestMoreErrors.test_startswith ______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -484,7 +484,7 @@ get on the terminal - we are working on that)::

E + where <built-in method startswith of str object at 0xdeadbeef> = '123'.startswith

failure_demo.py:189: AssertionError
_______ TestMoreErrors.test_startswith_nested ________
__________________ TestMoreErrors.test_startswith_nested ___________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -501,7 +501,7 @@ get on the terminal - we are working on that)::

E + and '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0xdeadbeef>()

failure_demo.py:196: AssertionError
_______ TestMoreErrors.test_global_func ________
_____________________ TestMoreErrors.test_global_func ______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -512,7 +512,7 @@ get on the terminal - we are working on that)::

E + where 43 = globf(42)

failure_demo.py:199: AssertionError
_______ TestMoreErrors.test_instance ________
_______________________ TestMoreErrors.test_instance _______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -523,7 +523,7 @@ get on the terminal - we are working on that)::

E + where 42 = <failure_demo.TestMoreErrors object at 0xdeadbeef>.x

failure_demo.py:203: AssertionError
_______ TestMoreErrors.test_compare ________
_______________________ TestMoreErrors.test_compare ________________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -533,7 +533,7 @@ get on the terminal - we are working on that)::

E + where 11 = globf(10)

failure_demo.py:206: AssertionError
_______ TestMoreErrors.test_try_finally ________
_____________________ TestMoreErrors.test_try_finally ______________________

self = <failure_demo.TestMoreErrors object at 0xdeadbeef>

@ -544,7 +544,7 @@ get on the terminal - we are working on that)::

E assert 1 == 0

failure_demo.py:211: AssertionError
_______ TestCustomAssertMsg.test_single_line ________
___________________ TestCustomAssertMsg.test_single_line ___________________

self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>

@ -558,7 +558,7 @@ get on the terminal - we are working on that)::

E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_single_line.<locals>.A'>.a

failure_demo.py:222: AssertionError
_______ TestCustomAssertMsg.test_multiline ________
____________________ TestCustomAssertMsg.test_multiline ____________________

self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>

@ -575,7 +575,7 @@ get on the terminal - we are working on that)::

E + where 1 = <class 'failure_demo.TestCustomAssertMsg.test_multiline.<locals>.A'>.a

failure_demo.py:228: AssertionError
_______ TestCustomAssertMsg.test_custom_repr ________
___________________ TestCustomAssertMsg.test_custom_repr ___________________

self = <failure_demo.TestCustomAssertMsg object at 0xdeadbeef>

@ -595,4 +595,10 @@ get on the terminal - we are working on that)::

E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a

failure_demo.py:238: AssertionError
======= 42 failed in 0.12 seconds ========
============================= warnings summary =============================
None
Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0.
Please use Metafunc.parametrize instead.

-- Docs: http://doc.pytest.org/en/latest/warnings.html
================== 42 failed, 1 warnings in 0.12 seconds ===================
@ -41,9 +41,9 @@ provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`:

Let's run this without supplying our new option::

$ pytest -q test_sample.py
F
======= FAILURES ========
_______ test_answer ________
F [100%]
================================= FAILURES =================================
_______________________________ test_answer ________________________________

cmdopt = 'type1'

@ -63,9 +63,9 @@ Let's run this without supplying our new option::

And now with supplying a command line option::

$ pytest -q --cmdopt=type2
F
======= FAILURES ========
_______ test_answer ________
F [100%]
================================= FAILURES =================================
_______________________________ test_answer ________________________________

cmdopt = 'type2'
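The wiring behind this example is roughly an option declaration plus a
fixture that reads it back (a sketch; the failing test body itself is only
implied by the ``F`` outcomes above):

.. code-block:: python

    # content of conftest.py -- sketch
    import pytest


    def pytest_addoption(parser):
        parser.addoption("--cmdopt", action="store", default="type1",
                         help="my option: type1 or type2")


    @pytest.fixture
    def cmdopt(request):
        return request.config.getoption("--cmdopt")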
@ -112,12 +112,12 @@ of subprocesses close to your CPU. Running in an empty

directory with the above conftest.py::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

.. _`excontrolskip`:

@ -166,28 +166,28 @@ We can now write a test module like this:

and when running it you will see a skipped "slow" test::

$ pytest -rs # "-rs" means report details on the little 's'
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

test_module.py .s
======= short test summary info ========
test_module.py .s [100%]
========================= short test summary info ==========================
SKIP [1] test_module.py:8: need --runslow option to run

======= 1 passed, 1 skipped in 0.12 seconds ========
=================== 1 passed, 1 skipped in 0.12 seconds ====================

Or run it including the ``slow`` marked test::

$ pytest --runslow
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

test_module.py ..
test_module.py .. [100%]

======= 2 passed in 0.12 seconds ========
========================= 2 passed in 0.12 seconds =========================
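A sketch of the option/skip wiring this section relies on; the essential
part is checking the ``slow`` keyword against the ``--runslow`` option:

.. code-block:: python

    # content of conftest.py -- sketch
    import pytest


    def pytest_addoption(parser):
        parser.addoption("--runslow", action="store_true", default=False,
                         help="run slow tests")


    def pytest_runtest_setup(item):
        if "slow" in item.keywords and not item.config.getoption("--runslow"):
            pytest.skip("need --runslow option to run")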
Writing well integrated assertion helpers
--------------------------------------------------

@ -218,9 +218,9 @@ unless the ``--full-trace`` command line option is specified.

Let's run our little function::

$ pytest -q test_checkconfig.py
F
======= FAILURES ========
_______ test_something ________
F [100%]
================================= FAILURES =================================
______________________________ test_something ______________________________

def test_something():
> checkconfig(42)
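The helper under test hides its own frame from failure tracebacks via
``__tracebackhide__`` (a sketch of the pattern the section describes):

.. code-block:: python

    # content of test_checkconfig.py -- sketch
    import pytest


    def checkconfig(x):
        __tracebackhide__ = True  # drop this frame from the traceback
        if not hasattr(x, "config"):
            pytest.fail("not configured: %s" % (x,))


    def test_something():
        checkconfig(42)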
@ -305,13 +305,13 @@ It's easy to present extra information in a ``pytest`` run:

which will add the string to the test header accordingly::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
project deps: mylib-1.1
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

.. regendoc:wipe

@ -330,7 +330,7 @@ display more information if applicable:

which will add info only when run with "-v"::

$ pytest -v
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
info1: did you know that ...

@ -338,17 +338,17 @@ which will add info only when run with "-v"::

rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 0 items

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

and nothing when run plainly::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 0 items

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================
profiling test duration
|
||||
--------------------------
|
||||
|
@ -377,18 +377,18 @@ out which tests are the slowest. Let's make an artificial test suite:

Now we can profile which test functions execute the slowest::

$ pytest --durations=3
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 3 items

test_some_are_slow.py ...
test_some_are_slow.py ... [100%]

======= slowest 3 test durations ========
========================= slowest 3 test durations =========================
0.30s call test_some_are_slow.py::test_funcslow2
0.20s call test_some_are_slow.py::test_funcslow1
0.10s call test_some_are_slow.py::test_funcfast
======= 3 passed in 0.12 seconds ========
========================= 3 passed in 0.12 seconds =========================

incremental testing - test steps
---------------------------------------------------
@ -443,18 +443,18 @@ tests in a class. Here is a test module example:

If we run this::

$ pytest -rx
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 4 items

test_step.py .Fx.
======= short test summary info ========
test_step.py .Fx. [100%]
========================= short test summary info ==========================
XFAIL test_step.py::TestUserHandling::()::test_deletion
reason: previous test failed (test_modification)

======= FAILURES ========
_______ TestUserHandling.test_modification ________
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________

self = <test_step.TestUserHandling object at 0xdeadbeef>

@ -463,7 +463,7 @@ If we run this::

E assert 0

test_step.py:9: AssertionError
======= 1 failed, 2 passed, 1 xfailed in 0.12 seconds ========
============== 1 failed, 2 passed, 1 xfailed in 0.12 seconds ===============

We'll see that ``test_deletion`` was not executed because ``test_modification``
failed. It is reported as an "expected failure".
@ -522,27 +522,27 @@ the ``db`` fixture:

We can run this::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 7 items

test_step.py .Fx.
a/test_db.py F
a/test_db2.py F
b/test_error.py E
test_step.py .Fx. [ 57%]
a/test_db.py F [ 71%]
a/test_db2.py F [ 85%]
b/test_error.py E [100%]

======= ERRORS ========
_______ ERROR at setup of test_root ________
================================== ERRORS ==================================
_______________________ ERROR at setup of test_root ________________________
file $REGENDOC_TMPDIR/b/test_error.py, line 1
def test_root(db): # no db here, will error out
E fixture 'db' not found
> available fixtures: cache, capfd, capsys, doctest_namespace, monkeypatch, pytestconfig, record_xml_property, recwarn, tmpdir, tmpdir_factory
> available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_xml_property, recwarn, tmpdir, tmpdir_factory
> use 'pytest --fixtures [testpath]' for help on them.

$REGENDOC_TMPDIR/b/test_error.py:1
======= FAILURES ========
_______ TestUserHandling.test_modification ________
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________

self = <test_step.TestUserHandling object at 0xdeadbeef>

@ -551,7 +551,7 @@ We can run this::

E assert 0

test_step.py:9: AssertionError
_______ test_a1 ________
_________________________________ test_a1 __________________________________

db = <conftest.DB object at 0xdeadbeef>

@ -561,7 +561,7 @@ We can run this::

E assert 0

a/test_db.py:2: AssertionError
_______ test_a2 ________
_________________________________ test_a2 __________________________________

db = <conftest.DB object at 0xdeadbeef>

@ -571,7 +571,7 @@ We can run this::

E assert 0

a/test_db2.py:2: AssertionError
======= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========
========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ==========

The two test modules in the ``a`` directory see the same ``db`` fixture instance
while the one test in the sister-directory ``b`` doesn't see it. We could of course
@ -630,15 +630,15 @@ if you then have failing tests:

and run them::

$ pytest test_module.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

test_module.py FF
test_module.py FF [100%]

======= FAILURES ========
_______ test_fail1 ________
================================= FAILURES =================================
________________________________ test_fail1 ________________________________

tmpdir = local('PYTEST_TMPDIR/test_fail10')

@ -647,14 +647,14 @@ and run them::

E assert 0

test_module.py:2: AssertionError
_______ test_fail2 ________
________________________________ test_fail2 ________________________________

def test_fail2():
>       assert 0
E assert 0

test_module.py:4: AssertionError
======= 2 failed in 0.12 seconds ========
========================= 2 failed in 0.12 seconds =========================

you will have a "failures" file which contains the failing test ids::
@ -724,17 +724,17 @@ if you then have failing tests:

and run it::

$ pytest -s test_module.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 3 items

test_module.py Esetting up a test failed! test_module.py::test_setup_fails
Fexecuting test failed test_module.py::test_call_fails
F
F [100%]

======= ERRORS ========
_______ ERROR at setup of test_setup_fails ________
================================== ERRORS ==================================
____________________ ERROR at setup of test_setup_fails ____________________

@pytest.fixture
def other():

@ -742,8 +742,8 @@ and run it::

E assert 0

test_module.py:6: AssertionError
======= FAILURES ========
_______ test_call_fails ________
================================= FAILURES =================================
_____________________________ test_call_fails ______________________________

something = None

@ -752,14 +752,14 @@ and run it::

E assert 0

test_module.py:12: AssertionError
_______ test_fail2 ________
________________________________ test_fail2 ________________________________

def test_fail2():
>       assert 0
E assert 0

test_module.py:15: AssertionError
======= 2 failed, 1 error in 0.12 seconds ========
==================== 2 failed, 1 error in 0.12 seconds =====================

You'll see that the fixture finalizers could use the precise reporting
information.
@ -68,5 +68,5 @@ If you run this without output capturing::

.test_method1 called
.test other
.test_unit1 method called
.
. [100%]
4 passed in 0.12 seconds
@ -69,15 +69,15 @@ will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>`

marked ``smtp`` fixture function. Running the test looks like this::

$ pytest test_smtpsimple.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

test_smtpsimple.py F
test_smtpsimple.py F [100%]

======= FAILURES ========
_______ test_ehlo ________
================================= FAILURES =================================
________________________________ test_ehlo _________________________________

smtp = <smtplib.SMTP object at 0xdeadbeef>

@ -88,7 +88,7 @@ marked ``smtp`` fixture function. Running the test looks like this::

E assert 0

test_smtpsimple.py:11: AssertionError
======= 1 failed in 0.12 seconds ========
========================= 1 failed in 0.12 seconds =========================

In the failure traceback we see that the test function was called with a
``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture
@ -205,15 +205,15 @@ We deliberately insert failing ``assert 0`` statements in order to

inspect what is going on and can now run the tests::

$ pytest test_module.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

test_module.py FF
test_module.py FF [100%]

======= FAILURES ========
_______ test_ehlo ________
================================= FAILURES =================================
________________________________ test_ehlo _________________________________

smtp = <smtplib.SMTP object at 0xdeadbeef>

@ -225,7 +225,7 @@ inspect what is going on and can now run the tests::

E assert 0

test_module.py:6: AssertionError
_______ test_noop ________
________________________________ test_noop _________________________________

smtp = <smtplib.SMTP object at 0xdeadbeef>

@ -236,7 +236,7 @@ inspect what is going on and can now run the tests::

E assert 0

test_module.py:11: AssertionError
======= 2 failed in 0.12 seconds ========
========================= 2 failed in 0.12 seconds =========================

You see the two ``assert 0`` failing and, more importantly, you can also see
that the same (module-scoped) ``smtp`` object was passed into the two
@ -286,7 +286,7 @@ tests.

Let's execute it::

$ pytest -s -q --tb=no
FFteardown smtp
FF [100%]teardown smtp

2 failed in 0.12 seconds

@ -391,7 +391,7 @@ We use the ``request.module`` attribute to optionally obtain an

again, nothing much has changed::

$ pytest -s -q --tb=no
FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)
FF [100%]finalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)

2 failed in 0.12 seconds
@ -408,9 +408,9 @@ server URL in its module namespace::

Running it::

$ pytest -qq --tb=short test_anothersmtp.py
F
======= FAILURES ========
_______ test_showhelo ________
F [100%]
================================= FAILURES =================================
______________________________ test_showhelo _______________________________
test_anothersmtp.py:5: in test_showhelo
assert 0, smtp.helo()
E AssertionError: (250, b'mail.python.org')
@ -457,9 +457,9 @@ a value via ``request.param``. No test function code needs to change.

So let's just do another run::

$ pytest -q test_module.py
FFFF
======= FAILURES ========
_______ test_ehlo[smtp.gmail.com] ________
FFFF [100%]
================================= FAILURES =================================
________________________ test_ehlo[smtp.gmail.com] _________________________

smtp = <smtplib.SMTP object at 0xdeadbeef>

@ -471,7 +471,7 @@ So let's just do another run::

E assert 0

test_module.py:6: AssertionError
_______ test_noop[smtp.gmail.com] ________
________________________ test_noop[smtp.gmail.com] _________________________

smtp = <smtplib.SMTP object at 0xdeadbeef>

@ -482,7 +482,7 @@ So let's just do another run::

E assert 0

test_module.py:11: AssertionError
_______ test_ehlo[mail.python.org] ________
________________________ test_ehlo[mail.python.org] ________________________

smtp = <smtplib.SMTP object at 0xdeadbeef>

@ -495,7 +495,7 @@ So let's just do another run::

test_module.py:5: AssertionError
-------------------------- Captured stdout setup ---------------------------
finalizing <smtplib.SMTP object at 0xdeadbeef>
_______ test_noop[mail.python.org] ________
________________________ test_noop[mail.python.org] _________________________

smtp = <smtplib.SMTP object at 0xdeadbeef>
@ -559,7 +559,7 @@ return ``None`` then pytest's auto-generated ID will be used.

Running the above tests results in the following test IDs being used::

$ pytest --collect-only
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 10 items

@ -577,7 +577,7 @@ Running the above tests results in the following test IDs being used::

<Function 'test_ehlo[mail.python.org]'>
<Function 'test_noop[mail.python.org]'>

======= no tests ran in 0.12 seconds ========
======================= no tests ran in 0.12 seconds =======================

.. _`interdependent fixtures`:
@ -610,16 +610,16 @@ Here we declare an ``app`` fixture which receives the previously defined

``smtp`` fixture and instantiates an ``App`` object with it. Let's run it::

$ pytest -v test_appsetup.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:
collecting ... collected 2 items

test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED
test_appsetup.py::test_smtp_exists[mail.python.org] PASSED
test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED [ 50%]
test_appsetup.py::test_smtp_exists[mail.python.org] PASSED [100%]

======= 2 passed in 0.12 seconds ========
========================= 2 passed in 0.12 seconds =========================

Due to the parametrization of ``smtp`` the test will run twice with two
different ``App`` instances and respective smtp servers. There is no
@ -679,7 +679,7 @@ to show the setup/teardown flow::

Let's run the tests in verbose mode and with looking at the print-output::

$ pytest -v -s test_module.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5
cachedir: .cache
rootdir: $REGENDOC_TMPDIR, inifile:

@ -687,38 +687,38 @@ Let's run the tests in verbose mode and with looking at the print-output::

test_module.py::test_0[1] SETUP otherarg 1
RUN test0 with otherarg 1
PASSED TEARDOWN otherarg 1
PASSED [ 12%] TEARDOWN otherarg 1

test_module.py::test_0[2] SETUP otherarg 2
RUN test0 with otherarg 2
PASSED TEARDOWN otherarg 2
PASSED [ 25%] TEARDOWN otherarg 2

test_module.py::test_1[mod1] SETUP modarg mod1
RUN test1 with modarg mod1
PASSED
PASSED [ 37%]
test_module.py::test_2[1-mod1] SETUP otherarg 1
RUN test2 with otherarg 1 and modarg mod1
PASSED TEARDOWN otherarg 1
PASSED [ 50%] TEARDOWN otherarg 1

test_module.py::test_2[2-mod1] SETUP otherarg 2
RUN test2 with otherarg 2 and modarg mod1
PASSED TEARDOWN otherarg 2
PASSED [ 62%] TEARDOWN otherarg 2

test_module.py::test_1[mod2] TEARDOWN modarg mod1
SETUP modarg mod2
RUN test1 with modarg mod2
PASSED
PASSED [ 75%]
test_module.py::test_2[1-mod2] SETUP otherarg 1
RUN test2 with otherarg 1 and modarg mod2
PASSED TEARDOWN otherarg 1
PASSED [ 87%] TEARDOWN otherarg 1

test_module.py::test_2[2-mod2] SETUP otherarg 2
RUN test2 with otherarg 2 and modarg mod2
PASSED TEARDOWN otherarg 2
PASSED [100%] TEARDOWN otherarg 2
TEARDOWN modarg mod2

======= 8 passed in 0.12 seconds ========
========================= 8 passed in 0.12 seconds =========================

You can see that the parametrized module-scoped ``modarg`` resource caused an
ordering of test execution that led to the fewest possible "active" resources.
@ -781,7 +781,7 @@ you specified a "cleandir" function argument to each of them. Let's run it

to verify our fixture is activated and the tests pass::

$ pytest -q
..
.. [100%]
2 passed in 0.12 seconds

You can specify multiple fixtures like this:

@ -862,7 +862,7 @@ class-level ``usefixtures`` decorator.

If we run it, we get two passing tests::

$ pytest -q
..
.. [100%]
2 passed in 0.12 seconds

Here is how autouse fixtures work in other scopes:
@ -1,7 +1,7 @@

Installation and Getting Started
===================================

**Pythons**: Python 2.6,2.7,3.3,3.4,3.5,3.6 Jython, PyPy-2.3
**Pythons**: Python 2.7, 3.4, 3.5, 3.6, Jython, PyPy-2.3

**Platforms**: Unix/Posix and Windows

@ -9,8 +9,6 @@ Installation and Getting Started

**dependencies**: `py <http://pypi.python.org/pypi/py>`_,
`colorama (Windows) <http://pypi.python.org/pypi/colorama>`_,
`argparse (py26) <http://pypi.python.org/pypi/argparse>`_,
`ordereddict (py26) <http://pypi.python.org/pypi/ordereddict>`_.

**documentation as PDF**: `download latest <https://media.readthedocs.org/pdf/pytest/latest/pytest.pdf>`_
@ -46,15 +44,15 @@ Let's create a first test file with a simple test function::

That's it. You can execute the test function now::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

test_sample.py F
test_sample.py F [100%]

======= FAILURES ========
_______ test_answer ________
================================= FAILURES =================================
_______________________________ test_answer ________________________________

def test_answer():
>       assert func(3) == 5

@ -62,7 +60,7 @@ That's it. You can execute the test function now::

E + where 4 = func(3)

test_sample.py:5: AssertionError
======= 1 failed in 0.12 seconds ========
========================= 1 failed in 0.12 seconds =========================

We got a failure report because our little ``func(3)`` call did not return ``5``.

@ -101,7 +99,7 @@ use the ``raises`` helper::

Running it, this time in "quiet" reporting mode::

$ pytest -q test_sysexit.py
.
. [100%]
1 passed in 0.12 seconds

Grouping multiple tests in a class
@ -126,9 +124,9 @@ There is no need to subclass anything. We can simply

run the module by passing its filename::

$ pytest -q test_class.py
.F
======= FAILURES ========
_______ TestClass.test_two ________
.F [100%]
================================= FAILURES =================================
____________________________ TestClass.test_two ____________________________

self = <test_class.TestClass object at 0xdeadbeef>

@ -163,9 +161,9 @@ We list the name ``tmpdir`` in the test function signature and

before performing the test function call. Let's just run it::

$ pytest -q test_tmpdir.py
F
======= FAILURES ========
_______ test_needsfiles ________
F [100%]
================================= FAILURES =================================
_____________________________ test_needsfiles ______________________________

tmpdir = local('PYTEST_TMPDIR/test_needsfiles0')
@ -24,15 +24,15 @@ An example of a simple test:

To execute it::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

test_sample.py F
test_sample.py F [100%]

======= FAILURES ========
_______ test_answer ________
================================= FAILURES =================================
_______________________________ test_answer ________________________________

def test_answer():
>       assert inc(3) == 5

@ -40,7 +40,7 @@ To execute it::

E + where 4 = inc(3)

test_sample.py:5: AssertionError
======= 1 failed in 0.12 seconds ========
========================= 1 failed in 0.12 seconds =========================

Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used.
See :ref:`Getting Started <getstarted>` for more examples.

@ -57,7 +57,7 @@ Features

- Can run :ref:`unittest <unittest>` (including trial) and :ref:`nose <noseintegration>` test suites out of the box;

- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested);
- Python 2.7, Python 3.4+, PyPy 2.3, Jython 2.5 (untested);

- Rich plugin architecture, with 315+ `external plugins <http://plugincompat.herokuapp.com>`_ and a thriving community;
@ -0,0 +1,192 @@

.. _logging:

Logging
-------

.. versionadded:: 3.3.0

.. note::

This feature is a drop-in replacement for the `pytest-catchlog
<https://pypi.org/project/pytest-catchlog/>`_ plugin and the two will conflict
with each other. The backward compatibility API with ``pytest-capturelog``
was dropped when this feature was introduced, so if you still need
``pytest-catchlog`` for that reason, you can disable the internal feature by
adding this to your ``pytest.ini``:

.. code-block:: ini

    [pytest]
    addopts=-p no:logging

Log messages are captured by default and, for each failed test, are shown in
the same manner as captured stdout and stderr.

Running without options::

pytest

Shows failed tests like so::

----------------------- Captured stdlog call ----------------------
test_reporting.py 26 INFO text going to logger
----------------------- Captured stdout call ----------------------
text going to stdout
----------------------- Captured stderr call ----------------------
text going to stderr
==================== 2 failed in 0.02 seconds =====================

By default each captured log message shows the module, line number, log level
and message. Showing the exact module and line number is useful for testing and
debugging. If desired, the log format and date format can be set to
anything that the logging module supports.

Running pytest specifying formatting options::

pytest --log-format="%(asctime)s %(levelname)s %(message)s" \
       --log-date-format="%Y-%m-%d %H:%M:%S"
Shows failed tests like so::

----------------------- Captured stdlog call ----------------------
2010-04-10 14:48:44 INFO text going to logger
----------------------- Captured stdout call ----------------------
text going to stdout
----------------------- Captured stderr call ----------------------
text going to stderr
==================== 2 failed in 0.02 seconds =====================

These options can also be customized through a configuration file:

.. code-block:: ini

    [pytest]
    log_format = %(asctime)s %(levelname)s %(message)s
    log_date_format = %Y-%m-%d %H:%M:%S

Further, it is possible to disable reporting logs on failed tests completely
with::

pytest --no-print-logs

Or in your ``pytest.ini``:

.. code-block:: ini

    [pytest]
    log_print = False

Shows failed tests in the normal manner as no logs were captured::

----------------------- Captured stdout call ----------------------
text going to stdout
----------------------- Captured stderr call ----------------------
text going to stderr
==================== 2 failed in 0.02 seconds =====================

Inside tests it is possible to change the log level for the captured log
messages. This is supported by the ``caplog`` fixture::

def test_foo(caplog):
    caplog.set_level(logging.INFO)
    pass

By default the level is set on the handler used to catch the log messages,
however as a convenience it is also possible to set the log level of any
logger::

def test_foo(caplog):
    caplog.set_level(logging.CRITICAL, logger='root.baz')
    pass
It is also possible to use a context manager to temporarily change the log
level::

def test_bar(caplog):
    with caplog.at_level(logging.INFO):
        pass

Again, by default the level of the handler is affected, but the level of any
logger can be changed instead with::

def test_bar(caplog):
    with caplog.at_level(logging.CRITICAL, logger='root.baz'):
        pass

Lastly, all the logs sent to the logger during the test run are made available on
the fixture in the form of both the LogRecord instances and the final log text.
This is useful when you want to assert on the contents of a message::

def test_baz(caplog):
    func_under_test()
    for record in caplog.records:
        assert record.levelname != 'CRITICAL'
    assert 'wally' not in caplog.text

For all the available attributes of the log records see the
``logging.LogRecord`` class.

You can also resort to ``record_tuples`` if all you want to do is to ensure
that certain messages have been logged under a given logger name with a given
severity and message::

def test_foo(caplog):
    logging.getLogger().info('boo %s', 'arg')

    assert caplog.record_tuples == [
        ('root', logging.INFO, 'boo arg'),
    ]

You can call ``caplog.clear()`` to reset the captured log records in a test::

def test_something_with_clearing_records(caplog):
    some_method_that_creates_log_records()
    caplog.clear()
    your_test_method()
    assert ['Foo'] == [rec.message for rec in caplog.records]

Live Logs
^^^^^^^^^

By default, pytest will output any logging records with a level of WARNING or
higher. In order to actually see these logs in the console you have to
disable pytest output capture by passing ``-s``.

You can specify the logging level for which log records with equal or higher
level are printed to the console by passing ``--log-cli-level``. This setting
accepts the logging level names as seen in Python's documentation or an integer
as the logging level number.
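For instance, a hypothetical run combining the two options above::

    $ pytest -s --log-cli-level=INFO

would print every captured record of level INFO and above to the console while the tests run.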
Additionally, you can also specify ``--log-cli-format`` and
``--log-cli-date-format`` which mirror and default to ``--log-format`` and
``--log-date-format`` if not provided, but are applied only to the console
logging handler.

All of the CLI log options can also be set in the configuration INI file
(a sample configuration follows the list). The option names are:

* ``log_cli_level``
* ``log_cli_format``
* ``log_cli_date_format``
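As a minimal sketch, the three options might be set together like this; the level and format strings are only illustrations, not recommended defaults:

.. code-block:: ini

    [pytest]
    log_cli_level = INFO
    log_cli_format = %(asctime)s %(levelname)s %(message)s
    log_cli_date_format = %Y-%m-%d %H:%M:%S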
If you need to record all of the test suite's logging calls to a file, you can pass
``--log-file=/path/to/log/file``. This log file is opened in write mode, which
means that it will be overwritten on each test session run.

You can also specify the logging level for the log file by passing
``--log-file-level``. This setting accepts the logging level names as seen in
Python's documentation (i.e., uppercased level names) or an integer as the logging
level number.

Additionally, you can also specify ``--log-file-format`` and
``--log-file-date-format`` which are analogous to ``--log-format`` and
``--log-date-format`` but are applied to the log file logging handler.

All of the log file options can also be set in the configuration INI file
(a sample configuration follows the list). The option names are:

* ``log_file``
* ``log_file_level``
* ``log_file_format``
* ``log_file_date_format``
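Again a minimal sketch; the file name, level, and format strings are placeholders chosen for illustration:

.. code-block:: ini

    [pytest]
    log_file = pytest.log
    log_file_level = DEBUG
    log_file_format = %(asctime)s %(levelname)s %(message)s
    log_file_date_format = %Y-%m-%d %H:%M:%S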
@ -53,15 +53,15 @@ tuples so that the ``test_eval`` function will run three times using

them in turn::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 3 items

test_expectation.py ..F
test_expectation.py ..F [100%]

======= FAILURES ========
_______ test_eval[6*9-42] ________
================================= FAILURES =================================
____________________________ test_eval[6*9-42] _____________________________

test_input = '6*9', expected = 42

@ -76,7 +76,7 @@ them in turn::

E + where 54 = eval('6*9')

test_expectation.py:8: AssertionError
======= 1 failed, 2 passed in 0.12 seconds ========
==================== 1 failed, 2 passed in 0.12 seconds ====================

As designed in this example, only one pair of input/output values fails
the simple test function. And as usual with test function arguments,

@ -102,14 +102,14 @@ for example with the builtin ``mark.xfail``::

Let's run this::

$ pytest
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 3 items

test_expectation.py ..x
test_expectation.py ..x [100%]

======= 2 passed, 1 xfailed in 0.12 seconds ========
=================== 2 passed, 1 xfailed in 0.12 seconds ====================

The one parameter set which caused a failure previously now
shows up as an "xfailed (expected to fail)" test.
@ -165,15 +165,15 @@ command line option and the parametrization of our test function::

If we now pass two stringinput values, our test will run twice::

$ pytest -q --stringinput="hello" --stringinput="world" test_strings.py
..
.. [100%]
2 passed in 0.12 seconds

Let's also run with a stringinput that will lead to a failing test::

$ pytest -q --stringinput="!" test_strings.py
F
======= FAILURES ========
_______ test_valid_string[!] ________
F [100%]
================================= FAILURES =================================
___________________________ test_valid_string[!] ___________________________

stringinput = '!'

@ -193,9 +193,9 @@ If you don't specify a stringinput it will be skipped because

list::

$ pytest -q -rs test_strings.py
s
======= short test summary info ========
SKIP [1] test_strings.py:2: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1
s [100%]
========================= short test summary info ==========================
SKIP [1] test_strings.py: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1
1 skipped in 0.12 seconds

Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across
@ -27,9 +27,6 @@ Here is a little annotated list for some popular plugins:

for `twisted <http://twistedmatrix.com>`_ apps, starting a reactor and
processing deferreds from test functions.

* `pytest-catchlog <http://pypi.python.org/pypi/pytest-catchlog>`_:
to capture and assert about messages from the logging module

* `pytest-cov <http://pypi.python.org/pypi/pytest-cov>`_:
coverage reporting, compatible with distributed testing
@ -58,6 +58,16 @@ by calling the ``pytest.skip(reason)`` function:

if not valid_config():
    pytest.skip("unsupported configuration")

It is also possible to skip the whole module using
``pytest.skip(reason, allow_module_level=True)`` at the module level:

.. code-block:: python

    import pytest

    if not pytest.config.getoption("--custom-flag"):
        pytest.skip("--custom-flag is missing, skipping tests", allow_module_level=True)

The imperative method is useful when it is not possible to evaluate the skip condition
during import time.

@ -68,11 +78,11 @@ during import time.

If you wish to skip something conditionally then you can use ``skipif`` instead.
Here is an example of marking a test function to be skipped
when run on a Python3.3 interpreter::
when run on an interpreter older than Python3.6::

import sys
@pytest.mark.skipif(sys.version_info < (3,3),
                    reason="requires python3.3")
@pytest.mark.skipif(sys.version_info < (3,6),
                    reason="requires python3.6")
def test_function():
    ...
@ -264,8 +274,8 @@ You can change the default value of the ``strict`` parameter using the

As with skipif_ you can also mark your expectation of a failure
on a particular platform::

@pytest.mark.xfail(sys.version_info >= (3,3),
                   reason="python3.3 api changes")
@pytest.mark.xfail(sys.version_info >= (3,6),
                   reason="python3.6 api changes")
def test_function():
    ...

@ -321,13 +331,13 @@ Here is a simple test file with the several usages:

Running it with the report-on-xfail option gives this output::

example $ pytest -rx xfail_demo.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR/example, inifile:
collected 7 items

xfail_demo.py xxxxxxx
======= short test summary info ========
xfail_demo.py xxxxxxx [100%]
========================= short test summary info ==========================
XFAIL xfail_demo.py::test_hello
XFAIL xfail_demo.py::test_hello2
reason: [NOTRUN]

@ -341,7 +351,7 @@ Running it with the report-on-xfail option gives this output::

reason: reason
XFAIL xfail_demo.py::test_hello7

======= 7 xfailed in 0.12 seconds ========
======================== 7 xfailed in 0.12 seconds =========================

.. _`skip/xfail with parametrize`:
@ -28,15 +28,15 @@ Running this would result in a passed test except for the last

``assert 0`` line which we use to look at values::

$ pytest test_tmpdir.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

test_tmpdir.py F
test_tmpdir.py F [100%]

======= FAILURES ========
_______ test_create_file ________
================================= FAILURES =================================
_____________________________ test_create_file _____________________________

tmpdir = local('PYTEST_TMPDIR/test_create_file0')

@ -49,7 +49,7 @@ Running this would result in a passed test except for the last

E assert 0

test_tmpdir.py:7: AssertionError
======= 1 failed in 0.12 seconds ========
========================= 1 failed in 0.12 seconds =========================

The 'tmpdir_factory' fixture
----------------------------
@ -126,15 +126,15 @@ Due to the deliberately failing assert statements, we can take a look at

the ``self.db`` values in the traceback::

$ pytest test_unittest_db.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 2 items

test_unittest_db.py FF
test_unittest_db.py FF [100%]

======= FAILURES ========
_______ MyTest.test_method1 ________
================================= FAILURES =================================
___________________________ MyTest.test_method1 ____________________________

self = <test_unittest_db.MyTest testMethod=test_method1>

@ -145,7 +145,7 @@ the ``self.db`` values in the traceback::

E assert 0

test_unittest_db.py:9: AssertionError
_______ MyTest.test_method2 ________
___________________________ MyTest.test_method2 ____________________________

self = <test_unittest_db.MyTest testMethod=test_method2>

@ -155,7 +155,7 @@ the ``self.db`` values in the traceback::

E assert 0

test_unittest_db.py:12: AssertionError
======= 2 failed in 0.12 seconds ========
========================= 2 failed in 0.12 seconds =========================

This default pytest traceback shows that the two test methods
share the same ``self.db`` instance which was our intention

@ -203,7 +203,7 @@ on the class like in the previous example.

Running this test module ...::

$ pytest -q test_unittest_cleandir.py
.
. [100%]
1 passed in 0.12 seconds

... gives us one passed test because the ``initdir`` fixture function
@ -189,7 +189,6 @@ in your code and pytest automatically disables its output capture for that test:

for test output occurring after you exit the interactive PDB_ tracing session
and continue with the regular test run.

.. _durations:

Profiling test execution duration
@ -21,20 +21,20 @@ and displays them at the end of the session::

Running pytest now produces this output::

$ pytest test_show_warnings.py
======= test session starts ========
=========================== test session starts ============================
platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
rootdir: $REGENDOC_TMPDIR, inifile:
collected 1 item

test_show_warnings.py .
test_show_warnings.py . [100%]

======= warnings summary ========
============================= warnings summary =============================
test_show_warnings.py::test_one
$REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2
warnings.warn(UserWarning("api v1, should use functions from v2"))

-- Docs: http://doc.pytest.org/en/latest/warnings.html
======= 1 passed, 1 warnings in 0.12 seconds ========
=================== 1 passed, 1 warnings in 0.12 seconds ===================

Pytest by default catches all warnings except for ``DeprecationWarning`` and ``PendingDeprecationWarning``.

@ -42,9 +42,9 @@ The ``-W`` flag can be passed to control which warnings will be displayed or eve

them into errors::

$ pytest -q test_show_warnings.py -W error::UserWarning
F
======= FAILURES ========
_______ test_one ________
F [100%]
================================= FAILURES =================================
_________________________________ test_one _________________________________

def test_one():
>       assert api_v1() == 1
@ -168,7 +168,20 @@ which works in a similar manner to :ref:`raises <assertraises>`::

with pytest.warns(UserWarning):
    warnings.warn("my warning", UserWarning)

The test will fail if the warning in question is not raised.
The test will fail if the warning in question is not raised. The keyword
argument ``match`` can be used to assert that the warning matches a text or regex::

>>> with warns(UserWarning, match='must be 0 or None'):
...     warnings.warn("value must be 0 or None", UserWarning)

>>> with warns(UserWarning, match=r'must be \d+$'):
...     warnings.warn("value must be 42", UserWarning)

>>> with warns(UserWarning, match=r'must be \d+$'):
...     warnings.warn("this is not here", UserWarning)
Traceback (most recent call last):
...
Failed: DID NOT WARN. No warnings of type ...UserWarning... was emitted...

You can also call ``pytest.warns`` on a function or code string::
@ -452,7 +452,7 @@ hook wrappers and passes the same arguments as to the regular hooks.

At the yield point of the hook wrapper pytest will execute the next hook
implementations and return their result to the yield point in the form of
a :py:class:`CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` instance which encapsulates a result or
a :py:class:`Result <pluggy._Result>` instance which encapsulates a result or
exception info. The yield point itself will thus typically not raise
exceptions (unless there are bugs).
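To make the mechanics concrete, here is a minimal sketch of a hook wrapper; the hook name is a real pytest hook, while the inspection at the end is only an illustration::

    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_collection_modifyitems(config, items):
        # code before the yield runs before the regular (non-wrapper) hooks
        outcome = yield           # a pluggy._Result wrapping the inner calls
        outcome.get_result()      # re-raises if an exception was captured
        # a wrapper may inspect the outcome, but shall not modify the result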
@ -517,7 +517,7 @@ Here is the order of execution:

Plugin1).

4. Plugin3's pytest_collection_modifyitems then executing the code after the yield
point. The yield receives a :py:class:`CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` instance which encapsulates
point. The yield receives a :py:class:`Result <pluggy._Result>` instance which encapsulates
the result from calling the non-wrappers. Wrappers shall not modify the result.

It's possible to use ``tryfirst`` and ``trylast`` also in conjunction with
@ -714,7 +714,7 @@ Reference of objects involved in hooks

:members:
:inherited-members:

.. autoclass:: _pytest.vendored_packages.pluggy._CallOutcome()
.. autoclass:: pluggy._Result
    :members:

.. autofunction:: _pytest.config.get_plugin_manager()

@ -724,7 +724,7 @@ Reference of objects involved in hooks

:undoc-members:
:show-inheritance:

.. autoclass:: _pytest.vendored_packages.pluggy.PluginManager()
.. autoclass:: pluggy.PluginManager()
    :members:

.. currentmodule:: _pytest.pytester
@ -7,7 +7,7 @@ pytest: unit and functional testing with Python.

# else we are imported

from _pytest.config import (
    main, UsageError, _preloadplugins, cmdline,
    main, UsageError, cmdline,
    hookspec, hookimpl
)
from _pytest.fixtures import fixture, yield_fixture

@ -74,5 +74,4 @@ if __name__ == '__main__':

else:

    from _pytest.compat import _setup_collect_fakemodule
    _preloadplugins()  # to populate pytest.* namespace so help(pytest) works
    _setup_collect_fakemodule()
setup.py
@ -16,7 +16,7 @@ classifiers = [

    'Topic :: Utilities',
] + [
    ('Programming Language :: Python :: %s' % x)
    for x in '2 2.6 2.7 3 3.3 3.4 3.5 3.6'.split()
    for x in '2 2.7 3 3.4 3.5 3.6'.split()
]

with open('README.rst') as fd:

@ -43,17 +43,25 @@ def has_environment_marker_support():

def main():
    install_requires = ['py>=1.4.33', 'setuptools']  # pluggy is vendored in _pytest.vendored_packages
    extras_require = {}
    install_requires = [
        'py>=1.5.0',
        'six>=1.10.0',
        'setuptools',
        'attrs>=17.2.0',
    ]
    # if _PYTEST_SETUP_SKIP_PLUGGY_DEP is set, skip installing pluggy;
    # used by tox.ini to test with pluggy master
    if '_PYTEST_SETUP_SKIP_PLUGGY_DEP' not in os.environ:
        install_requires.append('pluggy>=0.5,<0.7')
    if has_environment_marker_support():
        extras_require[':python_version=="2.6"'] = ['argparse', 'ordereddict']
        extras_require[':python_version<"3.0"'] = ['funcsigs']
        extras_require[':sys_platform=="win32"'] = ['colorama']
    else:
        if sys.version_info < (2, 7):
            install_requires.append('argparse')
            install_requires.append('ordereddict')
        if sys.platform == 'win32':
            install_requires.append('colorama')
        if sys.version_info < (3, 0):
            install_requires.append('funcsigs')

    setup(
        name='pytest',

@ -65,17 +73,20 @@ def main():

        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
        author='Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others',
        entry_points={'console_scripts':
            ['pytest=pytest:main', 'py.test=pytest:main']},
        author=(
            'Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, '
            'Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others'),
        entry_points={'console_scripts': [
            'pytest=pytest:main', 'py.test=pytest:main']},
        classifiers=classifiers,
        keywords="test unittest",
        cmdclass={'test': PyTest},
        # the following should be enabled for release
        setup_requires=['setuptools-scm'],
        python_requires='>=2.7, !=3.0.*, !=3.1.*, !=3.2.*, !=3.3.*',
        install_requires=install_requires,
        extras_require=extras_require,
        packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.vendored_packages'],
        packages=['_pytest', '_pytest.assertion', '_pytest._code'],
        py_modules=['pytest'],
        zip_safe=False,
    )

@ -83,10 +94,13 @@ def main():

class PyTest(Command):
    user_options = []

    def initialize_options(self):
        pass

    def finalize_options(self):
        pass

    def run(self):
        import subprocess
        PPATH = [x for x in os.environ.get('PYTHONPATH', '').split(':') if x]
@ -4,10 +4,9 @@ Invoke tasks to help with pytest development and release process.

import invoke

from . import generate, vendoring
from . import generate

ns = invoke.Collection(
    generate,
    vendoring
)
@ -1,23 +0,0 @@

from __future__ import absolute_import, print_function
import py
import invoke

VENDOR_TARGET = py.path.local("_pytest/vendored_packages")
GOOD_FILES = 'README.md', '__init__.py'

@invoke.task()
def remove_libs(ctx):
    print("removing vendored libs")
    for path in VENDOR_TARGET.listdir():
        if path.basename not in GOOD_FILES:
            print(" ", path)
            path.remove()

@invoke.task(pre=[remove_libs])
def update_libs(ctx):
    print("installing libs")
    ctx.run("pip install -t {target} pluggy".format(target=VENDOR_TARGET))
    ctx.run("git add {target}".format(target=VENDOR_TARGET))
    print("Please commit to finish the update after running the tests:")
    print()
    print(' git commit -am "Updated vendored libs"')
@ -344,7 +344,7 @@ class TestGeneralUsage(object):

Importing a module that didn't exist, even if the ImportError was
gracefully handled, would make our test crash.

Use recwarn here to silence this warning in Python 2.6 and 2.7:
Use recwarn here to silence this warning in Python 2.7:
ImportWarning: Not importing directory '...\not_a_package': missing __init__.py
"""
testdir.mkdir('not_a_package')

@ -630,10 +630,10 @@ class TestInvocationVariants(object):

testdir.chdir()
assert result.ret == 0
result.stdout.fnmatch_lines([
    "*test_hello.py::test_hello*PASSED",
    "*test_hello.py::test_other*PASSED",
    "*test_world.py::test_world*PASSED",
    "*test_world.py::test_other*PASSED",
    "*test_hello.py::test_hello*PASSED*",
    "*test_hello.py::test_other*PASSED*",
    "*test_world.py::test_world*PASSED*",
    "*test_world.py::test_other*PASSED*",
    "*4 passed*"
])

@ -641,7 +641,7 @@ class TestInvocationVariants(object):

result = testdir.runpytest("--pyargs", "-v", "ns_pkg.world.test_world::test_other")
assert result.ret == 0
result.stdout.fnmatch_lines([
    "*test_world.py::test_other*PASSED",
    "*test_world.py::test_other*PASSED*",
    "*1 passed*"
])
@ -1,7 +1,6 @@

# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function

import sys
import operator
import _pytest
import py

@ -345,10 +344,7 @@ def test_excinfo_no_sourcecode():

except ValueError:
    excinfo = _pytest._code.ExceptionInfo()
s = str(excinfo.traceback[-1])
if py.std.sys.version_info < (2, 5):
    assert s == " File '<string>':1 in ?\n ???\n"
else:
    assert s == " File '<string>':1 in <module>\n ???\n"
assert s == " File '<string>':1 in <module>\n ???\n"

def test_excinfo_no_python_sourcecode(tmpdir):

@ -1244,9 +1240,6 @@ def test_no_recursion_index_on_recursion_error():

except:  # noqa
    from _pytest._code.code import ExceptionInfo
    exc_info = ExceptionInfo()
    if sys.version_info[:2] == (2, 6):
        assert "'RecursionDepthError' object has no attribute '___" in str(exc_info.getrepr())
    else:
        assert 'maximum recursion' in str(exc_info.getrepr())
    assert 'maximum recursion' in str(exc_info.getrepr())
else:
    assert 0
@ -273,7 +273,6 @@ class TestSourceParsingAndCompiling(object):

assert getstatement(2, source).lines == source.lines[2:3]
assert getstatement(3, source).lines == source.lines[3:4]

@pytest.mark.skipif("sys.version_info < (2,6)")
def test_getstatementrange_out_of_bounds_py3(self):
    source = Source("if xxx:\n from .collections import something")
    r = source.getstatementrange(1)

@ -283,7 +282,6 @@ class TestSourceParsingAndCompiling(object):

source = Source(":")
pytest.raises(SyntaxError, lambda: source.getstatementrange(0))

@pytest.mark.skipif("sys.version_info < (2,6)")
def test_compile_to_ast(self):
    import ast
    source = Source("x = 4")
@ -82,3 +82,33 @@ def test_resultlog_is_deprecated(testdir):

'*--result-log is deprecated and scheduled for removal in pytest 4.0*',
'*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*',
])

@pytest.mark.filterwarnings('always:Metafunc.addcall is deprecated')
def test_metafunc_addcall_deprecated(testdir):
    testdir.makepyfile("""
        def pytest_generate_tests(metafunc):
            metafunc.addcall({'i': 1})
            metafunc.addcall({'i': 2})
        def test_func(i):
            pass
    """)
    res = testdir.runpytest('-s')
    assert res.ret == 0
    res.stdout.fnmatch_lines([
        "*Metafunc.addcall is deprecated*",
        "*2 passed, 2 warnings*",
    ])

def test_pytest_catchlog_deprecated(testdir):
    testdir.makepyfile("""
        def test_func(pytestconfig):
            pytestconfig.pluginmanager.register(None, 'pytest_catchlog')
    """)
    res = testdir.runpytest()
    assert res.ret == 0
    res.stdout.fnmatch_lines([
        "*pytest-catchlog plugin has been merged into the core*",
        "*1 passed, 1 warnings*",
    ])
@@ -0,0 +1,70 @@
+# -*- coding: utf-8 -*-
+import logging
+
+
+logger = logging.getLogger(__name__)
+sublogger = logging.getLogger(__name__ + '.baz')
+
+
+def test_fixture_help(testdir):
+    result = testdir.runpytest('--fixtures')
+    result.stdout.fnmatch_lines(['*caplog*'])
+
+
+def test_change_level(caplog):
+    caplog.set_level(logging.INFO)
+    logger.debug('handler DEBUG level')
+    logger.info('handler INFO level')
+
+    caplog.set_level(logging.CRITICAL, logger=sublogger.name)
+    sublogger.warning('logger WARNING level')
+    sublogger.critical('logger CRITICAL level')
+
+    assert 'DEBUG' not in caplog.text
+    assert 'INFO' in caplog.text
+    assert 'WARNING' not in caplog.text
+    assert 'CRITICAL' in caplog.text
+
+
+def test_with_statement(caplog):
+    with caplog.at_level(logging.INFO):
+        logger.debug('handler DEBUG level')
+        logger.info('handler INFO level')
+
+        with caplog.at_level(logging.CRITICAL, logger=sublogger.name):
+            sublogger.warning('logger WARNING level')
+            sublogger.critical('logger CRITICAL level')
+
+    assert 'DEBUG' not in caplog.text
+    assert 'INFO' in caplog.text
+    assert 'WARNING' not in caplog.text
+    assert 'CRITICAL' in caplog.text
+
+
+def test_log_access(caplog):
+    logger.info('boo %s', 'arg')
+    assert caplog.records[0].levelname == 'INFO'
+    assert caplog.records[0].msg == 'boo %s'
+    assert 'boo arg' in caplog.text
+
+
+def test_record_tuples(caplog):
+    logger.info('boo %s', 'arg')
+
+    assert caplog.record_tuples == [
+        (__name__, logging.INFO, 'boo arg'),
+    ]
+
+
+def test_unicode(caplog):
+    logger.info(u'bū')
+    assert caplog.records[0].levelname == 'INFO'
+    assert caplog.records[0].msg == u'bū'
+    assert u'bū' in caplog.text
+
+
+def test_clear(caplog):
+    logger.info(u'bū')
+    assert len(caplog.records)
+    caplog.clear()
+    assert not len(caplog.records)
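Note: the records captured above are plain stdlib `logging.LogRecord` objects, so the full stdlib API is available on them. A small sketch under that assumption (the test name is hypothetical):

```python
import logging


def test_only_warnings_recorded(caplog):
    logging.getLogger(__name__).warning('watch out: %s', 'disk full')
    # LogRecord exposes the stdlib API: levelno, getMessage(), name, ...
    warnings = [r for r in caplog.records if r.levelno >= logging.WARNING]
    assert len(warnings) == 1
    assert warnings[0].getMessage() == 'watch out: disk full'
```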
@@ -0,0 +1,398 @@
+# -*- coding: utf-8 -*-
+import os
+import pytest
+
+
+def test_nothing_logged(testdir):
+    testdir.makepyfile('''
+        import sys
+
+        def test_foo():
+            sys.stdout.write('text going to stdout')
+            sys.stderr.write('text going to stderr')
+            assert False
+    ''')
+    result = testdir.runpytest()
+    assert result.ret == 1
+    result.stdout.fnmatch_lines(['*- Captured stdout call -*',
+                                 'text going to stdout'])
+    result.stdout.fnmatch_lines(['*- Captured stderr call -*',
+                                 'text going to stderr'])
+    with pytest.raises(pytest.fail.Exception):
+        result.stdout.fnmatch_lines(['*- Captured *log call -*'])
+
+
+def test_messages_logged(testdir):
+    testdir.makepyfile('''
+        import sys
+        import logging
+
+        logger = logging.getLogger(__name__)
+
+        def test_foo():
+            sys.stdout.write('text going to stdout')
+            sys.stderr.write('text going to stderr')
+            logger.info('text going to logger')
+            assert False
+    ''')
+    result = testdir.runpytest()
+    assert result.ret == 1
+    result.stdout.fnmatch_lines(['*- Captured *log call -*',
+                                 '*text going to logger*'])
+    result.stdout.fnmatch_lines(['*- Captured stdout call -*',
+                                 'text going to stdout'])
+    result.stdout.fnmatch_lines(['*- Captured stderr call -*',
+                                 'text going to stderr'])
+
+
+def test_setup_logging(testdir):
+    testdir.makepyfile('''
+        import logging
+
+        logger = logging.getLogger(__name__)
+
+        def setup_function(function):
+            logger.info('text going to logger from setup')
+
+        def test_foo():
+            logger.info('text going to logger from call')
+            assert False
+    ''')
+    result = testdir.runpytest()
+    assert result.ret == 1
+    result.stdout.fnmatch_lines(['*- Captured *log setup -*',
+                                 '*text going to logger from setup*',
+                                 '*- Captured *log call -*',
+                                 '*text going to logger from call*'])
+
+
+def test_teardown_logging(testdir):
+    testdir.makepyfile('''
+        import logging
+
+        logger = logging.getLogger(__name__)
+
+        def test_foo():
+            logger.info('text going to logger from call')
+
+        def teardown_function(function):
+            logger.info('text going to logger from teardown')
+            assert False
+    ''')
+    result = testdir.runpytest()
+    assert result.ret == 1
+    result.stdout.fnmatch_lines(['*- Captured *log call -*',
+                                 '*text going to logger from call*',
+                                 '*- Captured *log teardown -*',
+                                 '*text going to logger from teardown*'])
+
+
+def test_disable_log_capturing(testdir):
+    testdir.makepyfile('''
+        import sys
+        import logging
+
+        logger = logging.getLogger(__name__)
+
+        def test_foo():
+            sys.stdout.write('text going to stdout')
+            logger.warning('catch me if you can!')
+            sys.stderr.write('text going to stderr')
+            assert False
+    ''')
+    result = testdir.runpytest('--no-print-logs')
+    print(result.stdout)
+    assert result.ret == 1
+    result.stdout.fnmatch_lines(['*- Captured stdout call -*',
+                                 'text going to stdout'])
+    result.stdout.fnmatch_lines(['*- Captured stderr call -*',
+                                 'text going to stderr'])
+    with pytest.raises(pytest.fail.Exception):
+        result.stdout.fnmatch_lines(['*- Captured *log call -*'])
+
+
+def test_disable_log_capturing_ini(testdir):
+    testdir.makeini(
+        '''
+        [pytest]
+        log_print=False
+        '''
+    )
+    testdir.makepyfile('''
+        import sys
+        import logging
+
+        logger = logging.getLogger(__name__)
+
+        def test_foo():
+            sys.stdout.write('text going to stdout')
+            logger.warning('catch me if you can!')
+            sys.stderr.write('text going to stderr')
+            assert False
+    ''')
+    result = testdir.runpytest()
+    print(result.stdout)
+    assert result.ret == 1
+    result.stdout.fnmatch_lines(['*- Captured stdout call -*',
+                                 'text going to stdout'])
+    result.stdout.fnmatch_lines(['*- Captured stderr call -*',
+                                 'text going to stderr'])
+    with pytest.raises(pytest.fail.Exception):
+        result.stdout.fnmatch_lines(['*- Captured *log call -*'])
+
+
+def test_log_cli_default_level(testdir):
+    # Default log file level
+    testdir.makepyfile('''
+        import pytest
+        import logging
+        def test_log_cli(request):
+            plugin = request.config.pluginmanager.getplugin('logging-plugin')
+            assert plugin.log_cli_handler.level == logging.WARNING
+            logging.getLogger('catchlog').info("This log message won't be shown")
+            logging.getLogger('catchlog').warning("This log message will be shown")
+            print('PASSED')
+    ''')
+
+    result = testdir.runpytest('-s')
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        'test_log_cli_default_level.py PASSED',
+    ])
+    result.stderr.fnmatch_lines([
+        "* This log message will be shown"
+    ])
+    for line in result.errlines:
+        try:
+            assert "This log message won't be shown" in line
+            pytest.fail("A log message was shown and it shouldn't have been")
+        except AssertionError:
+            continue
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+
+
+def test_log_cli_level(testdir):
+    # Default log file level
+    testdir.makepyfile('''
+        import pytest
+        import logging
+        def test_log_cli(request):
+            plugin = request.config.pluginmanager.getplugin('logging-plugin')
+            assert plugin.log_cli_handler.level == logging.INFO
+            logging.getLogger('catchlog').debug("This log message won't be shown")
+            logging.getLogger('catchlog').info("This log message will be shown")
+            print('PASSED')
+    ''')
+
+    result = testdir.runpytest('-s', '--log-cli-level=INFO')
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        'test_log_cli_level.py PASSED',
+    ])
+    result.stderr.fnmatch_lines([
+        "* This log message will be shown"
+    ])
+    for line in result.errlines:
+        try:
+            assert "This log message won't be shown" in line
+            pytest.fail("A log message was shown and it shouldn't have been")
+        except AssertionError:
+            continue
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+
+    result = testdir.runpytest('-s', '--log-level=INFO')
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        'test_log_cli_level.py PASSED',
+    ])
+    result.stderr.fnmatch_lines([
+        "* This log message will be shown"
+    ])
+    for line in result.errlines:
+        try:
+            assert "This log message won't be shown" in line
+            pytest.fail("A log message was shown and it shouldn't have been")
+        except AssertionError:
+            continue
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+
+
+def test_log_cli_ini_level(testdir):
+    testdir.makeini(
+        """
+        [pytest]
+        log_cli_level = INFO
+        """)
+    testdir.makepyfile('''
+        import pytest
+        import logging
+        def test_log_cli(request):
+            plugin = request.config.pluginmanager.getplugin('logging-plugin')
+            assert plugin.log_cli_handler.level == logging.INFO
+            logging.getLogger('catchlog').debug("This log message won't be shown")
+            logging.getLogger('catchlog').info("This log message will be shown")
+            print('PASSED')
+    ''')
+
+    result = testdir.runpytest('-s')
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        'test_log_cli_ini_level.py PASSED',
+    ])
+    result.stderr.fnmatch_lines([
+        "* This log message will be shown"
+    ])
+    for line in result.errlines:
+        try:
+            assert "This log message won't be shown" in line
+            pytest.fail("A log message was shown and it shouldn't have been")
+        except AssertionError:
+            continue
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+
+
+def test_log_file_cli(testdir):
+    # Default log file level
+    testdir.makepyfile('''
+        import pytest
+        import logging
+        def test_log_file(request):
+            plugin = request.config.pluginmanager.getplugin('logging-plugin')
+            assert plugin.log_file_handler.level == logging.WARNING
+            logging.getLogger('catchlog').info("This log message won't be shown")
+            logging.getLogger('catchlog').warning("This log message will be shown")
+            print('PASSED')
+    ''')
+
+    log_file = testdir.tmpdir.join('pytest.log').strpath
+
+    result = testdir.runpytest('-s', '--log-file={0}'.format(log_file))
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        'test_log_file_cli.py PASSED',
+    ])
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+    assert os.path.isfile(log_file)
+    with open(log_file) as rfh:
+        contents = rfh.read()
+        assert "This log message will be shown" in contents
+        assert "This log message won't be shown" not in contents
+
+
+def test_log_file_cli_level(testdir):
+    # Default log file level
+    testdir.makepyfile('''
+        import pytest
+        import logging
+        def test_log_file(request):
+            plugin = request.config.pluginmanager.getplugin('logging-plugin')
+            assert plugin.log_file_handler.level == logging.INFO
+            logging.getLogger('catchlog').debug("This log message won't be shown")
+            logging.getLogger('catchlog').info("This log message will be shown")
+            print('PASSED')
+    ''')
+
+    log_file = testdir.tmpdir.join('pytest.log').strpath
+
+    result = testdir.runpytest('-s',
+                               '--log-file={0}'.format(log_file),
+                               '--log-file-level=INFO')
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        'test_log_file_cli_level.py PASSED',
+    ])
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+    assert os.path.isfile(log_file)
+    with open(log_file) as rfh:
+        contents = rfh.read()
+        assert "This log message will be shown" in contents
+        assert "This log message won't be shown" not in contents
+
+
+def test_log_file_ini(testdir):
+    log_file = testdir.tmpdir.join('pytest.log').strpath
+
+    testdir.makeini(
+        """
+        [pytest]
+        log_file={0}
+        """.format(log_file))
+    testdir.makepyfile('''
+        import pytest
+        import logging
+        def test_log_file(request):
+            plugin = request.config.pluginmanager.getplugin('logging-plugin')
+            assert plugin.log_file_handler.level == logging.WARNING
+            logging.getLogger('catchlog').info("This log message won't be shown")
+            logging.getLogger('catchlog').warning("This log message will be shown")
+            print('PASSED')
+    ''')
+
+    result = testdir.runpytest('-s')
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        'test_log_file_ini.py PASSED',
+    ])
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+    assert os.path.isfile(log_file)
+    with open(log_file) as rfh:
+        contents = rfh.read()
+        assert "This log message will be shown" in contents
+        assert "This log message won't be shown" not in contents
+
+
+def test_log_file_ini_level(testdir):
+    log_file = testdir.tmpdir.join('pytest.log').strpath
+
+    testdir.makeini(
+        """
+        [pytest]
+        log_file={0}
+        log_file_level = INFO
+        """.format(log_file))
+    testdir.makepyfile('''
+        import pytest
+        import logging
+        def test_log_file(request):
+            plugin = request.config.pluginmanager.getplugin('logging-plugin')
+            assert plugin.log_file_handler.level == logging.INFO
+            logging.getLogger('catchlog').debug("This log message won't be shown")
+            logging.getLogger('catchlog').info("This log message will be shown")
+            print('PASSED')
+    ''')
+
+    result = testdir.runpytest('-s')
+
+    # fnmatch_lines does an assertion internally
+    result.stdout.fnmatch_lines([
+        'test_log_file_ini_level.py PASSED',
+    ])
+
+    # make sure that we get a '0' exit code for the testsuite
+    assert result.ret == 0
+    assert os.path.isfile(log_file)
+    with open(log_file) as rfh:
+        contents = rfh.read()
+        assert "This log message will be shown" in contents
+        assert "This log message won't be shown" not in contents
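Note: the command-line flags exercised above (`--log-cli-level`, `--log-file`, `--log-file-level`) have ini-file counterparts (`log_cli_level`, `log_file`, `log_file_level`), each used individually in these tests. A combined sketch in the same pytester style; the test name and values are illustrative, not part of this diff:

```python
def test_logging_options_from_ini(testdir):
    # Mirror of the makeini() calls above, combining the new options.
    testdir.makeini("""
        [pytest]
        log_cli_level = INFO
        log_file = pytest.log
        log_file_level = DEBUG
    """)
    testdir.makepyfile("""
        import logging

        def test_foo():
            logging.getLogger(__name__).info('goes to both CLI and file')
    """)
    result = testdir.runpytest()
    assert result.ret == 0
```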
@@ -24,11 +24,8 @@ class MyDocTestRunner(doctest.DocTestRunner):
 class TestApprox(object):
 
     def test_repr_string(self):
-        # for some reason in Python 2.6 it is not displaying the tolerance representation correctly
         plus_minus = u'\u00b1' if sys.version_info[0] > 2 else u'+-'
         tol1, tol2, infr = '1.0e-06', '2.0e-06', 'inf'
-        if sys.version_info[:2] == (2, 6):
-            tol1, tol2, infr = '???', '???', '???'
         assert repr(approx(1.0)) == '1.0 {pm} {tol1}'.format(pm=plus_minus, tol1=tol1)
         assert repr(approx([1.0, 2.0])) == 'approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])'.format(
             pm=plus_minus, tol1=tol1, tol2=tol2)
@@ -375,9 +372,6 @@ class TestApprox(object):
                 assert [3] == [pytest.approx(4)]
         """)
         expected = '4.0e-06'
-        # for some reason in Python 2.6 it is not displaying the tolerance representation correctly
-        if sys.version_info[:2] == (2, 6):
-            expected = '???'
         result = testdir.runpytest()
         result.stdout.fnmatch_lines([
             '*At index 0 diff: 3 != 4 * {0}'.format(expected),
@@ -164,17 +164,10 @@ class TestClass(object):
                 assert fix == 1
         """)
         result = testdir.runpytest()
-        if sys.version_info < (2, 7):
-            # in 2.6, the code to handle static methods doesn't work
-            result.stdout.fnmatch_lines([
-                "*collected 0 items*",
-                "*cannot collect static method*",
-            ])
-        else:
-            result.stdout.fnmatch_lines([
-                "*collected 2 items*",
-                "*2 passed in*",
-            ])
+        result.stdout.fnmatch_lines([
+            "*collected 2 items*",
+            "*2 passed in*",
+        ])
 
     def test_setup_teardown_class_as_classmethod(self, testdir):
         testdir.makepyfile(test_mod1="""
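Note: the dropped branch above worked around Python 2.6, where static methods could not be collected. On the interpreters pytest still supports, a class like this (hypothetical) collects and passes two items, matching the new expectation:

```python
# Sketch: static methods are now collected like ordinary test methods,
# so this module yields "collected 2 items" and "2 passed".
class TestStatic(object):
    @staticmethod
    def test_static_one():
        assert True

    @staticmethod
    def test_static_two():
        assert True
```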
@@ -819,10 +812,12 @@ class TestConftestCustomization(object):
     def test_customized_pymakemodule_issue205_subdir(self, testdir):
         b = testdir.mkdir("a").mkdir("b")
         b.join("conftest.py").write(_pytest._code.Source("""
-            def pytest_pycollect_makemodule(__multicall__):
-                mod = __multicall__.execute()
+            import pytest
+            @pytest.hookimpl(hookwrapper=True)
+            def pytest_pycollect_makemodule():
+                outcome = yield
+                mod = outcome.get_result()
                 mod.obj.hello = "world"
-                return mod
         """))
         b.join("test_module.py").write(_pytest._code.Source("""
             def test_hello():
@@ -839,7 +834,7 @@ class TestConftestCustomization(object):
             def pytest_pycollect_makeitem():
                 outcome = yield
                 if outcome.excinfo is None:
-                    result = outcome.result
+                    result = outcome.get_result()
                     if result:
                         for func in result:
                             func._some123 = "world"
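Note: both hunks above follow the standard migration from the removed `__multicall__` protocol to hookwrappers. The general shape of the pattern, sketched for one collection hook (the post-processing body is illustrative):

```python
import pytest


# A hookwrapper runs up to `yield`, lets every other implementation of the
# hook execute, then inspects or mutates the outcome instead of calling
# __multicall__.execute() and returning a value itself.
@pytest.hookimpl(hookwrapper=True)
def pytest_pycollect_makeitem(collector, name, obj):
    outcome = yield
    item = outcome.get_result()  # re-raises if an inner hook raised
    # post-process `item` here; a hookwrapper does not return a result
```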
@@ -2,7 +2,6 @@ from textwrap import dedent
 
 import _pytest._code
 import pytest
-import sys
 from _pytest.pytester import get_public_names
 from _pytest.fixtures import FixtureLookupError
 from _pytest import fixtures
@@ -34,9 +33,6 @@ def test_getfuncargnames():
             pass
 
     assert fixtures.getfuncargnames(A().f) == ('arg1',)
-    if sys.version_info < (3, 0):
-        assert fixtures.getfuncargnames(A.f) == ('arg1',)
-
     assert fixtures.getfuncargnames(A.static, cls=A) == ('arg1', 'arg2')
 
 
@@ -2123,6 +2119,10 @@ class TestFixtureMarker(object):
         assert values == [1, 1, 2, 2]
 
     def test_module_parametrized_ordering(self, testdir):
+        testdir.makeini("""
+            [pytest]
+            console_output_style=classic
+        """)
         testdir.makeconftest("""
             import pytest
 
@@ -2169,6 +2169,10 @@ class TestFixtureMarker(object):
         """)
 
     def test_class_ordering(self, testdir):
+        testdir.makeini("""
+            [pytest]
+            console_output_style=classic
+        """)
        testdir.makeconftest("""
             import pytest
 
@@ -2826,7 +2830,7 @@ class TestShowFixtures(object):
             import pytest
             class TestClass:
                 @pytest.fixture
-                def fixture1():
+                def fixture1(self):
                     """line1
                     line2
                     indented line
@@ -3125,3 +3129,43 @@ class TestParameterizedSubRequest(object):
             E*{1}:5
             *1 failed*
             """.format(fixfile.strpath, testfile.basename))
+
+
+def test_pytest_fixture_setup_and_post_finalizer_hook(testdir):
+    testdir.makeconftest("""
+        from __future__ import print_function
+        def pytest_fixture_setup(fixturedef, request):
+            print('ROOT setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
+        def pytest_fixture_post_finalizer(fixturedef, request):
+            print('ROOT finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
+    """)
+    testdir.makepyfile(**{
+        'tests/conftest.py': """
+            from __future__ import print_function
+            def pytest_fixture_setup(fixturedef, request):
+                print('TESTS setup hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
+            def pytest_fixture_post_finalizer(fixturedef, request):
+                print('TESTS finalizer hook called for {0} from {1}'.format(fixturedef.argname, request.node.name))
+        """,
+        'tests/test_hooks.py': """
+            from __future__ import print_function
+            import pytest
+
+            @pytest.fixture()
+            def my_fixture():
+                return 'some'
+
+            def test_func(my_fixture):
+                print('TEST test_func')
+                assert my_fixture == 'some'
+        """
+    })
+    result = testdir.runpytest("-s")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*TESTS setup hook called for my_fixture from test_func*",
+        "*ROOT setup hook called for my_fixture from test_func*",
+        "*TEST test_func*",
+        "*TESTS finalizer hook called for my_fixture from test_func*",
+        "*ROOT finalizer hook called for my_fixture from test_func*",
+    ])
@@ -158,7 +158,7 @@ class TestMetafunc(object):
             pass
         metafunc = self.Metafunc(func)
         metafunc.parametrize("y", [])
-        assert 'skip' in metafunc._calls[0].keywords
+        assert 'skip' == metafunc._calls[0].marks[0].name
 
     def test_parametrize_with_userobjects(self):
         def func(x, y):
@@ -960,6 +960,10 @@ class TestMetafuncFunctional(object):
         ])
 
     def test_parametrize_with_ids(self, testdir):
+        testdir.makeini("""
+            [pytest]
+            console_output_style=classic
+        """)
         testdir.makepyfile("""
             import pytest
             def pytest_generate_tests(metafunc):
@@ -1005,9 +1009,9 @@ class TestMetafuncFunctional(object):
         result = testdir.runpytest("-v")
         assert result.ret == 1
         result.stdout.fnmatch_lines_random([
-            "*test_function*basic*PASSED",
-            "*test_function*1-1*PASSED",
-            "*test_function*advanced*FAILED",
+            "*test_function*basic*PASSED*",
+            "*test_function*1-1*PASSED*",
+            "*test_function*advanced*FAILED*",
         ])
 
     def test_fixture_parametrized_empty_ids(self, testdir):
@@ -1062,8 +1066,8 @@ class TestMetafuncFunctional(object):
         result = testdir.runpytest("-v")
         assert result.ret == 1
         result.stdout.fnmatch_lines_random([
-            "*test_function*a0*PASSED",
-            "*test_function*a1*FAILED"
+            "*test_function*a0*PASSED*",
+            "*test_function*a1*FAILED*"
         ])
 
     @pytest.mark.parametrize(("scope", "length"),
@@ -238,6 +238,6 @@ def test_show_fixtures_and_execute_test(testdir):
 
     result.stdout.fnmatch_lines([
         '*SETUP F arg*',
-        '*test_arg (fixtures used: arg)F',
+        '*test_arg (fixtures used: arg)F*',
         '*TEARDOWN F arg*',
     ])
@@ -0,0 +1,22 @@
+import pytest
+
+from _pytest.python import PyCollector
+
+
+class PyCollectorMock(PyCollector):
+    """evil hack"""
+
+    def __init__(self):
+        self.called = False
+
+    def _makeitem(self, *k):
+        """hack to disable the actual behaviour"""
+        self.called = True
+
+
+def test_pycollector_makeitem_is_deprecated():
+
+    collector = PyCollectorMock()
+    with pytest.deprecated_call():
+        collector.makeitem('foo', 'bar')
+    assert collector.called
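Note: `pytest.deprecated_call()` as used above is general-purpose, not tied to internal collectors. A self-contained sketch for user code (`old_api` is a made-up example function):

```python
import warnings

import pytest


def old_api():
    # stand-in for any function that emits a DeprecationWarning
    warnings.warn('old_api() is deprecated, use new_api()', DeprecationWarning)
    return 42


def test_old_api_warns():
    # deprecated_call() fails the test unless a DeprecationWarning (or
    # PendingDeprecationWarning) is raised inside the block
    with pytest.deprecated_call():
        assert old_api() == 42
```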
@@ -838,7 +838,7 @@ def test_traceback_failure(testdir):
     """)
     result = testdir.runpytest(p1, "--tb=long")
     result.stdout.fnmatch_lines([
-        "*test_traceback_failure.py F",
+        "*test_traceback_failure.py F*",
         "====* FAILURES *====",
         "____*____",
         "",
@@ -858,7 +858,7 @@ def test_traceback_failure(testdir):
 
     result = testdir.runpytest(p1)  # "auto"
     result.stdout.fnmatch_lines([
-        "*test_traceback_failure.py F",
+        "*test_traceback_failure.py F*",
         "====* FAILURES *====",
         "____*____",
         "",
Some files were not shown because too many files have changed in this diff.