Merge remote-tracking branch 'upstream/features' into allow-none-as-parametrized-test-id

commit 7885e43b78
AUTHORS
4
AUTHORS
|
@@ -52,6 +52,7 @@ Jason R. Coombs
 Joshua Bronson
 Jurko Gospodnetić
 Katarzyna Jachim
+Kale Kundert
 Kevin Cox
 Lee Kamentsky
 Lukas Bednar
@@ -62,9 +63,11 @@ Mark Abramowitz
 Markus Unterwaditzer
 Martijn Faassen
 Matt Bachmann
+Matt Williams
 Michael Aquilina
 Michael Birtwell
 Michael Droettboom
+Mike Lundy
 Nicolas Delaby
 Pieter Mulder
 Piotr Banaszkiewicz
@@ -75,6 +78,7 @@ Ronny Pfannschmidt
 Ross Lawley
 Ryan Wooden
 Samuele Pedroni
+Tareq Alayan
 Tom Viner
 Trevor Bekolay
 Wouter van Ackooy

CHANGELOG

@@ -1,3 +1,45 @@
+2.10.0.dev1
+===========
+
+**New Features**
+
+* New ``doctest_namespace`` fixture for injecting names into the
+  namespace in which your doctests run.
+  Thanks `@milliams`_ for the complete PR (`#1428`_).
+
+* New ``name`` argument to ``pytest.fixture`` mark, which allows a custom name
+  for a fixture (to solve the funcarg-shadowing-fixture problem).
+  Thanks `@novas0x2a`_ for the complete PR (`#1444`_).
+
+* New ``approx()`` function for easily comparing floating-point numbers in
+  tests.
+  Thanks `@kalekundert`_ for the complete PR (`#1441`_).
+
+* New ability to add global properties to the final xunit output file.
+  Thanks `@tareqalayan`_ for the complete PR (`#1454`_).
+
+*
+
+**Changes**
+
+*
+
+*
+
+*
+
+.. _@milliams: https://github.com/milliams
+.. _@novas0x2a: https://github.com/novas0x2a
+.. _@kalekundert: https://github.com/kalekundert
+.. _@tareqalayan: https://github.com/tareqalayan
+
+.. _#1428: https://github.com/pytest-dev/pytest/pull/1428
+.. _#1444: https://github.com/pytest-dev/pytest/pull/1444
+.. _#1441: https://github.com/pytest-dev/pytest/pull/1441
+.. _#1454: https://github.com/pytest-dev/pytest/pull/1454
+
+
 2.9.2.dev1
 ==========
 

_pytest/__init__.py

@@ -1,2 +1,3 @@
 #
-__version__ = '2.9.2.dev1'
+__version__ = '2.10.0.dev1'

_pytest/doctest.py

@@ -71,6 +71,8 @@ class DoctestItem(pytest.Item):
         if self.dtest is not None:
             self.fixture_request = _setup_fixtures(self)
             globs = dict(getfixture=self.fixture_request.getfuncargvalue)
+            for name, value in self.fixture_request.getfuncargvalue('doctest_namespace').items():
+                globs[name] = value
             self.dtest.globs.update(globs)
 
     def runtest(self):
@@ -159,6 +161,9 @@ class DoctestTextfile(DoctestItem, pytest.Module):
         if '__name__' not in globs:
             globs['__name__'] = '__main__'
 
+        for name, value in fixture_request.getfuncargvalue('doctest_namespace').items():
+            globs[name] = value
+
         optionflags = get_optionflags(self)
         runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
                                      checker=_get_checker())
@@ -288,3 +293,11 @@ def _get_allow_bytes_flag():
     """
     import doctest
     return doctest.register_optionflag('ALLOW_BYTES')
+
+
+@pytest.fixture(scope='session')
+def doctest_namespace():
+    """
+    Inject names into the doctest namespace.
+    """
+    return dict()
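
Illustration (not part of this commit): the session-scoped ``doctest_namespace`` dict above is meant to be filled from user fixtures. A minimal conftest sketch, with the injected module and the fixture name invented for the example:

    # content of conftest.py -- hypothetical example
    import math

    import pytest

    @pytest.fixture(autouse=True)
    def add_math(doctest_namespace):
        # Every key placed in the dict becomes a global inside collected
        # doctests, so ``>>> math.sqrt(4)`` works there without an import.
        doctest_namespace['math'] = math
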
_pytest/junitxml.py

@@ -265,6 +265,7 @@ class LogXML(object):
         ], 0)
         self.node_reporters = {}  # nodeid -> _NodeReporter
         self.node_reporters_ordered = []
+        self.global_properties = []
 
     def finalize(self, report):
         nodeid = getattr(report, 'nodeid', report)
@@ -284,9 +285,12 @@ class LogXML(object):
         if key in self.node_reporters:
             # TODO: breaks for --dist=each
             return self.node_reporters[key]
+
         reporter = _NodeReporter(nodeid, self)
+
         self.node_reporters[key] = reporter
         self.node_reporters_ordered.append(reporter)
+
         return reporter
 
     def add_stats(self, key):
@@ -372,7 +376,9 @@ class LogXML(object):
         numtests = self.stats['passed'] + self.stats['failure']
 
         logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+
         logfile.write(Junit.testsuite(
+            self._get_global_properties_node(),
             [x.to_xml() for x in self.node_reporters_ordered],
             name="pytest",
             errors=self.stats['error'],
@@ -385,3 +391,18 @@ class LogXML(object):
     def pytest_terminal_summary(self, terminalreporter):
         terminalreporter.write_sep("-",
             "generated xml file: %s" % (self.logfile))
+
+    def add_global_property(self, name, value):
+        self.global_properties.append((str(name), bin_xml_escape(value)))
+
+    def _get_global_properties_node(self):
+        """Return a Junit node containing custom properties, if any.
+        """
+        if self.global_properties:
+            return Junit.properties(
+                [
+                    Junit.property(name=name, value=value)
+                    for name, value in self.global_properties
+                ]
+            )
+        return ''
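
Illustration (not part of this commit): the smallest way to exercise the new API is to drive ``LogXML`` directly, mirroring the unit test added at the bottom of this commit; the output file name here is arbitrary:

    # hypothetical sketch, not an official API example
    from _pytest.junitxml import LogXML

    log = LogXML('out.xml', None)              # (logfile, prefix), as in the test suite
    log.pytest_sessionstart()
    log.add_global_property('BUILD_ID', '42')  # becomes <property> under <testsuite><properties>
    log.pytest_sessionfinish()                 # writes out.xml
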
_pytest/python.py

@@ -1,10 +1,12 @@
 """ Python test discovery, setup and run of test functions. """
 import fnmatch
 import functools
 import inspect
 import re
 import types
 import sys
+import math
 
 import py
 import pytest
@@ -114,12 +116,13 @@ def safe_getattr(object, name, default):
 
 class FixtureFunctionMarker:
     def __init__(self, scope, params,
-                 autouse=False, yieldctx=False, ids=None):
+                 autouse=False, yieldctx=False, ids=None, name=None):
         self.scope = scope
         self.params = params
         self.autouse = autouse
         self.yieldctx = yieldctx
         self.ids = ids
+        self.name = name
 
     def __call__(self, function):
         if isclass(function):
@@ -129,7 +132,7 @@ class FixtureFunctionMarker:
         return function
 
 
-def fixture(scope="function", params=None, autouse=False, ids=None):
+def fixture(scope="function", params=None, autouse=False, ids=None, name=None):
     """ (return a) decorator to mark a fixture factory function.
 
     This decorator can be used (with or without parameters) to define
@@ -155,14 +158,21 @@ def fixture(scope="function", params=None, autouse=False, ids=None):
     so that they are part of the test id. If no ids are provided
     they will be generated automatically from the params.
 
+    :arg name: the name of the fixture. This defaults to the name of the
+        decorated function. If a fixture is used in the same module in
+        which it is defined, the function name of the fixture will be
+        shadowed by the function arg that requests the fixture; one way
+        to resolve this is to name the decorated function
+        ``fixture_<fixturename>`` and then use
+        ``@pytest.fixture(name='<fixturename>')``.
     """
     if callable(scope) and params is None and autouse == False:
         # direct decoration
         return FixtureFunctionMarker(
-                "function", params, autouse)(scope)
+                "function", params, autouse, name=name)(scope)
     if params is not None and not isinstance(params, (list, tuple)):
         params = list(params)
-    return FixtureFunctionMarker(scope, params, autouse, ids=ids)
+    return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name)
 
 def yield_fixture(scope="function", params=None, autouse=False, ids=None):
     """ (return a) decorator to mark a yield-fixture factory function
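
Illustration (not part of this commit): the shadowing problem that the new ``name`` argument addresses, with invented fixture and test names:

    import pytest

    # Deliberately *not* naming the function ``bucket``: inside ``test_fill``
    # the argument ``bucket`` is bound to the fixture's return value, which
    # would shadow a same-named module-level fixture function.
    @pytest.fixture(name='bucket')
    def fixture_bucket():
        return []

    def test_fill(bucket):
        bucket.append('pebble')
        assert bucket == ['pebble']
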
@@ -261,7 +271,8 @@ def pytest_namespace():
     return {
         'fixture': fixture,
         'yield_fixture': yield_fixture,
-        'raises' : raises,
+        'raises': raises,
+        'approx': approx,
         'collect': {
             'Module': Module, 'Class': Class, 'Instance': Instance,
             'Function': Function, 'Generator': Generator,
@@ -1206,7 +1217,8 @@ def getlocation(function, curdir):
 # builtin pytest.raises helper
 
 def raises(expected_exception, *args, **kwargs):
-    """ assert that a code block/function call raises ``expected_exception``
+    """
+    Assert that a code block/function call raises ``expected_exception``
+    and raise a failure exception otherwise.
 
     This helper produces an ``ExceptionInfo()`` object (see below).
@@ -1339,6 +1351,255 @@ class RaisesContext(object):
         self.excinfo.__init__(tp)
         return issubclass(self.excinfo.type, self.expected_exception)
 
+
+# builtin pytest.approx helper
+
+class approx(object):
+    """
+    Assert that two numbers (or two sets of numbers) are equal to each other
+    within some tolerance.
+
+    Due to the `intricacies of floating-point arithmetic`__, numbers that we
+    would intuitively expect to be equal are not always so::
+
+        >>> 0.1 + 0.2 == 0.3
+        False
+
+    __ https://docs.python.org/3/tutorial/floatingpoint.html
+
+    This problem is commonly encountered when writing tests, e.g. when making
+    sure that floating-point values are what you expect them to be.  One way to
+    deal with this problem is to assert that two floating-point numbers are
+    equal to within some appropriate tolerance::
+
+        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
+        True
+
+    However, comparisons like this are tedious to write and difficult to
+    understand.  Furthermore, absolute comparisons like the one above are
+    usually discouraged because there's no tolerance that works well for all
+    situations.  ``1e-6`` is good for numbers around ``1``, but too small for
+    very big numbers and too big for very small ones.  It's better to express
+    the tolerance as a fraction of the expected value, but relative comparisons
+    like that are even more difficult to write correctly and concisely.
+
+    The ``approx`` class performs floating-point comparisons using a syntax
+    that's as intuitive as possible::
+
+        >>> from pytest import approx
+        >>> 0.1 + 0.2 == approx(0.3)
+        True
+
+    The same syntax also works on sequences of numbers::
+
+        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
+        True
+
+    By default, ``approx`` considers numbers within a relative tolerance of
+    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
+    This treatment would lead to surprising results if the expected value was
+    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
+    To handle this case less surprisingly, ``approx`` also considers numbers
+    within an absolute tolerance of ``1e-12`` of its expected value to be
+    equal.  Infinite numbers are another special case.  They are only
+    considered equal to themselves, regardless of the relative tolerance.  Both
+    the relative and absolute tolerances can be changed by passing arguments to
+    the ``approx`` constructor::
+
+        >>> 1.0001 == approx(1)
+        False
+        >>> 1.0001 == approx(1, rel=1e-3)
+        True
+        >>> 1.0001 == approx(1, abs=1e-3)
+        True
+
+    If you specify ``abs`` but not ``rel``, the comparison will not consider
+    the relative tolerance at all.  In other words, two numbers that are within
+    the default relative tolerance of ``1e-6`` will still be considered unequal
+    if they exceed the specified absolute tolerance.  If you specify both
+    ``abs`` and ``rel``, the numbers will be considered equal if either
+    tolerance is met::
+
+        >>> 1 + 1e-8 == approx(1)
+        True
+        >>> 1 + 1e-8 == approx(1, abs=1e-12)
+        False
+        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
+        True
+
+    If you're thinking about using ``approx``, then you might want to know how
+    it compares to other good ways of comparing floating-point numbers.  All of
+    these algorithms are based on relative and absolute tolerances and should
+    agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met.  Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
+      ``b`` is a "reference value").  You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default.  Only available in python>=3.5.  `More information...`__
+
+      __ https://docs.python.org/3/library/math.html#math.isclose
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less than the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance.  Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value.  Support for comparing sequences
+      is provided by ``numpy.allclose``.  `More information...`__
+
+      __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``.  No relative tolerance is
+      considered and the absolute tolerance cannot be changed, so this function
+      is not appropriate for very large or very small numbers.  Also, it's only
+      available in subclasses of ``unittest.TestCase`` and it's ugly because it
+      doesn't follow PEP8.  `More information...`__
+
+      __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value.  In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+    """
+
+    def __init__(self, expected, rel=None, abs=None):
+        self.expected = expected
+        self.abs = abs
+        self.rel = rel
+
+    def __repr__(self):
+        return ', '.join(repr(x) for x in self.expected)
+
+    def __eq__(self, actual):
+        from collections import Iterable
+        if not isinstance(actual, Iterable):
+            actual = [actual]
+        if len(actual) != len(self.expected):
+            return False
+        return all(a == x for a, x in zip(actual, self.expected))
+
+    def __ne__(self, actual):
+        return not (actual == self)
+
+    @property
+    def expected(self):
+        # Regardless of whether the user-specified expected value is a number
+        # or a sequence of numbers, return a list of ApproxNonIterable objects
+        # that can be compared against.
+        from collections import Iterable
+        approx_non_iter = lambda x: ApproxNonIterable(x, self.rel, self.abs)
+        if isinstance(self._expected, Iterable):
+            return [approx_non_iter(x) for x in self._expected]
+        else:
+            return [approx_non_iter(self._expected)]
+
+    @expected.setter
+    def expected(self, expected):
+        self._expected = expected
+
+
+class ApproxNonIterable(object):
+    """
+    Perform approximate comparisons for single numbers only.
+
+    In other words, the ``expected`` attribute for objects of this class must
+    be some sort of number.  This is in contrast to the ``approx`` class, where
+    the ``expected`` attribute can either be a number or a sequence of numbers.
+    This class is responsible for making comparisons, while ``approx`` is
+    responsible for abstracting the difference between numbers and sequences of
+    numbers.  Although this class can stand on its own, it's only meant to be
+    used within ``approx``.
+    """
+
+    def __init__(self, expected, rel=None, abs=None):
+        self.expected = expected
+        self.abs = abs
+        self.rel = rel
+
+    def __repr__(self):
+        # Infinities aren't compared using tolerances, so don't show a
+        # tolerance.
+        if math.isinf(self.expected):
+            return str(self.expected)
+
+        # If a sensible tolerance can't be calculated, self.tolerance will
+        # raise a ValueError.  In this case, display '???'.
+        try:
+            vetted_tolerance = '{:.1e}'.format(self.tolerance)
+        except ValueError:
+            vetted_tolerance = '???'
+
+        plus_minus = u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)
+
+        # In python2, __repr__() must return a string (i.e. not a unicode
+        # object).  In python3, __repr__() must return a unicode object
+        # (although now strings are unicode objects and bytes are what
+        # strings were).
+        if sys.version_info[0] == 2:
+            return plus_minus.encode('utf-8')
+        else:
+            return plus_minus
+
+    def __eq__(self, actual):
+        # Short-circuit exact equality.
+        if actual == self.expected:
+            return True
+
+        # Infinity shouldn't be approximately equal to anything but itself, but
+        # if there's a relative tolerance, it will be infinite and infinity
+        # will seem approximately equal to everything.  The equal-to-itself
+        # case would have been short circuited above, so here we can just
+        # return false if the expected value is infinite.  The abs() call is
+        # for compatibility with complex numbers.
+        if math.isinf(abs(self.expected)):
+            return False
+
+        # Return true if the two numbers are within the tolerance.
+        return abs(self.expected - actual) <= self.tolerance
+
+    def __ne__(self, actual):
+        return not (actual == self)
+
+    @property
+    def tolerance(self):
+        set_default = lambda x, default: x if x is not None else default
+
+        # Figure out what the absolute tolerance should be.  ``self.abs`` is
+        # either None or a value specified by the user.
+        absolute_tolerance = set_default(self.abs, 1e-12)
+
+        if absolute_tolerance < 0:
+            raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance))
+        if math.isnan(absolute_tolerance):
+            raise ValueError("absolute tolerance can't be NaN.")
+
+        # If the user specified an absolute tolerance but not a relative one,
+        # just return the absolute tolerance.
+        if self.rel is None:
+            if self.abs is not None:
+                return absolute_tolerance
+
+        # Figure out what the relative tolerance should be.  ``self.rel`` is
+        # either None or a value specified by the user.  This is done after
+        # we've made sure the user didn't ask for an absolute tolerance only,
+        # because we don't want to raise errors about the relative tolerance if
+        # we aren't even going to use it.
+        relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)
+
+        if relative_tolerance < 0:
+            raise ValueError("relative tolerance can't be negative: {}".format(relative_tolerance))
+        if math.isnan(relative_tolerance):
+            raise ValueError("relative tolerance can't be NaN.")
+
+        # Return the larger of the relative and absolute tolerances.
+        return max(relative_tolerance, absolute_tolerance)
+
+
 #
 # the basic pytest Function item
 #
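
Illustration (not part of this commit): the ``tolerance`` property above evaluates to ``max(rel * abs(expected), abs)`` with defaults ``rel=1e-6`` and ``abs=1e-12``; a quick worked check:

    from pytest import approx

    # expected = 100: tolerance = max(1e-6 * 100, 1e-12) = 1e-4
    assert 100 + 0.5e-4 == approx(100)  # 5e-5 lies inside the 1e-4 window
    assert 100 + 2e-4 != approx(100)    # 2e-4 lies outside it

    # expected = 0: the relative term vanishes, leaving the 1e-12 absolute floor
    assert 1e-13 == approx(0)
    assert 1e-11 != approx(0)
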
@@ -1992,6 +2253,8 @@ class FixtureManager:
                 # fixture attribute
                 continue
             else:
+                if marker.name:
+                    name = marker.name
                 assert not name.startswith(self._argprefix)
             fixturedef = FixtureDef(self, nodeid, name, obj,
                                     marker.scope, marker.params,

doc/en/builtin.rst

@@ -35,6 +35,11 @@ Examples at :ref:`assertraises`.
 
 .. autofunction:: deprecated_call
 
+Comparing floating point numbers
+--------------------------------
+
+.. autoclass:: approx
+
 Raising a specific test outcome
 --------------------------------------
 
@@ -48,7 +53,7 @@ you can rather use declarative marks, see :ref:`skipping`.
 .. autofunction:: _pytest.skipping.xfail
 .. autofunction:: _pytest.runner.exit
 
-fixtures and requests
+Fixtures and requests
 -----------------------------------------------------
 
 To mark a fixture function:

doc/en/doctest.rst

@@ -102,4 +102,31 @@ itself::
 
     >>> get_unicode_greeting()  # doctest: +ALLOW_UNICODE
     'Hello'
+
+The 'doctest_namespace' fixture
+-------------------------------
+
+.. versionadded:: 2.10
+
+The ``doctest_namespace`` fixture can be used to inject items into the
+namespace in which your doctests run. It is intended to be used within
+your own fixtures to provide the tests that use them with context.
+
+``doctest_namespace`` is a standard ``dict`` object into which you
+place the objects you want to appear in the doctest namespace::
+
+    # content of conftest.py
+    import pytest
+    import numpy
+
+    @pytest.fixture(autouse=True)
+    def add_np(doctest_namespace):
+        doctest_namespace['np'] = numpy
+
+which can then be used in your doctests directly::
+
+    # content of numpy.py
+    def arange():
+        """
+        >>> a = np.arange(10)
+        >>> len(a)
+        10
+        """
+        pass

doc/en/usage.rst

@@ -193,6 +193,53 @@ This will add an extra property ``example_key="1"`` to the generated
 
 Also please note that using this feature will break any schema verification.
 This might be a problem when used with some CI servers.
 
+LogXML: add_global_property
+^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+
+.. versionadded:: 2.10
+
+If you want to add a properties node at the testsuite level, which may
+contain properties that are relevant to all test cases, you can use
+``LogXML.add_global_property``:
+
+.. code-block:: python
+
+    import pytest
+
+    @pytest.fixture(scope="session")
+    def log_global_env_facts():
+
+        if pytest.config.pluginmanager.hasplugin('junitxml'):
+            my_junit = getattr(pytest.config, '_xml', None)
+
+            my_junit.add_global_property('ARCH', 'PPC')
+            my_junit.add_global_property('STORAGE_TYPE', 'CEPH')
+
+    @pytest.mark.usefixtures('log_global_env_facts')
+    def start_and_prepare_env():
+        pass
+
+    class TestMe:
+        def test_foo(self):
+            assert True
+
+This will add a property node below the testsuite node to the generated xml:
+
+.. code-block:: xml
+
+    <testsuite errors="0" failures="0" name="pytest" skips="0" tests="1" time="0.006">
+      <properties>
+        <property name="ARCH" value="PPC"/>
+        <property name="STORAGE_TYPE" value="CEPH"/>
+      </properties>
+      <testcase classname="test_me.TestMe" file="test_me.py" line="16" name="test_foo" time="0.000243663787842"/>
+    </testsuite>
+
+.. warning::
+
+    This is an experimental feature, and its interface might be replaced
+    by something more powerful and general in future versions.  The
+    functionality per se will be kept.
+
 Creating resultlog format files
 ----------------------------------------------------

testing/python/approx.py

@@ -0,0 +1,286 @@
+# encoding: utf-8
+
+import pytest
+import doctest
+
+from pytest import approx
+from operator import eq, ne
+from decimal import Decimal
+from fractions import Fraction
+inf, nan = float('inf'), float('nan')
+
+
+class MyDocTestRunner(doctest.DocTestRunner):
+
+    def __init__(self):
+        doctest.DocTestRunner.__init__(self)
+
+    def report_failure(self, out, test, example, got):
+        raise AssertionError("'{}' evaluates to '{}', not '{}'".format(
+            example.source.strip(), got.strip(), example.want.strip()))
+
+
+class TestApprox:
+
+    def test_repr_string(self):
+        # Just make sure the Unicode handling doesn't raise any exceptions.
+        print(approx(1.0))
+        print(approx([1.0, 2.0, 3.0]))
+        print(approx(inf))
+        print(approx(1.0, rel=nan))
+        print(approx(1.0, rel=inf))
+
+    def test_operator_overloading(self):
+        assert 1 == approx(1, rel=1e-6, abs=1e-12)
+        assert not (1 != approx(1, rel=1e-6, abs=1e-12))
+        assert 10 != approx(1, rel=1e-6, abs=1e-12)
+        assert not (10 == approx(1, rel=1e-6, abs=1e-12))
+
+    def test_exactly_equal(self):
+        examples = [
+            (2.0, 2.0),
+            (0.1e200, 0.1e200),
+            (1.123e-300, 1.123e-300),
+            (12345, 12345.0),
+            (0.0, -0.0),
+            (345678, 345678),
+            (Decimal('1.0001'), Decimal('1.0001')),
+            (Fraction(1, 3), Fraction(-1, -3)),
+        ]
+        for a, x in examples:
+            assert a == approx(x)
+
+    def test_opposite_sign(self):
+        examples = [
+            (eq, 1e-100, -1e-100),
+            (ne, 1e100, -1e100),
+        ]
+        for op, a, x in examples:
+            assert op(a, approx(x))
+
+    def test_zero_tolerance(self):
+        within_1e10 = [
+            (1.1e-100, 1e-100),
+            (-1.1e-100, -1e-100),
+        ]
+        for a, x in within_1e10:
+            assert x == approx(x, rel=0.0, abs=0.0)
+            assert a != approx(x, rel=0.0, abs=0.0)
+            assert a == approx(x, rel=0.0, abs=5e-101)
+            assert a != approx(x, rel=0.0, abs=5e-102)
+            assert a == approx(x, rel=5e-1, abs=0.0)
+            assert a != approx(x, rel=5e-2, abs=0.0)
+
+    def test_negative_tolerance(self):
+        # Negative tolerances are not allowed.
+        illegal_kwargs = [
+            dict(rel=-1e100),
+            dict(abs=-1e100),
+            dict(rel=1e100, abs=-1e100),
+            dict(rel=-1e100, abs=1e100),
+            dict(rel=-1e100, abs=-1e100),
+        ]
+        for kwargs in illegal_kwargs:
+            with pytest.raises(ValueError):
+                1.1 == approx(1, **kwargs)
+
+    def test_inf_tolerance(self):
+        # Everything should be equal if the tolerance is infinite.
+        large_diffs = [
+            (1, 1000),
+            (1e-50, 1e50),
+            (-1.0, -1e300),
+            (0.0, 10),
+        ]
+        for a, x in large_diffs:
+            assert a != approx(x, rel=0.0, abs=0.0)
+            assert a == approx(x, rel=inf, abs=0.0)
+            assert a == approx(x, rel=0.0, abs=inf)
+            assert a == approx(x, rel=inf, abs=inf)
+
+    def test_inf_tolerance_expecting_zero(self):
+        # If the relative tolerance is infinite and the expected value is
+        # zero, the computed tolerance is NaN (inf * 0), which should be an
+        # error.
+        illegal_kwargs = [
+            dict(rel=inf, abs=0.0),
+            dict(rel=inf, abs=inf),
+        ]
+        for kwargs in illegal_kwargs:
+            with pytest.raises(ValueError):
+                1 == approx(0, **kwargs)
+
+    def test_nan_tolerance(self):
+        illegal_kwargs = [
+            dict(rel=nan),
+            dict(abs=nan),
+            dict(rel=nan, abs=nan),
+        ]
+        for kwargs in illegal_kwargs:
+            with pytest.raises(ValueError):
+                1.1 == approx(1, **kwargs)
+
+    def test_reasonable_defaults(self):
+        # Whatever the defaults are, they should work for numbers close to 1
+        # that have a small amount of floating-point error.
+        assert 0.1 + 0.2 == approx(0.3)
+
+    def test_default_tolerances(self):
+        # This tests the defaults as they are currently set.  If you change
+        # the defaults, this test will fail but you should feel free to change
+        # it.  None of the other tests (except the doctests) should be
+        # affected by the choice of defaults.
+        examples = [
+            # Relative tolerance used.
+            (eq, 1e100 + 1e94, 1e100),
+            (ne, 1e100 + 2e94, 1e100),
+            (eq, 1e0 + 1e-6, 1e0),
+            (ne, 1e0 + 2e-6, 1e0),
+            # Absolute tolerance used.
+            (eq, 1e-100, + 1e-106),
+            (eq, 1e-100, + 2e-106),
+            (eq, 1e-100, 0),
+        ]
+        for op, a, x in examples:
+            assert op(a, approx(x))
+
+    def test_custom_tolerances(self):
+        assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e0)
+        assert 1e8 + 1e0 == approx(1e8, rel=5e-9, abs=5e0)
+        assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e-1)
+        assert 1e8 + 1e0 != approx(1e8, rel=5e-9, abs=5e-1)
+
+        assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-8)
+        assert 1e0 + 1e-8 == approx(1e0, rel=5e-9, abs=5e-8)
+        assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-9)
+        assert 1e0 + 1e-8 != approx(1e0, rel=5e-9, abs=5e-9)
+
+        assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-16)
+        assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-9, abs=5e-16)
+        assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-17)
+        assert 1e-8 + 1e-16 != approx(1e-8, rel=5e-9, abs=5e-17)
+
+    def test_relative_tolerance(self):
+        within_1e8_rel = [
+            (1e8 + 1e0, 1e8),
+            (1e0 + 1e-8, 1e0),
+            (1e-8 + 1e-16, 1e-8),
+        ]
+        for a, x in within_1e8_rel:
+            assert a == approx(x, rel=5e-8, abs=0.0)
+            assert a != approx(x, rel=5e-9, abs=0.0)
+
+    def test_absolute_tolerance(self):
+        within_1e8_abs = [
+            (1e8 + 9e-9, 1e8),
+            (1e0 + 9e-9, 1e0),
+            (1e-8 + 9e-9, 1e-8),
+        ]
+        for a, x in within_1e8_abs:
+            assert a == approx(x, rel=0, abs=5e-8)
+            assert a != approx(x, rel=0, abs=5e-9)
+
+    def test_expecting_zero(self):
+        examples = [
+            (ne, 1e-6, 0.0),
+            (ne, -1e-6, 0.0),
+            (eq, 1e-12, 0.0),
+            (eq, -1e-12, 0.0),
+            (ne, 2e-12, 0.0),
+            (ne, -2e-12, 0.0),
+            (ne, inf, 0.0),
+            (ne, nan, 0.0),
+        ]
+        for op, a, x in examples:
+            assert op(a, approx(x, rel=0.0, abs=1e-12))
+            assert op(a, approx(x, rel=1e-6, abs=1e-12))
+
+    def test_expecting_inf(self):
+        examples = [
+            (eq, inf, inf),
+            (eq, -inf, -inf),
+            (ne, inf, -inf),
+            (ne, 0.0, inf),
+            (ne, nan, inf),
+        ]
+        for op, a, x in examples:
+            assert op(a, approx(x))
+
+    def test_expecting_nan(self):
+        examples = [
+            (nan, nan),
+            (-nan, -nan),
+            (nan, -nan),
+            (0.0, nan),
+            (inf, nan),
+        ]
+        for a, x in examples:
+            # If there is a relative tolerance and the expected value is NaN,
+            # the actual tolerance is a NaN, which should be an error.
+            with pytest.raises(ValueError):
+                a != approx(x, rel=inf)
+
+            # You can make comparisons against NaN by not specifying a relative
+            # tolerance, so only an absolute tolerance is calculated.
+            assert a != approx(x, abs=inf)
+
+    def test_expecting_sequence(self):
+        within_1e8 = [
+            (1e8 + 1e0, 1e8),
+            (1e0 + 1e-8, 1e0),
+            (1e-8 + 1e-16, 1e-8),
+        ]
+        actual, expected = zip(*within_1e8)
+        assert actual == approx(expected, rel=5e-8, abs=0.0)
+
+    def test_expecting_sequence_wrong_len(self):
+        assert [1, 2] != approx([1])
+        assert [1, 2] != approx([1, 2, 3])
+
+    def test_complex(self):
+        within_1e6 = [
+            (1.000001 + 1.0j, 1.0 + 1.0j),
+            (1.0 + 1.000001j, 1.0 + 1.0j),
+            (-1.000001 + 1.0j, -1.0 + 1.0j),
+            (1.0 - 1.000001j, 1.0 - 1.0j),
+        ]
+        for a, x in within_1e6:
+            assert a == approx(x, rel=5e-6, abs=0)
+            assert a != approx(x, rel=5e-7, abs=0)
+
+    def test_int(self):
+        within_1e6 = [
+            (1000001, 1000000),
+            (-1000001, -1000000),
+        ]
+        for a, x in within_1e6:
+            assert a == approx(x, rel=5e-6, abs=0)
+            assert a != approx(x, rel=5e-7, abs=0)
+
+    def test_decimal(self):
+        within_1e6 = [
+            (Decimal('1.000001'), Decimal('1.0')),
+            (Decimal('-1.000001'), Decimal('-1.0')),
+        ]
+        for a, x in within_1e6:
+            assert a == approx(x, rel=Decimal('5e-6'), abs=0)
+            assert a != approx(x, rel=Decimal('5e-7'), abs=0)
+
+    def test_fraction(self):
+        within_1e6 = [
+            (1 + Fraction(1, 1000000), Fraction(1)),
+            (-1 - Fraction(-1, 1000000), Fraction(-1)),
+        ]
+        for a, x in within_1e6:
+            assert a == approx(x, rel=5e-6, abs=0)
+            assert a != approx(x, rel=5e-7, abs=0)
+
+    def test_doctests(self):
+        parser = doctest.DocTestParser()
+        test = parser.get_doctest(
+            approx.__doc__,
+            {'approx': approx},
+            approx.__name__,
+            None, None,
+        )
+        runner = MyDocTestRunner()
+        runner.run(test)

testing/python/fixture.py

@@ -2691,3 +2691,14 @@ class TestContextManagerFixtureFuncs:
             *def arg1*
             """)
 
+    def test_custom_name(self, testdir):
+        testdir.makepyfile("""
+            import pytest
+            @pytest.fixture(name='meow')
+            def arg1():
+                return 'mew'
+            def test_1(meow):
+                print(meow)
+        """)
+        result = testdir.runpytest("-s")
+        result.stdout.fnmatch_lines("*mew*")

testing/test_doctest.py

@@ -713,3 +713,53 @@ class TestDoctestAutoUseFixtures:
         result = testdir.runpytest('--doctest-modules')
         assert 'FAILURES' not in str(result.stdout.str())
         result.stdout.fnmatch_lines(['*=== 1 passed in *'])
+
+
+class TestDoctestNamespaceFixture:
+
+    SCOPES = ['module', 'session', 'class', 'function']
+
+    @pytest.mark.parametrize('scope', SCOPES)
+    def test_namespace_doctestfile(self, testdir, scope):
+        """
+        Check that inserting something into the namespace works in a
+        simple text file doctest
+        """
+        testdir.makeconftest("""
+            import pytest
+            import contextlib
+
+            @pytest.fixture(autouse=True, scope="{scope}")
+            def add_contextlib(doctest_namespace):
+                doctest_namespace['cl'] = contextlib
+        """.format(scope=scope))
+        p = testdir.maketxtfile("""
+            >>> print(cl.__name__)
+            contextlib
+        """)
+        reprec = testdir.inline_run(p)
+        reprec.assertoutcome(passed=1)
+
+    @pytest.mark.parametrize('scope', SCOPES)
+    def test_namespace_pyfile(self, testdir, scope):
+        """
+        Check that inserting something into the namespace works in a
+        simple Python file docstring doctest
+        """
+        testdir.makeconftest("""
+            import pytest
+            import contextlib
+
+            @pytest.fixture(autouse=True, scope="{scope}")
+            def add_contextlib(doctest_namespace):
+                doctest_namespace['cl'] = contextlib
+        """.format(scope=scope))
+        p = testdir.makepyfile("""
+            def foo():
+                '''
+                >>> print(cl.__name__)
+                contextlib
+                '''
+        """)
+        reprec = testdir.inline_run(p, "--doctest-modules")
+        reprec.assertoutcome(passed=1)

testing/test_junitxml.py

@@ -814,3 +814,38 @@ def test_fancy_items_regression(testdir):
         u'test_fancy_items_regression test_pass'
         u' test_fancy_items_regression.py',
     ]
+
+
+def test_global_properties(testdir):
+    path = testdir.tmpdir.join("test_global_properties.xml")
+    log = LogXML(str(path), None)
+    from _pytest.runner import BaseReport
+
+    class Report(BaseReport):
+        sections = []
+        nodeid = "test_node_id"
+
+    log.pytest_sessionstart()
+    log.add_global_property('foo', 1)
+    log.add_global_property('bar', 2)
+    log.pytest_sessionfinish()
+
+    dom = minidom.parse(str(path))
+
+    properties = dom.getElementsByTagName('properties')
+
+    assert (properties.length == 1), "There must be one <properties> node"
+
+    property_list = dom.getElementsByTagName('property')
+
+    assert (property_list.length == 2), "There must be exactly 2 property nodes"
+
+    expected = {'foo': '1', 'bar': '2'}
+    actual = {}
+
+    for p in property_list:
+        k = str(p.getAttribute('name'))
+        v = str(p.getAttribute('value'))
+        actual[k] = v
+
+    assert actual == expected