Merge pull request #5096 from asottile/docs_highlight

blacken-docs more code samples in docs
Bruno Oliveira 2019-04-12 10:00:26 -03:00 committed by GitHub
commit a9e850f749
13 changed files with 305 additions and 131 deletions
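The change converts documentation code samples from bare ``::`` literal blocks to explicit ``.. code-block:: python`` directives and lets black reformat them via the blacken-docs pre-commit hook. As an illustrative sketch of the pattern (not copied verbatim from any file in this diff):

    # Before (hand-wrapped call, single quotes):
    #
    #     @pytest.mark.parametrize('test_input,expected', [
    #         ('3+5', 8),
    #         ('2+4', 6),
    #     ])
    #
    # After blacken-docs (black-formatted, double quotes):
    import pytest


    @pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6)])
    def test_eval(test_input, expected):
        assert eval(test_input) == expected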


@@ -1,16 +1,16 @@
 exclude: doc/en/example/py2py3/test_py2.py
 repos:
 -   repo: https://github.com/ambv/black
-    rev: 18.9b0
+    rev: 19.3b0
     hooks:
     -   id: black
         args: [--safe, --quiet]
         language_version: python3
 -   repo: https://github.com/asottile/blacken-docs
-    rev: v0.3.0
+    rev: v0.5.0
     hooks:
     -   id: blacken-docs
-        additional_dependencies: [black==18.9b0]
+        additional_dependencies: [black==19.3b0]
         language_version: python3
 -   repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v2.1.0
@@ -22,22 +22,22 @@ repos:
         exclude: _pytest/debugging.py
         language_version: python3
 -   repo: https://gitlab.com/pycqa/flake8
-    rev: 3.7.0
+    rev: 3.7.7
     hooks:
     -   id: flake8
         language_version: python3
 -   repo: https://github.com/asottile/reorder_python_imports
-    rev: v1.3.5
+    rev: v1.4.0
     hooks:
     -   id: reorder-python-imports
         args: ['--application-directories=.:src']
 -   repo: https://github.com/asottile/pyupgrade
-    rev: v1.11.1
+    rev: v1.15.0
     hooks:
     -   id: pyupgrade
         args: [--keep-percent-format]
 -   repo: https://github.com/pre-commit/pygrep-hooks
-    rev: v1.2.0
+    rev: v1.3.0
     hooks:
     -   id: rst-backticks
 -   repo: local


@@ -16,4 +16,4 @@ run = 'fc("/d")'
 if __name__ == "__main__":
     print(timeit.timeit(run, setup=setup % imports[0], number=count))
-    print((timeit.timeit(run, setup=setup % imports[1], number=count)))
+    print(timeit.timeit(run, setup=setup % imports[1], number=count))


@@ -12,12 +12,15 @@ Asserting with the ``assert`` statement

 ``pytest`` allows you to use the standard python ``assert`` for verifying
 expectations and values in Python tests. For example, you can write the
-following::
+following:
+
+.. code-block:: python

     # content of test_assert1.py
     def f():
         return 3

     def test_function():
         assert f() == 4
@@ -52,7 +55,9 @@ operators. (See :ref:`tbreportdemo`). This allows you to use the
 idiomatic python constructs without boilerplate code while not losing
 introspection information.

-However, if you specify a message with the assertion like this::
+However, if you specify a message with the assertion like this:
+
+.. code-block:: python

     assert a % 2 == 0, "value was odd, should be even"
@@ -67,22 +72,29 @@ Assertions about expected exceptions
 ------------------------------------------

 In order to write assertions about raised exceptions, you can use
-``pytest.raises`` as a context manager like this::
+``pytest.raises`` as a context manager like this:
+
+.. code-block:: python

     import pytest

     def test_zero_division():
         with pytest.raises(ZeroDivisionError):
             1 / 0

-and if you need to have access to the actual exception info you may use::
+and if you need to have access to the actual exception info you may use:
+
+.. code-block:: python

     def test_recursion_depth():
         with pytest.raises(RuntimeError) as excinfo:

             def f():
                 f()

             f()
-        assert 'maximum recursion' in str(excinfo.value)
+        assert "maximum recursion" in str(excinfo.value)

 ``excinfo`` is a ``ExceptionInfo`` instance, which is a wrapper around
 the actual exception raised. The main attributes of interest are
@@ -90,15 +102,19 @@ the actual exception raised. The main attributes of interest are

 You can pass a ``match`` keyword parameter to the context-manager to test
 that a regular expression matches on the string representation of an exception
-(similar to the ``TestCase.assertRaisesRegexp`` method from ``unittest``)::
+(similar to the ``TestCase.assertRaisesRegexp`` method from ``unittest``):
+
+.. code-block:: python

     import pytest

     def myfunc():
         raise ValueError("Exception 123 raised")

     def test_match():
-        with pytest.raises(ValueError, match=r'.* 123 .*'):
+        with pytest.raises(ValueError, match=r".* 123 .*"):
             myfunc()

 The regexp parameter of the ``match`` method is matched with the ``re.search``
@@ -107,7 +123,9 @@ well.

 There's an alternate form of the ``pytest.raises`` function where you pass
 a function that will be executed with the given ``*args`` and ``**kwargs`` and
-assert that the given exception is raised::
+assert that the given exception is raised:
+
+.. code-block:: python

     pytest.raises(ExpectedException, func, *args, **kwargs)
@@ -116,7 +134,9 @@ exception* or *wrong exception*.

 Note that it is also possible to specify a "raises" argument to
 ``pytest.mark.xfail``, which checks that the test is failing in a more
-specific way than just having any exception raised::
+specific way than just having any exception raised:
+
+.. code-block:: python

     @pytest.mark.xfail(raises=IndexError)
     def test_f():
@@ -148,10 +168,13 @@ Making use of context-sensitive comparisons
 .. versionadded:: 2.0

 ``pytest`` has rich support for providing context-sensitive information
-when it encounters comparisons. For example::
+when it encounters comparisons. For example:
+
+.. code-block:: python

     # content of test_assert2.py
     def test_set_comparison():
         set1 = set("1308")
         set2 = set("8035")
@@ -205,16 +228,21 @@ the ``pytest_assertrepr_compare`` hook.
    :noindex:

 As an example consider adding the following hook in a :ref:`conftest.py <conftest.py>`
-file which provides an alternative explanation for ``Foo`` objects::
+file which provides an alternative explanation for ``Foo`` objects:
+
+.. code-block:: python

     # content of conftest.py
     from test_foocompare import Foo

     def pytest_assertrepr_compare(op, left, right):
         if isinstance(left, Foo) and isinstance(right, Foo) and op == "==":
-            return ['Comparing Foo instances:',
-                    ' vals: %s != %s' % (left.val, right.val)]
+            return ["Comparing Foo instances:", " vals: %s != %s" % (left.val, right.val)]

-now, given this test module::
+now, given this test module:
+
+.. code-block:: python

     # content of test_foocompare.py
     class Foo(object):
@@ -224,6 +252,7 @@ now, given this test module::
         def __eq__(self, other):
             return self.val == other.val

     def test_compare():
         f1 = Foo(1)
         f2 = Foo(2)
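One hunk above ends right at "The main attributes of interest are", which this view truncates. As an aside that is not part of the diff, the ``ExceptionInfo`` attributes used most often are ``.type``, ``.value`` and ``.traceback``; a minimal runnable sketch:

    import pytest


    def test_exceptioninfo_attributes():
        with pytest.raises(ValueError, match=r".* 123 .*") as excinfo:
            raise ValueError("Exception 123 raised")
        # the wrapper exposes the exception class, instance and traceback
        assert excinfo.type is ValueError
        assert "123" in str(excinfo.value)
        assert excinfo.traceback  # list of traceback entries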


@@ -9,18 +9,28 @@ Here are some example using the :ref:`mark` mechanism.
 Marking test functions and selecting them for a run
 ----------------------------------------------------

-You can "mark" a test function with custom metadata like this::
+You can "mark" a test function with custom metadata like this:
+
+.. code-block:: python

     # content of test_server.py

     import pytest

     @pytest.mark.webtest
     def test_send_http():
         pass  # perform some webtest test for your app

     def test_something_quick():
         pass

     def test_another():
         pass

     class TestClass(object):
         def test_method(self):
             pass
@@ -257,14 +267,19 @@ Marking whole classes or modules
 ----------------------------------------------------

 You may use ``pytest.mark`` decorators with classes to apply markers to all of
-its test methods::
+its test methods:
+
+.. code-block:: python

     # content of test_mark_classlevel.py
     import pytest

     @pytest.mark.webtest
     class TestClass(object):
         def test_startup(self):
             pass

         def test_startup_and_more(self):
             pass
@@ -272,17 +287,23 @@ This is equivalent to directly applying the decorator to the
 two test functions.

 To remain backward-compatible with Python 2.4 you can also set a
-``pytestmark`` attribute on a TestClass like this::
+``pytestmark`` attribute on a TestClass like this:
+
+.. code-block:: python

     import pytest

     class TestClass(object):
         pytestmark = pytest.mark.webtest

-or if you need to use multiple markers you can use a list::
+or if you need to use multiple markers you can use a list:
+
+.. code-block:: python

     import pytest

     class TestClass(object):
         pytestmark = [pytest.mark.webtest, pytest.mark.slowtest]
@@ -305,16 +326,17 @@ Marking individual tests when using parametrize
 When using parametrize, applying a mark will make it apply
 to each individual test. However it is also possible to
-apply a marker to an individual test instance::
+apply a marker to an individual test instance:
+
+.. code-block:: python

     import pytest

     @pytest.mark.foo
-    @pytest.mark.parametrize(("n", "expected"), [
-        (1, 2),
-        pytest.param((1, 3), marks=pytest.mark.bar),
-        (2, 3),
-    ])
+    @pytest.mark.parametrize(
+        ("n", "expected"), [(1, 2), pytest.param((1, 3), marks=pytest.mark.bar), (2, 3)]
+    )
     def test_increment(n, expected):
         assert n + 1 == expected
@@ -332,31 +354,46 @@ Custom marker and command line option to control test runs
 Plugins can provide custom markers and implement specific behaviour
 based on it. This is a self-contained example which adds a command
 line option and a parametrized test function marker to run tests
-specifies via named environments::
+specifies via named environments:
+
+.. code-block:: python

     # content of conftest.py

     import pytest

     def pytest_addoption(parser):
-        parser.addoption("-E", action="store", metavar="NAME",
-            help="only run tests matching the environment NAME.")
+        parser.addoption(
+            "-E",
+            action="store",
+            metavar="NAME",
+            help="only run tests matching the environment NAME.",
+        )

     def pytest_configure(config):
         # register an additional marker
-        config.addinivalue_line("markers",
-            "env(name): mark test to run only on named environment")
+        config.addinivalue_line(
+            "markers", "env(name): mark test to run only on named environment"
+        )

     def pytest_runtest_setup(item):
-        envnames = [mark.args[0] for mark in item.iter_markers(name='env')]
+        envnames = [mark.args[0] for mark in item.iter_markers(name="env")]
         if envnames:
             if item.config.getoption("-E") not in envnames:
                 pytest.skip("test requires env in %r" % envnames)

-A test file using this local plugin::
+A test file using this local plugin:
+
+.. code-block:: python

     # content of test_someenv.py

     import pytest

     @pytest.mark.env("stage1")
     def test_basic_db_operation():
         pass
@@ -423,25 +460,32 @@ Passing a callable to custom markers
 .. regendoc:wipe

-Below is the config file that will be used in the next examples::
+Below is the config file that will be used in the next examples:
+
+.. code-block:: python

     # content of conftest.py
     import sys

     def pytest_runtest_setup(item):
-        for marker in item.iter_markers(name='my_marker'):
+        for marker in item.iter_markers(name="my_marker"):
             print(marker)
             sys.stdout.flush()

 A custom marker can have its argument set, i.e. ``args`` and ``kwargs`` properties, defined by either invoking it as a callable or using ``pytest.mark.MARKER_NAME.with_args``. These two methods achieve the same effect most of the time.

-However, if there is a callable as the single positional argument with no keyword arguments, using the ``pytest.mark.MARKER_NAME(c)`` will not pass ``c`` as a positional argument but decorate ``c`` with the custom marker (see :ref:`MarkDecorator <mark>`). Fortunately, ``pytest.mark.MARKER_NAME.with_args`` comes to the rescue::
+However, if there is a callable as the single positional argument with no keyword arguments, using the ``pytest.mark.MARKER_NAME(c)`` will not pass ``c`` as a positional argument but decorate ``c`` with the custom marker (see :ref:`MarkDecorator <mark>`). Fortunately, ``pytest.mark.MARKER_NAME.with_args`` comes to the rescue:
+
+.. code-block:: python

     # content of test_custom_marker.py
     import pytest

     def hello_world(*args, **kwargs):
-        return 'Hello World'
+        return "Hello World"

     @pytest.mark.my_marker.with_args(hello_world)
     def test_with_args():
@@ -467,12 +511,16 @@ Reading markers which were set from multiple places
 .. regendoc:wipe

 If you are heavily using markers in your test suite you may encounter the case where a marker is applied several times to a test function. From plugin
-code you can read over all such settings. Example::
+code you can read over all such settings. Example:
+
+.. code-block:: python

     # content of test_mark_three_times.py
     import pytest

     pytestmark = pytest.mark.glob("module", x=1)

     @pytest.mark.glob("class", x=2)
     class TestClass(object):
         @pytest.mark.glob("function", x=3)
@@ -480,13 +528,16 @@ code you can read over all such settings. Example::
             pass

 Here we have the marker "glob" applied three times to the same
-test function. From a conftest file we can read it like this::
+test function. From a conftest file we can read it like this:
+
+.. code-block:: python

     # content of conftest.py
     import sys

     def pytest_runtest_setup(item):
-        for mark in item.iter_markers(name='glob'):
+        for mark in item.iter_markers(name="glob"):
             print("glob args=%s kwargs=%s" % (mark.args, mark.kwargs))
             sys.stdout.flush()
@@ -510,7 +561,9 @@ Consider you have a test suite which marks tests for particular platforms,
 namely ``pytest.mark.darwin``, ``pytest.mark.win32`` etc. and you
 also have tests that run on all platforms and have no specific
 marker. If you now want to have a way to only run the tests
-for your particular platform, you could use the following plugin::
+for your particular platform, you could use the following plugin:
+
+.. code-block:: python

     # content of conftest.py
     #
@@ -519,6 +572,7 @@ for your particular platform, you could use the following plugin::
     ALL = set("darwin linux win32".split())

     def pytest_runtest_setup(item):
         supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers())
         plat = sys.platform
@@ -526,24 +580,30 @@ for your particular platform, you could use the following plugin::
             pytest.skip("cannot run on platform %s" % (plat))

 then tests will be skipped if they were specified for a different platform.
-Let's do a little test file to show how this looks like::
+Let's do a little test file to show how this looks like:
+
+.. code-block:: python

     # content of test_plat.py

     import pytest

     @pytest.mark.darwin
     def test_if_apple_is_evil():
         pass

     @pytest.mark.linux
     def test_if_linux_works():
         pass

     @pytest.mark.win32
     def test_if_win32_crashes():
         pass

     def test_runs_everywhere():
         pass
@@ -589,28 +649,38 @@ Automatically adding markers based on test names
 If you a test suite where test function names indicate a certain
 type of test, you can implement a hook that automatically defines
 markers so that you can use the ``-m`` option with it. Let's look
-at this test module::
+at this test module:
+
+.. code-block:: python

     # content of test_module.py

     def test_interface_simple():
         assert 0

     def test_interface_complex():
         assert 0

     def test_event_simple():
         assert 0

     def test_something_else():
         assert 0

 We want to dynamically define two markers and can do it in a
-``conftest.py`` plugin::
+``conftest.py`` plugin:
+
+.. code-block:: python

     # content of conftest.py

     import pytest

     def pytest_collection_modifyitems(items):
         for item in items:
             if "interface" in item.nodeid:


@@ -515,21 +515,25 @@ Set marks or test ID for individual parametrized test
 --------------------------------------------------------------------

 Use ``pytest.param`` to apply marks or set test ID to individual parametrized test.
-For example::
+For example:
+
+.. code-block:: python

     # content of test_pytest_param_example.py
     import pytest

-    @pytest.mark.parametrize('test_input,expected', [
-        ('3+5', 8),
-        pytest.param('1+7', 8,
-                     marks=pytest.mark.basic),
-        pytest.param('2+4', 6,
-                     marks=pytest.mark.basic,
-                     id='basic_2+4'),
-        pytest.param('6*9', 42,
-                     marks=[pytest.mark.basic, pytest.mark.xfail],
-                     id='basic_6*9'),
-    ])
+    @pytest.mark.parametrize(
+        "test_input,expected",
+        [
+            ("3+5", 8),
+            pytest.param("1+7", 8, marks=pytest.mark.basic),
+            pytest.param("2+4", 6, marks=pytest.mark.basic, id="basic_2+4"),
+            pytest.param(
+                "6*9", 42, marks=[pytest.mark.basic, pytest.mark.xfail], id="basic_6*9"
+            ),
+        ],
+    )
     def test_eval(test_input, expected):
         assert eval(test_input) == expected


@@ -57,14 +57,16 @@ Applying marks to ``@pytest.mark.parametrize`` parameters
 .. versionchanged:: 3.1

 Prior to version 3.1 the supported mechanism for marking values
-used the syntax::
+used the syntax:
+
+.. code-block:: python

     import pytest

-    @pytest.mark.parametrize("test_input,expected", [
-        ("3+5", 8),
-        ("2+4", 6),
-        pytest.mark.xfail(("6*9", 42),),
-    ])
+    @pytest.mark.parametrize(
+        "test_input,expected", [("3+5", 8), ("2+4", 6), pytest.mark.xfail(("6*9", 42))]
+    )
     def test_eval(test_input, expected):
         assert eval(test_input) == expected
@@ -105,9 +107,13 @@ Conditions as strings instead of booleans
 .. versionchanged:: 2.4

 Prior to pytest-2.4 the only way to specify skipif/xfail conditions was
-to use strings::
+to use strings:
+
+.. code-block:: python

     import sys

     @pytest.mark.skipif("sys.version_info >= (3,3)")
     def test_function():
         ...
@@ -139,17 +145,20 @@ dictionary which is constructed as follows:
 expression is applied.

 The pytest ``config`` object allows you to skip based on a test
-configuration value which you might have added::
+configuration value which you might have added:
+
+.. code-block:: python

     @pytest.mark.skipif("not config.getvalue('db')")
-    def test_function(...):
+    def test_function():
         ...

-The equivalent with "boolean conditions" is::
+The equivalent with "boolean conditions" is:

-    @pytest.mark.skipif(not pytest.config.getvalue("db"),
-                        reason="--db was not specified")
-    def test_function(...):
+.. code-block:: python
+
+    @pytest.mark.skipif(not pytest.config.getvalue("db"), reason="--db was not specified")
+    def test_function():
         pass

 .. note::
@@ -164,9 +173,13 @@ The equivalent with "boolean conditions" is::
 .. versionchanged:: 2.4

-Previous to version 2.4 to set a break point in code one needed to use ``pytest.set_trace()``::
+Previous to version 2.4 to set a break point in code one needed to use ``pytest.set_trace()``:
+
+.. code-block:: python

     import pytest

     def test_function():
         ...
         pytest.set_trace()  # invoke PDB debugger and tracing


@@ -36,15 +36,15 @@ pytest enables test parametrization at several levels:
 The builtin :ref:`pytest.mark.parametrize ref` decorator enables
 parametrization of arguments for a test function. Here is a typical example
 of a test function that implements checking that a certain input leads
-to an expected output::
+to an expected output:
+
+.. code-block:: python

     # content of test_expectation.py
     import pytest

-    @pytest.mark.parametrize("test_input,expected", [
-        ("3+5", 8),
-        ("2+4", 6),
-        ("6*9", 42),
-    ])
+    @pytest.mark.parametrize("test_input,expected", [("3+5", 8), ("2+4", 6), ("6*9", 42)])
     def test_eval(test_input, expected):
         assert eval(test_input) == expected
@@ -104,16 +104,18 @@ Note that you could also use the parametrize marker on a class or a module
 (see :ref:`mark`) which would invoke several functions with the argument sets.

 It is also possible to mark individual test instances within parametrize,
-for example with the builtin ``mark.xfail``::
+for example with the builtin ``mark.xfail``:
+
+.. code-block:: python

     # content of test_expectation.py
     import pytest

-    @pytest.mark.parametrize("test_input,expected", [
-        ("3+5", 8),
-        ("2+4", 6),
-        pytest.param("6*9", 42,
-                     marks=pytest.mark.xfail),
-    ])
+    @pytest.mark.parametrize(
+        "test_input,expected",
+        [("3+5", 8), ("2+4", 6), pytest.param("6*9", 42, marks=pytest.mark.xfail)],
+    )
     def test_eval(test_input, expected):
         assert eval(test_input) == expected
@@ -140,9 +142,13 @@ example, if they're dynamically generated by some function - the behaviour of
 pytest is defined by the :confval:`empty_parameter_set_mark` option.

 To get all combinations of multiple parametrized arguments you can stack
-``parametrize`` decorators::
+``parametrize`` decorators:
+
+.. code-block:: python

     import pytest

     @pytest.mark.parametrize("x", [0, 1])
     @pytest.mark.parametrize("y", [2, 3])
     def test_foo(x, y):
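This hunk stops at the ``test_foo`` signature. As a self-contained sketch that is not part of the diff, stacking the two ``parametrize`` decorators runs the test once for every combination of the parameters:

    import pytest


    @pytest.mark.parametrize("x", [0, 1])
    @pytest.mark.parametrize("y", [2, 3])
    def test_foo(x, y):
        # runs four times: (x=0, y=2), (x=1, y=2), (x=0, y=3), (x=1, y=3)
        assert x in (0, 1)
        assert y in (2, 3)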
@@ -166,26 +172,36 @@ parametrization.
 For example, let's say we want to run a test taking string inputs which
 we want to set via a new ``pytest`` command line option. Let's first write
-a simple test accepting a ``stringinput`` fixture function argument::
+a simple test accepting a ``stringinput`` fixture function argument:
+
+.. code-block:: python

     # content of test_strings.py

     def test_valid_string(stringinput):
         assert stringinput.isalpha()

 Now we add a ``conftest.py`` file containing the addition of a
-command line option and the parametrization of our test function::
+command line option and the parametrization of our test function:
+
+.. code-block:: python

     # content of conftest.py

     def pytest_addoption(parser):
-        parser.addoption("--stringinput", action="append", default=[],
-            help="list of stringinputs to pass to test functions")
+        parser.addoption(
+            "--stringinput",
+            action="append",
+            default=[],
+            help="list of stringinputs to pass to test functions",
+        )

     def pytest_generate_tests(metafunc):
-        if 'stringinput' in metafunc.fixturenames:
-            metafunc.parametrize("stringinput",
-                                 metafunc.config.getoption('stringinput'))
+        if "stringinput" in metafunc.fixturenames:
+            metafunc.parametrize("stringinput", metafunc.config.getoption("stringinput"))

 If we now pass two stringinput values, our test will run twice:


@@ -84,32 +84,44 @@ It is also possible to skip the whole module using

 If you wish to skip something conditionally then you can use ``skipif`` instead.
 Here is an example of marking a test function to be skipped
-when run on an interpreter earlier than Python3.6::
+when run on an interpreter earlier than Python3.6:
+
+.. code-block:: python

     import sys

-    @pytest.mark.skipif(sys.version_info < (3,6),
-                        reason="requires python3.6 or higher")
+    @pytest.mark.skipif(sys.version_info < (3, 6), reason="requires python3.6 or higher")
     def test_function():
         ...

 If the condition evaluates to ``True`` during collection, the test function will be skipped,
 with the specified reason appearing in the summary when using ``-rs``.

-You can share ``skipif`` markers between modules. Consider this test module::
+You can share ``skipif`` markers between modules. Consider this test module:
+
+.. code-block:: python

     # content of test_mymodule.py
     import mymodule

-    minversion = pytest.mark.skipif(mymodule.__versioninfo__ < (1,1),
-                                    reason="at least mymodule-1.1 required")
+    minversion = pytest.mark.skipif(
+        mymodule.__versioninfo__ < (1, 1), reason="at least mymodule-1.1 required"
+    )

     @minversion
     def test_function():
         ...

-You can import the marker and reuse it in another test module::
+You can import the marker and reuse it in another test module:
+
+.. code-block:: python

     # test_myothermodule.py
     from test_mymodule import minversion

     @minversion
     def test_anotherfunction():
         ...
@@ -128,12 +140,12 @@ so they are supported mainly for backward compatibility reasons.
 Skip all test functions of a class or module
 ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~

-You can use the ``skipif`` marker (as any other marker) on classes::
+You can use the ``skipif`` marker (as any other marker) on classes:

-    @pytest.mark.skipif(sys.platform == 'win32',
-                        reason="does not run on windows")
+.. code-block:: python
+
+    @pytest.mark.skipif(sys.platform == "win32", reason="does not run on windows")
     class TestPosixCalls(object):
         def test_function(self):
             "will not be setup or run under 'win32' platform"
@@ -269,10 +281,11 @@ You can change the default value of the ``strict`` parameter using the
 ~~~~~~~~~~~~~~~~~~~~

 As with skipif_ you can also mark your expectation of a failure
-on a particular platform::
+on a particular platform:

-    @pytest.mark.xfail(sys.version_info >= (3,6),
-                       reason="python3.6 api changes")
+.. code-block:: python
+
+    @pytest.mark.xfail(sys.version_info >= (3, 6), reason="python3.6 api changes")
     def test_function():
         ...


@@ -6,15 +6,19 @@ Warnings Capture
 .. versionadded:: 3.1

 Starting from version ``3.1``, pytest now automatically catches warnings during test execution
-and displays them at the end of the session::
+and displays them at the end of the session:
+
+.. code-block:: python

     # content of test_show_warnings.py
     import warnings

     def api_v1():
         warnings.warn(UserWarning("api v1, should use functions from v2"))
         return 1

     def test_one():
         assert api_v1() == 1
@@ -195,28 +199,36 @@ Ensuring code triggers a deprecation warning

 You can also call a global helper for checking
 that a certain function call triggers a ``DeprecationWarning`` or
-``PendingDeprecationWarning``::
+``PendingDeprecationWarning``:
+
+.. code-block:: python

     import pytest

     def test_global():
         pytest.deprecated_call(myfunction, 17)

 By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be
 caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide
 them. If you wish to record them in your own code, use the
-command ``warnings.simplefilter('always')``::
+command ``warnings.simplefilter('always')``:
+
+.. code-block:: python

     import warnings
     import pytest

     def test_deprecation(recwarn):
-        warnings.simplefilter('always')
+        warnings.simplefilter("always")
         warnings.warn("deprecated", DeprecationWarning)
         assert len(recwarn) == 1
         assert recwarn.pop(DeprecationWarning)

-You can also use it as a contextmanager::
+You can also use it as a contextmanager:
+
+.. code-block:: python

     def test_global():
         with pytest.deprecated_call():
@@ -238,11 +250,14 @@ Asserting warnings with the warns function
 .. versionadded:: 2.8

 You can check that code raises a particular warning using ``pytest.warns``,
-which works in a similar manner to :ref:`raises <assertraises>`::
+which works in a similar manner to :ref:`raises <assertraises>`:
+
+.. code-block:: python

     import warnings
     import pytest

     def test_warning():
         with pytest.warns(UserWarning):
             warnings.warn("my warning", UserWarning)
@@ -269,7 +284,9 @@ You can also call ``pytest.warns`` on a function or code string::

 The function also returns a list of all raised warnings (as
 ``warnings.WarningMessage`` objects), which you can query for
-additional information::
+additional information:
+
+.. code-block:: python

     with pytest.warns(RuntimeWarning) as record:
         warnings.warn("another warning", RuntimeWarning)
@@ -297,7 +314,9 @@ You can record raised warnings either using ``pytest.warns`` or with
 the ``recwarn`` fixture.

 To record with ``pytest.warns`` without asserting anything about the warnings,
-pass ``None`` as the expected warning type::
+pass ``None`` as the expected warning type:
+
+.. code-block:: python

     with pytest.warns(None) as record:
         warnings.warn("user", UserWarning)
@@ -307,10 +326,13 @@ pass ``None`` as the expected warning type::
     assert str(record[0].message) == "user"
     assert str(record[1].message) == "runtime"

-The ``recwarn`` fixture will record warnings for the whole function::
+The ``recwarn`` fixture will record warnings for the whole function:
+
+.. code-block:: python

     import warnings

     def test_hello(recwarn):
         warnings.warn("hello", UserWarning)
         assert len(recwarn) == 1
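As a usage sketch that is not part of the diff, the warnings recorded by ``recwarn`` can also be inspected individually; ``recwarn.pop()`` returns a recorded warning whose ``category`` and ``message`` can be asserted on:

    import warnings


    def test_hello(recwarn):
        warnings.warn("hello", UserWarning)
        assert len(recwarn) == 1
        w = recwarn.pop(UserWarning)  # first recorded warning of that category
        assert issubclass(w.category, UserWarning)
        assert str(w.message) == "hello"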


@@ -509,10 +509,13 @@ a :py:class:`Result <pluggy._Result>` instance which encapsulates a result or
 exception info. The yield point itself will thus typically not raise
 exceptions (unless there are bugs).

-Here is an example definition of a hook wrapper::
+Here is an example definition of a hook wrapper:
+
+.. code-block:: python

     import pytest

     @pytest.hookimpl(hookwrapper=True)
     def pytest_pyfunc_call(pyfuncitem):
         do_something_before_next_hook_executes()
@@ -617,10 +620,13 @@ if you depend on a plugin that is not installed, validation will fail and
 the error message will not make much sense to your users.

 One approach is to defer the hook implementation to a new plugin instead of
-declaring the hook functions directly in your plugin module, for example::
+declaring the hook functions directly in your plugin module, for example:
+
+.. code-block:: python

     # contents of myplugin.py

     class DeferPlugin(object):
         """Simple plugin to defer pytest-xdist hook functions."""
@@ -628,8 +634,9 @@ declaring the hook functions directly in your plugin module, for example::
             """standard xdist hook function.
             """

     def pytest_configure(config):
-        if config.pluginmanager.hasplugin('xdist'):
+        if config.pluginmanager.hasplugin("xdist"):
             config.pluginmanager.register(DeferPlugin())

 This has the added benefit of allowing you to conditionally install hooks
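The hook wrapper hunk near the top of this file is truncated after the pre-``yield`` call. As a sketch of the general shape, assuming pluggy's documented ``outcome.get_result()`` API, a complete wrapper looks roughly like this:

    import pytest


    @pytest.hookimpl(hookwrapper=True)
    def pytest_pyfunc_call(pyfuncitem):
        # code before the yield runs before the non-wrapper implementations
        outcome = yield
        # get_result() returns the call outcome, re-raising any exception
        res = outcome.get_result()
        print("pytest_pyfunc_call for %s returned %r" % (pyfuncitem.name, res))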


@@ -853,7 +853,9 @@ class FixtureDef(object):
                     exceptions.append(sys.exc_info())
             if exceptions:
                 e = exceptions[0]
-                del exceptions  # ensure we don't keep all frames alive because of the traceback
+                del (
+                    exceptions
+                )  # ensure we don't keep all frames alive because of the traceback
                 six.reraise(*e)
         finally:


@@ -1320,7 +1320,7 @@ class LineMatcher(object):
         raise ValueError("line %r not found in output" % fnline)

     def _log(self, *args):
-        self._log_output.append(" ".join((str(x) for x in args)))
+        self._log_output.append(" ".join(str(x) for x in args))

     @property
     def _log_text(self):


@@ -1071,10 +1071,8 @@ class TestFixtureUsages(object):
         )
         result = testdir.runpytest_inprocess()
         result.stdout.fnmatch_lines(
-            (
-                "*Fixture 'badscope' from test_invalid_scope.py got an unexpected scope value 'functions'"
-            )
+            "*Fixture 'badscope' from test_invalid_scope.py got an unexpected scope value 'functions'"
         )

     def test_funcarg_parametrized_and_used_twice(self, testdir):
         testdir.makepyfile(