commit d609b635f4
@@ -3,9 +3,15 @@ repos:
 -   repo: https://github.com/ambv/black
     rev: 18.4a4
     hooks:
     -   id: black
         args: [--safe, --quiet]
-        python_version: python3.6
+        language_version: python3.6
+-   repo: https://github.com/asottile/blacken-docs
+    rev: v0.1.1
+    hooks:
+    -   id: blacken-docs
+        additional_dependencies: [black==18.5b1]
+        language_version: python3.6
 -   repo: https://github.com/pre-commit/pre-commit-hooks
     rev: v1.2.3
     hooks:
@@ -40,6 +40,7 @@ An example of a simple test:
     def inc(x):
         return x + 1

+
     def test_answer():
         assert inc(3) == 5

@@ -14,7 +14,7 @@ Install argcomplete using::

 For global activation of all argcomplete enabled python applications run::

-    sudo activate-global-python-argcomplete
+    sudo activate-global-python-argcomplete

 For permanent (but not global) ``pytest`` activation, use::

@@ -92,7 +92,7 @@ an example test function that performs some output related checks:

 .. code-block:: python

-    def test_myoutput(capsys): # or use "capfd" for fd-level
+    def test_myoutput(capsys):  # or use "capfd" for fd-level
         print("hello")
         sys.stderr.write("world\n")
         captured = capsys.readouterr()
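The hunk above stops at ``readouterr()``; a minimal sketch of how the captured output is usually asserted afterwards (the expected strings follow from the two writes in the example, and ``capsys.readouterr()`` returns the captured out/err):

    import sys


    def test_myoutput(capsys):  # or use "capfd" for fd-level
        print("hello")
        sys.stderr.write("world\n")
        captured = capsys.readouterr()
        # assert on everything captured up to this point
        assert captured.out == "hello\n"
        assert captured.err == "world\n"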
@@ -145,9 +145,9 @@ as a context manager, disabling capture inside the ``with`` block:
 .. code-block:: python

     def test_disabling_capturing(capsys):
-        print('this output is captured')
+        print("this output is captured")
         with capsys.disabled():
-            print('output not captured, going directly to sys.stdout')
-        print('this output is also captured')
+            print("output not captured, going directly to sys.stdout")
+        print("this output is also captured")

 .. include:: links.inc
@@ -9,15 +9,18 @@ example: specifying and selecting acceptance tests
     # ./conftest.py
     def pytest_option(parser):
         group = parser.getgroup("myproject")
-        group.addoption("-A", dest="acceptance", action="store_true",
-            help="run (slow) acceptance tests")
+        group.addoption(
+            "-A", dest="acceptance", action="store_true", help="run (slow) acceptance tests"
+        )
+

     def pytest_funcarg__accept(request):
         return AcceptFixture(request)

+
     class AcceptFixture(object):
         def __init__(self, request):
-            if not request.config.getoption('acceptance'):
+            if not request.config.getoption("acceptance"):
                 pytest.skip("specify -A to run acceptance tests")
             self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True)

@@ -61,6 +64,7 @@ extend the `accept example`_ by putting this in our test module:
         arg.tmpdir.mkdir("special")
         return arg

+
     class TestSpecialAcceptance(object):
         def test_sometest(self, accept):
             assert accept.tmpdir.join("special").check()
@@ -18,10 +18,10 @@ Here is a basic pattern to achieve this:
     # content of test_sample.py
     def test_answer(cmdopt):
         if cmdopt == "type1":
-            print ("first")
+            print("first")
         elif cmdopt == "type2":
-            print ("second")
-        assert 0 # to see what was printed
+            print("second")
+        assert 0  # to see what was printed


 For this to work we need to add a command line option and
@@ -32,9 +32,12 @@ provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`:
     # content of conftest.py
     import pytest

+
     def pytest_addoption(parser):
-        parser.addoption("--cmdopt", action="store", default="type1",
-            help="my option: type1 or type2")
+        parser.addoption(
+            "--cmdopt", action="store", default="type1", help="my option: type1 or type2"
+        )
+

     @pytest.fixture
     def cmdopt(request):
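The fixture body falls outside the hunk; in this pattern it simply hands back the value of the option registered above — a minimal sketch:

    @pytest.fixture
    def cmdopt(request):
        # look up the value given via --cmdopt (defaults to "type1")
        return request.config.getoption("--cmdopt")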
@@ -102,9 +105,12 @@ the command line arguments before they get processed:

     # content of conftest.py
     import sys

+
     def pytest_load_initial_conftests(args):
-        if 'xdist' in sys.modules: # pytest-xdist plugin
+        if "xdist" in sys.modules:  # pytest-xdist plugin
             import multiprocessing
+
             num = max(multiprocessing.cpu_count() / 2, 1)
             args[:] = ["-n", str(num)] + args
@@ -136,9 +142,13 @@ line option to control skipping of ``pytest.mark.slow`` marked tests:
     # content of conftest.py

     import pytest

+
     def pytest_addoption(parser):
-        parser.addoption("--runslow", action="store_true",
-            default=False, help="run slow tests")
+        parser.addoption(
+            "--runslow", action="store_true", default=False, help="run slow tests"
+        )
+

     def pytest_collection_modifyitems(config, items):
         if config.getoption("--runslow"):
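The hunk ends at the ``--runslow`` check; the usual continuation of this recipe skips ``slow``-marked tests unless the option was given — a sketch of that remainder:

    def pytest_collection_modifyitems(config, items):
        if config.getoption("--runslow"):
            # --runslow given on the command line: do not skip slow tests
            return
        skip_slow = pytest.mark.skip(reason="need --runslow option to run")
        for item in items:
            if "slow" in item.keywords:
                item.add_marker(skip_slow)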
@@ -206,10 +216,13 @@ Example:

     # content of test_checkconfig.py
     import pytest

+
     def checkconfig(x):
         __tracebackhide__ = True
         if not hasattr(x, "config"):
-            pytest.fail("not configured: %s" %(x,))
+            pytest.fail("not configured: %s" % (x,))
+

     def test_something():
         checkconfig(42)
@@ -240,13 +253,16 @@ this to make sure unexpected exception types aren't hidden:
     import operator
     import pytest

+
     class ConfigException(Exception):
         pass

+
     def checkconfig(x):
-        __tracebackhide__ = operator.methodcaller('errisinstance', ConfigException)
+        __tracebackhide__ = operator.methodcaller("errisinstance", ConfigException)
         if not hasattr(x, "config"):
-            raise ConfigException("not configured: %s" %(x,))
+            raise ConfigException("not configured: %s" % (x,))
+

     def test_something():
         checkconfig(42)
@@ -269,22 +285,28 @@ running from a test you can do something like this:

     # content of conftest.py

+
     def pytest_configure(config):
         import sys
+
         sys._called_from_test = True

+
     def pytest_unconfigure(config):
         import sys
+
         del sys._called_from_test

 and then check for the ``sys._called_from_test`` flag:

 .. code-block:: python

-    if hasattr(sys, '_called_from_test'):
+    if hasattr(sys, "_called_from_test"):
         # called from within a test run
+        ...
     else:
         # called "normally"
+        ...

 accordingly in your application. It's also a good idea
 to use your own application module rather than ``sys``
@@ -301,6 +323,7 @@ It's easy to present extra information in a ``pytest`` run:

     # content of conftest.py

+
     def pytest_report_header(config):
         return "project deps: mylib-1.1"

@@ -325,8 +348,9 @@ display more information if applicable:

     # content of conftest.py

+
     def pytest_report_header(config):
-        if config.getoption('verbose') > 0:
+        if config.getoption("verbose") > 0:
             return ["info1: did you know that ...", "did you?"]

 which will add info only when run with "--v"::

@@ -367,12 +391,15 @@ out which tests are the slowest. Let's make an artificial test suite:
     # content of test_some_are_slow.py
     import time

+
     def test_funcfast():
         time.sleep(0.1)

+
     def test_funcslow1():
         time.sleep(0.2)

+
     def test_funcslow2():
         time.sleep(0.3)

@@ -409,17 +436,19 @@ an ``incremental`` marker which is to be used on classes:

     import pytest

+
     def pytest_runtest_makereport(item, call):
         if "incremental" in item.keywords:
             if call.excinfo is not None:
                 parent = item.parent
                 parent._previousfailed = item

+
     def pytest_runtest_setup(item):
         if "incremental" in item.keywords:
             previousfailed = getattr(item.parent, "_previousfailed", None)
             if previousfailed is not None:
-                pytest.xfail("previous test failed (%s)" %previousfailed.name)
+                pytest.xfail("previous test failed (%s)" % previousfailed.name)

 These two hook implementations work together to abort incremental-marked
 tests in a class. Here is a test module example:
@@ -430,15 +459,19 @@ tests in a class. Here is a test module example:

     import pytest

+
     @pytest.mark.incremental
     class TestUserHandling(object):
         def test_login(self):
             pass
+
         def test_modification(self):
             assert 0
+
         def test_deletion(self):
             pass

+
     def test_normal():
         pass

@@ -489,9 +522,11 @@ Here is an example for making a ``db`` fixture available in a directory:
     # content of a/conftest.py
     import pytest

+
     class DB(object):
         pass

+
     @pytest.fixture(scope="session")
     def db():
         return DB()
@@ -600,6 +635,7 @@ case we just write some information out to a ``failures`` file:
     import pytest
     import os.path

+
     @pytest.hookimpl(tryfirst=True, hookwrapper=True)
     def pytest_runtest_makereport(item, call):
         # execute all other hooks to obtain the report object
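The hookwrapper body is cut off right after the comment; a sketch of how the report is usually obtained and written out to the ``failures`` file this recipe describes:

    import os.path

    import pytest


    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        # execute all other hooks to obtain the report object
        outcome = yield
        rep = outcome.get_result()

        # only look at actual failing test calls, not setup/teardown
        if rep.when == "call" and rep.failed:
            mode = "a" if os.path.exists("failures") else "w"
            with open("failures", mode) as f:
                f.write(rep.nodeid + "\n")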
@@ -626,6 +662,8 @@ if you then have failing tests:
     # content of test_module.py
     def test_fail1(tmpdir):
         assert 0
+
+
     def test_fail2():
         assert 0

@@ -678,6 +716,7 @@ here is a little example implemented via a local plugin:

     import pytest

+
     @pytest.hookimpl(tryfirst=True, hookwrapper=True)
     def pytest_runtest_makereport(item, call):
         # execute all other hooks to obtain the report object
@@ -696,10 +735,10 @@ here is a little example implemented via a local plugin:
         # request.node is an "item" because we use the default
         # "function" scope
         if request.node.rep_setup.failed:
-            print ("setting up a test failed!", request.node.nodeid)
+            print("setting up a test failed!", request.node.nodeid)
         elif request.node.rep_setup.passed:
             if request.node.rep_call.failed:
-                print ("executing test failed", request.node.nodeid)
+                print("executing test failed", request.node.nodeid)


 if you then have failing tests:
@@ -710,16 +749,20 @@ if you then have failing tests:

     import pytest

+
     @pytest.fixture
     def other():
         assert 0

+
     def test_setup_fails(something, other):
         pass

+
     def test_call_fails(something):
         assert 0

+
     def test_fail2():
         assert 0

@@ -787,7 +830,7 @@ test got stuck if necessary:

     for pid in psutil.pids():
         environ = psutil.Process(pid).environ()
-        if 'PYTEST_CURRENT_TEST' in environ:
+        if "PYTEST_CURRENT_TEST" in environ:
             print(f'pytest process {pid} running: {environ["PYTEST_CURRENT_TEST"]}')

 During the test session pytest will set ``PYTEST_CURRENT_TEST`` to the current test
@@ -841,8 +884,9 @@ like ``pytest-timeout`` they must be imported explicitly and passed on to pytest
     import sys
     import pytest_timeout  # Third party plugin

-    if len(sys.argv) > 1 and sys.argv[1] == '--pytest':
+    if len(sys.argv) > 1 and sys.argv[1] == "--pytest":
         import pytest
+
         sys.exit(pytest.main(sys.argv[2:], plugins=[pytest_timeout]))
     else:
         # normal application execution: at this point argv can be parsed
@@ -250,9 +250,10 @@ instance, you can simply declare it:
 .. code-block:: python

     @pytest.fixture(scope="session")
-    def smtp(...):
+    def smtp():
         # the returned fixture value will be shared for
         # all tests needing it
+        ...

 Finally, the ``class`` scope will invoke the fixture once per test *class*.

@@ -274,18 +275,22 @@ Consider the code below:
     def s1():
         pass

+
     @pytest.fixture(scope="module")
     def m1():
         pass

+
     @pytest.fixture
     def f1(tmpdir):
         pass

+
     @pytest.fixture
     def f2():
         pass

+
     def test_foo(f1, m1, f2, s1):
         ...

@@ -316,6 +321,7 @@ the code after the *yield* statement serves as the teardown code:
     import smtplib
     import pytest

+
     @pytest.fixture(scope="module")
     def smtp():
         smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5)
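The hunk ends where the connection is created; the teardown after the ``yield`` described in the surrounding text typically looks like this:

    @pytest.fixture(scope="module")
    def smtp():
        smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5)
        yield smtp  # provide the fixture value
        # everything after the yield runs as teardown code
        print("teardown smtp")
        smtp.close()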
@@ -350,6 +356,7 @@ Note that we can also seamlessly use the ``yield`` syntax with ``with`` statemen
     import smtplib
     import pytest

+
     @pytest.fixture(scope="module")
     def smtp():
         with smtplib.SMTP("smtp.gmail.com", 587, timeout=5) as smtp:
@@ -375,12 +382,15 @@ Here's the ``smtp`` fixture changed to use ``addfinalizer`` for cleanup:
     import smtplib
     import pytest

+
     @pytest.fixture(scope="module")
     def smtp(request):
         smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5)

         def fin():
-            print ("teardown smtp")
+            print("teardown smtp")
             smtp.close()
+
         request.addfinalizer(fin)
         return smtp  # provide the fixture value

@@ -867,6 +877,8 @@ You can specify multiple fixtures like this:
 .. code-block:: python

     @pytest.mark.usefixtures("cleandir", "anotherfixture")
+    def test():
+        ...

 and you may specify fixture usage at the test module level, using
 a generic feature of the mark mechanism:
@@ -214,10 +214,10 @@ Add this to ``setup.py`` file:
     from setuptools import setup

     setup(
-        #...,
-        setup_requires=['pytest-runner', ...],
-        tests_require=['pytest', ...],
-        #...,
+        # ...,
+        setup_requires=["pytest-runner", ...],
+        tests_require=["pytest", ...],
+        # ...,
     )

@@ -263,25 +263,27 @@ your own setuptools Test command for invoking pytest.


     class PyTest(TestCommand):
-        user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
+        user_options = [("pytest-args=", "a", "Arguments to pass to pytest")]

         def initialize_options(self):
             TestCommand.initialize_options(self)
-            self.pytest_args = ''
+            self.pytest_args = ""

         def run_tests(self):
             import shlex
-            #import here, cause outside the eggs aren't loaded
+
+            # import here, cause outside the eggs aren't loaded
             import pytest
+
             errno = pytest.main(shlex.split(self.pytest_args))
             sys.exit(errno)


     setup(
-        #...,
-        tests_require=['pytest'],
-        cmdclass = {'test': PyTest},
-    )
+        # ...,
+        tests_require=["pytest"],
+        cmdclass={"test": PyTest},
+    )

 Now if you run::

@@ -17,6 +17,7 @@ An example of a simple test:
     def inc(x):
         return x + 1

+
     def test_answer():
         assert inc(3) == 5

@@ -138,10 +138,14 @@ the records for the ``setup`` and ``call`` stages during teardown like so:
     def window(caplog):
         window = create_window()
         yield window
-        for when in ('setup', 'call'):
-            messages = [x.message for x in caplog.get_records(when) if x.level == logging.WARNING]
+        for when in ("setup", "call"):
+            messages = [
+                x.message for x in caplog.get_records(when) if x.level == logging.WARNING
+            ]
             if messages:
-                pytest.fail('warning messages encountered during testing: {}'.format(messages))
+                pytest.fail(
+                    "warning messages encountered during testing: {}".format(messages)
+                )

@@ -70,12 +70,12 @@ In general there are two scenarios on how markers should be handled:
 .. code-block:: python

     # replace this:
-    marker = item.get_marker('log_level')
+    marker = item.get_marker("log_level")
     if marker:
         level = marker.args[0]

     # by this:
-    marker = item.get_closest_marker('log_level')
+    marker = item.get_closest_marker("log_level")
     if marker:
         level = marker.args[0]

@@ -87,13 +87,14 @@ order doesn't even matter. You probably want to think of your marks as a set her
 .. code-block:: python

     # replace this
-    skipif = item.get_marker('skipif')
+    skipif = item.get_marker("skipif")
     if skipif:
         for condition in skipif.args:
             # eval condition
+            ...

     # by this:
-    for skipif in item.iter_markers('skipif'):
+    for skipif in item.iter_markers("skipif"):
         condition = skipif.args[0]
         # eval condition

@@ -134,5 +135,5 @@ More details can be found in the `original PR <https://github.com/pytest-dev/pyt

 .. note::

-    in a future major relase of pytest we will introduce class based markers,
-    at which points markers will no longer be limited to instances of :py:class:`Mark`
+    in a future major relase of pytest we will introduce class based markers,
+    at which points markers will no longer be limited to instances of :py:class:`Mark`
@@ -71,6 +71,8 @@ so that any attempts within tests to create http requests will fail.
 .. code-block:: python

+    import functools
+
+
     def test_partial(monkeypatch):
         with monkeypatch.context() as m:
             m.setattr(functools, "partial", 3)
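The hunk stops inside the ``with`` block; a small sketch of the assertion that usually follows, and of the automatic undo once the context exits:

    import functools


    def test_partial(monkeypatch):
        with monkeypatch.context() as m:
            m.setattr(functools, "partial", 3)
            assert functools.partial == 3
        # the patched attribute is restored when the context manager exits
        assert callable(functools.partial)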
@@ -35,26 +35,29 @@ This is how a functional test could look like:

     import pytest

+
     @pytest.fixture
     def default_context():
-        return {'extra_context': {}}
+        return {"extra_context": {}}


-    @pytest.fixture(params=[
-        {'author': 'alice'},
-        {'project_slug': 'helloworld'},
-        {'author': 'bob', 'project_slug': 'foobar'},
-    ])
+    @pytest.fixture(
+        params=[
+            {"author": "alice"},
+            {"project_slug": "helloworld"},
+            {"author": "bob", "project_slug": "foobar"},
+        ]
+    )
     def extra_context(request):
-        return {'extra_context': request.param}
+        return {"extra_context": request.param}


-    @pytest.fixture(params=['default', 'extra'])
+    @pytest.fixture(params=["default", "extra"])
     def context(request):
-        if request.param == 'default':
-            return request.getfuncargvalue('default_context')
+        if request.param == "default":
+            return request.getfuncargvalue("default_context")
         else:
-            return request.getfuncargvalue('extra_context')
+            return request.getfuncargvalue("extra_context")


     def test_generate_project(cookies, context):
@@ -95,8 +98,7 @@ fixtures from existing ones.
 .. code-block:: python

     pytest.define_combined_fixture(
-        name='context',
-        fixtures=['default_context', 'extra_context'],
+        name="context", fixtures=["default_context", "extra_context"]
     )

 The new fixture ``context`` inherits the scope from the used fixtures and yield
@@ -123,10 +125,12 @@ all parameters marked as a fixture.

 .. code-block:: python

-    @pytest.fixture(params=[
-        pytest.fixture_request('default_context'),
-        pytest.fixture_request('extra_context'),
-    ])
+    @pytest.fixture(
+        params=[
+            pytest.fixture_request("default_context"),
+            pytest.fixture_request("extra_context"),
+        ]
+    )
     def context(request):
         """Returns all values for ``default_context``, one-by-one before it
         does the same for ``extra_context``.
@@ -145,10 +149,10 @@ The same helper can be used in combination with ``pytest.mark.parametrize``.


     @pytest.mark.parametrize(
-        'context, expected_response_code',
+        "context, expected_response_code",
         [
-            (pytest.fixture_request('default_context'), 0),
-            (pytest.fixture_request('extra_context'), 0),
+            (pytest.fixture_request("default_context"), 0),
+            (pytest.fixture_request("extra_context"), 0),
         ],
     )
     def test_generate_project(cookies, context, exit_code):
@@ -198,7 +198,7 @@ For example:

 .. code-block:: python

-    @pytest.mark.timeout(10, 'slow', method='thread')
+    @pytest.mark.timeout(10, "slow", method="thread")
     def test_function():
         ...

@@ -208,8 +208,8 @@ Will create and attach a :class:`Mark <_pytest.mark.structures.Mark>` object to

 .. code-block:: python

-    mark.args == (10, 'slow')
-    mark.kwargs == {'method': 'thread'}
+    mark.args == (10, "slow")
+    mark.kwargs == {"method": "thread"}


 Fixtures
@@ -225,9 +225,9 @@ Example of a test requiring a fixture:
 .. code-block:: python

     def test_output(capsys):
-        print('hello')
+        print("hello")
         out, err = capsys.readouterr()
-        assert out == 'hello\n'
+        assert out == "hello\n"


 Example of a fixture requiring another fixture:
@@ -236,7 +236,7 @@ Example of a fixture requiring another fixture:

     @pytest.fixture
     def db_session(tmpdir):
-        fn = tmpdir / 'db.file'
+        fn = tmpdir / "db.file"
         return connect(str(fn))

 For more details, consult the full :ref:`fixtures docs <fixture>`.
@@ -368,7 +368,7 @@ doctest_namespace

     @pytest.fixture(autouse=True)
     def add_np(doctest_namespace):
-        doctest_namespace['np'] = numpy
+        doctest_namespace["np"] = numpy

 For more details: :ref:`doctest_namespace`.

@@ -805,12 +805,14 @@ test functions and methods. Can be either a single mark or a sequence of marks.
 .. code-block:: python

     import pytest
+
     pytestmark = pytest.mark.webtest

 .. code-block:: python

     import pytest
+
     pytestmark = (pytest.mark.integration, pytest.mark.slow)

 PYTEST_DONT_REWRITE (module docstring)
@@ -192,19 +192,19 @@ Here's a quick guide on how to skip tests in a module in different situations:

 .. code-block:: python

-    pytestmark = pytest.mark.skip('all tests still WIP')
+    pytestmark = pytest.mark.skip("all tests still WIP")

 2. Skip all tests in a module based on some condition:

 .. code-block:: python

-    pytestmark = pytest.mark.skipif(sys.platform == 'win32', 'tests for linux only')
+    pytestmark = pytest.mark.skipif(sys.platform == "win32", "tests for linux only")

 3. Skip all tests in a module if some import is missing:

 .. code-block:: python

-    pexpect = pytest.importorskip('pexpect')
+    pexpect = pytest.importorskip("pexpect")


 .. _xfail:
@@ -364,14 +364,20 @@ test instances when using parametrize:

     import pytest

-    @pytest.mark.parametrize(("n", "expected"), [
-        (1, 2),
-        pytest.param(1, 0, marks=pytest.mark.xfail),
-        pytest.param(1, 3, marks=pytest.mark.xfail(reason="some bug")),
-        (2, 3),
-        (3, 4),
-        (4, 5),
-        pytest.param(10, 11, marks=pytest.mark.skipif(sys.version_info >= (3, 0), reason="py2k")),
-    ])
+
+    @pytest.mark.parametrize(
+        ("n", "expected"),
+        [
+            (1, 2),
+            pytest.param(1, 0, marks=pytest.mark.xfail),
+            pytest.param(1, 3, marks=pytest.mark.xfail(reason="some bug")),
+            (2, 3),
+            (3, 4),
+            (4, 5),
+            pytest.param(
+                10, 11, marks=pytest.mark.skipif(sys.version_info >= (3, 0), reason="py2k")
+            ),
+        ],
+    )
     def test_increment(n, expected):
         assert n + 1 == expected
@@ -71,13 +71,15 @@ to save time:
     # contents of conftest.py
     import pytest

-    @pytest.fixture(scope='session')
+
+    @pytest.fixture(scope="session")
     def image_file(tmpdir_factory):
         img = compute_expensive_image()
-        fn = tmpdir_factory.mktemp('data').join('img.png')
+        fn = tmpdir_factory.mktemp("data").join("img.png")
         img.save(str(fn))
         return fn

+
     # contents of test_image.py
     def test_histogram(image_file):
         img = load_image(image_file)
@@ -272,11 +272,12 @@ Alternatively, you can integrate this functionality with custom markers:

     # content of conftest.py

+
     def pytest_collection_modifyitems(session, config, items):
         for item in items:
-            for marker in item.iter_markers(name='test_id'):
+            for marker in item.iter_markers(name="test_id"):
                 test_id = marker.args[0]
-                item.user_properties.append(('test_id', test_id))
+                item.user_properties.append(("test_id", test_id))

 And in your tests:

@@ -284,6 +285,8 @@ And in your tests:

     # content of test_function.py
     import pytest
+
+
     @pytest.mark.test_id(1501)
     def test_function():
         assert True
@@ -318,7 +321,7 @@ To add an additional xml attribute to a testcase element, you can use
     def test_function(record_xml_attribute):
         record_xml_attribute("assertions", "REQ-1234")
         record_xml_attribute("classname", "custom_classname")
-        print('hello world')
+        print("hello world")
         assert True

 Unlike ``record_property``, this will not add a new child element.
@@ -377,19 +380,22 @@ to all testcases you can use ``LogXML.add_global_properties``

     import pytest

+
     @pytest.fixture(scope="session")
     def log_global_env_facts(f):

-        if pytest.config.pluginmanager.hasplugin('junitxml'):
-            my_junit = getattr(pytest.config, '_xml', None)
+        if pytest.config.pluginmanager.hasplugin("junitxml"):
+            my_junit = getattr(pytest.config, "_xml", None)

-            my_junit.add_global_property('ARCH', 'PPC')
-            my_junit.add_global_property('STORAGE_TYPE', 'CEPH')
+            my_junit.add_global_property("ARCH", "PPC")
+            my_junit.add_global_property("STORAGE_TYPE", "CEPH")
+

     @pytest.mark.usefixtures(log_global_env_facts.__name__)
     def start_and_prepare_env():
         pass

+
     class TestMe(object):
         def test_foo(self):
             assert True
@@ -94,11 +94,13 @@ even module level:

     import warnings

+
     def api_v1():
         warnings.warn(UserWarning("api v1, should use functions from v2"))
         return 1

-    @pytest.mark.filterwarnings('ignore:api v1')
+
+    @pytest.mark.filterwarnings("ignore:api v1")
     def test_one():
         assert api_v1() == 1

@@ -112,7 +114,7 @@ decorator or to all tests in a module by setting the ``pytestmark`` variable:
 .. code-block:: python

     # turns all warnings into errors for this module
-    pytestmark = pytest.mark.filterwarnings('error')
+    pytestmark = pytest.mark.filterwarnings("error")


 .. note::

|
@ -150,19 +150,11 @@ it in your setuptools-invocation:
|
|||
|
||||
setup(
|
||||
name="myproject",
|
||||
packages = ['myproject']
|
||||
|
||||
packages=["myproject"],
|
||||
# the following makes a plugin available to pytest
|
||||
entry_points = {
|
||||
'pytest11': [
|
||||
'name_of_plugin = myproject.pluginmodule',
|
||||
]
|
||||
},
|
||||
|
||||
entry_points={"pytest11": ["name_of_plugin = myproject.pluginmodule"]},
|
||||
# custom PyPI classifier for pytest plugins
|
||||
classifiers=[
|
||||
"Framework :: Pytest",
|
||||
],
|
||||
classifiers=["Framework :: Pytest"],
|
||||
)
|
||||
|
||||
If a package is installed this way, ``pytest`` will load
|
||||
|
@@ -213,11 +205,7 @@ With the following typical ``setup.py`` extract:

 .. code-block:: python

-    setup(
-        ...
-        entry_points={'pytest11': ['foo = pytest_foo.plugin']},
-        ...
-    )
+    setup(..., entry_points={"pytest11": ["foo = pytest_foo.plugin"]}, ...)

 In this case only ``pytest_foo/plugin.py`` will be rewritten. If the
 helper module also contains assert statements which need to be
@@ -232,7 +220,7 @@ import ``helper.py`` normally. The contents of

     import pytest

-    pytest.register_assert_rewrite('pytest_foo.helper')
+    pytest.register_assert_rewrite("pytest_foo.helper")


@@ -332,23 +320,25 @@ string value of ``Hello World!`` if we do not supply a value or ``Hello

     import pytest

+
     def pytest_addoption(parser):
-        group = parser.getgroup('helloworld')
+        group = parser.getgroup("helloworld")
         group.addoption(
-            '--name',
-            action='store',
-            dest='name',
-            default='World',
-            help='Default "name" for hello().'
+            "--name",
+            action="store",
+            dest="name",
+            default="World",
+            help='Default "name" for hello().',
         )

+
     @pytest.fixture
     def hello(request):
-        name = request.config.getoption('name')
+        name = request.config.getoption("name")

         def _hello(name=None):
             if not name:
-                name = request.config.getoption('name')
+                name = request.config.getoption("name")
             return "Hello {name}!".format(name=name)

         return _hello
@@ -364,7 +354,8 @@ return a result object, with which we can assert the tests' outcomes.
         """Make sure that our plugin works."""

         # create a temporary conftest.py file
-        testdir.makeconftest("""
+        testdir.makeconftest(
+            """
             import pytest

             @pytest.fixture(params=[
@@ -374,16 +365,19 @@ return a result object, with which we can assert the tests' outcomes.
             ])
             def name(request):
                 return request.param
-        """)
+        """
+        )

         # create a temporary pytest test file
-        testdir.makepyfile("""
+        testdir.makepyfile(
+            """
             def test_hello_default(hello):
                 assert hello() == "Hello World!"

             def test_hello_name(hello, name):
                 assert hello(name) == "Hello {0}!".format(name)
-        """)
+        """
+        )

         # run all tests with pytest
         result = testdir.runpytest()
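The hunk ends right after the tests are run; the ``RunResult`` returned by ``runpytest()`` can then be inspected — a minimal sketch (checking only the exit code; the exact outcome counts depend on the parametrized names elided above):

        result = testdir.runpytest()

        # the RunResult can be inspected, e.g. a zero exit code means every test passed
        assert result.ret == 0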
@@ -425,6 +419,7 @@ Let's look at a possible implementation:
     def pytest_collection_modifyitems(config, items):
         # called after collection is completed
         # you can modify the ``items`` list
+        ...

 Here, ``pytest`` will pass in ``config`` (the pytest config object)
 and ``items`` (the list of collected test items) but will not pass
@@ -511,11 +506,15 @@ after others, i.e. the position in the ``N``-sized list of functions:
     @pytest.hookimpl(tryfirst=True)
     def pytest_collection_modifyitems(items):
         # will execute as early as possible
+        ...

+
     # Plugin 2
     @pytest.hookimpl(trylast=True)
     def pytest_collection_modifyitems(items):
         # will execute as late as possible
+        ...

+
     # Plugin 3
     @pytest.hookimpl(hookwrapper=True)
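The last hunk is cut off at the ``hookwrapper`` decorator; a sketch of how such a wrapper is typically completed (mirroring the two plugins above):

    # Plugin 3
    @pytest.hookimpl(hookwrapper=True)
    def pytest_collection_modifyitems(items):
        # will execute even before the tryfirst one above
        outcome = yield
        # will execute after all non-hookwrapper hooks have run
        ...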