- fix doc references, refactor fixtures docs to more quickly start

with examples instead of big text blobs
- also silence -q and -qq reporting some more
This commit is contained in:
holger krekel 2012-10-07 13:06:17 +02:00
parent cda84fb566
commit 30b10a6950
34 changed files with 905 additions and 1059 deletions

View File

@ -21,22 +21,23 @@ class FixtureFunctionMarker:
return function
# XXX a test fails when scope="function" how it should be, investigate
def fixture(scope=None, params=None, autoactive=False):
""" return a decorator to mark a fixture factory function.
def fixture(scope="function", params=None, autoactive=False):
""" (return a) decorator to mark a fixture factory function.
The name of the fixture function can be referenced in a test context
to cause its invocation ahead of running tests. Test modules or classes
can use the pytest.mark.usefixtures(fixturename) marker to specify
needed fixtures. Test functions can also use fixture names as input
This decorator can be used (directly or with parameters) to define
a fixture function. The name of the fixture function can later be
referenced to cause its invocation ahead of running tests: test
modules or classes can use the pytest.mark.usefixtures(fixturename)
marker and test functions can directly use fixture names as input
arguments in which case the fixture instance returned from the fixture
function will be injected.
:arg scope: the scope for which this fixture is shared, one of
"function", "class", "module", "session". Defaults to "function".
"function" (default), "class", "module", "session".
:arg params: an optional list of parameters which will cause multiple
invocations of the fixture functions and their dependent
tests.
invocations of the fixture function and all of the tests
using it.
:arg autoactive: if True, the fixture func is activated for all tests that
can see it. If False (the default) then an explicit
@ -992,7 +993,7 @@ def scopeproperty(name=None, doc=None):
class FixtureRequest(FuncargnamesCompatAttr):
""" A request for fixtures from a test or setup function.
""" A request for fixtures from a test or fixture function.
A request object gives access to attributes of the requesting
test context. It has an optional ``param`` attribute in case
@ -1019,7 +1020,7 @@ class FixtureRequest(FuncargnamesCompatAttr):
@property
def node(self):
""" underlying collection node (depends on request scope)"""
""" underlying collection node (depends on current request scope)"""
return self._getscopeitem(self.scope)
def _getfixturedeflist(self, argname):
@ -1227,12 +1228,13 @@ class FixtureRequest(FuncargnamesCompatAttr):
mp.setattr(self, "scope", scope)
# prepare finalization according to scope
# (XXX analyse exact finalizing mechanics / cleanup)
self.session._setupstate.addfinalizer(fixturedef.finish, self.node)
self._fixturemanager.addargfinalizer(fixturedef.finish, argname)
for subargname in fixturedef.fixturenames: # XXX all deps?
self._fixturemanager.addargfinalizer(fixturedef.finish, subargname)
mp.setattr(self, "addfinalizer", fixturedef.addfinalizer)
# finally perform the factory call
# finally perform the fixture call
val = fixturedef.execute(request=self)
mp.undo()
return val

View File

@ -209,7 +209,7 @@ class TerminalReporter:
self.currentfspath = -2
def pytest_collection(self):
if not self.hasmarkup:
if not self.hasmarkup and self.config.option.verbose >=1:
self.write("collecting ... ", bold=True)
def pytest_collectreport(self, report):
@ -224,6 +224,9 @@ class TerminalReporter:
self.report_collect()
def report_collect(self, final=False):
if self.config.option.verbose < 0:
return
errors = len(self.stats.get('error', []))
skipped = len(self.stats.get('skipped', []))
if final:
@ -455,8 +458,8 @@ class TerminalReporter:
msg = "%s in %.2f seconds" %(line, session_duration)
if self.verbosity >= 0:
self.write_sep("=", msg, bold=True)
else:
self.write_line(msg, bold=True)
#else:
# self.write_line(msg, bold=True)
def summary_deselected(self):
if 'deselected' in self.stats:

View File

@ -5,6 +5,7 @@ Release announcements
.. toctree::
:maxdepth: 2
release-2.3.0
release-2.2.4
release-2.2.2
release-2.2.1

View File

@ -10,16 +10,15 @@ py.test reference documentation
builtin.txt
customize.txt
assert.txt
funcargs.txt
funcarg_compare.txt
setup.txt
fixture.txt
parametrize.txt
xunit_setup.txt
capture.txt
monkeypatch.txt
xdist.txt
tmpdir.txt
skipping.txt
mark.txt
skipping.txt
recwarn.txt
unittest.txt
nose.txt

View File

@ -24,9 +24,8 @@ you will see the return value of the function call::
$ py.test test_assert1.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
test_assert1.py F
@ -39,7 +38,7 @@ you will see the return value of the function call::
E + where 3 = f()
test_assert1.py:5: AssertionError
========================= 1 failed in 0.02 seconds =========================
========================= 1 failed in 0.01 seconds =========================
py.test has support for showing the values of the most common subexpressions
including calls, attributes, comparisons, and binary and unary
@ -107,9 +106,8 @@ if you run this module::
$ py.test test_assert2.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
test_assert2.py F
@ -127,7 +125,7 @@ if you run this module::
E '5'
test_assert2.py:5: AssertionError
========================= 1 failed in 0.02 seconds =========================
========================= 1 failed in 0.01 seconds =========================
Special comparisons are done for a number of cases:
@ -171,7 +169,6 @@ you can run the test module and get the custom output defined in
the conftest file::
$ py.test -q test_foocompare.py
collecting ... collected 1 items
F
================================= FAILURES =================================
_______________________________ test_compare _______________________________
@ -184,7 +181,6 @@ the conftest file::
E vals: 1 != 2
test_foocompare.py:8: AssertionError
1 failed in 0.02 seconds
.. _assert-details:
.. _`assert introspection`:

View File

@ -17,7 +17,8 @@ to get an overview on the globally available helpers.
.. automodule:: pytest
:members:
.. _builtinresources:
.. _builtinfixtures:
.. _builtinfuncargs:
Builtin resources / function arguments
-----------------------------------------------------
@ -27,11 +28,8 @@ You can ask for available builtin or project-custom
$ py.test --fixtures
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 0 items
pytestconfig
the pytest config object with access to command line opts.
capsys
enables capturing of writes to sys.stdout/sys.stderr and makes
captured output available via ``capsys.readouterr()`` method calls
@ -76,7 +74,5 @@ You can ask for available builtin or project-custom
See http://docs.python.org/library/warnings.html for information
on warning categories.
cov
A pytest funcarg that provides access to the underlying coverage object.
============================= in 0.01 seconds =============================
============================= in 0.00 seconds =============================

View File

@ -64,9 +64,8 @@ of the failing function and hide the other one::
$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 2 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 2 items
test_module.py .F
@ -79,8 +78,8 @@ of the failing function and hide the other one::
test_module.py:9: AssertionError
----------------------------- Captured stdout ------------------------------
setting up <function test_func2 at 0x228faa0>
==================== 1 failed, 1 passed in 0.02 seconds ====================
setting up <function test_func2 at 0x2f27488>
==================== 1 failed, 1 passed in 0.01 seconds ====================
Accessing captured output from a test function
---------------------------------------------------

View File

@ -17,7 +17,7 @@
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
version = release = "2.3.0.dev18"
version = release = "2.3.0.dev19"
import sys, os
@ -70,6 +70,7 @@ copyright = u'2011, holger krekel et alii'
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
exclude_patterns = ['links.inc', '_build', 'naming20.txt', 'test/*',
"old_*",
'example/attic.txt',
]
@ -270,7 +271,7 @@ epub_copyright = u'2011, holger krekel et alii'
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {'python': ('http://docs.python.org/', None),
'lib': ("http://docs.python.org/library/", None),
# 'lib': ("http://docs.python.org/2.7library/", None),
}

View File

@ -17,12 +17,15 @@ Full pytest documentation
plugins
talks
develop
funcarg_compare.txt
announce/index
.. toctree::
:hidden:
changelog.txt
resources
funcargs
example/resources_attic
setup.txt
example/remoteinterp.txt

View File

@ -44,10 +44,9 @@ then you can just invoke ``py.test`` without command line options::
$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
mymodule.py .
========================= 1 passed in 0.07 seconds =========================
========================= 1 passed in 0.02 seconds =========================

View File

@ -16,7 +16,6 @@ need more examples or have questions. Also take a look at the :ref:`comprehensiv
reportingdemo.txt
simple.txt
mysetup.txt
parametrize.txt
markers.txt
pythoncollection.txt

View File

@ -26,9 +26,7 @@ You can then restrict a test run to only run tests marked with ``webtest``::
$ py.test -v -m webtest
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11 -- /home/hpk/venv/1/bin/python
cachedir: /home/hpk/tmp/doc-exec-426/.cache
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items
test_server.py:3: test_send_http PASSED
@ -40,15 +38,13 @@ Or the inverse, running all tests except the webtest ones::
$ py.test -v -m "not webtest"
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11 -- /home/hpk/venv/1/bin/python
cachedir: /home/hpk/tmp/doc-exec-426/.cache
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items
test_server.py:6: test_something_quick PASSED
================= 1 tests deselected by "-m 'not webtest'" =================
================== 1 passed, 1 deselected in 0.01 seconds ==================
================== 1 passed, 1 deselected in 0.00 seconds ==================
Registering markers
-------------------------------------
@ -69,8 +65,6 @@ You can ask which markers exist for your test suite - the list includes our just
$ py.test --markers
@pytest.mark.webtest: mark a test as a webtest.
@pytest.mark.timeout(timeout, method=None): Set a timeout and timeout method on just one test item. The first argument, *timeout*, is the timeout in seconds while the keyword, *method*, takes the same values as the --timeout_method option.
@pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform.
@pytest.mark.xfail(*conditions, reason=None, run=True): mark the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.
@ -149,41 +143,38 @@ the given argument::
$ py.test -k send_http # running with the above defined examples
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 4 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 4 items
test_server.py .
=================== 3 tests deselected by '-ksend_http' ====================
================== 1 passed, 3 deselected in 0.02 seconds ==================
================== 1 passed, 3 deselected in 0.01 seconds ==================
And you can also run all tests except the ones that match the keyword::
$ py.test -k-send_http
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 4 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 4 items
test_mark_classlevel.py ..
test_server.py .
=================== 1 tests deselected by '-k-send_http' ===================
================== 3 passed, 1 deselected in 0.02 seconds ==================
================== 3 passed, 1 deselected in 0.01 seconds ==================
Or to only select the class::
$ py.test -kTestClass
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 4 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 4 items
test_mark_classlevel.py ..
=================== 2 tests deselected by '-kTestClass' ====================
================== 2 passed, 2 deselected in 0.02 seconds ==================
================== 2 passed, 2 deselected in 0.01 seconds ==================
.. _`adding a custom marker from a plugin`:
@ -230,33 +221,29 @@ the test needs::
$ py.test -E stage2
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
test_someenv.py s
======================== 1 skipped in 0.01 seconds =========================
======================== 1 skipped in 0.00 seconds =========================
and here is one that specifies exactly the environment needed::
$ py.test -E stage1
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
test_someenv.py .
========================= 1 passed in 0.01 seconds =========================
========================= 1 passed in 0.00 seconds =========================
The ``--markers`` option always gives you a list of available markers::
$ py.test --markers
@pytest.mark.env(name): mark test to run only on named environment
@pytest.mark.timeout(timeout, method=None): Set a timeout and timeout method on just one test item. The first argument, *timeout*, is the timeout in seconds while the keyword, *method*, takes the same values as the --timeout_method option.
@pytest.mark.skipif(*conditions): skip the given test function if evaluation of all conditions has a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform.
@pytest.mark.xfail(*conditions, reason=None, run=True): mark the test function as an expected failure. Optionally specify a reason and run=False if you don't even want to execute the test function. Any positional condition strings will be evaluated (like with skipif) and if one is False the marker will not be applied.
@ -304,12 +291,10 @@ test function. From a conftest file we can read it like this::
Let's run this without capturing output and see what we get::
$ py.test -q -s
collecting ... collected 1 items
glob args=('function',) kwargs={'x': 3}
glob args=('class',) kwargs={'x': 2}
glob args=('module',) kwargs={'x': 1}
.
1 passed in 0.01 seconds
marking platform specific tests with pytest
--------------------------------------------------------------
@ -362,23 +347,21 @@ then you will see two test skipped and two executed tests as expected::
$ py.test -rs # this option reports skip reasons
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 4 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 4 items
test_plat.py s.s.
========================= short test summary info ==========================
SKIP [2] /home/hpk/tmp/doc-exec-426/conftest.py:12: cannot run on platform linux2
SKIP [2] /tmp/doc-exec-257/conftest.py:12: cannot run on platform linux2
=================== 2 passed, 2 skipped in 0.02 seconds ====================
=================== 2 passed, 2 skipped in 0.01 seconds ====================
Note that if you specify a platform via the marker-command line option like this::
$ py.test -m linux2
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 4 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 4 items
test_plat.py .

View File

@ -1,142 +0,0 @@
.. highlightlang:: python
.. _mysetup:
Mysetup pattern: application specific test fixtures
==========================================================
Here is a basic useful step-by-step example for managing and interacting
with application specific test setup. The goal is to have one place
where we have the glue and test support code for bootstrapping and
configuring application objects and allow test modules and test
functions to stay ignorant of involved details.
Step 1: Implementing the test/app-specific ``mysetup`` pattern
--------------------------------------------------------------
Let's write a simple test function using a ``mysetup`` funcarg::
# content of test_sample.py
def test_answer(mysetup):
app = mysetup.myapp()
answer = app.question()
assert answer == 42
To run this test py.test needs to find and call a factory to
obtain the required ``mysetup`` function argument. To make
an according factory findable we write down a specifically named factory
method in a :ref:`local plugin <localplugin>` ::
# content of conftest.py
from myapp import MyApp
def pytest_funcarg__mysetup(request): # "mysetup" factory function
return MySetup()
class MySetup: # instances of this are seen by test functions
def myapp(self):
return MyApp()
To run the example we stub out a simple ``MyApp`` application object::
# content of myapp.py
class MyApp:
def question(self):
return 6 * 9
You can now run the test::
$ py.test test_sample.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 1 items
test_sample.py F
================================= FAILURES =================================
_______________________________ test_answer ________________________________
mysetup = <conftest.MySetup instance at 0x27e5320>
def test_answer(mysetup):
app = mysetup.myapp()
answer = app.question()
> assert answer == 42
E assert 54 == 42
test_sample.py:4: AssertionError
========================= 1 failed in 0.02 seconds =========================
This means that our ``mysetup`` object was successfully instantiated
and ``mysetup.app()`` returned an initialized ``MyApp`` instance.
We can ask it about the question and if you are confused as to what
the concrete question or answers actually mean, please see here_.
.. _here: http://uncyclopedia.wikia.com/wiki/The_Hitchhiker's_Guide_to_the_Galaxy
.. _`tut-cmdlineoption`:
Step 2: Checking a command line option and skipping tests
-----------------------------------------------------------
To add a command line option we update the ``conftest.py`` of
the previous example to add a command line option
and to offer a new mysetup method::
# content of ./conftest.py
import pytest
from myapp import MyApp
def pytest_funcarg__mysetup(request): # "mysetup" factory function
return MySetup(request)
def pytest_addoption(parser):
parser.addoption("--ssh", action="store", default=None,
help="specify ssh host to run tests with")
class MySetup:
def __init__(self, request):
self.config = request.config
def myapp(self):
return MyApp()
def getsshconnection(self):
host = self.config.option.ssh
if host is None:
pytest.skip("specify ssh host with --ssh")
return execnet.SshGateway(host)
Now any test function can use the ``mysetup.getsshconnection()`` method
like this::
# content of test_ssh.py
class TestClass:
def test_function(self, mysetup):
conn = mysetup.getsshconnection()
# work with conn
Running it yields::
$ py.test test_ssh.py -rs
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 1 items
test_ssh.py s
========================= short test summary info ==========================
SKIP [1] /home/hpk/tmp/doc-exec-306/conftest.py:22: specify ssh host with --ssh
======================== 1 skipped in 0.02 seconds =========================
If you specify a command line option like ``py.test --ssh=python.org`` the test will execute as expected.
Note that neither the ``TestClass`` nor the ``test_function`` need to
know anything about how to setup the test state. It is handled separately
in your "test setup glue" code in the ``conftest.py`` file. It is easy
to extend the ``mysetup`` object for further needs in the test code - and for use by any other test functions in the files and directories below the ``conftest.py`` file.

View File

@ -27,18 +27,15 @@ now execute the test specification::
nonpython $ py.test test_simple.yml
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 2 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 0 items / 1 errors
test_simple.yml .F
================================= FAILURES =================================
______________________________ usecase: hello ______________________________
usecase execution failed
spec failed: 'some': 'other'
no further details known at this point.
==================== 1 failed, 1 passed in 0.11 seconds ====================
================================== ERRORS ==================================
_____________________ ERROR collecting test_simple.yml _____________________
conftest.py:11: in collect
> import yaml # we need a yaml parser, e.g. PyYAML
E ImportError: No module named yaml
========================= 1 error in 0.00 seconds ==========================
You get one dot for the passing ``sub1: sub1`` check and one failure.
Obviously in the above ``conftest.py`` you'll want to implement a more
@ -57,31 +54,27 @@ consulted when reporting in ``verbose`` mode::
nonpython $ py.test -v
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2 -- /home/hpk/venv/1/bin/python
cachedir: /home/hpk/p/pytest/doc/en/.cache
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 2 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 0 items / 1 errors
test_simple.yml:1: usecase: ok PASSED
test_simple.yml:1: usecase: hello FAILED
================================= FAILURES =================================
______________________________ usecase: hello ______________________________
usecase execution failed
spec failed: 'some': 'other'
no further details known at this point.
==================== 1 failed, 1 passed in 0.04 seconds ====================
================================== ERRORS ==================================
_____________________ ERROR collecting test_simple.yml _____________________
conftest.py:11: in collect
> import yaml # we need a yaml parser, e.g. PyYAML
E ImportError: No module named yaml
========================= 1 error in 0.01 seconds ==========================
While developing your custom test collection and execution it's also
interesting to just look at the collection tree::
nonpython $ py.test --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 2 items
<YamlFile 'test_simple.yml'>
<YamlItem 'ok'>
<YamlItem 'hello'>
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 0 items / 1 errors
============================= in 0.04 seconds =============================
================================== ERRORS ==================================
_____________________ ERROR collecting test_simple.yml _____________________
conftest.py:11: in collect
> import yaml # we need a yaml parser, e.g. PyYAML
E ImportError: No module named yaml
========================= 1 error in 0.01 seconds ==========================

View File

@ -36,7 +36,6 @@ we parametrize two arguments of the test function so that the test
function is called three times. Let's run it::
$ py.test -q
collecting ... collected 3 items
..F
================================= FAILURES =================================
____________________________ test_eval[6*9-42] _____________________________
@ -54,7 +53,6 @@ function is called three times. Let's run it::
E + where 54 = eval('6*9')
test_expectation.py:8: AssertionError
1 failed, 2 passed in 0.02 seconds
As expected only one pair of input/output values fails the simple test function.
@ -94,15 +92,12 @@ Now we add a test configuration like this::
This means that we only run 2 tests if we do not pass ``--all``::
$ py.test -q test_compute.py
collecting ... collected 2 items
..
2 passed in 0.01 seconds
We run only two computations, so we see two dots.
let's run the full monty::
$ py.test -q --all
collecting ... collected 5 items
....F
================================= FAILURES =================================
_____________________________ test_compute[4] ______________________________
@ -114,7 +109,6 @@ let's run the full monty::
E assert 4 < 4
test_compute.py:3: AssertionError
1 failed, 4 passed in 0.02 seconds
As expected when running the full range of ``param1`` values
we'll get an error on the last one.
@ -157,22 +151,20 @@ this is a fully self-contained example which you can run with::
$ py.test test_scenarios.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev14
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 4 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 4 items
test_scenarios.py ....
========================= 4 passed in 0.02 seconds =========================
========================= 4 passed in 0.01 seconds =========================
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
$ py.test --collectonly test_scenarios.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev14
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 4 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 4 items
<Module 'test_scenarios.py'>
<Class 'TestSampleWithScenarios'>
<Instance '()'>
@ -233,24 +225,22 @@ Let's first see how it looks like at collection time::
$ py.test test_backends.py --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev14
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 2 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 2 items
<Module 'test_backends.py'>
<Function 'test_db_initialized[d1]'>
<Function 'test_db_initialized[d2]'>
============================= in 0.01 seconds =============================
============================= in 0.00 seconds =============================
And then when we run the test::
$ py.test -q test_backends.py
collecting ... collected 2 items
.F
================================= FAILURES =================================
_________________________ test_db_initialized[d2] __________________________
db = <conftest.DB2 instance at 0x17dd440>
db = <conftest.DB2 instance at 0x13dc9e0>
def test_db_initialized(db):
# a dummy test
@ -259,7 +249,6 @@ And then when we run the test::
E Failed: deliberately failing for demo purposes
test_backends.py:6: Failed
1 failed, 1 passed in 0.01 seconds
The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``pytest_funcarg__db`` factory has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase.
@ -302,19 +291,17 @@ Our test generator looks up a class-level definition which specifies which
argument sets to use for each test function. Let's run it::
$ py.test -q
collecting ... collected 3 items
F..
================================= FAILURES =================================
________________________ TestClass.test_equals[1-2] ________________________
self = <test_parametrize.TestClass instance at 0x19a6d88>, a = 1, b = 2
self = <test_parametrize.TestClass instance at 0x23ea170>, a = 1, b = 2
def test_equals(self, a, b):
> assert a == b
E assert 1 == 2
test_parametrize.py:18: AssertionError
1 failed, 2 passed in 0.02 seconds
Indirect parametrization with multiple resources
--------------------------------------------------------------
@ -333,8 +320,6 @@ with different sets of arguments for its three arguments:
Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize)::
. $ py.test -rs -q multipython.py
collecting ... collected 75 items
............sss............sss............sss............ssssssssssssssssss
========================= short test summary info ==========================
SKIP [27] /home/hpk/p/pytest/doc/en/example/multipython.py:21: 'python2.8' not found
48 passed, 27 skipped in 3.11 seconds

View File

@ -43,8 +43,8 @@ then the test collection looks like this::
$ py.test --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.2.5.dev1
collecting ... collected 2 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 2 items
<Module 'check_myapp.py'>
<Class 'CheckMyApp'>
<Instance '()'>
@ -82,8 +82,8 @@ You can always peek at the collection tree without running tests like this::
. $ py.test --collectonly pythoncollection.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.2.5.dev1
collecting ... collected 3 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 3 items
<Module 'pythoncollection.py'>
<Function 'test_function'>
<Class 'TestClass'>
@ -135,8 +135,8 @@ interpreters and will leave out the setup.py file::
$ py.test --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.2.5.dev1
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
<Module 'pkg/module_py2.py'>
<Function 'test_only_on_python2'>

View File

@ -0,0 +1,198 @@
.. highlightlang:: python
.. _myapp:
Building an SSH-connecting application fixture
==========================================================
The goal of this tutorial-example is to show how you can put efficient
test support and fixture code in one place, allowing test modules and
test functions to stay ignorant of importing, configuration or
setup/teardown details.
The tutorial implements a simple ``RemoteInterpreter`` object that
allows evaluation of python expressions. We are going to use
the `execnet <http://codespeak.net/execnet>`_ package for the
underlying cross-python bridge functionality.
Step 1: Implementing a first test
--------------------------------------------------------------
Let's write a simple test function using a not yet defined ``interp`` fixture::
# content of test_remoteinterpreter.py
def test_eval_simple(interp):
assert interp.eval("6*9") == 42
The test function needs an argument named `interp` and therefore pytest will
look for a :ref:`fixture function` that matches this name. We'll define it
in a :ref:`local plugin <localplugin>` to make it available also to other
test modules::
# content of conftest.py
import pytest
from remoteinterpreter import RemoteInterpreter
@pytest.fixture
def interp(request):
import execnet
gw = execnet.makegateway()
return RemoteInterpreter(gw)
To run the example we furthermore need to implement a RemoteInterpreter
object that works with the injected execnet-gateway connection::
# content of remoteinterpreter.py
class RemoteInterpreter:
def __init__(self, gateway):
self.gateway = gateway
def eval(self, expression):
# execnet open a "gateway" to the remote process
# which enables to remotely execute code and communicate
# to and fro via channels
ch = self.gateway.remote_exec("channel.send(%s)" % expression)
return ch.receive()
That's it, we can now run the test::
$ py.test test_remoteinterpreter.py
Traceback (most recent call last):
File "/home/hpk/p/pytest/.tox/regen/bin/py.test", line 9, in <module>
load_entry_point('pytest==2.3.0.dev19', 'console_scripts', 'py.test')()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 469, in main
config = _prepareconfig(args, plugins)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 463, in _prepareconfig
pluginmanager=_pluginmanager, args=args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 422, in __call__
return self._docall(methods, kwargs)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 433, in _docall
res = mc.execute()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 351, in execute
res = method(**kwargs)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/helpconfig.py", line 25, in pytest_cmdline_parse
config = __multicall__.execute()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 351, in execute
res = method(**kwargs)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 10, in pytest_cmdline_parse
config.parse(args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 344, in parse
self._preparse(args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 322, in _preparse
self._setinitialconftest(args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 301, in _setinitialconftest
self._conftest.setinitial(args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 160, in setinitial
self._try_load_conftest(anchor)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 166, in _try_load_conftest
self._path2confmods[None] = self.getconftestmodules(anchor)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 190, in getconftestmodules
clist[:0] = self.getconftestmodules(dp)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 189, in getconftestmodules
clist.append(self.importconftest(conftestpath))
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 218, in importconftest
self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/py/_path/local.py", line 532, in pyimport
__import__(modname)
File "/tmp/doc-exec-261/conftest.py", line 2, in <module>
from remoteinterpreter import RemoteInterpreter
ImportError: No module named remoteinterpreter
.. _`tut-cmdlineoption`:
Step 2: Adding command line configuration
-----------------------------------------------------------
To add a command line option we update the ``conftest.py`` of
the previous example and add a command line option which
is passed on to the MyApp object::
# content of ./conftest.py
import pytest
from myapp import MyApp
def pytest_addoption(parser): # pytest hook called during initialisation
parser.addoption("--ssh", action="store", default=None,
help="specify ssh host to run tests with")
@pytest.fixture
def mysetup(request): # "mysetup" factory function
return MySetup(request.config)
class MySetup:
def __init__(self, config):
self.config = config
self.app = MyApp()
def getsshconnection(self):
import execnet
host = self.config.option.ssh
if host is None:
pytest.skip("specify ssh host with --ssh")
return execnet.SshGateway(host)
Now any test function can use the ``mysetup.getsshconnection()`` method
like this::
# content of test_ssh.py
class TestClass:
def test_function(self, mysetup):
conn = mysetup.getsshconnection()
# work with conn
Running it yields::
$ py.test -q test_ssh.py -rs
Traceback (most recent call last):
File "/home/hpk/p/pytest/.tox/regen/bin/py.test", line 9, in <module>
load_entry_point('pytest==2.3.0.dev19', 'console_scripts', 'py.test')()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 469, in main
config = _prepareconfig(args, plugins)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 463, in _prepareconfig
pluginmanager=_pluginmanager, args=args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 422, in __call__
return self._docall(methods, kwargs)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 433, in _docall
res = mc.execute()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 351, in execute
res = method(**kwargs)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/helpconfig.py", line 25, in pytest_cmdline_parse
config = __multicall__.execute()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 351, in execute
res = method(**kwargs)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 10, in pytest_cmdline_parse
config.parse(args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 344, in parse
self._preparse(args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 322, in _preparse
self._setinitialconftest(args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 301, in _setinitialconftest
self._conftest.setinitial(args)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 160, in setinitial
self._try_load_conftest(anchor)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 166, in _try_load_conftest
self._path2confmods[None] = self.getconftestmodules(anchor)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 190, in getconftestmodules
clist[:0] = self.getconftestmodules(dp)
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 189, in getconftestmodules
clist.append(self.importconftest(conftestpath))
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/config.py", line 218, in importconftest
self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport()
File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/py/_path/local.py", line 532, in pyimport
__import__(modname)
File "/tmp/doc-exec-261/conftest.py", line 2, in <module>
from myapp import MyApp
ImportError: No module named myapp
If you specify a command line option like ``py.test --ssh=python.org`` the test will execute as expected.
Note that neither the ``TestClass`` nor the ``test_function`` need to
know anything about how to setup the test state. It is handled separately
in the ``conftest.py`` file. It is easy
to extend the ``mysetup`` object for further needs in the test code - and for use by any other test functions in the files and directories below the ``conftest.py`` file.

View File

@ -13,8 +13,8 @@ get on the terminal - we are working on that):
assertion $ py.test failure_demo.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.2.4
collecting ... collected 39 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 39 items
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
@ -30,7 +30,7 @@ get on the terminal - we are working on that):
failure_demo.py:15: AssertionError
_________________________ TestFailing.test_simple __________________________
self = <failure_demo.TestFailing object at 0x2e4dd50>
self = <failure_demo.TestFailing object at 0x1dbc8d0>
def test_simple(self):
def f():
@ -40,13 +40,13 @@ get on the terminal - we are working on that):
> assert f() == g()
E assert 42 == 43
E + where 42 = <function f at 0x2e73c80>()
E + and 43 = <function g at 0x2e73cf8>()
E + where 42 = <function f at 0x1d45230>()
E + and 43 = <function g at 0x1d452a8>()
failure_demo.py:28: AssertionError
____________________ TestFailing.test_simple_multiline _____________________
self = <failure_demo.TestFailing object at 0x2e4d7d0>
self = <failure_demo.TestFailing object at 0x1dbcdd0>
def test_simple_multiline(self):
otherfunc_multi(
@ -66,19 +66,19 @@ get on the terminal - we are working on that):
failure_demo.py:11: AssertionError
___________________________ TestFailing.test_not ___________________________
self = <failure_demo.TestFailing object at 0x2e4d390>
self = <failure_demo.TestFailing object at 0x1dbc250>
def test_not(self):
def f():
return 42
> assert not f()
E assert not 42
E + where 42 = <function f at 0x2d36cf8>()
E + where 42 = <function f at 0x1d45410>()
failure_demo.py:38: AssertionError
_________________ TestSpecialisedExplanations.test_eq_text _________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2e4db10>
self = <failure_demo.TestSpecialisedExplanations object at 0x1dbc290>
def test_eq_text(self):
> assert 'spam' == 'eggs'
@ -89,7 +89,7 @@ get on the terminal - we are working on that):
failure_demo.py:42: AssertionError
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________
self = <failure_demo.TestSpecialisedExplanations object at 0x2e6cbd0>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db9ed0>
def test_eq_similar_text(self):
> assert 'foo 1 bar' == 'foo 2 bar'
@ -102,7 +102,7 @@ get on the terminal - we are working on that):
failure_demo.py:45: AssertionError
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________
self = <failure_demo.TestSpecialisedExplanations object at 0x2e6cdd0>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db9d90>
def test_eq_multiline_text(self):
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
@ -115,7 +115,7 @@ get on the terminal - we are working on that):
failure_demo.py:48: AssertionError
______________ TestSpecialisedExplanations.test_eq_long_text _______________
self = <failure_demo.TestSpecialisedExplanations object at 0x2e6cad0>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db43d0>
def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100
@ -132,7 +132,7 @@ get on the terminal - we are working on that):
failure_demo.py:53: AssertionError
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________
self = <failure_demo.TestSpecialisedExplanations object at 0x2e6cb50>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db4390>
def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100
@ -156,7 +156,7 @@ get on the terminal - we are working on that):
failure_demo.py:58: AssertionError
_________________ TestSpecialisedExplanations.test_eq_list _________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2dca1d0>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db4dd0>
def test_eq_list(self):
> assert [0, 1, 2] == [0, 1, 3]
@ -166,7 +166,7 @@ get on the terminal - we are working on that):
failure_demo.py:61: AssertionError
______________ TestSpecialisedExplanations.test_eq_list_long _______________
self = <failure_demo.TestSpecialisedExplanations object at 0x2dcad10>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db4950>
def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100
@ -178,7 +178,7 @@ get on the terminal - we are working on that):
failure_demo.py:66: AssertionError
_________________ TestSpecialisedExplanations.test_eq_dict _________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2dca3d0>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db4510>
def test_eq_dict(self):
> assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2}
@ -191,7 +191,7 @@ get on the terminal - we are working on that):
failure_demo.py:69: AssertionError
_________________ TestSpecialisedExplanations.test_eq_set __________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2dca710>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db40d0>
def test_eq_set(self):
> assert set([0, 10, 11, 12]) == set([0, 20, 21])
@ -207,7 +207,7 @@ get on the terminal - we are working on that):
failure_demo.py:72: AssertionError
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________
self = <failure_demo.TestSpecialisedExplanations object at 0x2dca490>
self = <failure_demo.TestSpecialisedExplanations object at 0x1db4150>
def test_eq_longer_list(self):
> assert [1,2] == [1,2,3]
@ -217,7 +217,7 @@ get on the terminal - we are working on that):
failure_demo.py:75: AssertionError
_________________ TestSpecialisedExplanations.test_in_list _________________
self = <failure_demo.TestSpecialisedExplanations object at 0x2dca510>
self = <failure_demo.TestSpecialisedExplanations object at 0x1da5590>
def test_in_list(self):
> assert 1 in [0, 2, 3, 4, 5]
@ -226,7 +226,7 @@ get on the terminal - we are working on that):
failure_demo.py:78: AssertionError
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________
self = <failure_demo.TestSpecialisedExplanations object at 0x2db86d0>
self = <failure_demo.TestSpecialisedExplanations object at 0x1da5f50>
def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
@ -244,7 +244,7 @@ get on the terminal - we are working on that):
failure_demo.py:82: AssertionError
___________ TestSpecialisedExplanations.test_not_in_text_single ____________
self = <failure_demo.TestSpecialisedExplanations object at 0x2db8450>
self = <failure_demo.TestSpecialisedExplanations object at 0x1da5cd0>
def test_not_in_text_single(self):
text = 'single foo line'
@ -257,7 +257,7 @@ get on the terminal - we are working on that):
failure_demo.py:86: AssertionError
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________
self = <failure_demo.TestSpecialisedExplanations object at 0x2db8910>
self = <failure_demo.TestSpecialisedExplanations object at 0x1da5890>
def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
@ -270,7 +270,7 @@ get on the terminal - we are working on that):
failure_demo.py:90: AssertionError
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
self = <failure_demo.TestSpecialisedExplanations object at 0x2db88d0>
self = <failure_demo.TestSpecialisedExplanations object at 0x1da53d0>
def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
@ -289,7 +289,7 @@ get on the terminal - we are working on that):
i = Foo()
> assert i.b == 2
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2db8d90>.b
E + where 1 = <failure_demo.Foo object at 0x1da5750>.b
failure_demo.py:101: AssertionError
_________________________ test_attribute_instance __________________________
@ -299,8 +299,8 @@ get on the terminal - we are working on that):
b = 1
> assert Foo().b == 2
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2db8f50>.b
E + where <failure_demo.Foo object at 0x2db8f50> = <class 'failure_demo.Foo'>()
E + where 1 = <failure_demo.Foo object at 0x1da5710>.b
E + where <failure_demo.Foo object at 0x1da5710> = <class 'failure_demo.Foo'>()
failure_demo.py:107: AssertionError
__________________________ test_attribute_failure __________________________
@ -316,7 +316,7 @@ get on the terminal - we are working on that):
failure_demo.py:116:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
self = <failure_demo.Foo object at 0x2db8dd0>
self = <failure_demo.Foo object at 0x1da5090>
def _get_b(self):
> raise Exception('Failed to get attrib')
@ -332,15 +332,15 @@ get on the terminal - we are working on that):
b = 2
> assert Foo().b == Bar().b
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2db8c10>.b
E + where <failure_demo.Foo object at 0x2db8c10> = <class 'failure_demo.Foo'>()
E + and 2 = <failure_demo.Bar object at 0x2db8b90>.b
E + where <failure_demo.Bar object at 0x2db8b90> = <class 'failure_demo.Bar'>()
E + where 1 = <failure_demo.Foo object at 0x1da5b50>.b
E + where <failure_demo.Foo object at 0x1da5b50> = <class 'failure_demo.Foo'>()
E + and 2 = <failure_demo.Bar object at 0x1d51ad0>.b
E + where <failure_demo.Bar object at 0x1d51ad0> = <class 'failure_demo.Bar'>()
failure_demo.py:124: AssertionError
__________________________ TestRaises.test_raises __________________________
self = <failure_demo.TestRaises instance at 0x2d93cf8>
self = <failure_demo.TestRaises instance at 0x1dc5d40>
def test_raises(self):
s = 'qwe'
@ -352,10 +352,10 @@ get on the terminal - we are working on that):
> int(s)
E ValueError: invalid literal for int() with base 10: 'qwe'
<0-codegen /home/hpk/p/pytest/_pytest/python.py:978>:1: ValueError
<0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:838>:1: ValueError
______________________ TestRaises.test_raises_doesnt _______________________
self = <failure_demo.TestRaises instance at 0x2e69c20>
self = <failure_demo.TestRaises instance at 0x1dc7b90>
def test_raises_doesnt(self):
> raises(IOError, "int('3')")
@ -364,7 +364,7 @@ get on the terminal - we are working on that):
failure_demo.py:136: Failed
__________________________ TestRaises.test_raise ___________________________
self = <failure_demo.TestRaises instance at 0x2d96098>
self = <failure_demo.TestRaises instance at 0x1dc0a28>
def test_raise(self):
> raise ValueError("demo error")
@ -373,7 +373,7 @@ get on the terminal - we are working on that):
failure_demo.py:139: ValueError
________________________ TestRaises.test_tupleerror ________________________
self = <failure_demo.TestRaises instance at 0x2d96e60>
self = <failure_demo.TestRaises instance at 0x1dc37a0>
def test_tupleerror(self):
> a,b = [1]
@ -382,7 +382,7 @@ get on the terminal - we are working on that):
failure_demo.py:142: ValueError
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
self = <failure_demo.TestRaises instance at 0x2d99b90>
self = <failure_demo.TestRaises instance at 0x1dba518>
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3]
@ -395,7 +395,7 @@ get on the terminal - we are working on that):
l is [1, 2, 3]
________________________ TestRaises.test_some_error ________________________
self = <failure_demo.TestRaises instance at 0x2d9b998>
self = <failure_demo.TestRaises instance at 0x1dbf320>
def test_some_error(self):
> if namenotexi:
@ -420,10 +420,10 @@ get on the terminal - we are working on that):
> assert 1 == 0
E assert 1 == 0
<2-codegen 'abc-123' /home/hpk/p/pytest/doc/example/assertion/failure_demo.py:162>:2: AssertionError
<2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError
____________________ TestMoreErrors.test_complex_error _____________________
self = <failure_demo.TestMoreErrors instance at 0x2e5b6c8>
self = <failure_demo.TestMoreErrors instance at 0x1dc7f38>
def test_complex_error(self):
def f():
@ -452,7 +452,7 @@ get on the terminal - we are working on that):
failure_demo.py:5: AssertionError
___________________ TestMoreErrors.test_z1_unpack_error ____________________
self = <failure_demo.TestMoreErrors instance at 0x2d9a998>
self = <failure_demo.TestMoreErrors instance at 0x1dbe320>
def test_z1_unpack_error(self):
l = []
@ -462,7 +462,7 @@ get on the terminal - we are working on that):
failure_demo.py:179: ValueError
____________________ TestMoreErrors.test_z2_type_error _____________________
self = <failure_demo.TestMoreErrors instance at 0x2dc87a0>
self = <failure_demo.TestMoreErrors instance at 0x1db6170>
def test_z2_type_error(self):
l = 3
@ -472,19 +472,19 @@ get on the terminal - we are working on that):
failure_demo.py:183: TypeError
______________________ TestMoreErrors.test_startswith ______________________
self = <failure_demo.TestMoreErrors instance at 0x2dc55a8>
self = <failure_demo.TestMoreErrors instance at 0x1db6f38>
def test_startswith(self):
s = "123"
g = "456"
> assert s.startswith(g)
E assert <built-in method startswith of str object at 0x2daf3f0>('456')
E + where <built-in method startswith of str object at 0x2daf3f0> = '123'.startswith
E assert <built-in method startswith of str object at 0x1dbdad0>('456')
E + where <built-in method startswith of str object at 0x1dbdad0> = '123'.startswith
failure_demo.py:188: AssertionError
__________________ TestMoreErrors.test_startswith_nested ___________________
self = <failure_demo.TestMoreErrors instance at 0x2dc8518>
self = <failure_demo.TestMoreErrors instance at 0x1dbed40>
def test_startswith_nested(self):
def f():
@ -492,15 +492,15 @@ get on the terminal - we are working on that):
def g():
return "456"
> assert f().startswith(g())
E assert <built-in method startswith of str object at 0x2daf3f0>('456')
E + where <built-in method startswith of str object at 0x2daf3f0> = '123'.startswith
E + where '123' = <function f at 0x2e50aa0>()
E + and '456' = <function g at 0x2e52cf8>()
E assert <built-in method startswith of str object at 0x1dbdad0>('456')
E + where <built-in method startswith of str object at 0x1dbdad0> = '123'.startswith
E + where '123' = <function f at 0x1ddf488>()
E + and '456' = <function g at 0x1ddf848>()
failure_demo.py:195: AssertionError
_____________________ TestMoreErrors.test_global_func ______________________
self = <failure_demo.TestMoreErrors instance at 0x2e69098>
self = <failure_demo.TestMoreErrors instance at 0x1dc79e0>
def test_global_func(self):
> assert isinstance(globf(42), float)
@ -510,18 +510,18 @@ get on the terminal - we are working on that):
failure_demo.py:198: AssertionError
_______________________ TestMoreErrors.test_instance _______________________
self = <failure_demo.TestMoreErrors instance at 0x2d96fc8>
self = <failure_demo.TestMoreErrors instance at 0x1dc3098>
def test_instance(self):
self.x = 6*7
> assert self.x != 42
E assert 42 != 42
E + where 42 = <failure_demo.TestMoreErrors instance at 0x2d96fc8>.x
E + where 42 = <failure_demo.TestMoreErrors instance at 0x1dc3098>.x
failure_demo.py:202: AssertionError
_______________________ TestMoreErrors.test_compare ________________________
self = <failure_demo.TestMoreErrors instance at 0x2d9e170>
self = <failure_demo.TestMoreErrors instance at 0x1db0ab8>
def test_compare(self):
> assert globf(10) < 5
@ -531,7 +531,7 @@ get on the terminal - we are working on that):
failure_demo.py:205: AssertionError
_____________________ TestMoreErrors.test_try_finally ______________________
self = <failure_demo.TestMoreErrors instance at 0x2d9ef80>
self = <failure_demo.TestMoreErrors instance at 0x1db38c0>
def test_try_finally(self):
x = 1
@ -540,4 +540,4 @@ get on the terminal - we are working on that):
E assert 1 == 0
failure_demo.py:210: AssertionError
======================== 39 failed in 0.17 seconds =========================
======================== 39 failed in 0.15 seconds =========================

View File

@ -22,20 +22,22 @@ Here is a basic pattern how to achieve this::
For this to work we need to add a command line option and
provide the ``cmdopt`` through a :ref:`function argument <funcarg>` factory::
provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`::
# content of conftest.py
import pytest
def pytest_addoption(parser):
parser.addoption("--cmdopt", action="store", default="type1",
help="my option: type1 or type2")
def pytest_funcarg__cmdopt(request):
@pytest.fixture
def cmdopt(request):
return request.config.option.cmdopt
Let's run this without supplying our new command line option::
Let's run this without supplying our new option::
$ py.test -q test_sample.py
collecting ... collected 1 items
F
================================= FAILURES =================================
_______________________________ test_answer ________________________________
@ -53,12 +55,10 @@ Let's run this without supplying our new command line option::
test_sample.py:6: AssertionError
----------------------------- Captured stdout ------------------------------
first
1 failed in 0.01 seconds
And now with supplying a command line option::
$ py.test -q --cmdopt=type2
collecting ... collected 1 items
F
================================= FAILURES =================================
_______________________________ test_answer ________________________________
@ -76,14 +76,11 @@ And now with supplying a command line option::
test_sample.py:6: AssertionError
----------------------------- Captured stdout ------------------------------
second
1 failed in 0.01 seconds
Ok, this completes the basic pattern. However, one often rather
wants to process command line options outside of the test and
rather pass in different or more complex objects. See the
next example or refer to :ref:`mysetup` for more information
on real-life examples.
You can see that the command line option arrived in our test. This
completes the basic pattern. However, one often rather wants to process
command line options outside of the test and rather pass in different or
more complex objects.
Dynamically adding command line options
--------------------------------------------------------------
@ -109,13 +106,10 @@ directory with the above conftest.py::
$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.2.4
gw0 I / gw1 I / gw2 I / gw3 I
gw0 [0] / gw1 [0] / gw2 [0] / gw3 [0]
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 0 items
scheduling tests via LoadScheduling
============================= in 0.52 seconds =============================
============================= in 0.00 seconds =============================
.. _`excontrolskip`:
@ -156,12 +150,12 @@ and when running it will see a skipped "slow" test::
$ py.test -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.2.4
collecting ... collected 2 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 2 items
test_module.py .s
========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-225/conftest.py:9: need --runslow option to run
SKIP [1] /tmp/doc-exec-264/conftest.py:9: need --runslow option to run
=================== 1 passed, 1 skipped in 0.01 seconds ====================
@ -169,8 +163,8 @@ Or run it including the ``slow`` marked test::
$ py.test --runslow
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.2.4
collecting ... collected 2 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 2 items
test_module.py ..
@ -203,7 +197,6 @@ unless the ``--fulltrace`` command line option is specified.
Let's run our little function::
$ py.test -q test_checkconfig.py
collecting ... collected 1 items
F
================================= FAILURES =================================
______________________________ test_something ______________________________
@ -213,7 +206,6 @@ Let's run our little function::
E Failed: not configured: 42
test_checkconfig.py:8: Failed
1 failed in 0.01 seconds
Detect if running from within a py.test run
--------------------------------------------------------------
@ -261,9 +253,9 @@ which will add the string to the test header accordingly::
$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.2.4
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
project deps: mylib-1.1
collecting ... collected 0 items
collected 0 items
============================= in 0.00 seconds =============================
@ -284,7 +276,7 @@ which will add info only when run with "--v"::
$ py.test -v
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.2.4 -- /home/hpk/venv/0/bin/python
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python
info1: did you know that ...
did you?
collecting ... collected 0 items
@ -295,8 +287,8 @@ and nothing when run plainly::
$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.2.4
collecting ... collected 0 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 0 items
============================= in 0.00 seconds =============================
@ -327,13 +319,13 @@ Now we can profile which test functions execute the slowest::
$ py.test --durations=3
=========================== test session starts ============================
platform linux2 -- Python 2.7.1 -- pytest-2.2.4
collecting ... collected 3 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 3 items
test_some_are_slow.py ...
========================= slowest 3 test durations =========================
0.20s call test_some_are_slow.py::test_funcslow2
0.10s call test_some_are_slow.py::test_funcslow1
0.00s setup test_some_are_slow.py::test_funcslow2
0.00s call test_some_are_slow.py::test_funcfast
========================= 3 passed in 0.31 seconds =========================

View File

@ -1,17 +1,12 @@
.. _xunitsetup:
.. _setup:
.. _fixture:
.. _fixtures:
.. _`fixture functions`:
.. _`@pytest.fixture`:
pytest fixtures: modular, re-useable, flexible
pytest fixtures: modular, explicit, scalable
========================================================
.. versionadded:: 2.0, 2.3
.. _`funcargs`: funcargs.html
.. _`test parametrization`: funcargs.html#parametrizing-tests
.. _`unittest plugin`: plugin/unittest.html
.. _`xUnit`: http://en.wikipedia.org/wiki/XUnit
.. _`general purpose of test fixtures`: http://en.wikipedia.org/wiki/Test_fixture#Software
.. _`django`: https://www.djangoproject.com/
@ -20,101 +15,59 @@ pytest fixtures: modular, re-useable, flexible
pytest allows to create and use test fixtures in a modular and flexible
manner, offering dramatic improvements over the classic xUnit style of
setup/teardown functions. The `general purpose of test fixtures`_ is to
provide a fixed baseline upon which tests can reliably and
repeatedly execute. With pytest, fixtures have names and can be referenced
from test functions, modules, classes or whole projects. Fixtures are
implemented by **fixture functions** which may return a fixture object
or put extra attributes on test classes or perform global side effects
if needed. Fixtures can themselves access other fixtures, allowing a
**structured modular approach** to organising fixtures for an
application.
setup/teardown functions. The `general purpose of test fixtures`_
is to provide a fixed baseline upon which tests can reliably
and repeatedly execute. With pytest, fixtures have names and can be
activated by referencing them from test functions, modules, classes or
whole projects. Fixtures are implemented by *fixture functions* which
have full access to the requesting test context and can use other
fixtures, allowing a modular and flexible approach to organising
and parametrizing fixtures for an application. Complemented by
pytest's generic :ref:`parametrize features <parametrize>`, pytest
fixtures help to write test suites that scale from simple to complex
with minimal effort.
**Test functions can receive fixture objects by naming them as an input
argument.** For each argument name, a matching fixture
.. _`funcargs`:
.. _`funcarg mechanism`:
.. _`fixture function`:
Fixtures as Function arguments
-----------------------------------------
Test functions can receive fixture objects by naming them as an input
argument. For each argument name, a matching fixture
function will provide a fixture object. This mechanism was already
introduced with pytest-2.0 and is also called the **funcarg mechanism**.
introduced with pytest-2.0 and is also called the *funcarg mechanism*.
It allows test functions to easily receive and work against specific
pre-initialized application objects without having to care about the
details of setup/cleanup procedures. It's a prime example of
`dependency injection`_ where fixture functions take the role of the
*injector* and test functions are the *consumers* of fixture objects.
With pytest-2.3 this mechanism has been generalized and improved as described
further in this document.
**Test classes, modules or whole projects can declare a need for
one or more fixtures**. All required fixture functions will execute
before a test from the specifying context executes. You can use this
to make tests operate from a pre-initialized directory or with
certain environment variables or with pre-initialized applications.
For example, the Django_ project requires database
initialization to be able to import from and use its model objects.
For that, the `pytest-django`_ plugin provides fixtures which your
project can then easily depend or extend on, simply by referencing the
name of the particular fixture.
Let's look at a simple self-contained test module containing
a fixture and a test function using it::
**Fixtures can be shared throughout a test session, module or class.**.
By means of a "scope" declaration on a fixture function, it will
only be invoked once per the specified scope. This allows to reduce the number
of expensive application object setups and thus helps to speed up test runs.
Typical examples are the setup of test databases or establishing
required subprocesses or network connections.
**Fixture functions have limited visibility** which depends on where they
are defined. If they are defined on a test class, only its test methods
may use it. A fixture defined in a module can only be used
from that test module. A fixture defined in a conftest.py file
can only be used by the tests below the directory of that file.
Lastly, plugins can define fixtures which are available across all
projects.
**Fixture functions can interact with the requesting testcontext**. By
accepting a special ``request`` object, fixture functions can introspect
the function, class or module for which they are invoked and can
optionally register cleanup functions which are called when the last
test finished execution. A good example is `pytest-timeout`_ which
allows to limit the execution time of a test, and will read the
according parameter from a test function or from project-wide settings.
**Fixture functions can be parametrized** in which case they will be called
multiple times, each time executing the set of dependent tests, i.e. the
tests that depend on this fixture. Test functions do usually not need
to be aware of their re-running. Fixture parametrization helps to
write exhaustive functional tests for components which themselves can be
configured in multiple ways.
Basic test function with fixtures
-----------------------------------------------------------
.. versionadded:: 2.3
Let's look at a simple self-contained test module containing a module
visible fixture function and a test function using the provided fixture::
# content of ./test_simplefactory.py
# content of ./test_fixturefuncarg.py
import pytest
@pytest.fixture()
@pytest.fixture
def myfuncarg():
return 42
def test_function(myfuncarg):
assert myfuncarg == 17
assert myfuncarg == 17 # will fail
Here, the ``test_function`` needs a very simple fixture ``myfuncarg`` which
it wants to compare against a specific value. py.test will discover and call
the ``@pytest.fixture`` marked ``myfuncarg`` fixture function. Running the
tests looks like this::
Here, the ``test_function`` needs the ``myfuncarg`` fixture value. pytest
will discover and call the ``@pytest.fixture`` marked ``myfuncarg``
fixture function. Running the test looks like this::
$ py.test test_simplefactory.py
$ py.test test_fixturefuncarg.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev18
plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov, timeout
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
test_simplefactory.py F
test_fixturefuncarg.py F
================================= FAILURES =================================
______________________________ test_function _______________________________
@ -122,17 +75,17 @@ tests looks like this::
myfuncarg = 42
def test_function(myfuncarg):
> assert myfuncarg == 17
> assert myfuncarg == 17 # will fail
E assert 42 == 17
test_simplefactory.py:8: AssertionError
test_fixturefuncarg.py:8: AssertionError
========================= 1 failed in 0.01 seconds =========================
This shows that the test function was called with a ``myfuncarg``
argument value of ``42`` and the assert fails as expected. Here is
value of ``42`` and the assert fails as expected. Here is
how py.test comes to call the test function this way:
1. py.test :ref:`finds <test discovery>` the ``test_function`` because
1. pytest :ref:`finds <test discovery>` the ``test_function`` because
of the ``test_`` prefix. The test function needs a function argument
named ``myfuncarg``. A matching fixture function is discovered by
looking for a fixture function named ``myfuncarg``.
@ -164,6 +117,12 @@ with a list of available function arguments.
Creating and using a session-shared fixture
-----------------------------------------------------------------
By means of a "scope" declaration, a fixture function will
only be invoked once per the specified scope. This allows to reduce the
number of expensive application object setups and thus helps to speed up
test runs. Typical examples are the setup of test databases or
establishing required subprocesses or network connections.
.. regendoc:wipe
Here is a simple example of a fixture function creating a shared
@ -199,12 +158,11 @@ We deliberately insert failing ``assert 0`` statements in order to
inspect what is going on and can now run the tests::
$ py.test -q test_module.py
collecting ... collected 2 items
FF
================================= FAILURES =================================
________________________________ test_ehlo _________________________________
smtp = <smtplib.SMTP instance at 0x2c64128>
smtp = <smtplib.SMTP instance at 0x1c51440>
def test_ehlo(smtp):
response = smtp.ehlo()
@ -213,10 +171,10 @@ inspect what is going on and can now run the tests::
> assert 0 # for demo purposes
E assert 0
test_module.py:5: AssertionError
test_module.py:6: AssertionError
________________________________ test_noop _________________________________
smtp = <smtplib.SMTP instance at 0x2c64128>
smtp = <smtplib.SMTP instance at 0x1c51440>
def test_noop(smtp):
response = smtp.noop()
@ -224,32 +182,34 @@ inspect what is going on and can now run the tests::
> assert 0 # for demo purposes
E assert 0
test_module.py:10: AssertionError
2 failed in 0.15 seconds
test_module.py:11: AssertionError
You see the two ``assert 0`` failing and can also see that
the same (session-scoped) object was passed into the two test functions
because pytest shows the incoming arguments in the traceback.
Adding a finalizer to a fixture
--------------------------------------------------------
Fixtures can interact with the requesting test context
-------------------------------------------------------------
Further extending the ``smtp`` example, we now want to properly
close a smtp server connection after the last test using it
has been run. We can do this by changing the fixture function
to accept the special :ref:`request` object, representing the
requesting test context. After calling the ``request.addfinalizer()``
helper pytest will make sure that the finalizer function is called
after the last test using the ``smtp`` resource has finished.
By using the special :ref:`request` object, fixture functions can introspect
the function, class or module for which they are invoked and can
optionally register cleanup functions which are called when the last
test finished execution.
Further extending the previous ``smtp`` fixture example, let's try to
read the server URL from the module namespace, use module-scoping and
register a finalizer that closes the smtp connection after the last
test finished execution::
# content of conftest.py
import pytest
import smtplib
@pytest.fixture(scope="session")
@pytest.fixture(scope="module")
def smtp(request):
smtp = smtplib.SMTP("merlinux.eu")
server = getattr(request.module, "smtpserver", "merlinux.eu")
smtp = smtplib.SMTP(server)
def fin():
print ("finalizing %s" % smtp)
smtp.close()
@ -260,19 +220,67 @@ The registered ``fin`` function will be called when the last test
using it has executed::
$ py.test -s -q --tb=no
collecting ... collected 2 items
FF
2 failed in 0.21 seconds
finalizing <smtplib.SMTP instance at 0x29f7908>
finalizing <smtplib.SMTP instance at 0x1e15a70>
We see that the ``smtp`` instance is finalized after all
tests executed. If we had specified ``scope='function'``
then fixture setup and cleanup would occur around each
single test.
We see that the ``smtp`` instance is finalized after the two
tests using it executed. If we had specified ``scope='function'``
then fixture setup and cleanup would occur around each single test.
Note that the test module itself did not need to change!
Parametrizing a session-shared funcarg resource
Let's quickly create another test module that actually sets the
server URL and has a test to verify the fixture picks it up::
# content of test_anothersmtp.py
smtpserver = "mail.python.org" # will be read by smtp fixture
def test_showhelo(smtp):
assert 0, smtp.helo()
Running it::
$ py.test -qq --tb=short test_anothersmtp.py
F
================================= FAILURES =================================
______________________________ test_showhelo _______________________________
test_anothersmtp.py:5: in test_showhelo
> assert 0, smtp.helo()
E AssertionError: (250, 'mail.python.org')
**Test classes, modules or whole projects can make use of
one or more fixtures**. All required fixture functions will execute
before a test from the specifying context executes. You can use this
to make tests operate from a pre-initialized directory or with
certain environment variables or with pre-configured global application
settings.
For example, the Django_ project requires database
initialization to be able to import from and use its model objects.
For that, the `pytest-django`_ plugin provides fixtures which your
project can then easily depend or extend on, simply by referencing the
name of the particular fixture.
**Fixture functions have limited visibility** which depends on where they
are defined. If they are defined on a test class, only its test methods
may use it. A fixture defined in a module can only be used
from that test module. A fixture defined in a conftest.py file
can only be used by the tests below the directory of that file.
Lastly, plugins can define fixtures which are available across all
projects.
Parametrizing a session-shared fixture
-----------------------------------------------------------------
**Fixture functions can be parametrized** in which case they will be called
multiple times, each time executing the set of dependent tests, i.e. the
tests that depend on this fixture. Test functions do usually not need
to be aware of their re-running. Fixture parametrization helps to
write exhaustive functional tests for components which themselves can be
configured in multiple ways.
Extending the previous example, we can flag the fixture to create
two ``smtp`` fixture instances which will cause all tests using the
fixture to run twice. The fixture function gets
@ -297,13 +305,12 @@ for each of which the fixture function will execute and can access
a value via ``request.param``. No test function code needs to change.
So let's just do another run::
$ py.test -q
collecting ... collected 4 items
$ py.test -q test_module.py
FFFF
================================= FAILURES =================================
__________________________ test_ehlo[merlinux.eu] __________________________
smtp = <smtplib.SMTP instance at 0x1c261b8>
smtp = <smtplib.SMTP instance at 0x27ae998>
def test_ehlo(smtp):
response = smtp.ehlo()
@ -312,10 +319,10 @@ So let's just do another run::
> assert 0 # for demo purposes
E assert 0
test_module.py:5: AssertionError
test_module.py:6: AssertionError
__________________________ test_noop[merlinux.eu] __________________________
smtp = <smtplib.SMTP instance at 0x1c261b8>
smtp = <smtplib.SMTP instance at 0x27ae998>
def test_noop(smtp):
response = smtp.noop()
@ -323,10 +330,10 @@ So let's just do another run::
> assert 0 # for demo purposes
E assert 0
test_module.py:10: AssertionError
test_module.py:11: AssertionError
________________________ test_ehlo[mail.python.org] ________________________
smtp = <smtplib.SMTP instance at 0x1c2a4d0>
smtp = <smtplib.SMTP instance at 0x28395f0>
def test_ehlo(smtp):
response = smtp.ehlo()
@ -334,10 +341,10 @@ So let's just do another run::
> assert "merlinux" in response[1]
E assert 'merlinux' in 'mail.python.org\nSIZE 10240000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN'
test_module.py:4: AssertionError
test_module.py:5: AssertionError
________________________ test_noop[mail.python.org] ________________________
smtp = <smtplib.SMTP instance at 0x1c2a4d0>
smtp = <smtplib.SMTP instance at 0x28395f0>
def test_noop(smtp):
response = smtp.noop()
@ -345,42 +352,17 @@ So let's just do another run::
> assert 0 # for demo purposes
E assert 0
test_module.py:10: AssertionError
4 failed in 6.62 seconds
test_module.py:11: AssertionError
We now get four failures because we are running the two tests twice with
different ``smtp`` fixture instances. Note that with the
``mail.python.org`` connection the second test fails in ``test_ehlo``
because it expects a specific server string.
We also see that the two ``smtp`` instances are finalized appropriately.
Looking at test collection without running tests
------------------------------------------------------
You can also look at the tests which pytest collects without running them::
$ py.test --collectonly
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev18
plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov, timeout
collecting ... collected 4 items
<Module 'test_module.py'>
<Function 'test_ehlo[merlinux.eu]'>
<Function 'test_noop[merlinux.eu]'>
<Function 'test_ehlo[mail.python.org]'>
<Function 'test_noop[mail.python.org]'>
============================= in 0.01 seconds =============================
Our fixture parameters show up in the test id of the test functions.
Note that pytest orders your test run by resource usage, minimizing
the number of active resources at any given time.
.. _`interdependent fixtures`:
Interdepdendent fixtures
Using fixtures from a fixture function
----------------------------------------------------------
You can not only use fixtures in test functions but fixture functions
@ -410,15 +392,13 @@ Here we declare an ``app`` fixture which receives the previously defined
$ py.test -v test_appsetup.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev18 -- /home/hpk/venv/1/bin/python
cachedir: /tmp/doc-exec-6/.cache
plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov, timeout
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items
test_appsetup.py:12: test_smtp_exists[merlinux.eu] PASSED
test_appsetup.py:12: test_smtp_exists[mail.python.org] PASSED
========================= 2 passed in 0.14 seconds =========================
========================= 2 passed in 0.09 seconds =========================
Due to the parametrization of ``smtp`` the test will run twice with two
different ``App`` instances and respective smtp servers. There is no
@ -429,6 +409,7 @@ session-scoped ``smtp``: it is fine for fixtures to use "broader" scoped
fixtures but not the other way round: A session-scoped fixture could
not use a module-scoped one in a meaningful way.
.. _`automatic per-resource grouping`:
Automatic grouping of tests by fixture instances
@ -473,9 +454,7 @@ Let's run the tests in verbose mode and with looking at the print-output::
$ py.test -v -s test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev18 -- /home/hpk/venv/1/bin/python
cachedir: /tmp/doc-exec-6/.cache
plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov, timeout
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 8 items
test_module.py:16: test_0[1] PASSED
@ -487,7 +466,7 @@ Let's run the tests in verbose mode and with looking at the print-output::
test_module.py:20: test_2[1-mod2] PASSED
test_module.py:20: test_2[2-mod2] PASSED
========================= 8 passed in 0.02 seconds =========================
========================= 8 passed in 0.01 seconds =========================
test0 1
test0 2
create mod1
@ -505,18 +484,20 @@ You can see that the parametrized module-scoped ``modarg`` resource caused
an ordering of test execution that lead to the fewest possible "active" resources. The finalizer for the ``mod1`` parametrized resource was executed
before the ``mod2`` resource was setup.
.. _`usefixtures`:
Marking test classes, modules, projects with required fixtures
using fixtures from classes, modules or projects
----------------------------------------------------------------------
.. regendoc:wipe
Sometimes test functions do not directly get access to a fixture object.
For example, each test in a test class may require to operate with an
Sometimes test functions do not directly need access to a fixture object.
For example, tests may require to operate with an
empty directory as the current working directory. Here is how you can
can use the standard :ref:`tempfile <lib:tempfile>` and pytest fixtures
to achieve it. We separate the creation of the fixture into
a conftest.py file::
can use the standard `tempfile <http://docs.python.org/library/tempfile.html>`_ and pytest fixtures to
achieve it. We separate the creation of the fixture into a conftest.py
file::
# content of conftest.py
@ -529,7 +510,7 @@ a conftest.py file::
newpath = tempfile.mkdtemp()
os.chdir(newpath)
and declare its use in a test module via a ``needs`` marker::
and declare its use in a test module via a ``usefixtures`` marker::
# content of test_setenv.py
import os
@ -546,20 +527,18 @@ and declare its use in a test module via a ``needs`` marker::
assert os.listdir(os.getcwd()) == []
Due to the ``usefixtures`` marker, the ``cleandir`` fixture
will be required for the execution of each of the test methods, just as if
will be required for the execution of each test method, just as if
you specified a "cleandir" function argument to each of them. Let's run it
to verify our fixture is activated::
to verify our fixture is activated and the tests pass::
$ py.test -q
collecting ... collected 2 items
..
2 passed in 0.02 seconds
You may specify the need for multiple fixtures::
You can specify multiple fixtures like this::
@pytest.mark.usefixtures("cleandir", "anotherfixture")
and you may specify fixture needs at the test module level, using
and you may specify fixture usage at the test module level, using
a generic feature of the mark mechanism::
pytestmark = pytest.mark.usefixtures("cleandir")
@ -572,18 +551,19 @@ into an ini-file::
[pytest]
usefixtures = cleandir
.. _`autoactive fixtures`:
autoactive fixtures at class/module/directory/global level
autoactive fixtures (xUnit setup on steroids)
----------------------------------------------------------------------
.. regendoc:wipe
Occasionally, you may want to have fixtures get invoked automatically
without any ``usefixtures`` or funcargs reference. As a practical example,
suppose we have a database fixture which has a begin/rollback/commit
architecture and we want to automatically surround each test method by a
transaction and a rollback. Here is a dummy self-contained implementation
of this idea::
without a `usefixtures`_ or `funcargs`_ reference. As a practical
example, suppose we have a database fixture which has a
begin/rollback/commit architecture and we want to automatically surround
each test method by a transaction and a rollback. Here is a dummy
self-contained implementation of this idea::
# content of test_db_transact.py
@ -617,9 +597,7 @@ specify it.
If we run it, we get two passing tests::
$ py.test -q
collecting ... collected 2 items
..
2 passed in 0.02 seconds
And here is how autoactive fixtures work in other scopes:
@ -658,9 +636,20 @@ and then have a TestClass using it by declaring the need::
While all test methods in this TestClass will use the transaction
fixture, other test classes or function will not do so without a marker or funcarg.
controlled visibility of fixture functions
----------------------------------------------------
If during implementing your tests you realize that you
want to use a fixture function from multiple test files you can move it
to a :ref:`conftest.py <conftest.py>` file or even separately installable
:ref:`plugins <plugins>` without changing test code. The discovery of
fixtures functions starts at test classes, then test modules, then
``conftest.py`` files and finally builtin and third party plugins.
.. currentmodule:: _pytest.python
.. _`@pytest.fixture`:
.. _`pytest.fixture`:
``@pytest.fixture``: marking a fixture function
--------------------------------------------------------------

View File

@ -1,18 +1,19 @@
.. _`funcargcompare`:
=============================================================
pytest-2.3: reasoning for the new funcarg and setup functions
=============================================================
**Target audience**: Reading this document requires basic knowledge of
python testing, xUnit setup methods and the (previous) basic pytest
funcarg mechanism, see http://pytest.org/2.2.4/funcargs.html
If you are new to pytest, then you can simply ignore this
section and read the other sections.
.. currentmodule:: _pytest
Shortcomings of the previous pytest_funcarg__ mechanism
===========================================================
Shortcomings of the previous ``pytest_funcarg__`` mechanism
--------------------------------------------------------------
The pre pytest-2.3 funcarg mechanism calls a factory each time a
funcarg for a test function is required. If a factory wants to
@ -58,12 +59,15 @@ There are several limitations and difficulties with this approach:
funcarg resource if it isn't stated in the test function signature.
All of these limitations are addressed with pytest-2.3 and its
new facilities.
improved :ref:`fixture mechanism <fixture>`.
Direct scoping of funcarg factories
Direct scoping of fixture/funcarg factories
--------------------------------------------------------
Instead of calling cached_setup(), you can use the :ref:`@pytest.fixture <@pytest.fixture>` decorator and directly state the scope::
Instead of calling cached_setup() with a cache scope, you can use the
:ref:`@pytest.fixture <pytest.fixture>` decorator and directly state
the scope::
@pytest.fixture(scope="session")
def db(request):
@ -142,7 +146,7 @@ But it is then not possible to define scoping and parametrization.
It is thus recommended to use the factory decorator.
solving per-session setup / the new @setup marker
solving per-session setup / autoactive fixtures
--------------------------------------------------------------
pytest for a long time offered a pytest_configure and a pytest_sessionstart
@ -169,17 +173,44 @@ during test execution and parametrization happens at collection time.
It follows that pytest_configure/session/runtest_setup are often not
appropriate for implementing common fixture needs. Therefore,
pytest-2.3 introduces a new :ref:`@pytest.setup <setup>` marker
for setup functions and it accepts an optional "scope" parameter.
pytest-2.3 introduces :ref:`autoactive fixtures` which fully
integrate with the generic :ref:`fixture mechanism <fixture>`
and obsolete many prior uses of pytest hooks.
See :ref:`setup` for more explanation and examples.
funcarg and setup discovery now happens at collection time
funcargs/fixture discovery now happens at collection time
---------------------------------------------------------------------
pytest-2.3 takes care to discover funcarg factories and @setup methods
pytest-2.3 takes care to discover fixture/funcarg factories
at collection time. This is more efficient especially for large test suites.
Moreover, a call to "py.test --collectonly" should be able to in the future
show a lot of setup-information and thus presents a nice method to get an
overview of resource management in your project.
overview of fixture management in your project.
.. _`compatibility notes`:
.. _`funcargscompat`:
Conclusion and compatibility notes
---------------------------------------------------------
**Fixtures** were originally introduced to pytest-2.0. In pytest-2.3
the mechanism was extended and refined:
* previously funcarg factories were specified with a special
``pytest_funcarg__NAME`` prefix instead of using the
``@pytest.fixture`` decorator.
* Factories received a :ref:`request <request>` object which managed caching through
``request.cached_setup()`` calls and allowed using other funcargs via
``request.getfuncargvalue()`` calls. These intricate APIs made it hard
to do proper parametrization and implement resource caching. The
new ``@pytest.fixture`` decorator allows to simply declare the scope
and let pytest figure things out for you.
* if you used parametrization and funcarg factories which made use of
``request.cached_setup()`` it is recommended to invest a few minutes
and simplify your fixture function code to use the :ref:`@pytest.fixture`
decorator instead. This will also allow to take advantage of
the automatic per-resource grouping of tests.

View File

@ -1,392 +1,14 @@
.. _resources:
.. _`funcargs`:
.. _`funcarg mechanism`:
=======================================================
funcargs: resource injection and parametrization
=======================================================
.. note::
pytest-2.3 introduces major refinements to the test setup and funcarg
mechanisms introduced to pytest-2.0. All pre-2.3 usages remain
supported and several use cases, among them scoping and parametrization
of funcarg resources, are now easier to accomplish. For more background,
see `compatibility notes`_ and the detailed :ref:`reasoning for the new
funcarg and setup functions <funcargcompare>`.
.. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection
Introduction
====================
pytest supports the injection of test resources into test and setup functions
and flexibly control their life cycle in relation to the overall test
execution. Moreover, tests can get executed multiple times if you have
different variants of test resources to test with.
The basic mechanism for injecting objects is called the *funcarg
mechanism* because objects are injected when a test or setup
**function** states it as an **argument**. The injected argument
is created by a call to a registered **fixture function** for each argument
name. This mechanism is an example of `Dependency Injection`_
and helps to de-couple test code from the setup of required
objects: at test writing time you do not need to care for the details of
where and how your required test resources are constructed, if they are
shared on a per-class, module or session basis, or if your test function
is invoked multiple times with differently configured resource
instances.
Fixture dependency injection allows to organise test resources
in a modular explicit way so that test functions state their needs
in their signature. pytest additionally offers powerful xunit-style
:ref:`setup functions <setup functions>` for the cases where you need
to create implicit test state that is not passed explicitly to test functions.
When a test function is invoked multiple times with different arguments we
speak of **parametrized testing**. You can use it e. g. to repeatedly run test
functions against different database backends or to check that certain
inputs lead to certain outputs.
Concretely, there are three main means of funcarg management:
* a `@pytest.fixture`_ marker to define resource factories,
their scoping and parametrization. Factories can themselves
receive resources through their function arguments, easing
the setup of `interdependent resources`_. Factories can use
the special `request`_ object to access details from where
the factory or setup function is called and for registering finalizers.
* a `@pytest.mark.parametrize`_ marker for executing test functions
multiple times with different argument sets,
* a `pytest_generate_tests`_ plugin hook marker for implementing
your parametrization for a test function which may depend on
command line options, class/module attributes etc.
Apart from making it easy to manage your own test resources
pytest also comes with some :ref:`builtinresources` which
you can use without defining them yourself. Third-party plugins
offer yet more domain-specific funcarg resources (for example the
`pytest-django plugin <http://pypi.python.org/pypi/pytest-django>`_) so
that after plugin installation you can simply use them in
your test and setup functions. This all contributes to high
re-useability of test resource management and goes far beyond what can
be done with the classical xUnit style approach which encodes resource
setup statically into the test source code, leading to duplicate and
hard-to change fixtures.
.. _`@pytest.fixture`:
``@pytest.fixture``: Creating parametrized, scoped resources
=====================================================================
Basic funcarg injection example
-----------------------------------------------------------
Let's look at a simple self-contained test module using a factory
and a funcarg::
# content of ./test_simplefactory.py
import pytest
@pytest.fixture()
def myfuncarg():
return 42
def test_function(myfuncarg):
assert myfuncarg == 17
Here, the ``test_function`` needs an object named ``myfuncarg`` and thus
py.test will discover and call the ``@pytest.fixture`` marked ``myfuncarg``
factory function. Running the tests looks like this::
$ py.test test_simplefactory.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev11
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 1 items
test_simplefactory.py F
================================= FAILURES =================================
______________________________ test_function _______________________________
myfuncarg = 42
def test_function(myfuncarg):
> assert myfuncarg == 17
E assert 42 == 17
test_simplefactory.py:8: AssertionError
========================= 1 failed in 0.01 seconds =========================
This shows that the test function was called with a ``myfuncarg``
argument value of ``42`` and the assert fails as expected. Here is
how py.test comes to call the test function this way:
1. py.test :ref:`finds <test discovery>` the ``test_function`` because
of the ``test_`` prefix. The test function needs a function argument
named ``myfuncarg``. A matching factory function is discovered by
looking for a factory function named ``myfuncarg``.
2. ``myfuncarg()`` is called to create a value ``42``.
3. ``test_function(42)`` is now called and results in the above
reported exception because of the assertion mismatch.
Note that if you misspell a function argument or want
to use one that isn't available, you'll see an error
with a list of available function arguments.
.. Note::
You can always issue::
py.test --fixtures test_simplefactory.py
to see available function arguments.
Location independency of funcarg factories
----------------------------------------------------
If during implementing your tests you realize that you
want to use a factory from multiple test files you can move it
to a :ref:`conftest.py <conftest.py>` file or even separately installable
:ref:`plugins <plugins>` without changing test code. The discovery of
funcarg factories starts at test classes, then test modules, then
``conftest.py`` files and finally builtin and 3-rd party plugins.
.. _`test generators`:
.. _`parametrizing-tests`:
.. _`parametrized test functions`:
Parametrizing test functions
==========================================================================
While the `@pytest.fixture`_ decorator allows defining parametrization
of funcarg resources at the factory-level, there are also means to
define parametrization at test functions directly:
* `@pytest.mark.parametrize`_ to provide multiple argument sets
for a particular test function or class.
* `pytest_generate_tests`_ to implement your own custom parametrization
scheme or extensions.
.. _`@pytest.mark.parametrize`:
``@pytest.mark.parametrize``: parametrizing test functions
---------------------------------------------------------------------
.. regendoc: wipe
.. versionadded:: 2.2
The builtin ``pytest.mark.parametrize`` decorator enables
parametrization of arguments for a test function. Here is a typical example
of a test function that wants to check for expected output given a certain input::
# content of test_expectation.py
import pytest
@pytest.mark.parametrize(("input", "expected"), [
("3+5", 8),
("2+4", 6),
("6*9", 42),
])
def test_eval(input, expected):
assert eval(input) == expected
The ``@parametrize`` decorator defines three different argument sets for the
two ``(input, expected)`` arguments of the ``test_eval`` function so the latter
will be run three times::
$ py.test -q
collecting ... collected 13 items
....F........
================================= FAILURES =================================
____________________________ test_eval[6*9-42] _____________________________
input = '6*9', expected = 42
@pytest.mark.parametrize(("input", "expected"), [
("3+5", 8),
("2+4", 6),
("6*9", 42),
])
def test_eval(input, expected):
> assert eval(input) == expected
E assert 54 == 42
E + where 54 = eval('6*9')
test_expectation.py:8: AssertionError
1 failed, 12 passed in 6.41 seconds
As expected only one pair of input/output values fails the simple test function.
As usual you can see the ``input`` and ``expected`` values in the traceback.
Note that there are various ways how you can mark groups of functions,
see :ref:`mark`.
.. _`pytest_generate_tests`:
Basic ``pytest_generate_tests`` example
---------------------------------------------
.. XXX
> line 598 "Basic ``pytest_generate_tests`` example" - I think this is
> not a very basic example! I think it is copied from parametrize.txt
> page, where it might make more sense. Here is what I would consider a
> basic example.
>
> # code
> def isSquare(n):
> n = n ** 0.5
> return int(n) == n
>
> # test file
> def pytest_generate_tests(metafunc):
> squares = [1, 4, 9, 16, 25, 36, 49]
> for n in range(1, 50):
> expected = n in squares
> if metafunc.function.__name__ == 'test_isSquare':
> metafunc.addcall(id=n, funcargs=dict(n=n,
> expected=expected))
>
>
> def test_isSquare(n, expected):
> assert isSquare(n) == expected
.. XXX
consider adding more examples, also mixed (factory-parametrized/test-function-parametrized, see mail from Brianna)
The ``pytest_generate_tests`` hook is typically used if you want
to go beyond what ``@pytest.mark.parametrize`` offers. For example,
let's say we want to execute a test with different computation
parameters and the parameter range shall be determined by a command
line argument. Let's first write a simple (do-nothing) computation test::
# content of test_compute.py
def test_compute(param1):
assert param1 < 4
Now we add a ``conftest.py`` file containing the addition of a
command line option and the generation of tests depending on
that option::
# content of conftest.py
def pytest_addoption(parser):
parser.addoption("--all", action="store_true",
help="run all combinations")
def pytest_generate_tests(metafunc):
if 'param1' in metafunc.fixturenames:
if metafunc.config.option.all:
end = 5
else:
end = 2
metafunc.parametrize("param1", range(end))
This means that we only run two tests if no option is passed::
$ py.test -q test_compute.py
collecting ... collected 2 items
..
2 passed in 0.01 seconds
And we run five tests if we add the ``--all`` option::
$ py.test -q --all test_compute.py
collecting ... collected 5 items
....F
================================= FAILURES =================================
_____________________________ test_compute[4] ______________________________
param1 = 4
def test_compute(param1):
> assert param1 < 4
E assert 4 < 4
test_compute.py:3: AssertionError
1 failed, 4 passed in 0.02 seconds
As expected when running the full range of ``param1`` values
we'll get an error on the last one.
You might want to look at :ref:`more parametrization examples <paramexamples>`.
.. _`metafunc object`:
The **metafunc** object
-------------------------------------------
metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a testfunction and to generate tests
according to test configuration or values specified
in the class or module where a test function is defined:
``metafunc.fixturenames``: set of required function arguments for given function
``metafunc.function``: underlying python test function
``metafunc.cls``: class object where the test function is defined in or None.
``metafunc.module``: the module object where the test function is defined in.
``metafunc.config``: access to command line opts and general config
``metafunc.funcargnames``: alias for ``fixturenames``, for pre-2.3 compatibility
.. automethod:: Metafunc.parametrize
.. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists)
.. regendoc:wipe
.. _`compatibility notes`:
.. _`funcargscompat`:
Compatibility notes
============================================================
**Fixtures** were originally introduced to pytest-2.0. In pytest-2.3
the mechanism was extended and refined:
* previously funcarg factories were specified with a special
``pytest_funcarg__NAME`` prefix instead of using the
``@pytest.fixture`` decorator.
* Factories received a `request`_ object which managed caching through
``request.cached_setup()`` calls and allowed using other funcargs via
``request.getfuncargvalue()`` calls. These intricate APIs made it hard
to do proper parametrization and implement resource caching. The
new ``@pytest.fixture`` decorator allows to simply declare the scope
and let pytest figure things out for you.
* if you used parametrization and funcarg factories which made use of
``request.cached_setup()`` it is recommended to invest a few minutes
and simplify your fixture function code to use the `@pytest.fixture`_
decorator instead. This will also allow to take advantage of
the `automatic per-resource grouping`_ of tests.
.. note::
Throughout the pytest documents the ``pytest_funcarg__NAME`` way of
defining a fixture function is often termed "old-style". Their
use remains fully supported and existing code using it should run
unmodified.
pytest-2.3 introduces major refinements to fixture management
of which the funcarg mechanism introduced with pytest-2.0 remains
a core part. The documentation has been refactored as well
and you can read on here:
- :ref:`fixtures`
- :ref:`parametrize`
- :ref:`funcargcompare`

View File

@ -1,7 +1,7 @@
Installation and Getting Started
===================================
**Pythons**: Python 2.4-3.2, Jython, PyPy
**Pythons**: Python 2.4-3.3, Jython, PyPy
**Platforms**: Unix/Posix and Windows
@ -22,14 +22,7 @@ Installation options::
To check your installation has installed the correct version::
$ py.test --version
This is py.test version 2.3.0.dev2, imported from /home/hpk/p/pytest/pytest.pyc
setuptools registered plugins:
pytest-xdist-1.8 at /home/hpk/p/pytest-xdist/xdist/plugin.pyc
pytest-bugzilla-0.1 at /home/hpk/tmp/eanxgeek/pytest_bugzilla.pyc
pytest-cache-0.9 at /home/hpk/p/pytest-cache/pytest_cache.pyc
oejskit-0.9.0 at /home/hpk/p/js-infrastructure/oejskit/pytest_jstests.pyc
pytest-pep8-1.0.1 at /home/hpk/venv/1/local/lib/python2.7/site-packages/pytest_pep8.pyc
pytest-cov-1.6 at /home/hpk/venv/1/local/lib/python2.7/site-packages/pytest_cov.pyc
This is py.test version 2.3.0.dev19, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc
If you get an error checkout :ref:`installation issues`.
@ -51,9 +44,8 @@ That's it. You can execute the test function now::
$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
test_sample.py F
@ -66,7 +58,7 @@ That's it. You can execute the test function now::
E + where 4 = func(3)
test_sample.py:5: AssertionError
========================= 1 failed in 0.02 seconds =========================
========================= 1 failed in 0.01 seconds =========================
py.test found the ``test_answer`` function by following :ref:`standard test discovery rules <test discovery>`, basically detecting the ``test_`` prefixes. We got a failure report because our little ``func(3)`` call did not return ``5``.
@ -99,9 +91,7 @@ use the ``raises`` helper::
Running it with, this time in "quiet" reporting mode::
$ py.test -q test_sysexit.py
collecting ... collected 1 items
.
1 passed in 0.02 seconds
.. todo:: For further ways to assert exceptions see the `raises`
@ -127,12 +117,11 @@ There is no need to subclass anything. We can simply
run the module by passing its filename::
$ py.test -q test_class.py
collecting ... collected 2 items
.F
================================= FAILURES =================================
____________________________ TestClass.test_two ____________________________
self = <test_class.TestClass instance at 0x2343830>
self = <test_class.TestClass instance at 0x1ce8758>
def test_two(self):
x = "hello"
@ -140,7 +129,6 @@ run the module by passing its filename::
E assert hasattr('hello', 'check')
test_class.py:8: AssertionError
1 failed, 1 passed in 0.02 seconds
The first test passed, the second failed. Again we can easily see
the intermediate values used in the assertion, helping us to
@ -150,9 +138,9 @@ Going functional: requesting a unique temporary directory
--------------------------------------------------------------
For functional tests one often needs to create some files
and pass them to application objects. py.test provides
the versatile :ref:`funcarg mechanism` which allows to request
arbitrary resources, for example a unique temporary directory::
and pass them to application objects. pytest provides
:ref:`builtinfixtures` which allow to request arbitrary
resources, for example a unique temporary directory::
# content of test_tmpdir.py
def test_needsfiles(tmpdir):
@ -160,16 +148,15 @@ arbitrary resources, for example a unique temporary directory::
assert 0
We list the name ``tmpdir`` in the test function signature and
py.test will lookup and call a factory to create the resource
py.test will lookup and call a fixture factory to create the resource
before performing the test function call. Let's just run it::
$ py.test -q test_tmpdir.py
collecting ... collected 1 items
F
================================= FAILURES =================================
_____________________________ test_needsfiles ______________________________
tmpdir = local('/home/hpk/tmp/pytest-2885/test_needsfiles0')
tmpdir = local('/tmp/pytest-168/test_needsfiles0')
def test_needsfiles(tmpdir):
print tmpdir
@ -178,15 +165,14 @@ before performing the test function call. Let's just run it::
test_tmpdir.py:3: AssertionError
----------------------------- Captured stdout ------------------------------
/home/hpk/tmp/pytest-2885/test_needsfiles0
1 failed in 0.22 seconds
/tmp/pytest-168/test_needsfiles0
Before the test runs, a unique-per-test-invocation temporary directory
was created. More info at :ref:`tmpdir handling`.
You can find out what kind of builtin :ref:`funcargs` exist by typing::
You can find out what kind of builtin :ref:`fixtures` exist by typing::
py.test --fixtures # shows builtin and custom function arguments
py.test --fixtures # shows builtin and custom fixtures
Where to go next
-------------------------------------

View File

@ -91,9 +91,6 @@ required for calling the test command. You can also pass additional
arguments to the subprocess-calls such as your test directory or other
options.
.. _`test discovery`:
.. _`Python test discovery`:
Integration with setuptools/distribute test commands
----------------------------------------------------
@ -129,6 +126,8 @@ Now if you run::
this will download py.test if needed and then run py.test
as you would expect it to.
.. _`test discovery`:
.. _`Python test discovery`:
Conventions for Python test discovery
-------------------------------------------------

View File

@ -1,11 +1,11 @@
Welcome to pytest!
pytest: makes you a better programmer
=============================================
- **a mature full-featured testing tool**
- **a mature full-featured Python testing tool**
- runs on Posix/Windows, Python 2.4-3.2, PyPy and Jython-2.5.1
- runs on Posix/Windows, Python 2.4-3.3, PyPy and Jython-2.5.1
- :ref:`comprehensive online <toc>` and `PDF documentation <pytest.pdf>`_
- continuously `tested on many Python interpreters <http://hudson.testrun.org/view/pytest/job/pytest/>`_
- used in :ref:`many projects and organisations <projects>`, in test
@ -25,12 +25,9 @@ Welcome to pytest!
- **supports functional testing and complex test setups**
- (new in 2.3) :ref:`easy test resource management, scoping and
parametrization <resources>`
- (new in 2.3) :ref:`setup functions`.
- (new in 2.2) :ref:`durations`
- (much improved in 2.2) :ref:`marking and test selection <mark>`
- (improved in 2.2) :ref:`parametrized test functions <parametrized test functions>`
- (new in 2.3) :ref:`modular parametrizeable fixtures <fixture>`
- :ref:`marking and test selection <mark>`
- :ref:`parametrized test functions <parametrized test functions>`
- advanced :ref:`skip and xfail`
- can :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
- can :ref:`continuously re-run failing tests <looponfailing>`

188
doc/en/parametrize.txt Normal file
View File

@ -0,0 +1,188 @@
.. _`test generators`:
.. _`parametrizing-tests`:
.. _`parametrized test functions`:
.. _`parametrize`:
Parametrizing fixtures and test functions
==========================================================================
While the :ref:`@pytest.fixture` decorator allows defining parametrization
at the level of fixture functions, there are two more parametrizations:
* `@pytest.mark.parametrize`_ to provide multiple argument/fixture sets
for a particular test function or class.
* `pytest_generate_tests`_ to implement your own custom parametrization
scheme or extensions.
.. _`@pytest.mark.parametrize`:
``@pytest.mark.parametrize``: parametrizing test functions
---------------------------------------------------------------------
.. regendoc: wipe
.. versionadded:: 2.2
The builtin ``pytest.mark.parametrize`` decorator enables
parametrization of arguments for a test function. Here is a typical example
of a test function that wants to check for expected output given a certain input::
# content of test_expectation.py
import pytest
@pytest.mark.parametrize(("input", "expected"), [
("3+5", 8),
("2+4", 6),
("6*9", 42),
])
def test_eval(input, expected):
assert eval(input) == expected
The ``@parametrize`` decorator defines three different argument sets for the
two ``(input, expected)`` arguments of the ``test_eval`` function so the latter
will be run three times::
$ py.test -q
..F
================================= FAILURES =================================
____________________________ test_eval[6*9-42] _____________________________
input = '6*9', expected = 42
@pytest.mark.parametrize(("input", "expected"), [
("3+5", 8),
("2+4", 6),
("6*9", 42),
])
def test_eval(input, expected):
> assert eval(input) == expected
E assert 54 == 42
E + where 54 = eval('6*9')
test_expectation.py:8: AssertionError
As expected only one pair of input/output values fails the simple test function.
As usual you can see the ``input`` and ``expected`` values in the traceback.
Note that there are various ways how you can mark groups of functions,
see :ref:`mark`.
.. _`pytest_generate_tests`:
Basic ``pytest_generate_tests`` example
---------------------------------------------
.. XXX
> line 598 "Basic ``pytest_generate_tests`` example" - I think this is
> not a very basic example! I think it is copied from parametrize.txt
> page, where it might make more sense. Here is what I would consider a
> basic example.
>
> # code
> def isSquare(n):
> n = n ** 0.5
> return int(n) == n
>
> # test file
> def pytest_generate_tests(metafunc):
> squares = [1, 4, 9, 16, 25, 36, 49]
> for n in range(1, 50):
> expected = n in squares
> if metafunc.function.__name__ == 'test_isSquare':
> metafunc.addcall(id=n, funcargs=dict(n=n,
> expected=expected))
>
>
> def test_isSquare(n, expected):
> assert isSquare(n) == expected
.. XXX
consider adding more examples, also mixed (factory-parametrized/test-function-parametrized, see mail from Brianna)
The ``pytest_generate_tests`` hook is typically used if you want
to go beyond what ``@pytest.mark.parametrize`` offers. For example,
let's say we want to execute a test with different computation
parameters and the parameter range shall be determined by a command
line argument. Let's first write a simple (do-nothing) computation test::
# content of test_compute.py
def test_compute(param1):
assert param1 < 4
Now we add a ``conftest.py`` file containing the addition of a
command line option and the generation of tests depending on
that option::
# content of conftest.py
def pytest_addoption(parser):
parser.addoption("--all", action="store_true",
help="run all combinations")
def pytest_generate_tests(metafunc):
if 'param1' in metafunc.fixturenames:
if metafunc.config.option.all:
end = 5
else:
end = 2
metafunc.parametrize("param1", range(end))
This means that we only run two tests if no option is passed::
$ py.test -q test_compute.py
..
And we run five tests if we add the ``--all`` option::
$ py.test -q --all test_compute.py
....F
================================= FAILURES =================================
_____________________________ test_compute[4] ______________________________
param1 = 4
def test_compute(param1):
> assert param1 < 4
E assert 4 < 4
test_compute.py:3: AssertionError
As expected when running the full range of ``param1`` values
we'll get an error on the last one.
You might want to look at :ref:`more parametrization examples <paramexamples>`.
.. _`metafunc object`:
The **metafunc** object
-------------------------------------------
.. currentmodule:: _pytest.python
metafunc objects are passed to the ``pytest_generate_tests`` hook.
They help to inspect a testfunction and to generate tests
according to test configuration or values specified
in the class or module where a test function is defined:
``metafunc.fixturenames``: set of required function arguments for given function
``metafunc.function``: underlying python test function
``metafunc.cls``: class object where the test function is defined in or None.
``metafunc.module``: the module object where the test function is defined in.
``metafunc.config``: access to command line opts and general config
``metafunc.funcargnames``: alias for ``fixturenames``, for pre-2.3 compatibility
.. automethod:: Metafunc.parametrize
.. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists)

View File

@ -1,12 +1,10 @@
Page has moved to fixture
setup: is now an "autoactive fixture"
========================================================
During development prior to the pytest-2.3 release the name
``pytest.setup`` was used but before the release it was renamed
to :ref:`pytest.fixture` mainly to avoid the misconception that there
should be a ``pytest.teardown`` as well.
and moved to become part of the general fixture mechanism,
namely :ref:`autoactive fixtures`
Please refer to :ref:`pytest.fixture` for information on the new
fixture functions.

View File

@ -130,9 +130,8 @@ Running it with the report-on-xfail option gives this output::
example $ py.test -rx xfail_demo.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 6 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 6 items
xfail_demo.py xxxxxx
========================= short test summary info ==========================
@ -148,7 +147,7 @@ Running it with the report-on-xfail option gives this output::
XFAIL xfail_demo.py::test_hello6
reason: reason
======================== 6 xfailed in 0.04 seconds =========================
======================== 6 xfailed in 0.03 seconds =========================
.. _`evaluation of skipif/xfail conditions`:

View File

@ -18,12 +18,11 @@ Basic usage and funcargs:
- `pycon 2010 tutorial PDF`_ and `tutorial1 repository`_
Function arguments:
Fixtures and Function arguments:
- :ref:`mysetup`
- `application setup in test functions with funcargs`_
- :ref:`fixtures`
- `monkey patching done right`_ (blog post, consult `monkeypatch
plugin`_ for actual 1.0 API)
plugin`_ for up-to-date API)
Test parametrization:

View File

@ -28,16 +28,15 @@ Running this would result in a passed test except for the last
$ py.test test_tmpdir.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev2
plugins: xdist, bugzilla, cache, oejskit, pep8, cov
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
test_tmpdir.py F
================================= FAILURES =================================
_____________________________ test_create_file _____________________________
tmpdir = local('/home/hpk/tmp/pytest-2886/test_create_file0')
tmpdir = local('/tmp/pytest-169/test_create_file0')
def test_create_file(tmpdir):
p = tmpdir.mkdir("sub").join("hello.txt")
@ -48,7 +47,7 @@ Running this would result in a passed test except for the last
E assert 0
test_tmpdir.py:7: AssertionError
========================= 1 failed in 0.23 seconds =========================
========================= 1 failed in 0.02 seconds =========================
.. _`base temporary directory`:

View File

@ -1,7 +1,7 @@
.. _`unittest.TestCase`:
Support for unittest.TestCase
Support for unittest.TestCase / Integration of fixtures
=====================================================================
py.test has limited support for running Python `unittest.py style`_ tests.
@ -24,9 +24,8 @@ Running it yields::
$ py.test test_unittest.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev12
plugins: xdist, bugzilla, cache, oejskit, cli, timeout, pep8, cov
collecting ... collected 1 items
platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
collected 1 items
test_unittest.py F
@ -43,20 +42,20 @@ Running it yields::
test_unittest.py:8: AssertionError
----------------------------- Captured stdout ------------------------------
hello
========================= 1 failed in 0.03 seconds =========================
========================= 1 failed in 0.01 seconds =========================
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
Moreover, you can use the new :ref:`@pytest.setup functions <@pytest.setup>`
functions and make use of pytest's unique :ref:`funcarg mechanism` in your
test suite::
Moreover, you can use pytest's new :ref:`autoactive fixtures`
functions, thereby connecting pytest's :ref:`fixture mechanism <fixture>`
with a setup/teardown style::
# content of test_unittest_funcargs.py
import pytest
import unittest
class MyTest(unittest.TestCase):
@pytest.setup()
@pytest.fixture(autoactive=True)
def chdir(self, tmpdir):
tmpdir.chdir() # change to pytest-provided temporary directory
tmpdir.join("samplefile.ini").write("# testdata")
@ -70,41 +69,66 @@ function took care to prepare a directory with some test data
which the unittest-testcase method can now use::
$ py.test -q test_unittest_funcargs.py
collecting ... collected 1 items
.
1 passed in 0.28 seconds
If you want to make a database attribute available on unittest.TestCases
instances, based on a marker, you can do it using :ref:`pytest.mark`` and
:ref:`setup functions`::
instances, you can do it using :ref:`usefixtures` and a simple
:ref:`fixture function`::
# content of test_unittest_marked_db.py
import pytest
import unittest
@pytest.fixture()
def db():
@pytest.fixture
def db(request):
class DummyDB:
x = 1
return DummyDB()
@pytest.setup()
def stick_db_to_self(request, db):
if hasattr(request.node.markers, "needsdb"):
entries = []
db = DummyDB()
if request.instance is not None:
request.instance.db = db
return db
@pytest.mark.usefixtures("db")
class MyTest(unittest.TestCase):
def test_method(self):
assert not hasattr(self, "db")
def test_append(self):
self.db.entries.append(1)
@pytest.mark.needsdb
def test_method2(self):
assert self.db.x == 1
# check we have a fresh instance
assert len(self.db.entries) == 0
Running it passes both tests, one of which will see a ``db`` attribute
because of the according ``needsdb`` marker::
Running it passes both tests::
$ py.test -q test_unittest_marked_db.py
collecting ... collected 2 items
..
2 passed in 0.03 seconds
If you rather want to provide a class-cached "db" attribute, you
can write a slightly different fixture using a ``scope`` parameter
for the fixture decorator ::
# content of test_unittest_class_db.py
import pytest
import unittest
@pytest.fixture(scope="class")
def db_class(request):
class DummyDB:
entries = []
db = DummyDB()
if request.cls is not None:
request.cls.db = db
return db
@pytest.mark.usefixtures("db_class")
class MyTest(unittest.TestCase):
def test_append(self):
self.db.entries.append(1)
def test_method2(self):
# check we DONT have a fresh instance
assert len(self.db.entries) == 1
Running it again passes both tests::
$ py.test -q test_unittest_class_db.py
..

View File

@ -183,9 +183,7 @@ Running it will show that ``MyPlugin`` was added and its
hook was invoked::
$ python myinvoke.py
collecting ... collected 0 items
in 0.01 seconds
*** test run reporting finishing
.. include:: links.inc

View File

@ -1,16 +1,17 @@
.. _`classic xunit`:
.. _xunitsetup:
classic xunit-style setup
========================================
.. note::
This section describes the old way how you can implement setup and
This section describes the classic way how you can implement setup and
teardown on a per-module/class/function basis. It remains fully
supported but it is recommended to rather use :ref:`fixture functions
<fixture>` or :ref:`funcargs <resources>` for implementing your
needs to prepare and fix the test state for your tests.
supported but it is recommended to rather use the more flexible,
more modular and more scalable :ref:`fixture functions
<fixture>` for fixing test state for your tests.
Module level setup/teardown
--------------------------------------

11
tox.ini
View File

@ -43,13 +43,22 @@ deps=
[testenv:doc]
basepython=python
changedir=doc
changedir=doc/en
deps=:pypi:sphinx
pytest
commands=
make clean
make html
[testenv:regen]
basepython=python
changedir=doc/en
deps=:pypi:sphinx
pytest
commands=
make regen
[testenv:py31]
deps=:pypi:nose>=1.0