diff --git a/_pytest/core.py b/_pytest/core.py index 93bd553b2..b231c1686 100644 --- a/_pytest/core.py +++ b/_pytest/core.py @@ -463,9 +463,13 @@ def _prepareconfig(args=None, plugins=None): pluginmanager=_pluginmanager, args=args) def main(args=None, plugins=None): - """ returned exit code integer, after an in-process testing run - with the given command line arguments, preloading an optional list - of passed in plugin objects. """ + """ return exit code, after performing an in-process test run. + + :arg args: list of command line arguments. + + :arg plugins: list of plugin objects to be auto-registered during + initialization. + """ config = _prepareconfig(args, plugins) exitstatus = config.hook.pytest_cmdline_main(config=config) return exitstatus diff --git a/_pytest/python.py b/_pytest/python.py index 04a0d6d47..cf1c9217b 100644 --- a/_pytest/python.py +++ b/_pytest/python.py @@ -637,27 +637,26 @@ class Metafunc(FuncargnamesCompatAttr): """ Add new invocations to the underlying test function using the list of argvalues for the given argnames. Parametrization is performed during the collection phase. If you need to setup expensive resources - you may pass indirect=True and implement a fixture function which can - perform the expensive setup just before a test is actually run. + consider setting indirect=True to perform it at test setup time instead. :arg argnames: an argument name or a list of argument names :arg argvalues: a list of values for the argname or a list of tuples of values for the list of argument names. - :arg indirect: if True each argvalue corresponding to an argument will - be passed as request.param to its respective fixture function so - that it can perform more expensive setups during the setup phase of - a test rather than at collection time. + :arg indirect: if True each argvalue corresponding to an argname will + be passed as request.param to its respective argname fixture + function so that it can perform more expensive setups during the + setup phase of a test rather than at collection time. :arg ids: list of string ids each corresponding to the argvalues so that they are part of the test id. If no ids are provided they will be generated automatically from the argvalues. - :arg scope: if specified: denotes the scope of the parameters. - The scope is used for sorting tests by parameters. It will - also override any fixture-function defined scope, allowing - to set a dynamic scope from test context and configuration. + :arg scope: if specified it denotes the scope of the parameters. + The scope is used for grouping tests by parameter instances. + It will also override any fixture-function defined scope, allowing + to set a dynamic scope using test context or configuration. """ if not isinstance(argnames, (tuple, list)): argnames = (argnames,) @@ -762,7 +761,7 @@ def _showfixtures_main(config, session): pluginname = plugin.__name__ for name, factory in available: loc = getlocation(factory, curdir) - if verbose: + if verbose > 0: funcargspec = "%s -- %s" %(name, loc,) else: funcargspec = name @@ -989,24 +988,25 @@ def scopeproperty(name=None, doc=None): class FixtureRequest(FuncargnamesCompatAttr): - """ A request for fixtures from a test or fixture function. + """ A request for a fixture from a test or fixture function. - A request object gives access to attributes of the requesting - test context. It has an optional ``param`` attribute in case - of parametrization.
+ A request object gives access to the requesting test context + and has an optional ``param`` attribute in case + the fixture is parametrized indirectly. """ def __init__(self, pyfuncitem): self._pyfuncitem = pyfuncitem if hasattr(pyfuncitem, '_requestparam'): self.param = pyfuncitem._requestparam + #: fixture for which this request is being performed + self.fixturename = None #: Scope string, one of "function", "cls", "module", "session" self.scope = "function" self.getparent = pyfuncitem.getparent self._funcargs = self._pyfuncitem.funcargs.copy() self._arg2fixturedeflist = {} self._fixturemanager = pyfuncitem.session._fixturemanager - self._currentarg = None self._parentid = pyfuncitem.parent.nodeid self.fixturenames, self._arg2fixturedeflist_ = \ self._fixturemanager.getfixtureclosure( @@ -1087,7 +1087,7 @@ class FixtureRequest(FuncargnamesCompatAttr): if scope != "function" and hasattr(self, "param"): # parametrized resources are sorted by param # so we rather store finalizers per (argname, param) - colitem = (self._currentarg, self.param) + colitem = (self.fixturename, self.param) else: colitem = self._getscopeitem(scope) self._pyfuncitem.session._setupstate.addfinalizer( @@ -1131,7 +1131,7 @@ class FixtureRequest(FuncargnamesCompatAttr): """ if not hasattr(self.config, '_setupcache'): self.config._setupcache = {} # XXX weakref? - cachekey = (self._currentarg, self._getscopeitem(scope), extrakey) + cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) cache = self.config._setupcache try: val = cache[cachekey] @@ -1152,17 +1152,13 @@ class FixtureRequest(FuncargnamesCompatAttr): return val def getfuncargvalue(self, argname): - """ Retrieve a fixture function argument by name for this test - function invocation. This allows one function argument factory - to call another function argument factory. If there are two - funcarg factories for the same test function argument the first - factory may use ``getfuncargvalue`` to call the second one and - do something additional with the resource. + """ Dynamically retrieve a named fixture function argument. - **Note**, however, that starting with pytest-2.3 it is usually - easier and better to directly use the needed funcarg in the - factory function signature. This will also work seemlessly - with parametrization and the new resource setup optimizations. + As of pytest-2.3, it is easier and usually better to access other + fixture values by naming them as input arguments in the fixture + function. If you can only decide whether to use another fixture at + test setup time, you may use this function to retrieve it inside a + fixture function body.
""" try: return self._funcargs[argname] @@ -1187,12 +1183,12 @@ class FixtureRequest(FuncargnamesCompatAttr): if fixturedef.active: return fixturedef.cached_result - # prepare request _currentarg and param attributes before + # prepare request fixturename and param attributes before # calling into fixture function argname = fixturedef.argname node = self._pyfuncitem mp = monkeypatch() - mp.setattr(self, '_currentarg', argname) + mp.setattr(self, 'fixturename', argname) try: param = node.callspec.getparam(argname) except (AttributeError, ValueError): @@ -1246,7 +1242,6 @@ class FixtureRequest(FuncargnamesCompatAttr): p, lineno, factory.__name__, args)) return lines - def _getscopeitem(self, scope): if scope == "function": return self._pyfuncitem @@ -1262,7 +1257,7 @@ class FixtureRequest(FuncargnamesCompatAttr): raise ValueError("unknown finalization scope %r" %(scope,)) def __repr__(self): - return "" %(self._pyfuncitem) + return "" %(self.node) class ScopeMismatchError(Exception): """ A fixture function tries to use a different fixture function which diff --git a/_pytest/runner.py b/_pytest/runner.py index c774835de..01e6a4840 100644 --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -409,7 +409,9 @@ skip.Exception = Skipped def fail(msg="", pytrace=True): """ explicitely fail an currently-executing test with the given Message. - if @pytrace is not True the msg represents the full failure information. + + :arg pytrace: if false the msg represents the full failure information + and no python traceback will be reported. """ __tracebackhide__ = True raise Failed(msg=msg, pytrace=pytrace) diff --git a/doc/en/assert.txt b/doc/en/assert.txt index ed3f58c60..c60c329f2 100644 --- a/doc/en/assert.txt +++ b/doc/en/assert.txt @@ -24,7 +24,7 @@ you will see the return value of the function call:: $ py.test test_assert1.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 1 items test_assert1.py F @@ -55,6 +55,8 @@ will be simply shown in the traceback. See :ref:`assert-details` for more information on assertion introspection. +.. _`assertraises`: + Assertions about expected exceptions ------------------------------------------ @@ -106,7 +108,7 @@ if you run this module:: $ py.test test_assert2.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 1 items test_assert2.py F diff --git a/doc/en/builtin.txt b/doc/en/builtin.txt index 7f42d75d0..dc3cadd12 100644 --- a/doc/en/builtin.txt +++ b/doc/en/builtin.txt @@ -1,35 +1,78 @@ .. _`pytest helpers`: -Pytest builtin helpers +Pytest API and builtin fixtures ================================================ -builtin pytest.* functions and helping objects ------------------------------------------------------ +This is a list of ``pytest.*`` API functions and fixtures. -You can always use an interactive Python prompt and type:: +For information on plugin hooks and objects, see :ref:`plugins`. + +For information on the ``pytest.mark`` mechanism, see :ref:`mark`. + +For the below objects, you can also interactively ask for help, e.g. by +typing on the Python interactive prompt something like:: import pytest help(pytest) -to get an overview on the globally available helpers. +.. currentmodule:: pytest -.. 
automodule:: pytest +Invoking pytest interactively +--------------------------------------------------- + +.. autofunction:: main + +More examples at :ref:`pytest.main-usage` + + +Helpers for assertions about Exceptions/Warnings +-------------------------------------------------------- + +.. autofunction:: raises + +Examples at :ref:`assertraises`. + +.. autofunction:: deprecated_call + +Raising a specific test outcome +-------------------------------------- + +You can use the following functions in your test, fixture or setup +functions to force a certain test outcome. Note that most often +you can rather use declarative marks, see :ref:`skipping`. + +.. autofunction:: fail +.. autofunction:: skip +.. autofunction:: importorskip +.. autofunction:: xfail +.. autofunction:: exit + +fixtures and requests +----------------------------------------------------- + +To mark a fixture function: + +.. autofunction:: fixture + +Tutorial at :ref:`fixtures`. + +The ``request`` object that can be used from fixture functions. + +.. autoclass:: _pytest.python.FixtureRequest() :members: + .. _builtinfixtures: .. _builtinfuncargs: -Builtin resources / function arguments ------------------------------------------------------ +Builtin fixtures/function arguments +----------------------------------------- You can ask for available builtin or project-custom -:ref:`function arguments ` by typing:: +:ref:`fixtures ` by typing:: - $ py.test --fixtures - =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 - collected 0 items + $ py.test -q --fixtures capsys enables capturing of writes to sys.stdout/sys.stderr and makes captured output available via ``capsys.readouterr()`` method calls @@ -75,4 +118,3 @@ You can ask for available builtin or project-custom on warning categories. - ============================= in 0.00 seconds ============================= diff --git a/doc/en/capture.txt b/doc/en/capture.txt index f2ff00401..ee14bb9df 100644 --- a/doc/en/capture.txt +++ b/doc/en/capture.txt @@ -64,7 +64,7 @@ of the failing function and hide the other one:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 2 items test_module.py .F @@ -78,7 +78,7 @@ of the failing function and hide the other one:: test_module.py:9: AssertionError ----------------------------- Captured stdout ------------------------------ - setting up + setting up ==================== 1 failed, 1 passed in 0.01 seconds ==================== Accessing captured output from a test function diff --git a/doc/en/conf.py b/doc/en/conf.py index 0fec05496..0dc0754d3 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -17,7 +17,7 @@ # # The full version, including alpha/beta/rc tags. # The short X.Y version. -version = release = "2.3.0.dev19" +version = release = "2.3.0.dev20" import sys, os @@ -71,7 +71,11 @@ copyright = u'2011, holger krekel et alii' # directories to ignore when looking for source files. 
exclude_patterns = ['links.inc', '_build', 'naming20.txt', 'test/*', "old_*", - 'example/attic.txt', + '*attic*', + '*/attic*', + 'funcargs.txt', + 'setup.txt', + 'example/remoteinterp.txt', ] diff --git a/doc/en/contents.txt b/doc/en/contents.txt index 7138ca9c8..eb5e3699b 100644 --- a/doc/en/contents.txt +++ b/doc/en/contents.txt @@ -24,8 +24,4 @@ Full pytest documentation :hidden: changelog.txt - funcargs - example/resources_attic - setup.txt - example/remoteinterp.txt diff --git a/doc/en/doctest.txt b/doc/en/doctest.txt index 8a25b28f3..13974c090 100644 --- a/doc/en/doctest.txt +++ b/doc/en/doctest.txt @@ -44,7 +44,7 @@ then you can just invoke ``py.test`` without command line options:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 1 items mymodule.py . diff --git a/doc/en/example/markers.txt b/doc/en/example/markers.txt index 257390a8c..43b355121 100644 --- a/doc/en/example/markers.txt +++ b/doc/en/example/markers.txt @@ -26,19 +26,19 @@ You can then restrict a test run to only run tests marked with ``webtest``:: $ py.test -v -m webtest =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 2 items test_server.py:3: test_send_http PASSED =================== 1 tests deselected by "-m 'webtest'" =================== - ================== 1 passed, 1 deselected in 0.01 seconds ================== + ================== 1 passed, 1 deselected in 0.00 seconds ================== Or the inverse, running all tests except the webtest ones:: $ py.test -v -m "not webtest" =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 2 items test_server.py:6: test_something_quick PASSED @@ -143,7 +143,7 @@ the given argument:: $ py.test -k send_http # running with the above defined examples =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 4 items test_server.py . @@ -155,7 +155,7 @@ And you can also run all tests except the ones that match the keyword:: $ py.test -k-send_http =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 4 items test_mark_classlevel.py .. @@ -168,7 +168,7 @@ Or to only select the class:: $ py.test -kTestClass =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 4 items test_mark_classlevel.py .. 
@@ -221,18 +221,18 @@ the test needs:: $ py.test -E stage2 =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 1 items test_someenv.py s - ======================== 1 skipped in 0.00 seconds ========================= + ======================== 1 skipped in 0.01 seconds ========================= and here is one that specifies exactly the environment needed:: $ py.test -E stage1 =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 1 items test_someenv.py . @@ -347,12 +347,12 @@ then you will see two test skipped and two executed tests as expected:: $ py.test -rs # this option reports skip reasons =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 4 items test_plat.py s.s. ========================= short test summary info ========================== - SKIP [2] /tmp/doc-exec-257/conftest.py:12: cannot run on platform linux2 + SKIP [2] /tmp/doc-exec-189/conftest.py:12: cannot run on platform linux2 =================== 2 passed, 2 skipped in 0.01 seconds ==================== @@ -360,7 +360,7 @@ Note that if you specify a platform via the marker-command line option like this $ py.test -m linux2 =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 4 items test_plat.py . diff --git a/doc/en/example/nonpython.txt b/doc/en/example/nonpython.txt index eb34f34ad..2dfa62a06 100644 --- a/doc/en/example/nonpython.txt +++ b/doc/en/example/nonpython.txt @@ -27,7 +27,7 @@ now execute the test specification:: nonpython $ py.test test_simple.yml =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 0 items / 1 errors ================================== ERRORS ================================== @@ -54,7 +54,7 @@ consulted when reporting in ``verbose`` mode:: nonpython $ py.test -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... 
collected 0 items / 1 errors ================================== ERRORS ================================== @@ -69,7 +69,7 @@ interesting to just look at the collection tree:: nonpython $ py.test --collectonly =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 0 items / 1 errors ================================== ERRORS ================================== diff --git a/doc/en/example/parametrize.txt b/doc/en/example/parametrize.txt index 9a7e00c7e..b9907924e 100644 --- a/doc/en/example/parametrize.txt +++ b/doc/en/example/parametrize.txt @@ -151,7 +151,7 @@ this is a fully self-contained example which you can run with:: $ py.test test_scenarios.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 4 items test_scenarios.py .... @@ -163,7 +163,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia $ py.test --collectonly test_scenarios.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 4 items @@ -225,7 +225,7 @@ Let's first see how it looks like at collection time:: $ py.test test_backends.py --collectonly =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 2 items @@ -240,7 +240,7 @@ And then when we run the test:: ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ - db = + db = def test_db_initialized(db): # a dummy test @@ -295,7 +295,7 @@ argument sets to use for each test function. Let's run it:: ================================= FAILURES ================================= ________________________ TestClass.test_equals[1-2] ________________________ - self = , a = 1, b = 2 + self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b diff --git a/doc/en/example/pythoncollection.txt b/doc/en/example/pythoncollection.txt index 20d5c6f57..3b085b994 100644 --- a/doc/en/example/pythoncollection.txt +++ b/doc/en/example/pythoncollection.txt @@ -43,7 +43,7 @@ then the test collection looks like this:: $ py.test --collectonly =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 2 items @@ -82,7 +82,7 @@ You can always peek at the collection tree without running tests like this:: . 
$ py.test --collectonly pythoncollection.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 3 items @@ -135,7 +135,7 @@ interpreters and will leave out the setup.py file:: $ py.test --collectonly =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 1 items diff --git a/doc/en/example/remoteinterp.txt b/doc/en/example/remoteinterp.txt index 80093e23c..1d8f183bb 100644 --- a/doc/en/example/remoteinterp.txt +++ b/doc/en/example/remoteinterp.txt @@ -63,8 +63,8 @@ That's it, we can now run the test:: $ py.test test_remoteinterpreter.py Traceback (most recent call last): File "/home/hpk/p/pytest/.tox/regen/bin/py.test", line 9, in - load_entry_point('pytest==2.3.0.dev19', 'console_scripts', 'py.test')() - File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 469, in main + load_entry_point('pytest==2.3.0.dev20', 'console_scripts', 'py.test')() + File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 473, in main config = _prepareconfig(args, plugins) File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 463, in _prepareconfig pluginmanager=_pluginmanager, args=args) @@ -98,7 +98,7 @@ That's it, we can now run the test:: self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport() File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/py/_path/local.py", line 532, in pyimport __import__(modname) - File "/tmp/doc-exec-261/conftest.py", line 2, in + File "/tmp/doc-exec-193/conftest.py", line 2, in from remoteinterpreter import RemoteInterpreter ImportError: No module named remoteinterpreter @@ -150,8 +150,8 @@ Running it yields:: $ py.test -q test_ssh.py -rs Traceback (most recent call last): File "/home/hpk/p/pytest/.tox/regen/bin/py.test", line 9, in - load_entry_point('pytest==2.3.0.dev19', 'console_scripts', 'py.test')() - File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 469, in main + load_entry_point('pytest==2.3.0.dev20', 'console_scripts', 'py.test')() + File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 473, in main config = _prepareconfig(args, plugins) File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/core.py", line 463, in _prepareconfig pluginmanager=_pluginmanager, args=args) @@ -185,7 +185,7 @@ Running it yields:: self._conftestpath2mod[conftestpath] = mod = conftestpath.pyimport() File "/home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/py/_path/local.py", line 532, in pyimport __import__(modname) - File "/tmp/doc-exec-261/conftest.py", line 2, in + File "/tmp/doc-exec-193/conftest.py", line 2, in from myapp import MyApp ImportError: No module named myapp diff --git a/doc/en/example/reportingdemo.txt b/doc/en/example/reportingdemo.txt index b374ec5dc..9a8c53f5f 100644 --- a/doc/en/example/reportingdemo.txt +++ b/doc/en/example/reportingdemo.txt @@ -13,7 +13,7 @@ get on the terminal - we are working on that): assertion $ py.test failure_demo.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 
collected 39 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF @@ -30,7 +30,7 @@ get on the terminal - we are working on that): failure_demo.py:15: AssertionError _________________________ TestFailing.test_simple __________________________ - self = + self = def test_simple(self): def f(): @@ -40,13 +40,13 @@ get on the terminal - we are working on that): > assert f() == g() E assert 42 == 43 - E + where 42 = () - E + and 43 = () + E + where 42 = () + E + and 43 = () failure_demo.py:28: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ - self = + self = def test_simple_multiline(self): otherfunc_multi( @@ -66,19 +66,19 @@ get on the terminal - we are working on that): failure_demo.py:11: AssertionError ___________________________ TestFailing.test_not ___________________________ - self = + self = def test_not(self): def f(): return 42 > assert not f() E assert not 42 - E + where 42 = () + E + where 42 = () failure_demo.py:38: AssertionError _________________ TestSpecialisedExplanations.test_eq_text _________________ - self = + self = def test_eq_text(self): > assert 'spam' == 'eggs' @@ -89,7 +89,7 @@ get on the terminal - we are working on that): failure_demo.py:42: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ - self = + self = def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' @@ -102,7 +102,7 @@ get on the terminal - we are working on that): failure_demo.py:45: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ - self = + self = def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -115,7 +115,7 @@ get on the terminal - we are working on that): failure_demo.py:48: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ - self = + self = def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 @@ -132,7 +132,7 @@ get on the terminal - we are working on that): failure_demo.py:53: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ - self = + self = def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 @@ -156,7 +156,7 @@ get on the terminal - we are working on that): failure_demo.py:58: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ - self = + self = def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] @@ -166,7 +166,7 @@ get on the terminal - we are working on that): failure_demo.py:61: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ - self = + self = def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 @@ -178,7 +178,7 @@ get on the terminal - we are working on that): failure_demo.py:66: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ - self = + self = def test_eq_dict(self): > assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2} @@ -191,7 +191,7 @@ get on the terminal - we are working on that): failure_demo.py:69: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ - self = + self = def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) @@ -207,7 +207,7 @@ get on the terminal - we are working on that): failure_demo.py:72: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ - self = + self = def test_eq_longer_list(self): > assert [1,2] == [1,2,3] @@ -217,7 +217,7 @@ get on the terminal - 
we are working on that): failure_demo.py:75: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ - self = + self = def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] @@ -226,7 +226,7 @@ get on the terminal - we are working on that): failure_demo.py:78: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ - self = + self = def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' @@ -244,7 +244,7 @@ get on the terminal - we are working on that): failure_demo.py:82: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ - self = + self = def test_not_in_text_single(self): text = 'single foo line' @@ -257,7 +257,7 @@ get on the terminal - we are working on that): failure_demo.py:86: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ - self = + self = def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 @@ -270,7 +270,7 @@ get on the terminal - we are working on that): failure_demo.py:90: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ - self = + self = def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 @@ -289,7 +289,7 @@ get on the terminal - we are working on that): i = Foo() > assert i.b == 2 E assert 1 == 2 - E + where 1 = .b + E + where 1 = .b failure_demo.py:101: AssertionError _________________________ test_attribute_instance __________________________ @@ -299,8 +299,8 @@ get on the terminal - we are working on that): b = 1 > assert Foo().b == 2 E assert 1 == 2 - E + where 1 = .b - E + where = () + E + where 1 = .b + E + where = () failure_demo.py:107: AssertionError __________________________ test_attribute_failure __________________________ @@ -316,7 +316,7 @@ get on the terminal - we are working on that): failure_demo.py:116: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - self = + self = def _get_b(self): > raise Exception('Failed to get attrib') @@ -332,15 +332,15 @@ get on the terminal - we are working on that): b = 2 > assert Foo().b == Bar().b E assert 1 == 2 - E + where 1 = .b - E + where = () - E + and 2 = .b - E + where = () + E + where 1 = .b + E + where = () + E + and 2 = .b + E + where = () failure_demo.py:124: AssertionError __________________________ TestRaises.test_raises __________________________ - self = + self = def test_raises(self): s = 'qwe' @@ -352,10 +352,10 @@ get on the terminal - we are working on that): > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:838>:1: ValueError + <0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:833>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ - self = + self = def test_raises_doesnt(self): > raises(IOError, "int('3')") @@ -364,7 +364,7 @@ get on the terminal - we are working on that): failure_demo.py:136: Failed __________________________ TestRaises.test_raise ___________________________ - self = + self = def test_raise(self): > raise ValueError("demo error") @@ -373,7 +373,7 @@ get on the terminal - we are working on that): failure_demo.py:139: ValueError ________________________ TestRaises.test_tupleerror ________________________ - self = + self = def test_tupleerror(self): > a,b = [1] 
@@ -382,7 +382,7 @@ get on the terminal - we are working on that): failure_demo.py:142: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ - self = + self = def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] @@ -395,7 +395,7 @@ get on the terminal - we are working on that): l is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ - self = + self = def test_some_error(self): > if namenotexi: @@ -423,7 +423,7 @@ get on the terminal - we are working on that): <2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ - self = + self = def test_complex_error(self): def f(): @@ -452,7 +452,7 @@ get on the terminal - we are working on that): failure_demo.py:5: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ - self = + self = def test_z1_unpack_error(self): l = [] @@ -462,7 +462,7 @@ get on the terminal - we are working on that): failure_demo.py:179: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ - self = + self = def test_z2_type_error(self): l = 3 @@ -472,19 +472,19 @@ get on the terminal - we are working on that): failure_demo.py:183: TypeError ______________________ TestMoreErrors.test_startswith ______________________ - self = + self = def test_startswith(self): s = "123" g = "456" > assert s.startswith(g) - E assert ('456') - E + where = '123'.startswith + E assert ('456') + E + where = '123'.startswith failure_demo.py:188: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ - self = + self = def test_startswith_nested(self): def f(): @@ -492,15 +492,15 @@ get on the terminal - we are working on that): def g(): return "456" > assert f().startswith(g()) - E assert ('456') - E + where = '123'.startswith - E + where '123' = () - E + and '456' = () + E assert ('456') + E + where = '123'.startswith + E + where '123' = () + E + and '456' = () failure_demo.py:195: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - self = + self = def test_global_func(self): > assert isinstance(globf(42), float) @@ -510,18 +510,18 @@ get on the terminal - we are working on that): failure_demo.py:198: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - self = + self = def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 - E + where 42 = .x + E + where 42 = .x failure_demo.py:202: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - self = + self = def test_compare(self): > assert globf(10) < 5 @@ -531,7 +531,7 @@ get on the terminal - we are working on that): failure_demo.py:205: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - self = + self = def test_try_finally(self): x = 1 @@ -540,4 +540,4 @@ get on the terminal - we are working on that): E assert 1 == 0 failure_demo.py:210: AssertionError - ======================== 39 failed in 0.15 seconds ========================= + ======================== 39 failed in 0.16 seconds ========================= diff --git a/doc/en/example/simple.txt b/doc/en/example/simple.txt index a97abc98a..030119306 100644 --- a/doc/en/example/simple.txt +++ b/doc/en/example/simple.txt @@ -106,7 +106,7 @@ directory with the above conftest.py:: $ py.test 
=========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 0 items ============================= in 0.00 seconds ============================= @@ -150,12 +150,12 @@ and when running it will see a skipped "slow" test:: $ py.test -rs # "-rs" means report details on the little 's' =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 2 items test_module.py .s ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-264/conftest.py:9: need --runslow option to run + SKIP [1] /tmp/doc-exec-195/conftest.py:9: need --runslow option to run =================== 1 passed, 1 skipped in 0.01 seconds ==================== @@ -163,7 +163,7 @@ Or run it including the ``slow`` marked test:: $ py.test --runslow =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 2 items test_module.py .. @@ -253,7 +253,7 @@ which will add the string to the test header accordingly:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 project deps: mylib-1.1 collected 0 items @@ -276,7 +276,7 @@ which will add info only when run with "--v":: $ py.test -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 -- /home/hpk/p/pytest/.tox/regen/bin/python info1: did you know that ... did you? collecting ... collected 0 items @@ -287,7 +287,7 @@ and nothing when run plainly:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 0 items ============================= in 0.00 seconds ============================= @@ -319,7 +319,7 @@ Now we can profile which test functions execute the slowest:: $ py.test --durations=3 =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 3 items test_some_are_slow.py ... diff --git a/doc/en/fixture.txt b/doc/en/fixture.txt index 8d870e2b0..db771518e 100644 --- a/doc/en/fixture.txt +++ b/doc/en/fixture.txt @@ -5,12 +5,12 @@ pytest fixtures: modular, explicit, scalable ======================================================== -.. versionadded:: 2.0, 2.3 +.. currentmodule:: _pytest.python + +.. versionadded:: 2.0/2.3 .. _`xUnit`: http://en.wikipedia.org/wiki/XUnit .. _`general purpose of test fixtures`: http://en.wikipedia.org/wiki/Test_fixture#Software -.. _`django`: https://www.djangoproject.com/ -.. _`pytest-django`: https://pypi.python.org/pytest-django .. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection#Definition pytest allows to create and use test fixtures in a modular and flexible @@ -31,69 +31,73 @@ with minimal effort. .. _`funcargs`: .. _`funcarg mechanism`: .. _`fixture function`: +.. 
_`@pytest.fixture`: +.. _`pytest.fixture`: -Fixtures as Function arguments +Fixtures as Function arguments (funcargs) ----------------------------------------- Test functions can receive fixture objects by naming them as an input -argument. For each argument name, a matching fixture -function will provide a fixture object. This mechanism was already -introduced with pytest-2.0 and is also called the *funcarg mechanism*. -It allows test functions to easily receive and work against specific -pre-initialized application objects without having to care about the -details of setup/cleanup procedures. It's a prime example of -`dependency injection`_ where fixture functions take the role of the -*injector* and test functions are the *consumers* of fixture objects. +argument. For each argument name, a fixture function with that name provides +a fixture object. Fixture functions are registered by marking them with +:py:func:`pytest.fixture`. Let's look at a simple self-contained test +module containing a fixture and a test function using it:: -Let's look at a simple self-contained test module containing -a fixture and a test function using it:: - - # content of ./test_fixturefuncarg.py + # content of ./test_smtpsimple.py import pytest @pytest.fixture - def myfuncarg(): - return 42 + def smtp(): + import smtplib + return smtplib.SMTP("merlinux.eu") - def test_function(myfuncarg): - assert myfuncarg == 17 # will fail + def test_ehlo(smtp): + response, msg = smtp.ehlo() + assert response == 250 + assert "merlinux" in msg + assert 0 # for demo purposes -Here, the ``test_function`` needs the ``myfuncarg`` fixture value. pytest -will discover and call the ``@pytest.fixture`` marked ``myfuncarg`` +Here, the ``test_function`` needs the ``smtp`` fixture value. pytest +will discover and call the ``@pytest.fixture`` marked ``smtp`` fixture function. Running the test looks like this:: - $ py.test test_fixturefuncarg.py + $ py.test test_smtpsimple.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 + plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov, timeout collected 1 items - test_fixturefuncarg.py F + test_smtpsimple.py F ================================= FAILURES ================================= - ______________________________ test_function _______________________________ + ________________________________ test_ehlo _________________________________ - myfuncarg = 42 + smtp = - def test_function(myfuncarg): - > assert myfuncarg == 17 # will fail - E assert 42 == 17 + def test_ehlo(smtp): + response, msg = smtp.ehlo() + assert response == 250 + assert "merlinux" in msg + > assert 0 # for demo purposes + E assert 0 - test_fixturefuncarg.py:8: AssertionError - ========================= 1 failed in 0.01 seconds ========================= + test_smtpsimple.py:12: AssertionError + ========================= 1 failed in 0.11 seconds ========================= -This shows that the test function was called with a ``myfuncarg`` -value of ``42`` and the assert fails as expected. Here is -how py.test comes to call the test function this way: +In the failure traceback we see that the test function was called with a +``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture +function. The test function fails on our deliberate ``assert 0``. Here is +an exact protocol of how py.test comes to call the test function this way: -1. 
pytest :ref:`finds ` the ``test_function`` because +1. pytest :ref:`finds ` the ``test_ehlo`` because of the ``test_`` prefix. The test function needs a function argument - named ``myfuncarg``. A matching fixture function is discovered by - looking for a fixture function named ``myfuncarg``. + named ``smtp``. A matching fixture function is discovered by + looking for a fixture-marked function named ``smtp``. -2. ``myfuncarg()`` is called to create a value ``42``. +2. ``smtp()`` is called to create an instance. -3. ``test_function(42)`` is now called and results in the above - reported exception because of the assertion mismatch. +3. ``test_ehlo()`` is called and fails in the last + line of the test function. Note that if you misspell a function argument or want to use one that isn't available, you'll see an error @@ -113,21 +117,36 @@ with a list of available function arguments. but is not advertised as the primary means of declaring fixture functions. +Funcargs a prime example of dependency injection +--------------------------------------------------- -Creating and using a session-shared fixture +When injecting fixtures to test functions, pytest-2.0 introduced the +term "funcargs" or "funcarg mechanism" which continues to be present +also in pytest-2.3 docs. It now refers to the specific case of injecting +fixture values to test functions by arguments. With pytest-2.3 there are +more possibilities to use fixtures but "funcargs" probably will remain +as the main way of dealing with fixtures. + +As the following examples show in more detail, funcargs allow test +functions to easily receive and work against specific pre-initialized +application objects without having to care about import/setup/cleanup +details. It's a prime example of `dependency injection`_ where fixture +functions take the role of the *injector* and test functions are the +*consumers* of fixture objects. + +Working with a session-shared fixture ----------------------------------------------------------------- -By means of a "scope" declaration, a fixture function will -only be invoked once per the specified scope. This allows to reduce the -number of expensive application object setups and thus helps to speed up -test runs. Typical examples are the setup of test databases or -establishing required subprocesses or network connections. - .. regendoc:wipe -Here is a simple example of a fixture function creating a shared -``smtplib.SMTP`` connection fixture which test functions from -any test module inside the directory of a ``conftest.py`` file may use:: +Fixtures requiring network access depend on connectivity and are +usually time-expensive to create. Extending the previous example, we +can add a ``scope='session'`` parameter to the ``smtp`` fixture function +to cause it to only be invoked once per session. Multiple test +functions will thus only involve a once-per-test session creation of the +fixture instance. 
Also, we now split the creation of the fixture into a +``conftest.py`` file which will automatically loaded when running a test +module:: # content of conftest.py import pytest @@ -137,9 +156,9 @@ any test module inside the directory of a ``conftest.py`` file may use:: def smtp(): return smtplib.SMTP("merlinux.eu") -The name of the fixture is ``smtp`` and you can access its result by +The name of the fixture again is ``smtp`` and you can access its result by listing the name ``smtp`` as an input parameter in any test or setup -function:: +function (in or below the directory where ``conftest.py`` is located):: # content of test_module.py @@ -157,24 +176,29 @@ function:: We deliberately insert failing ``assert 0`` statements in order to inspect what is going on and can now run the tests:: - $ py.test -q test_module.py - FF + $ py.test test_module.py + =========================== test session starts ============================ + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 + plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov, timeout + collected 2 items + + test_module.py FF + ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - assert "merlinux" in response[1] - > assert 0 # for demo purposes - E assert 0 + > assert "python" in response[1] + E assert 'python' in 'hq.merlinux.eu\nPIPELINING\nSIZE 25000000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN' - test_module.py:6: AssertionError + test_module.py:5: AssertionError ________________________________ test_noop _________________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -183,22 +207,25 @@ inspect what is going on and can now run the tests:: E assert 0 test_module.py:11: AssertionError + ========================= 2 failed in 0.12 seconds ========================= -you see the two ``assert 0`` failing and can also see that -the same (session-scoped) object was passed into the two test functions -because pytest shows the incoming arguments in the traceback. +You see the two ``assert 0`` failing and more importantly you can also see +that the same (session-scoped) ``smtp`` object was passed into the two +test functions because pytest shows the incoming argument values in the +traceback. As a result, the two test functions using ``smtp`` run as +quick as a single one because they reuse the same instance. Fixtures can interact with the requesting test context ------------------------------------------------------------- -By using the special :ref:`request` object, fixture functions can introspect -the function, class or module for which they are invoked and can -optionally register cleanup functions which are called when the last -test finished execution. +By accepting the special :py:class:`request ` argument, +fixture functions can introspect the function, class or module for which +they are invoked and can optionally register finalizing cleanup +functions which are called when the last test finished execution. 
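A minimal sketch of the finalization pattern described above, assuming the module-scoped ``smtp`` fixture from the surrounding hunks (the host name, print call and fixture body are illustrative only and not taken from this patch)::

    # content of conftest.py -- illustrative sketch, not part of this patch
    import smtplib
    import pytest

    @pytest.fixture(scope="module")
    def smtp(request):
        smtp = smtplib.SMTP("merlinux.eu")  # host assumed from the docs' example
        def fin():
            print ("finalizing %s" % smtp)
            smtp.close()
        # fin() is called once the last test using the fixture has finished
        request.addfinalizer(fin)
        return smtp

With ``scope="module"`` the connection is created once per test module and closed by the finalizer after the last test using it ran.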
Further extending the previous ``smtp`` fixture example, let's try to -read the server URL from the module namespace, use module-scoping and +read the server URL from the module namespace, also use module-caching and register a finalizer that closes the smtp connection after the last test finished execution:: @@ -221,7 +248,7 @@ using it has executed:: $ py.test -s -q --tb=no FF - finalizing + finalizing We see that the ``smtp`` instance is finalized after the two tests using it tests executed. If we had specified ``scope='function'`` @@ -233,7 +260,7 @@ server URL and has a test to verify the fixture picks it up:: # content of test_anothersmtp.py - smtpserver = "mail.python.org" # will be read by smtp fixture + smtpserver = "merlinux.eu" # will be read by smtp fixture def test_showhelo(smtp): assert 0, smtp.helo() @@ -246,29 +273,9 @@ Running it:: ______________________________ test_showhelo _______________________________ test_anothersmtp.py:5: in test_showhelo > assert 0, smtp.helo() - E AssertionError: (250, 'mail.python.org') + E AssertionError: (250, 'hq.merlinux.eu') -**Test classes, modules or whole projects can make use of -one or more fixtures**. All required fixture functions will execute -before a test from the specifying context executes. As You can use this -to make tests operate from a pre-initialized directory or with -certain environment variables or with pre-configured global application -settings. - -For example, the Django_ project requires database -initialization to be able to import from and use its model objects. -For that, the `pytest-django`_ plugin provides fixtures which your -project can then easily depend or extend on, simply by referencing the -name of the particular fixture. - - -Fixture functions have limited visilibity which depends on where they -are defined. If they are defined on a test class, only its test methods -may use it. A fixture defined in a module can only be used -from that test module. A fixture defined in a conftest.py file -can only be used by the tests below the directory of that file. -Lastly, plugins can define fixtures which are available across all -projects. +.. _`request`: :ref:pyclass:`_pytest.python.FixtureRequest` .. 
_`fixture-parametrize`: @@ -311,19 +318,18 @@ So let's just do another run:: ================================= FAILURES ================================= __________________________ test_ehlo[merlinux.eu] __________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - assert "merlinux" in response[1] - > assert 0 # for demo purposes - E assert 0 + > assert "python" in response[1] + E assert 'python' in 'hq.merlinux.eu\nPIPELINING\nSIZE 25000000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN' - test_module.py:6: AssertionError + test_module.py:5: AssertionError __________________________ test_noop[merlinux.eu] __________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -334,18 +340,19 @@ So let's just do another run:: test_module.py:11: AssertionError ________________________ test_ehlo[mail.python.org] ________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - > assert "merlinux" in response[1] - E assert 'merlinux' in 'mail.python.org\nSIZE 10240000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN' + assert "python" in response[1] + > assert 0 # for demo purposes + E assert 0 - test_module.py:5: AssertionError + test_module.py:6: AssertionError ________________________ test_noop[mail.python.org] ________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -355,15 +362,15 @@ So let's just do another run:: test_module.py:11: AssertionError -We now get four failures because we are running the two tests twice with -different ``smtp`` fixture instances. Note that with the -``mail.python.org`` connection the second test fails in ``test_ehlo`` -because it expects a specific server string. +We see that our two test functions each ran twice, against the different +``smtp`` instances. Note also, that with the ``mail.python.org`` +connection the second test fails in ``test_ehlo`` because a +different server string is expected than what arrived. .. _`interdependent fixtures`: -Using fixtures from a fixture function +Modularity: using fixtures from a fixture function ---------------------------------------------------------- You can not only use fixtures in test functions but fixture functions @@ -393,13 +400,15 @@ Here we declare an ``app`` fixture which receives the previously defined $ py.test -v test_appsetup.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 -- /home/hpk/venv/1/bin/python + cachedir: /tmp/doc-exec-135/.cache + plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov, timeout collecting ... collected 2 items test_appsetup.py:12: test_smtp_exists[merlinux.eu] PASSED test_appsetup.py:12: test_smtp_exists[mail.python.org] PASSED - ========================= 2 passed in 0.09 seconds ========================= + ========================= 2 passed in 0.08 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two different ``App`` instances and respective smtp servers. There is no @@ -425,8 +434,8 @@ before the next fixture instance is created. Among other things, this eases testing of applications which create and use global state. 
The following example uses two parametrized funcargs, one of which is -scoped on a per-module basis, and all the functions perform ``print`` call s -to show the flow of calls:: +scoped on a per-module basis, and all the functions perform ``print`` calls +to show the setup/teardown flow:: # content of test_module.py import pytest @@ -455,7 +464,9 @@ Let's run the tests in verbose mode and with looking at the print-output:: $ py.test -v -s test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 -- /home/hpk/venv/1/bin/python + cachedir: /tmp/doc-exec-135/.cache + plugins: xdist, bugzilla, cache, oejskit, cli, pep8, cov, timeout collecting ... collected 8 items test_module.py:16: test_0[1] PASSED @@ -467,7 +478,7 @@ Let's run the tests in verbose mode and with looking at the print-output:: test_module.py:20: test_2[1-mod2] PASSED test_module.py:20: test_2[2-mod2] PASSED - ========================= 8 passed in 0.01 seconds ========================= + ========================= 8 passed in 0.02 seconds ========================= test0 1 test0 2 create mod1 @@ -494,9 +505,10 @@ using fixtures from classes, modules or projects .. regendoc:wipe Sometimes test functions do not directly need access to a fixture object. -For example, tests may require to operate with an -empty directory as the current working directory. Here is how you can -can use the standard `tempfile `_ and pytest fixtures to +For example, tests may require to operate with an empty directory as the +current working directory but otherwise do not care for the concrete +directory. Here is how you can can use the standard `tempfile +`_ and pytest fixtures to achieve it. We separate the creation of the fixture into a conftest.py file:: @@ -552,6 +564,7 @@ into an ini-file:: [pytest] usefixtures = cleandir + .. _`autoactive fixtures`: autoactive fixtures (xUnit setup on steroids) @@ -596,14 +609,15 @@ self-contained implementation of this idea:: The class-level ``transact`` fixture is marked with *autoactive=true* which implies that all test methods in the class will use this fixture -without a need to specify it. +without a need to state it in the test function signature or with a +class-level ``usefixtures`` decorator. If we run it, we get two passing tests:: $ py.test -q .. -And here is how autoactive fixtures work in other scopes: +Here is how autoactive fixtures work in other scopes: - if an autoactive fixture is defined in a test module, all its test functions automatically use it. @@ -621,7 +635,7 @@ And here is how autoactive fixtures work in other scopes: Note that the above ``transact`` fixture may very well be a fixture that you want to make available in your project without having it generally active. The canonical way to do that is to put the transact definition -into a conftest.py file without using ``autoactive``:: +into a conftest.py file **without** using ``autoactive``:: # content of conftest.py @pytest.fixture() @@ -637,10 +651,10 @@ and then e.g. have a TestClass using it by declaring the need:: ... All test methods in this TestClass will use the transaction fixture while -other test classes or functions will not do so unless they also add -a ``transact`` reference. +other test classes or functions in the module will not use it unless +they also add a ``transact`` reference. 
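A rough sketch of that opt-in (the class and test names are hypothetical; only the ``transact`` reference and the class-level ``usefixtures`` marker mirror the text above)::

    # content of test_sample.py -- hypothetical sketch, not part of this patch
    import pytest

    @pytest.mark.usefixtures("transact")
    class TestTransactionalQueries:
        def test_row_roundtrip(self):
            # the non-autoactive ``transact`` fixture from conftest.py is
            # set up for every test method of this class; other classes in
            # the module remain unaffected
            pass

Naming ``transact`` as a test function argument would opt in a single test in the same way.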
-controlled visibility of fixture functions +Shifting (visibility of) fixture functions ---------------------------------------------------- If during implementing your tests you realize that you @@ -650,44 +664,3 @@ to a :ref:`conftest.py ` file or even separately installable fixtures functions starts at test classes, then test modules, then ``conftest.py`` files and finally builtin and third party plugins. -.. currentmodule:: _pytest.python - -.. _`@pytest.fixture`: -.. _`pytest.fixture`: - -``@pytest.fixture``: marking a fixture function --------------------------------------------------------------- - -The ``@pytest.fixture`` marker allows to - -* mark a function as a factory for fixtures, useable by test and other - fixture functions - -* declare a scope which determines the level of caching, i.e. how often - the factory will be called. Valid scopes are ``session``, ``module``, - ``class`` and ``function``. - -* define a list of parameters in order to run dependent tests multiple - times with different fixtures - -.. _`request`: - -``request``: interacting with test invocation context --------------------------------------------------------------- - -The ``request`` object may be received by fixture functions -and provides methods to: - -* to inspect attributes of the requesting test context, such as - ``function``, ``cls``, ``module``, ``session`` and the pytest - ``config`` object. A request object passed to a parametrized factory - will also carry a ``request.param`` object (A parametrized factory and - all of its dependent tests will be called with each of the factory-specified - ``params``). - -* to add finalizers/teardowns to be invoked when the last - test of the requesting test context executes - -.. autoclass:: _pytest.python.FixtureRequest() - :members: - diff --git a/doc/en/funcarg_compare.txt b/doc/en/funcarg_compare.txt index e8bd12c10..5c925fbff 100644 --- a/doc/en/funcarg_compare.txt +++ b/doc/en/funcarg_compare.txt @@ -102,8 +102,8 @@ the tests requiring "db" will run twice as well. The "mysql" and This new way of parametrizing funcarg factories should in many cases allow to re-use already written factories because effectively -``request.param`` are already the parametrization attribute for test -functions/classes were parametrized via +``request.param`` was already used when test functions/classes were +parametrized via :py:func:`~_pytest.python.Metafunc.parametrize(indirect=True)` calls. Of course it's perfectly fine to combine parametrization and scoping:: @@ -193,18 +193,19 @@ overview of fixture management in your project. Conclusion and compatibility notes --------------------------------------------------------- -**Fixtures** were originally introduced to pytest-2.0. In pytest-2.3 -the mechanism was extended and refined: +**funcargs** were originally introduced to pytest-2.0. In pytest-2.3 +the mechanism was extended and refined and is now described as +fixtures: * previously funcarg factories were specified with a special ``pytest_funcarg__NAME`` prefix instead of using the ``@pytest.fixture`` decorator. -* Factories received a :ref:`request ` object which managed caching through +* Factories received a ``request`` object which managed caching through ``request.cached_setup()`` calls and allowed using other funcargs via ``request.getfuncargvalue()`` calls. These intricate APIs made it hard to do proper parametrization and implement resource caching. 
The
- new ``@pytest.fixture`` decorator allows to simply declare the scope
+ new :py:func:`pytest.fixture` decorator allows to declare the scope
and let pytest figure things out for you.
* if you used parametrization and funcarg factories which made use of
diff --git a/doc/en/getting-started.txt b/doc/en/getting-started.txt
index 1583bd729..f8f4fe884 100644
--- a/doc/en/getting-started.txt
+++ b/doc/en/getting-started.txt
@@ -22,7 +22,7 @@ Installation options::
To check your installation has installed the correct version::
$ py.test --version
- This is py.test version 2.3.0.dev19, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc
+ This is py.test version 2.3.0.dev20, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc
If you get an error checkout :ref:`installation issues`.
@@ -44,7 +44,7 @@ That's it. You can execute the test function now::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19
+ platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20
collected 1 items
test_sample.py F
@@ -121,7 +121,7 @@ run the module by passing its filename::
================================= FAILURES =================================
____________________________ TestClass.test_two ____________________________
- self =
+ self =
def test_two(self):
x = "hello"
@@ -156,7 +156,7 @@ before performing the test function call. Let's just run it::
================================= FAILURES =================================
_____________________________ test_needsfiles ______________________________
- tmpdir = local('/tmp/pytest-168/test_needsfiles0')
+ tmpdir = local('/tmp/pytest-46/test_needsfiles0')
def test_needsfiles(tmpdir):
print tmpdir
@@ -165,7 +165,7 @@ before performing the test function call. Let's just run it::
test_tmpdir.py:3: AssertionError
----------------------------- Captured stdout ------------------------------
- /tmp/pytest-168/test_needsfiles0
+ /tmp/pytest-46/test_needsfiles0
Before the test runs, a unique-per-test-invocation temporary directory
was created. More info at :ref:`tmpdir handling`.
diff --git a/doc/en/index.txt b/doc/en/index.txt index b70926df9..fafe5e334 100644 --- a/doc/en/index.txt +++ b/doc/en/index.txt @@ -1,48 +1,48 @@ -pytest: makes you a better programmer +pytest: makes you write better programs ============================================= -- **a mature full-featured Python testing tool** +**a mature full-featured Python testing tool** - runs on Posix/Windows, Python 2.4-3.3, PyPy and Jython-2.5.1 - :ref:`comprehensive online ` and `PDF documentation `_ - used in :ref:`many projects and organisations `, in test suites ranging from 10 to 10s of thousands of tests - comes with many :ref:`tested examples ` - - supports :ref:`good integration practises ` -- **provides no-boilerplate testing** +**provides easy no-boilerplate testing** - makes it :ref:`easy to get started `, - - refined :ref:`usage options ` - :ref:`assert with the assert statement` - helpful :ref:`traceback and failing assertion reporting ` - allows :ref:`print debugging ` and :ref:`the capturing of standard output during test execution ` - supports :pep:`8` compliant coding styles in tests + - refined :ref:`usage options ` -- **supports functional testing and complex test setups** +**scales from simple unit to complex functional testing** - (new in 2.3) :ref:`modular parametrizeable fixtures ` - - :ref:`marking and test selection ` + - :ref:`mark` - :ref:`parametrized test functions ` - - advanced :ref:`skip and xfail` + - :ref:`skipping` - can :ref:`distribute tests to multiple CPUs ` through :ref:`xdist plugin ` - can :ref:`continuously re-run failing tests ` - many :ref:`builtin helpers ` - flexible :ref:`Python test discovery` -- **integrates many common testing methods** +**integrates many common testing methods**: - can run many ``nose``, ``unittest.py`` and ``doctest.py`` style tests, including running testcases made for Django and trial + - supports :ref:`good integration practises ` - supports extended :ref:`xUnit style setup ` - supports domain-specific :ref:`non-python tests` - supports the generation of testing coverage reports - `Javascript unit- and functional testing`_ -- **extensive plugin and customization system** +**extensive plugin and customization system**: - all collection, reporting, running aspects are delegated to hook functions - customizations can be per-directory, per-project or per PyPI released plugins diff --git a/doc/en/parametrize.txt b/doc/en/parametrize.txt index a0ee212ba..0756fef01 100644 --- a/doc/en/parametrize.txt +++ b/doc/en/parametrize.txt @@ -9,7 +9,7 @@ Parametrizing fixtures and test functions pytest supports test parametrization in several well-integrated ways: -- :ref:`@pytest.fixture` allows to define :ref:`parametrization +- :py:func:`pytest.fixture` allows to define :ref:`parametrization at the level of fixture functions `. * `@pytest.mark.parametrize`_ allows to define parametrization at the @@ -21,6 +21,7 @@ pytest supports test parametrization in several well-integrated ways: .. 
_`@pytest.mark.parametrize`:
+
``@pytest.mark.parametrize``: parametrizing test functions
---------------------------------------------------------------------
@@ -43,12 +44,17 @@ to an expected output::
def test_eval(input, expected):
assert eval(input) == expected
-The ``@parametrize`` decorator defines three different argument sets for the
-two ``(input, output)`` arguments of ``test_eval`` function so the latter
-will be run three times::
+Here, the ``@parametrize`` decorator defines three different argument
+sets for the two ``(input, output)`` arguments of the ``test_eval`` function
+which will thus run three times::
- $ py.test -q
- ..F
+ $ py.test
+ =========================== test session starts ============================
+ platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20
+ collected 3 items
+
+ test_expectation.py ..F
+
================================= FAILURES =================================
____________________________ test_eval[6*9-42] _____________________________
@@ -65,11 +71,12 @@ will be run three times::
E + where 54 = eval('6*9')
test_expectation.py:8: AssertionError
+ ==================== 1 failed, 2 passed in 0.01 seconds ====================
As expected only one pair of input/output values fails the simple test function.
-As usual you can see the ``input`` and ``output`` values in the traceback.
+And as usual with test function arguments, you can see the ``input`` and ``output`` values in the traceback.
-Note that there are various ways how you can mark groups of functions,
+Note that there are ways to mark a class or a module,
see :ref:`mark`.
@@ -81,13 +88,14 @@ Basic ``pytest_generate_tests`` example
Sometimes you may want to implement your own parametrization scheme
or implement some dynamism for determining the parameters or scope
of a fixture. For this, you can use the ``pytest_generate_tests`` hook
-which is called for every test function. Through the special `metafunc`
-object you can inspect the requesting test context and, most importantly,
-you can call ``metafunc.parametrize()`` to pass in parametrizatin.
-For example, let's say we want to execute a test that takes some string
-input and we want to pass that in with a command line option
-``--stringinput=value``. Let's first write a simple test accepting
-a ``stringinput`` fixture function argument::
+which is called when collecting a test function. Through the passed in
+`metafunc` object you can inspect the requesting test context and, most
+importantly, you can call ``metafunc.parametrize()`` to cause
+parametrization.
+
+For example, let's say we want to run a test taking string inputs which
+we want to set via a new py.test command line option. Let's first write
+a simple test accepting a ``stringinput`` fixture function argument::
# content of test_strings.py
@@ -100,7 +108,7 @@ command line option and the parametrization of our test function::
# content of conftest.py
def pytest_addoption(parser):
- parser.addoption("--stringinput", action="append",
+ parser.addoption("--stringinput", action="append", default=[],
help="list of stringinputs to pass to test functions")
def pytest_generate_tests(metafunc):
@@ -113,7 +121,7 @@ If we now pass two stringinput values, our test will run twice::
$ py.test -q --stringinput="hello" --stringinput="world" test_strings.py
..
-Let's run with a stringinput that will lead to an error::
+Let's also run with a stringinput that will lead to a failing test::
$ py.test -q --stringinput="!"
test_strings.py F
@@ -124,12 +132,21 @@ Let's run with a stringinput that will lead to an error::
def test_valid_string(stringinput):
> assert stringinput.isalpha()
- E assert ()
- E + where = '!'.isalpha
+ E assert ()
+ E + where = '!'.isalpha
test_strings.py:3: AssertionError
-As expected our test function will error out.
+As expected, our test function fails.
+
+If you don't specify a stringinput it will be skipped because
+``metafunc.parametrize()`` will be called with an empty parameter
+list::
+
+ $ py.test -q -rs test_strings.py
+ s
+ ========================= short test summary info ==========================
+ SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:949: got empty parameter set, function test_valid_string at /tmp/doc-exec-161/test_strings.py:1
For further examples, you might want to look at :ref:`more parametrization examples `.
diff --git a/doc/en/plugins.txt b/doc/en/plugins.txt
index 2c69a4adc..ba5bdbac8 100644
--- a/doc/en/plugins.txt
+++ b/doc/en/plugins.txt
@@ -3,7 +3,7 @@
Working with plugins and conftest files
=============================================
-py.test implements all aspects of configuration, collection, running and reporting by calling `well specified hooks`_. Virtually any Python module can be registered as a plugin. It can implement any number of hook functions (usually two or three) which all have a ``pytest_`` prefix, making hook functions easy to distinguish and find. There are three basic locations types:
+py.test implements all aspects of configuration, collection, running and reporting by calling `well specified hooks`_. Virtually any Python module can be registered as a plugin. It can implement any number of hook functions (usually two or three) which all have a ``pytest_`` prefix, making hook functions easy to distinguish and find. There are three basic location types:
* `builtin plugins`_: loaded from py.test's own ``pytest/plugin`` directory.
* `external plugins`_: modules discovered through `setuptools entry points`_
@@ -64,14 +64,28 @@ tool, for example::
pip uninstall pytest-NAME
If a plugin is installed, py.test automatically finds and integrates it,
-there is no need to activate it. Here is a list of known plugins:
+there is no need to activate it. Here is an initial list of known plugins:
+
+.. _`django`: https://www.djangoproject.com/
+
+* `pytest-django `_: write tests
+ for `django`_ apps, using pytest integration.
* `pytest-capturelog `_:
to capture and assert about messages from the logging module
* `pytest-xdist `_:
- to distribute tests to CPUs and remote hosts, looponfailing mode,
- see also :ref:`xdist`
+ to distribute tests to CPUs and remote hosts, to run in boxed
+ mode which allows to survive segmentation faults, to run in
+ looponfailing mode, automatically re-running failing tests
+ on file changes, see also :ref:`xdist`
+
+* `pytest-timeout `_:
+ to timeout tests based on function marks or global definitions.
+
+* `pytest-cache `_:
+ to interactively re-run failing tests and help other plugins to
+ store test run information across invocations.
* `pytest-cov `_:
coverage reporting, compatible with distributed testing
@@ -81,7 +95,6 @@ there is no need to activate it. Here is a list of known plugins:
* `oejskit `_:
a plugin to run javascript unittests in life browsers
- (**version 0.8.9 not compatible with pytest-2.0**)
You may discover more plugins through a `pytest- pypi.python.org search`_.
@@ -98,8 +111,8 @@ If you want to write a plugin, there are many real-life examples you can copy from: * a custom collection example plugin: :ref:`yaml plugin` -* around 20 `builtin plugins`_ which comprise py.test's own functionality -* around 10 `external plugins`_ providing additional features +* around 20 `builtin plugins`_ which provide py.test's own functionality +* many `external plugins`_ providing additional features All of these plugins implement the documented `well specified hooks`_ to extend and add functionality. diff --git a/doc/en/skipping.txt b/doc/en/skipping.txt index 3a7e6a299..93c87aa09 100644 --- a/doc/en/skipping.txt +++ b/doc/en/skipping.txt @@ -1,5 +1,7 @@ .. _`skip and xfail`: +.. _skipping: + Skip and xfail: dealing with tests that can not succeed ===================================================================== @@ -130,7 +132,7 @@ Running it with the report-on-xfail option gives this output:: example $ py.test -rx xfail_demo.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 6 items xfail_demo.py xxxxxx diff --git a/doc/en/tmpdir.txt b/doc/en/tmpdir.txt index 40804e69e..73acc5a29 100644 --- a/doc/en/tmpdir.txt +++ b/doc/en/tmpdir.txt @@ -28,7 +28,7 @@ Running this would result in a passed test except for the last $ py.test test_tmpdir.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 1 items test_tmpdir.py F @@ -36,7 +36,7 @@ Running this would result in a passed test except for the last ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ - tmpdir = local('/tmp/pytest-169/test_create_file0') + tmpdir = local('/tmp/pytest-47/test_create_file0') def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") @@ -47,7 +47,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmpdir.py:7: AssertionError - ========================= 1 failed in 0.02 seconds ========================= + ========================= 1 failed in 0.01 seconds ========================= .. _`base temporary directory`: diff --git a/doc/en/unittest.txt b/doc/en/unittest.txt index 20be826a1..36bcc50d4 100644 --- a/doc/en/unittest.txt +++ b/doc/en/unittest.txt @@ -4,7 +4,7 @@ Support for unittest.TestCase / Integration of fixtures ===================================================================== -py.test has limited support for running Python `unittest.py style`_ tests. +py.test has support for running Python `unittest.py style`_ tests. It will automatically collect ``unittest.TestCase`` subclasses and their ``test`` methods in test files. It will invoke ``setUp/tearDown`` methods but also perform py.test's standard ways @@ -24,7 +24,7 @@ Running it yields:: $ py.test test_unittest.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev19 + platform linux2 -- Python 2.7.3 -- pytest-2.3.0.dev20 collected 1 items test_unittest.py F diff --git a/doc/en/usage.txt b/doc/en/usage.txt index fd3594fc0..9c88a9f26 100644 --- a/doc/en/usage.txt +++ b/doc/en/usage.txt @@ -150,6 +150,8 @@ for example ``-x`` if you only want to send one particular failure. 
Currently only pasting to the http://bpaste.net service is implemented. +.. _`pytest.main-usage`: + Calling pytest from Python code ----------------------------------------------------
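You can invoke ``pytest.main()`` directly from Python code; it performs an
in-process test run and returns the exit code. Below is a minimal sketch of
such an invocation -- the ``MyPlugin`` class and the ``mytests`` directory
are only illustrative assumptions, not part of this patch::

    # content of myinvoke.py -- illustrative sketch only
    import pytest

    class MyPlugin:
        # any standard pytest_* hook may be implemented on a plugin object
        def pytest_sessionfinish(self):
            print "*** test session finished"

    # run the tests found under 'mytests', stop after the first failure (-x),
    # and auto-register the plugin object for this in-process run
    exitcode = pytest.main(["-x", "mytests"], plugins=[MyPlugin()])

Running ``python myinvoke.py`` would then behave like the equivalent command
line invocation, except that the plugin object is registered programmatically
and the exit code is available to the calling code.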