introduce metafunc.parametrize() and @pytest.mark.parametrize with examples. deprecate metafunc.addcall()

parent 48a6a504b6
commit ec0565fac5
@@ -1,6 +1,13 @@
Changes between 2.1.3 and XXX 2.2.0
----------------------------------------

- add an all-powerful metafunc.parametrize function which allows to
  parametrize test function arguments in multiple steps and therefore
  from independent plugins and places.
- add a @pytest.mark.parametrize helper which allows to easily
  call a test function with different argument values
- Add examples to the "parametrize" example page, including a quick port
  of Test scenarios and the new parametrize function and decorator.
- introduce registration for "pytest.mark.*" helpers via ini-files
  or through plugin hooks. Also introduce a "--strict" option which
  will treat unregistered markers as errors
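For orientation, here is a minimal sketch of the two new entry points described in the changelog above; the argument name ``arg1`` and its values are illustrative only, and a fuller, runnable example appears in the documentation changes further down. Use one form or the other for a given argument; parametrizing the same name twice raises a duplicate error::

    import pytest

    # decorator form: parametrize a single argument directly on the test
    @pytest.mark.parametrize("arg1", [1, 2])
    def test_decorated(arg1):
        assert arg1 in (1, 2)

    # hook form (e.g. in conftest.py): the equivalent effect via the new
    # metafunc.parametrize() helper
    def pytest_generate_tests(metafunc):
        if "arg1" in metafunc.funcargnames:
            metafunc.parametrize("arg1", [1, 2])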
@@ -1,2 +1,2 @@
#
__version__ = '2.2.0.dev7'
__version__ = '2.2.0.dev8'
@@ -4,6 +4,7 @@ import inspect
import sys
import pytest
from py._code.code import TerminalRepr
from _pytest.monkeypatch import monkeypatch

import _pytest
cutdir = py.path.local(_pytest.__file__).dirpath()
@@ -26,6 +27,23 @@ def pytest_cmdline_main(config):
        showfuncargs(config)
        return 0


def pytest_generate_tests(metafunc):
    try:
        param = metafunc.function.parametrize
    except AttributeError:
        return
    metafunc.parametrize(*param.args, **param.kwargs)

def pytest_configure(config):
    config.addinivalue_line("markers",
        "parametrize(argnames, argvalues): call a test function multiple "
        "times passing in multiple different argument value sets. Example: "
        "@parametrize(arg1, [1,2]) would lead to two calls of the decorated "
        "test function, one with arg1=1 and another with arg1=2."
    )

@pytest.mark.trylast
def pytest_namespace():
    raises.Exception = pytest.fail.Exception
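The ``pytest_configure`` hook above registers the builtin ``parametrize`` marker via ``config.addinivalue_line``. Projects can register their own markers the same way so that the new ``--strict`` option accepts them; a minimal sketch, where the ``webtest`` marker name is only an example::

    # content of a project's conftest.py (sketch)
    def pytest_configure(config):
        # register a custom marker so that --strict does not treat it as an error
        config.addinivalue_line("markers",
            "webtest: mark a test as a web-facing integration test")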
@@ -425,6 +443,7 @@ class Function(FunctionMixin, pytest.Item):
                "yielded functions (deprecated) cannot have funcargs")
        else:
            if callspec is not None:
                self.callspec = callspec
                self.funcargs = callspec.funcargs or {}
                self._genid = callspec.id
                if hasattr(callspec, "param"):
@@ -501,15 +520,59 @@ def fillfuncargs(function):
        request._fillfuncargs()

_notexists = object()

class CallSpec:
    def __init__(self, funcargs, id, param):
        self.funcargs = funcargs
        self.id = id

class CallSpec2(object):
    def __init__(self, metafunc):
        self.metafunc = metafunc
        self.funcargs = {}
        self._idlist = []
        self.params = {}
        self._globalid = _notexists
        self._globalid_args = set()
        self._globalparam = _notexists

    def copy(self, metafunc):
        cs = CallSpec2(self.metafunc)
        cs.funcargs.update(self.funcargs)
        cs.params.update(self.params)
        cs._idlist = list(self._idlist)
        cs._globalid = self._globalid
        cs._globalid_args = self._globalid_args
        cs._globalparam = self._globalparam
        return cs

    def _checkargnotcontained(self, arg):
        if arg in self.params or arg in self.funcargs:
            raise ValueError("duplicate %r" %(arg,))

    def getparam(self, name):
        try:
            return self.params[name]
        except KeyError:
            if self._globalparam is _notexists:
                raise ValueError(name)
            return self._globalparam

    @property
    def id(self):
        return "-".join(filter(None, self._idlist))

    def setmulti(self, valtype, argnames, valset, id):
        for arg,val in zip(argnames, valset):
            self._checkargnotcontained(arg)
            getattr(self, valtype)[arg] = val
        self._idlist.append(id)

    def setall(self, funcargs, id, param):
        for x in funcargs:
            self._checkargnotcontained(x)
        self.funcargs.update(funcargs)
        if id is not _notexists:
            self._idlist.append(id)
        if param is not _notexists:
            self.param = param
    def __repr__(self):
        return "<CallSpec id=%r param=%r funcargs=%r>" %(
            self.id, getattr(self, 'param', '?'), self.funcargs)
            assert self._globalparam is _notexists
            self._globalparam = param


class Metafunc:
    def __init__(self, function, config=None, cls=None, module=None):
@@ -523,31 +586,69 @@ class Metafunc:
        self._calls = []
        self._ids = py.builtin.set()

    def parametrize(self, argnames, argvalues, indirect=False, ids=None):
        """ parametrize calls to the underlying test function during
        the collection phase of a test run. parametrize may be called
        multiple times for disjunct argnames sets.

        :arg argnames: an argument name or a list of argument names

        :arg argvalues: a list of values for a single argument if argnames
            specified a single argument only or a list of tuples which specify
            values for the multiple argument names.

        :arg indirect: if True each argvalue corresponding to an argument will be
            passed as request.param to the respective funcarg factory so that
            it can perform more expensive setups during the setup phase of
            a test rather than at collection time (which is the default).

        :arg ids: list of string ids corresponding to the (list of) argvalues
            so that they are part of the test id. If no ids are provided
            they will be generated automatically from the argvalues.
        """
        if not isinstance(argnames, (tuple, list)):
            argnames = (argnames,)
            argvalues = [(val,) for val in argvalues]
        for arg in argnames:
            if arg not in self.funcargnames:
                raise ValueError("%r has no argument %r" %(self.function, arg))
        valtype = indirect and "params" or "funcargs"
        if not ids:
            idmaker = IDMaker()
            ids = list(map(idmaker, argvalues))
        newcalls = []
        for callspec in self._calls or [CallSpec2(self)]:
            for i, valset in enumerate(argvalues):
                assert len(valset) == len(argnames)
                newcallspec = callspec.copy(self)
                newcallspec.setmulti(valtype, argnames, valset, ids[i])
                newcalls.append(newcallspec)
        self._calls = newcalls

    def addcall(self, funcargs=None, id=_notexists, param=_notexists):
        """ add a new call to the underlying test function during the
        collection phase of a test run. Note that request.addcall() is
        """ (deprecated, use parametrize) add a new call to the underlying
        test function during
        the collection phase of a test run. Note that request.addcall() is
        called during the test collection phase prior and independently
        to actual test execution. Therefore you should perform setup
        of resources in a funcarg factory which can be instrumented
        with the ``param``.
        to actual test execution. You should only use addcall()
        if you need to specify multiple arguments of a test function

        :arg funcargs: argument keyword dictionary used when invoking
            the test function.

        :arg id: used for reporting and identification purposes. If you
            don't supply an `id` the length of the currently
            list of calls to the test function will be used.
            don't supply an `id` an automatic unique id will be generated.

        :arg param: will be exposed to a later funcarg factory invocation
            through the ``request.param`` attribute. It allows to
            defer test fixture setup activities to when an actual
            test is run.
        :arg param: a parameter which will be exposed to a later funcarg factory
            invocation through the ``request.param`` attribute.
        """
        assert funcargs is None or isinstance(funcargs, dict)
        if funcargs is not None:
            for name in funcargs:
                if name not in self.funcargnames:
                    pytest.fail("funcarg %r not used in this function." % name)
        else:
            funcargs = {}
        if id is None:
            raise ValueError("id=None not allowed")
        if id is _notexists:
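To illustrate the signature documented above, here is a hedged sketch of calling ``parametrize()`` from a ``pytest_generate_tests`` hook, once plainly and once with ``indirect`` and ``ids``; the argument names ``speed`` and ``basedir`` are purely illustrative::

    def pytest_generate_tests(metafunc):
        if "speed" in metafunc.funcargnames:
            # plain parametrization: the values become funcarg values directly
            metafunc.parametrize("speed", [10, 20], ids=["slow", "fast"])
        if "basedir" in metafunc.funcargnames:
            # indirect parametrization: each value arrives as request.param
            # in the funcarg factory below, so expensive setup happens at
            # test run time rather than at collection time
            metafunc.parametrize("basedir", ["small", "large"], indirect=True)

    def pytest_funcarg__basedir(request):
        # request.param is "small" or "large" here; a real factory would
        # build an actual directory layout from it
        return "/tmp/layout-%s" % request.param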
@@ -556,11 +657,26 @@ class Metafunc:
        if id in self._ids:
            raise ValueError("duplicate id %r" % id)
        self._ids.add(id)
        self._calls.append(CallSpec(funcargs, id, param))

        cs = CallSpec2(self)
        cs.setall(funcargs, id, param)
        self._calls.append(cs)

class IDMaker:
    def __init__(self):
        self.counter = 0
    def __call__(self, valset):
        l = []
        for val in valset:
            if not isinstance(val, (int, str)):
                val = "."+str(self.counter)
            self.counter += 1
            l.append(str(val))
        return "-".join(l)

class FuncargRequest:
    """ A request for function arguments from a test function.

    Note that there is an optional ``param`` attribute in case
    there was an invocation to metafunc.addcall(param=...).
    If no such call was done in a ``pytest_generate_tests``
@@ -693,11 +809,18 @@ class FuncargRequest:
            self._raiselookupfailed(argname)
        funcargfactory = self._name2factory[argname].pop()
        oldarg = self._currentarg
        self._currentarg = argname
        mp = monkeypatch()
        mp.setattr(self, '_currentarg', argname)
        try:
            param = self._pyfuncitem.callspec.getparam(argname)
        except (AttributeError, ValueError):
            pass
        else:
            mp.setattr(self, 'param', param, raising=False)
        try:
            self._funcargs[argname] = res = funcargfactory(request=self)
        finally:
            self._currentarg = oldarg
            mp.undo()
        return res

    def _getscopeitem(self, scope):
@@ -1,19 +1,25 @@
py.test 2.2.0: improved test markers and duration profiling
===========================================================================

pytest-2.2.0 is a quite (*) backward compatible release of the popular
py.test testing tool. It includes the following new features:
pytest-2.2.0 is a quite [1] backward compatible release of the popular
py.test testing tool. There are a couple of new features:

* new "--duration=N" option showing the N slowest test execution
* "--duration=N" option showing the N slowest test execution
  or setup/teardown calls.

* new "-m markexpr" option for selecting tests according to their mark
* @pytest.mark.parametrize decorator for running test functions
  with multiple values and a new more powerful metafunc.parametrize()
  helper to be used from pytest_generate_tests. Multiple parametrize
  functions can now be invoked for the same test function.

* new ini-variable for registering test markers and a "--strict"
  option that will error out if you are using unregistered markers
* "-m markexpr" option for selecting tests according to their mark and
  a new "markers" ini-variable for registering test markers. The new "--strict"
  option will bail out with an error if you are using unregistered markers.

Usage of improved parametrize is documented in examples at
http://pytest.org/latest/example/parametrize.html
Usage of the improved marking mechanism is illustrated by a couple
of initial examples, see XXX
of initial examples, see http://pytest.org/latest/example/markers.html

Besides there is the usual set of bug fixes along with a cleanup of
pytest's own test suite allowing it to run on a wider range of environments.
@@ -27,8 +33,15 @@ If you want to install or upgrade pytest you might just type::
    pip install -U pytest # or
    easy_install -U pytest

Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, XXX for their
help and feedback on various issues.

(*) incompatible changes:
best,
holger krekel


[1] notes on incompatibility
------------------------------

* You need a new version of the pytest-xdist plugin (1.7) for distributing
  test runs.
@@ -40,9 +53,3 @@ If you want to install or upgrade pytest you might just type::
  most code probably "just" works because the hook was already called
  for failing setup/teardown phases of a test.

Thanks to Ronny Pfannschmidt, David Burns, Jeff Donner, Daniel Nouri, XXX for their
help and feedback on various issues.

best,
holger krekel
@@ -7,13 +7,11 @@ import py
pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8']

def pytest_generate_tests(metafunc):
    if 'python1' in metafunc.funcargnames:
        assert 'python2' in metafunc.funcargnames
        for obj in metafunc.function.multiarg.kwargs['obj']:
            for py1 in pythonlist:
                for py2 in pythonlist:
                    metafunc.addcall(id="%s-%s-%s" % (py1, py2, obj),
                                     param=(py1, py2, obj))
    for arg in metafunc.funcargnames:
        if arg.startswith("python"):
            metafunc.parametrize(arg, pythonlist, indirect=True)
        elif arg == "obj":
            metafunc.parametrize("obj", metafunc.function.multiarg.kwargs['obj'])

@py.test.mark.multiarg(obj=[42, {}, {1:3},])
def test_basic_objects(python1, python2, obj):
@@ -23,14 +21,11 @@ def test_basic_objects(python1, python2, obj):
def pytest_funcarg__python1(request):
    tmpdir = request.getfuncargvalue("tmpdir")
    picklefile = tmpdir.join("data.pickle")
    return Python(request.param[0], picklefile)
    return Python(request.param, picklefile)

def pytest_funcarg__python2(request):
    python1 = request.getfuncargvalue("python1")
    return Python(request.param[1], python1.picklefile)

def pytest_funcarg__obj(request):
    return request.param[2]
    return Python(request.param, python1.picklefile)

class Python:
    def __init__(self, version, picklefile):
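The hunk ends inside the ``Python`` helper class, but the essence of the change is already visible: the nested loops over ``pythonlist`` disappear because calling ``parametrize()`` once per argument multiplies out the combinations automatically, and each factory now receives only its own value via ``request.param``. A tiny self-contained sketch of that multiplication behaviour, with made-up argument names ``a`` and ``b``::

    def pytest_generate_tests(metafunc):
        # two independent parametrize calls -> 2 * 2 = 4 test invocations
        metafunc.parametrize("a", [0, 1])
        metafunc.parametrize("b", ["x", "y"])

    def test_combinations(a, b):
        # runs as [0-x], [0-y], [1-x] and [1-y]
        assert a in (0, 1) and b in ("x", "y")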
@@ -4,18 +4,69 @@
Parametrizing tests
=================================================

py.test allows to easily implement your own custom
parametrization scheme for tests. Here we provide
some examples for inspiration and re-use.
.. currentmodule:: _pytest.python

py.test allows to easily parametrize test functions.
In the following we provide some examples using
the builtin mechanisms.

.. _parametrizemark:

simple "decorator" parametrization of a test function
----------------------------------------------------------------------------

.. versionadded:: 2.2

The builtin ``parametrize`` marker allows you to easily write generic
test functions that will be invoked with multiple input/output values::

    # content of test_expectation.py
    import pytest
    @pytest.mark.parametrize(("input", "expected"), [
        ("3+5", 8),
        ("2+4", 6),
        ("6*9", 42),
    ])
    def test_eval(input, expected):
        assert eval(input) == expected

Here we parametrize two arguments of the test function so that the test
function is called three times. Let's run it::

    $ py.test -q
    collecting ... collected 3 items
    ..F
    =================================== FAILURES ===================================
    ______________________________ test_eval[6*9-42] _______________________________

    input = '6*9', expected = 42

        @pytest.mark.parametrize(("input", "expected"), [
            ("3+5", 8),
            ("2+4", 6),
            ("6*9", 42),
        ])
        def test_eval(input, expected):
    >       assert eval(input) == expected
    E       assert 54 == 42
    E        +  where 54 = eval('6*9')

    test_expectation.py:9: AssertionError
    1 failed, 2 passed in 0.03 seconds

As expected only one pair of input/output values fails the simple test function.

Note that there are various ways how you can mark groups of functions,
see :ref:`mark`.

Generating parameters combinations, depending on command line
----------------------------------------------------------------------------

.. regendoc:wipe

Let's say we want to execute a test with different parameters
and the parameter range shall be determined by a command
line argument. Let's first write a simple computation test::
Let's say we want to execute a test with different computation
parameters and the parameter range shall be determined by a command
line argument. Let's first write a simple (do-nothing) computation test::

    # content of test_compute.py
@@ -36,8 +87,7 @@ Now we add a test configuration like this::
            end = 5
        else:
            end = 2
        for i in range(end):
            metafunc.addcall(funcargs={'param1': i})
        metafunc.parametrize("param1", range(end))

This means that we only run 2 tests if we do not pass ``--all``::
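The output block that the sentence above introduces is not part of this hunk, and the head of the ``conftest.py`` is cut off as well. As an assumption reconstructed from the visible lines and the ``--all`` option used below, the complete conftest probably looks roughly like this::

    # content of conftest.py (sketch, not taken verbatim from the commit)
    def pytest_addoption(parser):
        parser.addoption("--all", action="store_true",
            help="run all parameter combinations")

    def pytest_generate_tests(metafunc):
        if "param1" in metafunc.funcargnames:
            if metafunc.config.option.all:
                end = 5
            else:
                end = 2
            metafunc.parametrize("param1", range(end))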
@@ -52,8 +102,8 @@ let's run the full monty::
    $ py.test -q --all
    collecting ... collected 5 items
    ....F
    ================================= FAILURES =================================
    _____________________________ test_compute[4] ______________________________
    =================================== FAILURES ===================================
    _______________________________ test_compute[4] ________________________________

    param1 = 4
@@ -67,15 +117,73 @@ let's run the full monty::
As expected when running the full range of ``param1`` values
we'll get an error on the last one.

Deferring the setup of parametrizing resources
a quick port of "testscenarios"
------------------------------------

.. _`test scenarios`: http://bazaar.launchpad.net/~lifeless/testscenarios/trunk/annotate/head%3A/doc/example.py

Here is a quick port to run tests configured with `test scenarios`_,
an add-on from Robert Collins for the standard unittest framework. We
only have to work a bit to construct the correct arguments for pytest's
:py:func:`Metafunc.parametrize`::

    # content of test_scenarios.py

    def pytest_generate_tests(metafunc):
        idlist = []
        argvalues = []
        for scenario in metafunc.cls.scenarios:
            idlist.append(scenario[0])
            items = scenario[1].items()
            argnames = [x[0] for x in items]
            argvalues.append(([x[1] for x in items]))
        metafunc.parametrize(argnames, argvalues, ids=idlist)

    scenario1 = ('basic', {'attribute': 'value'})
    scenario2 = ('advanced', {'attribute': 'value2'})

    class TestSampleWithScenarios:
        scenarios = [scenario1, scenario2]

        def test_demo(self, attribute):
            assert isinstance(attribute, str)

This is a fully self-contained example which you can run with::

    $ py.test test_scenarios.py
    ============================= test session starts ==============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8
    collecting ... collected 2 items

    test_scenarios.py ..

    =========================== 2 passed in 0.02 seconds ===========================

If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::

    $ py.test --collectonly test_scenarios.py
    ============================= test session starts ==============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8
    collecting ... collected 2 items
    <Module 'test_scenarios.py'>
      <Class 'TestSampleWithScenarios'>
        <Instance '()'>
          <Function 'test_demo[basic]'>
          <Function 'test_demo[advanced]'>

    =============================== in 0.01 seconds ===============================

Deferring the setup of parametrized resources
---------------------------------------------------

.. regendoc:wipe

The parametrization of test functions happens at collection
time. It is often a good idea to setup possibly expensive
resources only when the actual test is run. Here is a simple
example how you can achieve that::
time. It is a good idea to setup expensive resources like DB
connections or subprocesses only when the actual test is run.
Here is a simple example how you can achieve that, first
the actual test requiring a ``db`` object::

    # content of test_backends.py
@@ -85,17 +193,15 @@ example how you can achieve that::
        if db.__class__.__name__ == "DB2":
            pytest.fail("deliberately failing for demo purposes")

Now we add a test configuration that takes care to generate
two invocations of the ``test_db_initialized`` function and
furthermore a factory that creates a database object when
each test is actually run::
We can now add a test configuration that generates two invocations of
the ``test_db_initialized`` function and also implements a factory that
creates a database object for the actual test invocations::

    # content of conftest.py

    def pytest_generate_tests(metafunc):
        if 'db' in metafunc.funcargnames:
            metafunc.addcall(param="d1")
            metafunc.addcall(param="d2")
            metafunc.parametrize("db", ['d1', 'd2'], indirect=True)

    class DB1:
        "one database object"
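The conftest hunk stops short of the funcarg factory that consumes the ``d1``/``d2`` parameters. A sketch of what it plausibly looks like, based on the ``DB1``/``DB2`` classes visible in the surrounding example::

    def pytest_funcarg__db(request):
        # instantiate the expensive resource only when the test actually runs;
        # request.param is "d1" or "d2" thanks to indirect=True above
        if request.param == "d1":
            return DB1()
        elif request.param == "d2":
            return DB2()
        else:
            raise ValueError("invalid internal test config")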
@@ -113,24 +219,24 @@ each test is actually run::
Let's first see what it looks like at collection time::

    $ py.test test_backends.py --collectonly
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.1.3
    ============================= test session starts ==============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8
    collecting ... collected 2 items
    <Module 'test_backends.py'>
      <Function 'test_db_initialized[0]'>
      <Function 'test_db_initialized[1]'>
      <Function 'test_db_initialized[d1]'>
      <Function 'test_db_initialized[d2]'>

    ============================= in 0.01 seconds =============================
    =============================== in 0.01 seconds ===============================

And then when we run the test::

    $ py.test -q test_backends.py
    collecting ... collected 2 items
    .F
    ================================= FAILURES =================================
    __________________________ test_db_initialized[1] __________________________
    =================================== FAILURES ===================================
    ___________________________ test_db_initialized[d2] ____________________________

    db = <conftest.DB2 instance at 0x101316b90>
    db = <conftest.DB2 instance at 0x1013195f0>

        def test_db_initialized(db):
            # a dummy test
@@ -141,32 +247,35 @@ And then when we run the test::
    test_backends.py:6: Failed
    1 failed, 1 passed in 0.02 seconds

Now you see that one invocation of the test passes and another fails,
as is to be expected.
The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``pytest_funcarg__db`` factory has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two corresponding calls to ``test_db_initialized`` during the collection phase.

.. regendoc:wipe

Parametrizing test methods through per-class configuration
--------------------------------------------------------------

.. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py

Here is an example ``pytest_generate_tests`` function implementing a
parametrization scheme similar to Michael Foord's `unittest
parameterizer`_ in a lot less code::
parameterizer`_ but in a lot less code::

    # content of ./test_parametrize.py
    import pytest

    def pytest_generate_tests(metafunc):
        # called once per each test function
        for funcargs in metafunc.cls.params[metafunc.function.__name__]:
            # schedule a new test function run with applied **funcargs
            metafunc.addcall(funcargs=funcargs)
        funcarglist = metafunc.cls.params[metafunc.function.__name__]
        argnames = list(funcarglist[0])
        metafunc.parametrize(argnames, [[funcargs[name] for name in argnames]
                for funcargs in funcarglist])

    class TestClass:
        # a map specifying multiple argument sets for a test method
        params = {
            'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ],
            'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)],
            'test_zerodivision': [dict(a=1, b=0), ],
        }

        def test_equals(self, a, b):
@@ -175,114 +284,35 @@ parameterizer`_ in a lot less code::
        def test_zerodivision(self, a, b):
            pytest.raises(ZeroDivisionError, "a/b")

Running it means we are two tests for each test function, using
the respective settings::
Our test generator looks up a class-level definition which specifies which
argument sets to use for each test function. Let's run it::

    $ py.test -q
    collecting ... collected 6 items
    .FF..F
    ================================= FAILURES =================================
    __________________________ test_db_initialized[1] __________________________
    collecting ... collected 3 items
    F..
    =================================== FAILURES ===================================
    __________________________ TestClass.test_equals[1-2] __________________________

    db = <conftest.DB2 instance at 0x10131c488>

        def test_db_initialized(db):
            # a dummy test
            if db.__class__.__name__ == "DB2":
    >           pytest.fail("deliberately failing for demo purposes")
    E           Failed: deliberately failing for demo purposes

    test_backends.py:6: Failed
    _________________________ TestClass.test_equals[0] _________________________

    self = <test_parametrize.TestClass instance at 0x101322170>, a = 1, b = 2
    self = <test_parametrize.TestClass instance at 0x1013158c0>, a = 1, b = 2

        def test_equals(self, a, b):
    >       assert a == b
    E       assert 1 == 2

    test_parametrize.py:17: AssertionError
    ______________________ TestClass.test_zerodivision[1] ______________________

    self = <test_parametrize.TestClass instance at 0x1013228c0>, a = 3, b = 2

        def test_zerodivision(self, a, b):
    >       pytest.raises(ZeroDivisionError, "a/b")
    E       Failed: DID NOT RAISE

    test_parametrize.py:20: Failed
    3 failed, 3 passed in 0.05 seconds

Parametrizing test methods through a decorator
--------------------------------------------------------------

Modifying the previous example we can also allow decorators
for parametrizing test methods::

    # content of test_parametrize2.py

    import pytest

    # test support code
    def params(funcarglist):
        def wrapper(function):
            function.funcarglist = funcarglist
            return function
        return wrapper

    def pytest_generate_tests(metafunc):
        for funcargs in getattr(metafunc.function, 'funcarglist', ()):
            metafunc.addcall(funcargs=funcargs)

    # actual test code
    class TestClass:
        @params([dict(a=1, b=2), dict(a=3, b=3), ])
        def test_equals(self, a, b):
            assert a == b

        @params([dict(a=1, b=0), dict(a=3, b=2)])
        def test_zerodivision(self, a, b):
            pytest.raises(ZeroDivisionError, "a/b")

Running it gives similar results as before::

    $ py.test -q test_parametrize2.py
    collecting ... collected 4 items
    F..F
    ================================= FAILURES =================================
    _________________________ TestClass.test_equals[0] _________________________

    self = <test_parametrize2.TestClass instance at 0x10130ac20>, a = 1, b = 2

        @params([dict(a=1, b=2), dict(a=3, b=3), ])
        def test_equals(self, a, b):
    >       assert a == b
    E       assert 1 == 2

    test_parametrize2.py:19: AssertionError
    ______________________ TestClass.test_zerodivision[1] ______________________

    self = <test_parametrize2.TestClass instance at 0x10131c878>, a = 3, b = 2

        @params([dict(a=1, b=0), dict(a=3, b=2)])
        def test_zerodivision(self, a, b):
    >       pytest.raises(ZeroDivisionError, "a/b")
    E       Failed: DID NOT RAISE

    test_parametrize2.py:23: Failed
    2 failed, 2 passed in 0.04 seconds
    test_parametrize.py:18: AssertionError
    1 failed, 2 passed in 0.03 seconds

Checking serialization between Python interpreters
--------------------------------------------------------------

Here is a stripped down real-life example of using parametrized
testing for testing serialization between different interpreters.
testing for testing serialization, invoking different python interpreters.
We define a ``test_basic_objects`` function which is to be run
with different sets of arguments for its three arguments::

* ``python1``: first python interpreter
* ``python2``: second python interpreter
* ``obj``: object to be dumped from first interpreter and loaded into second interpreter
* ``python1``: first python interpreter, run to pickle-dump an object to a file
* ``python2``: second interpreter, run to pickle-load an object from a file
* ``obj``: object to be dumped/loaded

.. literalinclude:: multipython.py
@@ -290,5 +320,5 @@ Running it (with Python-2.4 through to Python2.7 installed)::

    . $ py.test -q multipython.py
    collecting ... collected 75 items
    ssssss...ss...ss...ssssssssssss...ss...ss...ssssssssssss...ss...ss...ssssss
    27 passed, 48 skipped in 3.04 seconds
    ssssssssssssssssss.........ssssss.........ssssss.........ssssssssssssssssss
    27 passed, 48 skipped in 4.87 seconds
@@ -61,14 +61,14 @@ py.test will discover and call the factory named
Running the test looks like this::

    $ py.test test_simplefactory.py
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.1.3
    ============================= test session starts ==============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8
    collecting ... collected 1 items

    test_simplefactory.py F

    ================================= FAILURES =================================
    ______________________________ test_function _______________________________
    =================================== FAILURES ===================================
    ________________________________ test_function _________________________________

    myfuncarg = 42
@@ -77,7 +77,7 @@ Running the test looks like this::
    E       assert 42 == 17

    test_simplefactory.py:5: AssertionError
    ========================= 1 failed in 0.02 seconds =========================
    =========================== 1 failed in 0.02 seconds ===========================

This means that indeed the test function was called with a ``myfuncarg``
argument value of ``42`` and the assert fails. Here is how py.test
@@ -158,23 +158,22 @@ hook to generate several calls to the same test function::
    # content of test_example.py
    def pytest_generate_tests(metafunc):
        if "numiter" in metafunc.funcargnames:
            for i in range(10):
                metafunc.addcall(funcargs=dict(numiter=i))
            metafunc.parametrize("numiter", range(10))

    def test_func(numiter):
        assert numiter < 9

Running this::
Running this will generate ten invocations of ``test_func``, passing in each of the items of ``range(10)``::

    $ py.test test_example.py
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.1.3
    ============================= test session starts ==============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8
    collecting ... collected 10 items

    test_example.py .........F

    ================================= FAILURES =================================
    _______________________________ test_func[9] _______________________________
    =================================== FAILURES ===================================
    _________________________________ test_func[9] _________________________________

    numiter = 9
@@ -182,16 +181,16 @@ Running this::
    >       assert numiter < 9
    E       assert 9 < 9

    test_example.py:7: AssertionError
    ==================== 1 failed, 9 passed in 0.04 seconds ====================
    test_example.py:6: AssertionError
    ====================== 1 failed, 9 passed in 0.07 seconds ======================

Note that the ``pytest_generate_tests(metafunc)`` hook is called during
Obviously, only when ``numiter`` has the value of ``9`` does the test fail. Note that the ``pytest_generate_tests(metafunc)`` hook is called during
the test collection phase which is separate from the actual test running.
Let's just look at what is collected::

    $ py.test --collectonly test_example.py
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.1.3
    ============================= test session starts ==============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8
    collecting ... collected 10 items
    <Module 'test_example.py'>
      <Function 'test_func[0]'>
@@ -205,37 +204,19 @@ Let's just look at what is collected::
      <Function 'test_func[8]'>
      <Function 'test_func[9]'>

    ============================= in 0.01 seconds =============================
    =============================== in 0.01 seconds ===============================

If you want to select only the run with the value ``7`` you could do::

    $ py.test -v -k 7 test_example.py # or -k test_func[7]
    =========================== test session starts ============================
    platform darwin -- Python 2.7.1 -- pytest-2.1.3 -- /Users/hpk/venv/0/bin/python
    ============================= test session starts ==============================
    platform darwin -- Python 2.7.1 -- pytest-2.2.0.dev8 -- /Users/hpk/venv/1/bin/python
    collecting ... collected 10 items

    test_example.py:6: test_func[0] PASSED
    test_example.py:6: test_func[1] PASSED
    test_example.py:6: test_func[2] PASSED
    test_example.py:6: test_func[3] PASSED
    test_example.py:6: test_func[4] PASSED
    test_example.py:6: test_func[5] PASSED
    test_example.py:6: test_func[6] PASSED
    test_example.py:6: test_func[7] PASSED
    test_example.py:6: test_func[8] PASSED
    test_example.py:6: test_func[9] FAILED
    test_example.py:5: test_func[7] PASSED

    ================================= FAILURES =================================
    _______________________________ test_func[9] _______________________________

    numiter = 9

        def test_func(numiter):
    >       assert numiter < 9
    E       assert 9 < 9

    test_example.py:7: AssertionError
    ==================== 1 failed, 9 passed in 0.05 seconds ====================
    ========================= 9 tests deselected by '-k7' ==========================
    ==================== 1 passed, 9 deselected in 0.01 seconds ====================

You might want to look at :ref:`more parametrization examples <paramexamples>`.
@@ -259,4 +240,5 @@ in the class or module where a test function is defined:

``metafunc.config``: access to command line opts and general config

.. automethod:: Metafunc.parametrize(name, values, idmaker=None)
.. automethod:: Metafunc.addcall(funcargs=None, id=_notexists, param=_notexists)
@@ -10,9 +10,11 @@ By using the ``pytest.mark`` helper you can easily set
metadata on your test functions. To begin with, there are
some builtin markers, for example:

* skipif - skip a test function if a certain condition is met
* xfail - produce an "expected failure" outcome if a certain
* :ref:`skipif <skipif>` - skip a test function if a certain condition is met
* :ref:`xfail <xfail>` - produce an "expected failure" outcome if a certain
  condition is met
* :ref:`parametrize <parametrizemark>` to perform multiple calls
  to the same test function.

It's also easy to create custom markers or to apply markers
to whole test classes or modules.
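As a small illustration of that last sentence, custom markers can be applied to a whole class or module as well as to single functions; the ``webtest`` marker is again just an example name, assumed to be registered elsewhere::

    import pytest

    # mark every test method in the class
    @pytest.mark.webtest
    class TestWebInterface:
        def test_login(self):
            pass

    # mark every test in the module
    pytestmark = pytest.mark.webtest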
@@ -23,7 +23,8 @@ Function arguments:

Test parametrization:

- `generating parametrized tests with funcargs`_
- `generating parametrized tests with funcargs`_ (uses the deprecated
  ``addcall()`` API)
- `test generators and cached setup`_
- `parametrizing tests, generalized`_ (blog post)
- `putting test-hooks into local or global plugins`_ (blog post)

setup.py
@@ -24,7 +24,7 @@ def main():
        name='pytest',
        description='py.test: simple powerful testing with Python',
        long_description = long_description,
        version='2.2.0.dev7',
        version='2.2.0.dev8',
        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -520,12 +520,6 @@ def test_getfuncargnames():
    if sys.version_info < (3,0):
        assert funcargs.getfuncargnames(A.f) == ['arg1']

def test_callspec_repr():
    cs = funcargs.CallSpec({}, 'hello', 1)
    repr(cs)
    cs = funcargs.CallSpec({}, 'hello', funcargs._notexists)
    repr(cs)

class TestFillFuncArgs:
    def test_fillfuncargs_exposed(self):
        # used by oejskit
@@ -886,6 +880,7 @@ class TestMetafunc:
        def function(): pass
        metafunc = funcargs.Metafunc(function)
        assert not metafunc.funcargnames
        repr(metafunc._calls)

    def test_function_basic(self):
        def func(arg1, arg2="qwe"): pass
@@ -925,9 +920,9 @@ class TestMetafunc:
        metafunc.addcall(param=obj)
        metafunc.addcall(param=1)
        assert len(metafunc._calls) == 3
        assert metafunc._calls[0].param == obj
        assert metafunc._calls[1].param == obj
        assert metafunc._calls[2].param == 1
        assert metafunc._calls[0].getparam("arg1") == obj
        assert metafunc._calls[1].getparam("arg1") == obj
        assert metafunc._calls[2].getparam("arg1") == 1

    def test_addcall_funcargs(self):
        def func(x): pass
@@ -941,7 +936,119 @@ class TestMetafunc:
        assert metafunc._calls[1].funcargs == {'x': 3}
        assert not hasattr(metafunc._calls[1], 'param')

class TestGenfuncFunctional:
    def test_parametrize_error(self):
        def func(x, y): pass
        metafunc = funcargs.Metafunc(func)
        metafunc.parametrize("x", [1,2])
        pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
        pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6]))
        metafunc.parametrize("y", [1,2])
        pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))
        pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6]))

    def test_parametrize_and_id(self):
        def func(x, y): pass
        metafunc = funcargs.Metafunc(func)

        metafunc.parametrize("x", [1,2], ids=['basic', 'advanced'])
        metafunc.parametrize("y", ["abc", "def"])
        ids = [x.id for x in metafunc._calls]
        assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"]

    def test_parametrize_with_userobjects(self):
        def func(x, y): pass
        metafunc = funcargs.Metafunc(func)
        class A:
            pass
        metafunc.parametrize("x", [A(), A()])
        metafunc.parametrize("y", list("ab"))
        assert metafunc._calls[0].id == ".0-a"
        assert metafunc._calls[1].id == ".0-b"
        assert metafunc._calls[2].id == ".1-a"
        assert metafunc._calls[3].id == ".1-b"

    def test_addcall_and_parametrize(self):
        def func(x, y): pass
        metafunc = funcargs.Metafunc(func)
        metafunc.addcall({'x': 1})
        metafunc.parametrize('y', [2,3])
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2}
        assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3}
        assert metafunc._calls[0].id == "0-2"
        assert metafunc._calls[1].id == "0-3"

    def test_parametrize_indirect(self):
        def func(x, y): pass
        metafunc = funcargs.Metafunc(func)
        metafunc.parametrize('x', [1], indirect=True)
        metafunc.parametrize('y', [2,3], indirect=True)
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == {}
        assert metafunc._calls[1].funcargs == {}
        assert metafunc._calls[0].params == dict(x=1,y=2)
        assert metafunc._calls[1].params == dict(x=1,y=3)

    def test_addcalls_and_parametrize_indirect(self):
        def func(x, y): pass
        metafunc = funcargs.Metafunc(func)
        metafunc.addcall(param="123")
        metafunc.parametrize('x', [1], indirect=True)
        metafunc.parametrize('y', [2,3], indirect=True)
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == {}
        assert metafunc._calls[1].funcargs == {}
        assert metafunc._calls[0].params == dict(x=1,y=2)
        assert metafunc._calls[1].params == dict(x=1,y=3)

    def test_parametrize_functional(self, testdir):
        testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.parametrize('x', [1,2], indirect=True)
                metafunc.parametrize('y', [2])
            def pytest_funcarg__x(request):
                return request.param * 10
            def pytest_funcarg__y(request):
                return request.param

            def test_simple(x,y):
                assert x in (10,20)
                assert y == 2
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines([
            "*test_simple*1-2*",
            "*test_simple*2-2*",
            "*2 passed*",
        ])

    def test_parametrize_onearg(self):
        metafunc = funcargs.Metafunc(lambda x: None)
        metafunc.parametrize("x", [1,2])
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == dict(x=1)
        assert metafunc._calls[0].id == "1"
        assert metafunc._calls[1].funcargs == dict(x=2)
        assert metafunc._calls[1].id == "2"

    def test_parametrize_onearg_indirect(self):
        metafunc = funcargs.Metafunc(lambda x: None)
        metafunc.parametrize("x", [1,2], indirect=True)
        assert metafunc._calls[0].params == dict(x=1)
        assert metafunc._calls[0].id == "1"
        assert metafunc._calls[1].params == dict(x=2)
        assert metafunc._calls[1].id == "2"

    def test_parametrize_twoargs(self):
        metafunc = funcargs.Metafunc(lambda x,y: None)
        metafunc.parametrize(("x", "y"), [(1,2), (3,4)])
        assert len(metafunc._calls) == 2
        assert metafunc._calls[0].funcargs == dict(x=1, y=2)
        assert metafunc._calls[0].id == "1-2"
        assert metafunc._calls[1].funcargs == dict(x=3, y=4)
        assert metafunc._calls[1].id == "3-4"

class TestMetafuncFunctional:
    def test_attributes(self, testdir):
        p = testdir.makepyfile("""
            # assumes that generate/provide runs in the same process
@@ -1109,6 +1216,46 @@ class TestGenfuncFunctional:
            "*1 pass*",
        ])

    def test_parametrize_functional2(self, testdir):
        testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.parametrize("arg1", [1,2])
                metafunc.parametrize("arg2", [4,5])
            def test_hello(arg1, arg2):
                assert 0, (arg1, arg2)
        """)
        result = testdir.runpytest()
        result.stdout.fnmatch_lines([
            "*(1, 4)*",
            "*(1, 5)*",
            "*(2, 4)*",
            "*(2, 5)*",
            "*4 failed*",
        ])

    def test_parametrize_and_inner_getfuncargvalue(self, testdir):
        p = testdir.makepyfile("""
            def pytest_generate_tests(metafunc):
                metafunc.parametrize("arg1", [1], indirect=True)
                metafunc.parametrize("arg2", [10], indirect=True)

            def pytest_funcarg__arg1(request):
                x = request.getfuncargvalue("arg2")
                return x + request.param

            def pytest_funcarg__arg2(request):
                return request.param

            def test_func1(arg1, arg2):
                assert arg1 == 11
        """)
        result = testdir.runpytest("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func1*1*PASS*",
            "*1 passed*"
        ])


def test_conftest_funcargs_only_available_in_subdir(testdir):
    sub1 = testdir.mkpydir("sub1")
    sub2 = testdir.mkpydir("sub2")