|
|
|
|
|
|
|
.. highlightlang:: python
|
|
|
|
|
|
|
|
basic patterns and examples
|
|
|
|
==========================================================
|
|
|
|
|
|
|
|
pass different values to a test function, depending on command line options
|
|
|
|
----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
Suppose we want to write a test that depends on a command line option.
|
|
|
|
Here is a basic pattern how to achieve this::
|
|
|
|
|
|
|
|
# content of test_sample.py
|
|
|
|
def test_answer(cmdopt):
|
|
|
|
if cmdopt == "type1":
|
|
|
|
print ("first")
|
|
|
|
elif cmdopt == "type2":
|
|
|
|
print ("second")
|
|
|
|
assert 0 # to see what was printed
|
|
|
|
|
|
|
|
|
|
|
|
For this to work we need to add a command line option and
|
|
|
|
provide the ``cmdopt`` through a :ref:`function argument <funcarg>` factory::
|
|
|
|
|
|
|
|
# content of conftest.py
|
|
|
|
def pytest_addoption(parser):
|
|
|
|
parser.addoption("--cmdopt", action="store", default="type1",
|
|
|
|
help="my option: type1 or type2")
|
|
|
|
|
|
|
|
def pytest_funcarg__cmdopt(request):
|
|
|
|
return request.config.option.cmdopt
|
|
|
|
|
|
|
|
Let's run this without supplying our new command line option::
|
|
|
|
|
|
|
|
$ py.test -q
|
|
|
|
F
|
|
|
|
================================= FAILURES =================================
|
|
|
|
_______________________________ test_answer ________________________________
|
|
|
|
|
|
|
|
cmdopt = 'type1'
|
|
|
|
|
|
|
|
def test_answer(cmdopt):
|
|
|
|
if cmdopt == "type1":
|
|
|
|
print ("first")
|
|
|
|
elif cmdopt == "type2":
|
|
|
|
print ("second")
|
|
|
|
> assert 0 # to see what was printed
|
|
|
|
E assert 0
|
|
|
|
|
|
|
|
test_sample.py:6: AssertionError
|
|
|
|
----------------------------- Captured stdout ------------------------------
|
|
|
|
first
|
|
|
|
1 failed in 0.02 seconds
|
|
|
|
|
|
|
|
And now with supplying a command line option::
|
|
|
|
|
|
|
|
$ py.test -q --cmdopt=type2
|
|
|
|
F
|
|
|
|
================================= FAILURES =================================
|
|
|
|
_______________________________ test_answer ________________________________
|
|
|
|
|
|
|
|
cmdopt = 'type2'
|
|
|
|
|
|
|
|
def test_answer(cmdopt):
|
|
|
|
if cmdopt == "type1":
|
|
|
|
print ("first")
|
|
|
|
elif cmdopt == "type2":
|
|
|
|
print ("second")
|
|
|
|
> assert 0 # to see what was printed
|
|
|
|
E assert 0
|
|
|
|
|
|
|
|
test_sample.py:6: AssertionError
|
|
|
|
----------------------------- Captured stdout ------------------------------
|
|
|
|
second
|
|
|
|
1 failed in 0.02 seconds
|
|
|
|
|
|
|
|
Ok, this completes the basic pattern. However, one often rather
|
|
|
|
wants to process command line options outside of the test and
|
|
|
|
pass in different or more complex objects. See the
|
|
|
|
next example or refer to :ref:`mysetup` for more information
|
|
|
|
on real-life examples.
|
|
|
|
|
|
|
|
generating parameters combinations, depending on command line
|
|
|
|
----------------------------------------------------------------------------
|
|
|
|
|
|
|
|
Let's say we want to execute a test with different parameters
|
|
|
|
and the parameter range shall be determined by a command
|
|
|
|
line argument. Let's first write a simple computation test::
|
|
|
|
|
|
|
|
# content of test_compute.py
|
|
|
|
|
|
|
|
def test_compute(param1):
|
|
|
|
assert param1 < 4
|
|
|
|
|
|
|
|
Now we add a test configuration like this::
|
|
|
|
|
|
|
|
# content of conftest.py
|
|
|
|
|
|
|
|
def pytest_addoption(parser):
|
|
|
|
parser.addoption("--all", action="store_true",
|
|
|
|
help="run all combinations")
|
|
|
|
|
|
|
|
def pytest_generate_tests(metafunc):
|
|
|
|
if 'param1' in metafunc.funcargnames:
|
|
|
|
if metafunc.config.option.all:
|
|
|
|
end = 5
|
|
|
|
else:
|
|
|
|
end = 2
|
|
|
|
for i in range(end):
|
|
|
|
metafunc.addcall(funcargs={'param1': i})
|
|
|
|
|
|
|
|
This means that we only run 2 tests if we do not pass ``--all``::
|
|
|
|
|
|
|
|
$ py.test -q test_compute.py
|
|
|
|
..
|
|
|
|
2 passed in 0.01 seconds
|
|
|
|
|
|
|
|
We run only two computations, so we see two dots.
|
|
|
|
Let's run the full monty::
|
|
|
|
|
|
|
|
$ py.test -q --all test_compute.py
|
|
|
|
....F
|
|
|
|
================================= FAILURES =================================
|
|
|
|
_____________________________ test_compute[4] ______________________________
|
|
|
|
|
|
|
|
param1 = 4
|
|
|
|
|
|
|
|
def test_compute(param1):
|
|
|
|
> assert param1 < 4
|
|
|
|
E assert 4 < 4
|
|
|
|
|
|
|
|
test_compute.py:3: AssertionError
|
|
|
|
1 failed, 4 passed in 0.03 seconds
|
|
|
|
|
|
|
|
As expected when running the full range of ``param1`` values
|
|
|
|
we'll get an error on the last one.
|
|
|
|
|
|
|
|
|
|
|
|
.. _`retrieved by hooks as item keywords`:
|
|
|
|
|
|
|
|
control skipping of tests according to command line option
|
|
|
|
--------------------------------------------------------------
|
|
|
|
|
|
|
|
Here is a ``conftest.py`` file adding a ``--runslow`` command
|
|
|
|
line option to control skipping of ``slow`` marked tests::
|
|
|
|
|
|
|
|
# content of conftest.py
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
def pytest_addoption(parser):
|
|
|
|
parser.addoption("--runslow", action="store_true",
|
|
|
|
help="run slow tests")
|
|
|
|
|
|
|
|
def pytest_runtest_setup(item):
|
|
|
|
if 'slow' in item.keywords and not item.config.getvalue("runslow"):
|
|
|
|
pytest.skip("need --runslow option to run")
|
|
|
|
|
|
|
|
We can now write a test module like this::
|
|
|
|
|
|
|
|
# content of test_module.py
|
|
|
|
|
|
|
|
import pytest
|
|
|
|
slow = pytest.mark.slow
|
|
|
|
|
|
|
|
def test_func_fast():
|
|
|
|
pass
|
|
|
|
|
|
|
|
@slow
|
|
|
|
def test_func_slow():
|
|
|
|
pass
|
|
|
|
|
|
|
|
and when running it will see a skipped "slow" test::
|
|
|
|
|
|
|
|
$ py.test test_module.py -rs # "-rs" means report details on the little 's'
|
|
|
|
=========================== test session starts ============================
|
|
|
|
platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
|
|
|
|
test path 1: test_module.py
|
|
|
|
|
|
|
|
test_module.py .s
|
|
|
|
========================= short test summary info ==========================
|
|
|
|
SKIP [1] /tmp/doc-exec-104/conftest.py:9: need --runslow option to run
|
|
|
|
|
|
|
|
=================== 1 passed, 1 skipped in 0.02 seconds ====================
|
|
|
|
|
|
|
|
Or run it including the ``slow`` marked test::
|
|
|
|
|
|
|
|
$ py.test test_module.py --runslow
|
|
|
|
=========================== test session starts ============================
|
|
|
|
platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
|
|
|
|
test path 1: test_module.py
|
|
|
|
|
|
|
|
test_module.py ..
|
|
|
|
|
|
|
|
========================= 2 passed in 0.01 seconds =========================
|
|
|
|
|
|
|
|
|
|
|
|
writing well integrated assertion helpers
|
|
|
|
--------------------------------------------------
|
|
|
|
|
|
|
|
If you have a test helper function called from a test you can
|
|
|
|
use the ``pytest.fail`` marker to fail a test with a certain message.
|
|
|
|
The test support function will not show up in the traceback if you
|
|
|
|
set the ``__tracebackhide__`` option somewhere in the helper function.
|
|
|
|
Example::
|
|
|
|
|
|
|
|
# content of test_checkconfig.py
|
|
|
|
import pytest
|
|
|
|
def checkconfig(x):
|
|
|
|
__tracebackhide__ = True
|
|
|
|
if not hasattr(x, "config"):
|
|
|
|
pytest.fail("not configured: %s" %(x,))
|
|
|
|
|
|
|
|
def test_something():
|
|
|
|
checkconfig(42)
|
|
|
|
|
|
|
|
The ``__tracebackhide__`` setting influences py.test showing
|
|
|
|
of tracebacks: the ``checkconfig`` function will not be shown
|
|
|
|
unless the ``--fulltrace`` command line option is specified.
|
|
|
|
Let's run our little function::
|
|
|
|
|
|
|
|
$ py.test -q
|
|
|
|
F
|
|
|
|
================================= FAILURES =================================
|
|
|
|
______________________________ test_something ______________________________
|
|
|
|
|
|
|
|
def test_something():
|
|
|
|
> checkconfig(42)
|
|
|
|
E Failed: not configured: 42
|
|
|
|
|
|
|
|
test_checkconfig.py:8: Failed
|
|
|
|
1 failed in 0.02 seconds
|
|
|
|
|
|
|
|
|
|
|
|
Detect if running from within a py.test run
|
|
|
|
--------------------------------------------------------------
|
|
|
|
|
|
|
|
Usually it is a bad idea to make application code
|
|
|
|
behave differently if called from a test. But if you
|
|
|
|
absolutely must find out if your application code is
|
|
|
|
running from a test you can do something like this::
|
|
|
|
|
|
|
|
# content of conftest.py in your testing directory
|
|
|
|
|
|
|
|
def pytest_configure(config):
|
|
|
|
import sys
|
|
|
|
sys._called_from_test = True
|
|
|
|
|
|
|
|
def pytest_unconfigure(config):
|
|
|
|
del sys._called_from_test
|
|
|
|
|
|
|
|
and then check for the ``sys._called_from_test`` flag::
|
|
|
|
|
|
|
|
if hasattr(sys, '_called_from_test'):
|
|
|
|
# called from within a test run
|
|
|
|
else:
|
|
|
|
# called "normally"
|
|
|
|
|
|
|
|
accordingly in your application. It's also a good idea
|
|
|
|
to use your own application module rather than ``sys``
|
|
|
|
for handling this flag.
|
|
|
|
|