From 158e160823c6fe8f6b92a971de32824cbe9bb9e3 Mon Sep 17 00:00:00 2001 From: holger krekel Date: Sat, 20 Nov 2010 21:35:55 +0100 Subject: [PATCH] merging and refining examples, also refining skipping documentation. --- doc/announce/index.txt | 4 + .../example}/assertion/failure_demo.py | 0 .../global_testmodule_config/conftest.py | 0 .../global_testmodule_config/test_hello.py | 0 .../example}/assertion/test_failures.py | 1 + .../assertion/test_setup_flow_example.py | 0 doc/example/builtin.txt | 36 ----- doc/example/controlskip.txt | 57 ------- .../example}/costlysetup/conftest.py | 1 + .../example}/costlysetup/sub1/__init__.py | 0 doc/example/costlysetup/sub1/test_quick.py | 3 + .../example}/costlysetup/sub2/__init__.py | 0 .../example}/costlysetup/sub2/test_two.py | 0 doc/example/detectpytest.txt | 29 ---- doc/example/index.txt | 11 +- .../example/multipython.py | 4 +- doc/example/parametrize.txt | 142 +++++++++++++++++ .../{collectonly.py => pythoncollection.py} | 0 doc/example/pythoncollection.txt | 6 +- doc/example/simple.txt | 144 +++++++++++++++++- doc/example/xfail_demo.py | 21 +++ doc/example/xunit_setup.txt | 74 --------- doc/plugins.txt | 2 +- doc/pytest.ini | 2 + doc/skipping.txt | 71 +++++---- example/funcarg/conftest.py | 3 - .../funcarg/costlysetup/sub1/test_quick.py | 3 - example/funcarg/mysetup/__init__.py | 1 - example/funcarg/mysetup/conftest.py | 9 -- example/funcarg/mysetup/myapp.py | 5 - example/funcarg/mysetup/test_sample.py | 5 - example/funcarg/mysetup2/__init__.py | 1 - example/funcarg/mysetup2/conftest.py | 24 --- example/funcarg/mysetup2/myapp.py | 5 - example/funcarg/mysetup2/test_sample.py | 6 - example/funcarg/mysetup2/test_ssh.py | 5 - .../funcarg/parametrize/test_parametrize.py | 17 --- .../funcarg/parametrize/test_parametrize2.py | 25 --- .../funcarg/parametrize/test_parametrize3.py | 15 -- example/funcarg/test_simpleprovider.py | 7 - example/funcarg/urloption/conftest.py | 15 -- example/genhtml.py | 13 -- example/genhtmlcss.py | 23 --- example/genxml.py | 17 --- example/xfail_demo.py | 16 -- 45 files changed, 371 insertions(+), 452 deletions(-) rename {example => doc/example}/assertion/failure_demo.py (100%) rename {example => doc/example}/assertion/global_testmodule_config/conftest.py (100%) rename {example => doc/example}/assertion/global_testmodule_config/test_hello.py (100%) rename {example => doc/example}/assertion/test_failures.py (93%) rename {example => doc/example}/assertion/test_setup_flow_example.py (100%) delete mode 100644 doc/example/builtin.txt delete mode 100644 doc/example/controlskip.txt rename {example/funcarg => doc/example}/costlysetup/conftest.py (89%) rename {example/funcarg => doc/example}/costlysetup/sub1/__init__.py (100%) create mode 100644 doc/example/costlysetup/sub1/test_quick.py rename {example/funcarg => doc/example}/costlysetup/sub2/__init__.py (100%) rename {example/funcarg => doc/example}/costlysetup/sub2/test_two.py (100%) delete mode 100644 doc/example/detectpytest.txt rename example/funcarg/test_multi_python.py => doc/example/multipython.py (95%) create mode 100644 doc/example/parametrize.txt rename doc/example/{collectonly.py => pythoncollection.py} (100%) create mode 100644 doc/example/xfail_demo.py delete mode 100644 doc/example/xunit_setup.txt create mode 100644 doc/pytest.ini delete mode 100644 example/funcarg/conftest.py delete mode 100644 example/funcarg/costlysetup/sub1/test_quick.py delete mode 100644 example/funcarg/mysetup/__init__.py delete mode 100644 example/funcarg/mysetup/conftest.py delete mode 100644 
example/funcarg/mysetup/myapp.py delete mode 100644 example/funcarg/mysetup/test_sample.py delete mode 100644 example/funcarg/mysetup2/__init__.py delete mode 100644 example/funcarg/mysetup2/conftest.py delete mode 100644 example/funcarg/mysetup2/myapp.py delete mode 100644 example/funcarg/mysetup2/test_sample.py delete mode 100644 example/funcarg/mysetup2/test_ssh.py delete mode 100644 example/funcarg/parametrize/test_parametrize.py delete mode 100644 example/funcarg/parametrize/test_parametrize2.py delete mode 100644 example/funcarg/parametrize/test_parametrize3.py delete mode 100644 example/funcarg/test_simpleprovider.py delete mode 100644 example/funcarg/urloption/conftest.py delete mode 100644 example/genhtml.py delete mode 100644 example/genhtmlcss.py delete mode 100644 example/genxml.py delete mode 100644 example/xfail_demo.py diff --git a/doc/announce/index.txt b/doc/announce/index.txt index 7590c06a3..d987409c7 100644 --- a/doc/announce/index.txt +++ b/doc/announce/index.txt @@ -6,6 +6,10 @@ Release announcements :maxdepth: 2 release-2.0.0 + +.. toctree:: + :hidden: + release-1.3.4 release-1.3.3 release-1.3.2 diff --git a/example/assertion/failure_demo.py b/doc/example/assertion/failure_demo.py similarity index 100% rename from example/assertion/failure_demo.py rename to doc/example/assertion/failure_demo.py diff --git a/example/assertion/global_testmodule_config/conftest.py b/doc/example/assertion/global_testmodule_config/conftest.py similarity index 100% rename from example/assertion/global_testmodule_config/conftest.py rename to doc/example/assertion/global_testmodule_config/conftest.py diff --git a/example/assertion/global_testmodule_config/test_hello.py b/doc/example/assertion/global_testmodule_config/test_hello.py similarity index 100% rename from example/assertion/global_testmodule_config/test_hello.py rename to doc/example/assertion/global_testmodule_config/test_hello.py diff --git a/example/assertion/test_failures.py b/doc/example/assertion/test_failures.py similarity index 93% rename from example/assertion/test_failures.py rename to doc/example/assertion/test_failures.py index 7fecc345f..b8db738fe 100644 --- a/example/assertion/test_failures.py +++ b/doc/example/assertion/test_failures.py @@ -1,6 +1,7 @@ import py failure_demo = py.path.local(__file__).dirpath('failure_demo.py') +pytest_plugins = 'pytester', def test_failure_demo_fails_properly(testdir): target = testdir.tmpdir.join(failure_demo.basename) diff --git a/example/assertion/test_setup_flow_example.py b/doc/example/assertion/test_setup_flow_example.py similarity index 100% rename from example/assertion/test_setup_flow_example.py rename to doc/example/assertion/test_setup_flow_example.py diff --git a/doc/example/builtin.txt b/doc/example/builtin.txt deleted file mode 100644 index 71d8c4e98..000000000 --- a/doc/example/builtin.txt +++ /dev/null @@ -1,36 +0,0 @@ - -writing well integrated assertion helpers -======================================================== - -If you have a test helper function called from a test you can -use the ``pytest.fail`` marker to fail a test with a certain message. -The test support function will not show up in the traceback if you -set the ``__tracebackhide__`` option somewhere in the helper function. 
-Example:: - - # content of test_checkconfig.py - import pytest - def checkconfig(x): - __tracebackhide__ = True - if not hasattr(x, "config"): - pytest.fail("not configured: %s" %(x,)) - - def test_something(): - checkconfig(42) - -The ``__tracebackhide__`` setting influences py.test showing -of tracebacks: the ``checkconfig`` function will not be shown -unless the ``--fulltrace`` command line option is specified. -Let's run our little function:: - - $ py.test -q - F - ================================= FAILURES ================================= - ______________________________ test_something ______________________________ - - def test_something(): - > checkconfig(42) - E Failed: not configured: 42 - - test_checkconfig.py:8: Failed - 1 failed in 0.02 seconds diff --git a/doc/example/controlskip.txt b/doc/example/controlskip.txt deleted file mode 100644 index 0f22a8890..000000000 --- a/doc/example/controlskip.txt +++ /dev/null @@ -1,57 +0,0 @@ - -.. _`retrieved by hooks as item keywords`: - -control skipping of tests according to command line option --------------------------------------------------------------- - -Here is a ``conftest.py`` file adding a ``--runslow`` command -line option to control skipping of ``slow`` marked tests:: - - # content of conftest.py - - import pytest - def pytest_addoption(parser): - parser.addoption("--runslow", action="store_true", - help="run slow tests") - - def pytest_runtest_setup(item): - if 'slow' in item.keywords and not item.config.getvalue("runslow"): - pytest.skip("need --runslow option to run") - -We can now write a test module like this:: - - # content of test_module.py - - import pytest - slow = pytest.mark.slow - - def test_func_fast(): - pass - - @slow - def test_func_slow(): - pass - -and when running it will see a skipped "slow" test:: - - $ py.test test_module.py -rs # "-rs" means report details on the little 's' - =========================== test session starts ============================ - platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30 - test path 1: test_module.py - - test_module.py .s - ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-104/conftest.py:9: need --runslow option to run - - =================== 1 passed, 1 skipped in 0.02 seconds ==================== - -Or run it including the ``slow`` marked test:: - - $ py.test test_module.py --runslow - =========================== test session starts ============================ - platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30 - test path 1: test_module.py - - test_module.py .. 
- - ========================= 2 passed in 0.01 seconds ========================= diff --git a/example/funcarg/costlysetup/conftest.py b/doc/example/costlysetup/conftest.py similarity index 89% rename from example/funcarg/costlysetup/conftest.py rename to doc/example/costlysetup/conftest.py index b206395ba..0b83d42e8 100644 --- a/example/funcarg/costlysetup/conftest.py +++ b/doc/example/costlysetup/conftest.py @@ -9,6 +9,7 @@ def pytest_funcarg__setup(request): class CostlySetup: def __init__(self): import time + print ("performing costly setup") time.sleep(5) self.timecostly = 1 diff --git a/example/funcarg/costlysetup/sub1/__init__.py b/doc/example/costlysetup/sub1/__init__.py similarity index 100% rename from example/funcarg/costlysetup/sub1/__init__.py rename to doc/example/costlysetup/sub1/__init__.py diff --git a/doc/example/costlysetup/sub1/test_quick.py b/doc/example/costlysetup/sub1/test_quick.py new file mode 100644 index 000000000..d97657867 --- /dev/null +++ b/doc/example/costlysetup/sub1/test_quick.py @@ -0,0 +1,3 @@ + +def test_quick(setup): + pass diff --git a/example/funcarg/costlysetup/sub2/__init__.py b/doc/example/costlysetup/sub2/__init__.py similarity index 100% rename from example/funcarg/costlysetup/sub2/__init__.py rename to doc/example/costlysetup/sub2/__init__.py diff --git a/example/funcarg/costlysetup/sub2/test_two.py b/doc/example/costlysetup/sub2/test_two.py similarity index 100% rename from example/funcarg/costlysetup/sub2/test_two.py rename to doc/example/costlysetup/sub2/test_two.py diff --git a/doc/example/detectpytest.txt b/doc/example/detectpytest.txt deleted file mode 100644 index 50490f344..000000000 --- a/doc/example/detectpytest.txt +++ /dev/null @@ -1,29 +0,0 @@ - -Detect if running from within a py.test run --------------------------------------------------------------- - -Usually it is a bad idea to make application code -behave differently if called from a test. But if you -absolutely must find out if your application code is -running from a test you can do something like this:: - - # content of conftest.py in your testing directory - - def pytest_configure(config): - import sys - sys._called_from_test = True - - def pytest_unconfigure(config): - del sys._called_from_test - -and then check for the ``sys._called_from_test`` flag:: - - if hasattr(sys, '_called_from_test'): - # called from within a test run - else: - # called "normally" - -accordingly in your application. It's also a good idea -to rather use your own application module rather than ``sys`` -for handling flag. - diff --git a/doc/example/index.txt b/doc/example/index.txt index a84ddf984..f07fa5dbf 100644 --- a/doc/example/index.txt +++ b/doc/example/index.txt @@ -10,11 +10,8 @@ need more examples or have questions. .. toctree:: :maxdepth: 2 - builtin.txt - pythoncollection.txt - controlskip.txt - mysetup.txt - detectpytest.txt - nonpython.txt simple.txt - xunit_setup.txt + pythoncollection.txt + mysetup.txt + parametrize.txt + nonpython.txt diff --git a/example/funcarg/test_multi_python.py b/doc/example/multipython.py similarity index 95% rename from example/funcarg/test_multi_python.py rename to doc/example/multipython.py index f386d163a..4993366c2 100644 --- a/example/funcarg/test_multi_python.py +++ b/doc/example/multipython.py @@ -1,12 +1,10 @@ """ - module containing a parametrized tests testing cross-python serialization via the pickle module. 
""" import py -pythonlist = ['python2.3', 'python2.4', 'python2.5', 'python2.6'] -# 'jython' 'python3.1'] +pythonlist = ['python2.4', 'python2.5', 'python2.6', 'python2.7', 'python2.8'] def pytest_generate_tests(metafunc): if 'python1' in metafunc.funcargnames: diff --git a/doc/example/parametrize.txt b/doc/example/parametrize.txt new file mode 100644 index 000000000..10052dca0 --- /dev/null +++ b/doc/example/parametrize.txt @@ -0,0 +1,142 @@ + +parametrizing tests +================================================= + +py.test allows to easily implement your own custom +parametrization scheme for tests. Here we provide +some examples for inspiration and re-use. + +Parametrizing test methods through per-class configuration +-------------------------------------------------------------- + +.. _`unittest parameterizer`: http://code.google.com/p/unittest-ext/source/browse/trunk/params.py + +Here is an example ``pytest_generate_function`` function implementing a +parametrization scheme similar to Michael Foords `unittest +parameterizer`_ in a lot less code:: + + # content of ./test_parametrize.py + import pytest + + def pytest_generate_tests(metafunc): + # called once per each test function + for funcargs in metafunc.cls.params[metafunc.function.__name__]: + # schedule a new test function run with applied **funcargs + metafunc.addcall(funcargs=funcargs) + + class TestClass: + # a map specifying multiple argument sets for a test method + params = { + 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ], + 'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)], + } + + def test_equals(self, a, b): + assert a == b + + def test_zerodivision(self, a, b): + pytest.raises(ZeroDivisionError, "a/b") + +Running it means we are two tests for each test functions, using +the respective settings:: + + $ py.test -q + F..F + ================================= FAILURES ================================= + _________________________ TestClass.test_equals[0] _________________________ + + self = , a = 1, b = 2 + + def test_equals(self, a, b): + > assert a == b + E assert 1 == 2 + + test_parametrize.py:17: AssertionError + ______________________ TestClass.test_zerodivision[1] ______________________ + + self = , a = 3, b = 2 + + def test_zerodivision(self, a, b): + > pytest.raises(ZeroDivisionError, "a/b") + E Failed: DID NOT RAISE + + test_parametrize.py:20: Failed + 2 failed, 2 passed in 0.03 seconds + +Parametrizing test methods through a decorator +-------------------------------------------------------------- + +Modifying the previous example we can also allow decorators +for parametrizing test methods:: + + # content of test_parametrize2.py + + import pytest + + # test support code + def params(funcarglist): + def wrapper(function): + function.funcarglist = funcarglist + return function + return wrapper + + def pytest_generate_tests(metafunc): + for funcargs in getattr(metafunc.function, 'funcarglist', ()): + metafunc.addcall(funcargs=funcargs) + + # actual test code + class TestClass: + @params([dict(a=1, b=2), dict(a=3, b=3), ]) + def test_equals(self, a, b): + assert a == b + + @params([dict(a=1, b=0), dict(a=3, b=2)]) + def test_zerodivision(self, a, b): + pytest.raises(ZeroDivisionError, "a/b") + +Running it gives similar results as before:: + + $ py.test -q test_parametrize2.py + F..F + ================================= FAILURES ================================= + _________________________ TestClass.test_equals[0] _________________________ + + self = , a = 1, b = 2 + + @params([dict(a=1, b=2), dict(a=3, b=3), ]) 
+        def test_equals(self, a, b):
+    >       assert a == b
+    E       assert 1 == 2
+
+    test_parametrize2.py:19: AssertionError
+    ______________________ TestClass.test_zerodivision[1] ______________________
+
+    self = , a = 3, b = 2
+
+        @params([dict(a=1, b=0), dict(a=3, b=2)])
+        def test_zerodivision(self, a, b):
+    >       pytest.raises(ZeroDivisionError, "a/b")
+    E       Failed: DID NOT RAISE
+
+    test_parametrize2.py:23: Failed
+    2 failed, 2 passed in 0.03 seconds
+
+checking serialization between Python interpreters
+--------------------------------------------------------------
+
+Here is a stripped-down real-life example of using parametrized
+testing for testing serialization between different interpreters.
+We define a ``test_basic_objects`` function which is to be run
+with different sets of arguments for its three arguments:
+
+* ``python1``: first python interpreter
+* ``python2``: second python interpreter
+* ``obj``: object to be dumped from first interpreter and loaded into second interpreter
+
+.. literalinclude:: multipython.py
+
+Running it (with Python 2.4 through Python 2.7 installed)::
+
+    . $ py.test -q multipython.py
+    ....s....s....s....ssssss....s....s....s....ssssss....s....s....s....ssssss
+    48 passed, 27 skipped in 2.55 seconds
diff --git a/doc/example/collectonly.py b/doc/example/pythoncollection.py
similarity index 100%
rename from doc/example/collectonly.py
rename to doc/example/pythoncollection.py
diff --git a/doc/example/pythoncollection.txt b/doc/example/pythoncollection.txt
index 0e51d9c4c..98c7071ec 100644
--- a/doc/example/pythoncollection.txt
+++ b/doc/example/pythoncollection.txt
@@ -21,7 +21,7 @@ their file system path and then running the test.
 Through an ini-file and the :confval:`addopts` option you can
 make this change more permanently::
 
-    # content of setup.cfg or tox.ini
+    # content of pytest.ini
     [pytest]
     addopts = --pyargs
 
@@ -30,8 +30,8 @@ finding out what is collected
 
 You can always peek at the collection tree without running tests like this::
 
-    . $ py.test --collectonly collectonly.py
-    
+    . $ py.test --collectonly pythoncollection.py
+    
diff --git a/doc/example/simple.txt b/doc/example/simple.txt
index 4e4d634cf..87b9668b0 100644
--- a/doc/example/simple.txt
+++ b/doc/example/simple.txt
@@ -1,9 +1,26 @@
 .. highlightlang:: python
 
-simple patterns using hooks
+simple hook-using patterns
 ==========================================================
 
+adding custom options
+----------------------
+
+py.test supports adding standard optparse_ options.
+A plugin may implement the ``pytest_addoption`` hook for registering
+custom options::
+
+    def pytest_addoption(parser):
+        parser.addoption("-M", "--myopt", action="store",
+            help="specify string to set myopt")
+
+    def pytest_configure(config):
+        if config.option.myopt:
+            pass # do action based on option value
+
+.. _optparse: http://docs.python.org/library/optparse.html
+
 pass different values to a test function, depending on command line options
 ----------------------------------------------------------------------------
 
@@ -134,3 +151,128 @@ let's run the full monty::
 
 As expected when running the full range of ``param1`` values
 we'll get an error on the last one.
+
+
+.. _`retrieved by hooks as item keywords`:
+
+control skipping of tests according to command line option
+--------------------------------------------------------------
+
+Here is a ``conftest.py`` file adding a ``--runslow`` command
+line option to control skipping of ``slow`` marked tests::
+
+    # content of conftest.py
+
+    import pytest
+    def pytest_addoption(parser):
+        parser.addoption("--runslow", action="store_true",
+            help="run slow tests")
+
+    def pytest_runtest_setup(item):
+        if 'slow' in item.keywords and not item.config.getvalue("runslow"):
+            pytest.skip("need --runslow option to run")
+
+We can now write a test module like this::
+
+    # content of test_module.py
+
+    import pytest
+    slow = pytest.mark.slow
+
+    def test_func_fast():
+        pass
+
+    @slow
+    def test_func_slow():
+        pass
+
+and when running it we will see a skipped "slow" test::
+
+    $ py.test test_module.py -rs # "-rs" means report details on the little 's'
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: test_module.py
+
+    test_module.py .s
+    ========================= short test summary info ==========================
+    SKIP [1] /tmp/doc-exec-104/conftest.py:9: need --runslow option to run
+
+    =================== 1 passed, 1 skipped in 0.02 seconds ====================
+
+Or run it including the ``slow`` marked test::
+
+    $ py.test test_module.py --runslow
+    =========================== test session starts ============================
+    platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev30
+    test path 1: test_module.py
+
+    test_module.py ..
+
+    ========================= 2 passed in 0.01 seconds =========================
+
+
+writing well-integrated assertion helpers
+--------------------------------------------------
+
+If you have a test helper function called from a test you can
+use the ``pytest.fail`` helper to fail a test with a certain message.
+The test support function will not show up in the traceback if you
+set the ``__tracebackhide__`` option somewhere in the helper function.
+Example::
+
+    # content of test_checkconfig.py
+    import pytest
+    def checkconfig(x):
+        __tracebackhide__ = True
+        if not hasattr(x, "config"):
+            pytest.fail("not configured: %s" %(x,))
+
+    def test_something():
+        checkconfig(42)
+
+The ``__tracebackhide__`` setting influences how py.test shows
+tracebacks: the ``checkconfig`` function will not be shown
+unless the ``--fulltrace`` command line option is specified.
+Let's run our little function::
+
+    $ py.test -q
+    F
+    ================================= FAILURES =================================
+    ______________________________ test_something ______________________________
+
+    def test_something():
+    >       checkconfig(42)
+    E       Failed: not configured: 42
+
+    test_checkconfig.py:8: Failed
+    1 failed in 0.02 seconds
+
+
+Detect if running from within a py.test run
+--------------------------------------------------------------
+
+Usually it is a bad idea to make application code
+behave differently if called from a test. But if you
+absolutely must find out if your application code is
+running from a test you can do something like this::
+
+    # content of conftest.py in your testing directory
+    import sys
+
+    def pytest_configure(config):
+        sys._called_from_test = True
+
+    def pytest_unconfigure(config):
+        del sys._called_from_test
+
+and then check for the ``sys._called_from_test`` flag::
+
+    if hasattr(sys, '_called_from_test'):
+        pass  # called from within a test run
+    else:
+        pass  # called "normally"
+
+accordingly in your application. It's also a good idea
+to use your own application module rather than ``sys``
+for handling the flag.
+
diff --git a/doc/example/xfail_demo.py b/doc/example/xfail_demo.py
new file mode 100644
index 000000000..c0efbc7af
--- /dev/null
+++ b/doc/example/xfail_demo.py
@@ -0,0 +1,21 @@
+import pytest
+xfail = pytest.mark.xfail
+
+@xfail
+def test_hello():
+    assert 0
+
+@xfail(run=False)
+def test_hello2():
+    assert 0
+
+@xfail("hasattr(os, 'sep')")
+def test_hello3():
+    assert 0
+
+@xfail(reason="bug 110")
+def test_hello4():
+    assert 0
+
+def test_hello5():
+    pytest.xfail("reason")
diff --git a/doc/example/xunit_setup.txt b/doc/example/xunit_setup.txt
deleted file mode 100644
index 2fa6de211..000000000
--- a/doc/example/xunit_setup.txt
+++ /dev/null
@@ -1,74 +0,0 @@
-Learning by examples
-=====================
-
-adding custom options
-----------------------
-
-py.test supports adding of standard optparse_ Options.
-A plugin may implement the ``addoption`` hook for registering
-custom options::
-
-    def pytest_addoption(parser):
-        parser.addoption("-M", "--myopt", action="store",
-            help="specify string to set myopt")
-
-    def pytest_configure(config):
-        if config.option.myopt:
-            # do action based on option value
-            #
-
-.. _optparse: http://docs.python.org/library/optparse.html
-
-order of setup/teardown module/class/item methods
-====================================================
-
-managing state at module, class and method level
-------------------------------------------------------------
-
-Here is a working example for what goes on when you setup modules,
-classes and methods::
-
-    # [[from py/documentation/example/pytest/test_setup_flow_example.py]]
-
-    def setup_module(module):
-        module.TestStateFullThing.classcount = 0
-
-    class TestStateFullThing:
-        def setup_class(cls):
-            cls.classcount += 1
-
-        def teardown_class(cls):
-            cls.classcount -= 1
-
-        def setup_method(self, method):
-            self.id = eval(method.func_name[5:])
-
-        def test_42(self):
-            assert self.classcount == 1
-            assert self.id == 42
-
-        def test_23(self):
-            assert self.classcount == 1
-            assert self.id == 23
-
-    def teardown_module(module):
-        assert module.TestStateFullThing.classcount == 0
-
-For this example the control flow happens as follows::
-
-    import test_setup_flow_example
-    setup_module(test_setup_flow_example)
-    setup_class(TestStateFullThing)
-    instance = TestStateFullThing()
-    setup_method(instance, instance.test_42)
-    instance.test_42()
-    setup_method(instance, instance.test_23)
-    instance.test_23()
-    teardown_class(TestStateFullThing)
-    teardown_module(test_setup_flow_example)
-
-Note that ``setup_class(TestStateFullThing)`` is called and not
-``TestStateFullThing.setup_class()`` which would require you
-to insert ``setup_class = classmethod(setup_class)`` to make
-your setup function callable. Did we mention that lazyness
-is a virtue?
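Editor's note: the "Detect if running from within a py.test run" section above recommends keeping the flag on your own application module rather than on ``sys``, but does not show that variant. Here is a minimal sketch of what it could look like; the ``myapp/runtime.py`` module and its attribute name are illustrative assumptions, not part of this patch::

    # content of myapp/runtime.py  (hypothetical application module)
    called_from_test = False

    # content of conftest.py in your testing directory
    import myapp.runtime

    def pytest_configure(config):
        myapp.runtime.called_from_test = True

    def pytest_unconfigure(config):
        myapp.runtime.called_from_test = False

Application code can then check ``myapp.runtime.called_from_test`` directly instead of probing ``sys`` with ``hasattr``.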
diff --git a/doc/plugins.txt b/doc/plugins.txt
index ccca1271c..deeebd4f8 100644
--- a/doc/plugins.txt
+++ b/doc/plugins.txt
@@ -65,7 +65,7 @@ tool, for example::
 
 If a plugin is installed, py.test automatically finds and integrates it,
 there is no need to activate it. If you don't need a plugin anymore simply
-de-install it. You can find a list of valid plugins through a
+uninstall it. You can find a list of available plugins through a
 `pytest- pypi.python.org search`_.
 
 .. _`available installable plugins`:
diff --git a/doc/pytest.ini b/doc/pytest.ini
new file mode 100644
index 000000000..01e3f9d29
--- /dev/null
+++ b/doc/pytest.ini
@@ -0,0 +1,2 @@
+[pytest]
+# just defined to prevent the root level tox.ini from kicking in
diff --git a/doc/skipping.txt b/doc/skipping.txt
index fb7b254d7..c4994942d 100644
--- a/doc/skipping.txt
+++ b/doc/skipping.txt
@@ -2,18 +2,19 @@
 skip and xfail mechanisms
 =====================================================================
 
-You can mark test functions for a conditional *skip* or as *xfail*,
-expected-to-fail. Skipping a test avoids running a test.
-Whereas an xfail-marked test usually is run but if it fails it is
-not reported in detail and counted separately. The latter allows
-to keep track of real implementation problems whereas test skips
-are normally tied to a condition, such as a platform or dependency
-requirement without which considering or running the test does
-not make sense. If a test fails under all conditions then it's
-probably best to mark your test as 'xfail'.
+You can skip or "xfail" test functions, either by marking functions
+through a decorator or by calling the ``pytest.skip|xfail`` helpers.
+A *skip* means that you expect your test to pass unless a certain configuration or condition (e.g. wrong Python interpreter, missing dependency) prevents it from running. An *xfail* means that you expect your test to fail because there is an
+implementation problem. Counting and listing *xfailing* tests separately
+helps to maintain a list of implementation problems and you can add
+info such as a bug number or a URL to give human-readable problem context.
 
-By running ``py.test -rxs`` you will see extra reporting
-information on skips and xfail-run tests at the end of a test run.
+Usually detailed information about skipped/xfailed tests is not shown
+to avoid cluttering the output. You can use the ``-r`` option to
+see details corresponding to the "short" letters shown in the
+test progress::
+
+    py.test -rxs  # show extra info on skips and xfail tests
 
 .. _skipif:
 
@@ -47,7 +48,7 @@ at module level like this::
     ...
 
-skip groups of test functions
+skip test functions of a class
 --------------------------------------
 
 As with all function :ref:`marking` you can do it at
@@ -58,8 +59,7 @@ for skipping all methods of a test class based on platform::
         pytestmark = pytest.mark.skipif("sys.platform == 'win32'")
 
         def test_function(self):
-            # will not be setup or run under 'win32' platform
-            #
+            "will not be setup or run under 'win32' platform"
 
 The ``pytestmark`` decorator will be applied to each test function.
If your code targets python2.6 or above you can equivalently use @@ -69,8 +69,7 @@ the skipif decorator on classes:: class TestPosixCalls: def test_function(self): - # will not be setup or run under 'win32' platform - # + "will not be setup or run under 'win32' platform" It is fine in general to apply multiple "skipif" decorators on a single function - this means that if any of the conditions @@ -94,6 +93,13 @@ This test will be run but no traceback will be reported when it fails. Instead terminal reporting will list it in the "expected to fail" or "unexpectedly passing" sections. +By specifying on the commandline:: + + pytest --runxfail + +you can force the running and reporting of an ``xfail`` marked test +as if it weren't marked at all. + Same as with skipif_ you can also selectively expect a failure depending on platform:: @@ -101,19 +107,32 @@ depending on platform:: def test_function(): ... -To not run a test and still regard it as "xfailed":: +You can also avoid running an "xfail" test at all or +specify a reason such as a bug ID or similar. Here is +a simple test file with usages: - @pytest.mark.xfail(..., run=False) +.. literalinclude:: example/xfail_demo.py -To specify an explicit reason to be shown with xfailure detail:: +Running it with the report-on-xfail option gives this output:: - @pytest.mark.xfail(..., reason="my reason") - -By specifying on the commandline:: - - pytest --runxfail - -you can force the running and reporting of a runnable ``xfail`` marked test. + example $ py.test -rx xfail_demo.py + =========================== test session starts ============================ + platform linux2 -- Python 2.6.5 -- pytest-2.0.0.dev31 + test path 1: xfail_demo.py + + xfail_demo.py xxxxx + ========================= short test summary info ========================== + XFAIL xfail_demo.py::test_hello + XFAIL xfail_demo.py::test_hello2 + reason: [NOTRUN] + XFAIL xfail_demo.py::test_hello3 + condition: hasattr(os, 'sep') + XFAIL xfail_demo.py::test_hello4 + bug 110 + XFAIL xfail_demo.py::test_hello5 + reason: reason + + ======================== 5 xfailed in 0.04 seconds ========================= imperative xfail from within a test or setup function ------------------------------------------------------ diff --git a/example/funcarg/conftest.py b/example/funcarg/conftest.py deleted file mode 100644 index 79336142a..000000000 --- a/example/funcarg/conftest.py +++ /dev/null @@ -1,3 +0,0 @@ -import py - -collect_ignore = 'mysetup', 'mysetup2', 'test_simpleprovider.py', 'parametrize' diff --git a/example/funcarg/costlysetup/sub1/test_quick.py b/example/funcarg/costlysetup/sub1/test_quick.py deleted file mode 100644 index 753b0e9c4..000000000 --- a/example/funcarg/costlysetup/sub1/test_quick.py +++ /dev/null @@ -1,3 +0,0 @@ - -def test_quick(): - pass diff --git a/example/funcarg/mysetup/__init__.py b/example/funcarg/mysetup/__init__.py deleted file mode 100644 index effcb85e6..000000000 --- a/example/funcarg/mysetup/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# XXX this file should not need to be here but is here for proper sys.path mangling diff --git a/example/funcarg/mysetup/conftest.py b/example/funcarg/mysetup/conftest.py deleted file mode 100644 index 70c19c767..000000000 --- a/example/funcarg/mysetup/conftest.py +++ /dev/null @@ -1,9 +0,0 @@ - -from mysetup.myapp import MyApp - -def pytest_funcarg__mysetup(request): - return MySetup() - -class MySetup: - def myapp(self): - return MyApp() diff --git a/example/funcarg/mysetup/myapp.py b/example/funcarg/mysetup/myapp.py deleted file mode 
100644 index 2ecd83d11..000000000 --- a/example/funcarg/mysetup/myapp.py +++ /dev/null @@ -1,5 +0,0 @@ - -class MyApp: - def question(self): - return 6 * 9 - diff --git a/example/funcarg/mysetup/test_sample.py b/example/funcarg/mysetup/test_sample.py deleted file mode 100644 index 56e9b52b3..000000000 --- a/example/funcarg/mysetup/test_sample.py +++ /dev/null @@ -1,5 +0,0 @@ - -def test_answer(mysetup): - app = mysetup.myapp() - answer = app.question() - assert answer == 42 diff --git a/example/funcarg/mysetup2/__init__.py b/example/funcarg/mysetup2/__init__.py deleted file mode 100644 index effcb85e6..000000000 --- a/example/funcarg/mysetup2/__init__.py +++ /dev/null @@ -1 +0,0 @@ -# XXX this file should not need to be here but is here for proper sys.path mangling diff --git a/example/funcarg/mysetup2/conftest.py b/example/funcarg/mysetup2/conftest.py deleted file mode 100644 index f598a179f..000000000 --- a/example/funcarg/mysetup2/conftest.py +++ /dev/null @@ -1,24 +0,0 @@ -import py -from mysetup2.myapp import MyApp - -def pytest_funcarg__mysetup(request): - return MySetup(request) - -def pytest_addoption(parser): - parser.addoption("--ssh", action="store", default=None, - help="specify ssh host to run tests with") - - -class MySetup: - def __init__(self, request): - self.config = request.config - - def myapp(self): - return MyApp() - - def getsshconnection(self): - host = self.config.option.ssh - if host is None: - py.test.skip("specify ssh host with --ssh") - return execnet.SshGateway(host) - diff --git a/example/funcarg/mysetup2/myapp.py b/example/funcarg/mysetup2/myapp.py deleted file mode 100644 index 2ecd83d11..000000000 --- a/example/funcarg/mysetup2/myapp.py +++ /dev/null @@ -1,5 +0,0 @@ - -class MyApp: - def question(self): - return 6 * 9 - diff --git a/example/funcarg/mysetup2/test_sample.py b/example/funcarg/mysetup2/test_sample.py deleted file mode 100644 index 58fd7874d..000000000 --- a/example/funcarg/mysetup2/test_sample.py +++ /dev/null @@ -1,6 +0,0 @@ - -def test_answer(mysetup): - app = mysetup.myapp() - answer = app.question() - assert answer == 42 - diff --git a/example/funcarg/mysetup2/test_ssh.py b/example/funcarg/mysetup2/test_ssh.py deleted file mode 100644 index 7b888779f..000000000 --- a/example/funcarg/mysetup2/test_ssh.py +++ /dev/null @@ -1,5 +0,0 @@ - -class TestClass: - def test_function(self, mysetup): - conn = mysetup.getsshconnection() - # work with conn diff --git a/example/funcarg/parametrize/test_parametrize.py b/example/funcarg/parametrize/test_parametrize.py deleted file mode 100644 index 1d4bf1faf..000000000 --- a/example/funcarg/parametrize/test_parametrize.py +++ /dev/null @@ -1,17 +0,0 @@ -import py - -def pytest_generate_tests(metafunc): - for funcargs in metafunc.cls.params[metafunc.function.__name__]: - metafunc.addcall(funcargs=funcargs) - -class TestClass: - params = { - 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)], - 'test_zerodivision': [dict(a=1, b=0), dict(a=3, b=2)], - } - - def test_equals(self, a, b): - assert a == b - - def test_zerodivision(self, a, b): - py.test.raises(ZeroDivisionError, "a/b") diff --git a/example/funcarg/parametrize/test_parametrize2.py b/example/funcarg/parametrize/test_parametrize2.py deleted file mode 100644 index a4371452e..000000000 --- a/example/funcarg/parametrize/test_parametrize2.py +++ /dev/null @@ -1,25 +0,0 @@ -import py - -# test support code -def params(funcarglist): - def wrapper(function): - function.funcarglist = funcarglist - return function - return wrapper - -def 
pytest_generate_tests(metafunc): - for funcargs in getattr(metafunc.function, 'funcarglist', ()): - metafunc.addcall(funcargs=funcargs) - - -# actual test code - -class TestClass: - @params([dict(a=1, b=2), dict(a=3, b=3), dict(a=5, b=4)], ) - def test_equals(self, a, b): - assert a == b - - @params([dict(a=1, b=0), dict(a=3, b=2)]) - def test_zerodivision(self, a, b): - py.test.raises(ZeroDivisionError, "a/b") - diff --git a/example/funcarg/parametrize/test_parametrize3.py b/example/funcarg/parametrize/test_parametrize3.py deleted file mode 100644 index 570b22ac2..000000000 --- a/example/funcarg/parametrize/test_parametrize3.py +++ /dev/null @@ -1,15 +0,0 @@ - -# following hook can be put unchanged into a local or global plugin -def pytest_generate_tests(metafunc): - for scenario in metafunc.cls.scenarios: - metafunc.addcall(id=scenario[0], funcargs=scenario[1]) - - -scenario1 = ('basic', {'attribute': 'value'}) -scenario2 = ('advanced', {'attribute': 'value2'}) - -class TestSampleWithScenarios: - scenarios = [scenario1, scenario2] - - def test_demo(self, attribute): - assert isinstance(attribute, str) diff --git a/example/funcarg/test_simpleprovider.py b/example/funcarg/test_simpleprovider.py deleted file mode 100644 index beb13cabf..000000000 --- a/example/funcarg/test_simpleprovider.py +++ /dev/null @@ -1,7 +0,0 @@ -# ./test_simpleprovider.py -def pytest_funcarg__myfuncarg(request): - return 42 - -def test_function(myfuncarg): - assert myfuncarg == 17 - diff --git a/example/funcarg/urloption/conftest.py b/example/funcarg/urloption/conftest.py deleted file mode 100644 index c764dc6ac..000000000 --- a/example/funcarg/urloption/conftest.py +++ /dev/null @@ -1,15 +0,0 @@ -# conftest.py -import py - - -def pytest_addoption(parser): - grp = parser.getgroup("testserver options") - grp.addoption("--url", action="store", default=None, - help="url for testserver") - -def pytest_funcarg__url(request): - url = request.config.getvalue("url") - if url is None: - py.test.skip("need --url") - return url - diff --git a/example/genhtml.py b/example/genhtml.py deleted file mode 100644 index b5c8f525b..000000000 --- a/example/genhtml.py +++ /dev/null @@ -1,13 +0,0 @@ -from py.xml import html - -paras = "First Para", "Second para" - -doc = html.html( - html.head( - html.meta(name="Content-Type", value="text/html; charset=latin1")), - html.body( - [html.p(p) for p in paras])) - -print unicode(doc).encode('latin1') - - diff --git a/example/genhtmlcss.py b/example/genhtmlcss.py deleted file mode 100644 index 3e6d0af54..000000000 --- a/example/genhtmlcss.py +++ /dev/null @@ -1,23 +0,0 @@ -import py -html = py.xml.html - -class my(html): - "a custom style" - class body(html.body): - style = html.Style(font_size = "120%") - - class h2(html.h2): - style = html.Style(background = "grey") - - class p(html.p): - style = html.Style(font_weight="bold") - -doc = my.html( - my.head(), - my.body( - my.h2("hello world"), - my.p("bold as bold can") - ) -) - -print doc.unicode(indent=2) diff --git a/example/genxml.py b/example/genxml.py deleted file mode 100644 index 5f754e889..000000000 --- a/example/genxml.py +++ /dev/null @@ -1,17 +0,0 @@ - -import py -class ns(py.xml.Namespace): - pass - -doc = ns.books( - ns.book( - ns.author("May Day"), - ns.title("python for java programmers"),), - ns.book( - ns.author("why", class_="somecssclass"), - ns.title("Java for Python programmers"),), - publisher="N.N", - ) -print doc.unicode(indent=2).encode('utf8') - - diff --git a/example/xfail_demo.py b/example/xfail_demo.py deleted file 
mode 100644 index ec98cdb89..000000000 --- a/example/xfail_demo.py +++ /dev/null @@ -1,16 +0,0 @@ -import py - -@py.test.mark.xfail -def test_hello(): - assert 0 - -@py.test.mark.xfail(run=False) -def test_hello2(): - assert 0 - -@py.test.mark.xfail("hasattr(os, 'sep')") -def test_hello3(): - assert 0 - -def test_hello5(): - py.test.xfail("reason")
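Editor's note: the new ``doc/example/xfail_demo.py`` shows the ``xfail`` marker plus one imperative ``pytest.xfail()`` call, and the updated ``doc/skipping.txt`` also mentions calling the ``pytest.skip|xfail`` helpers directly. For readers of this patch, here is a small sketch of that imperative style; the ``docutils`` dependency and its version threshold are assumptions chosen purely for illustration::

    # content of test_imperative.py  (illustrative sketch, not part of this patch)
    import pytest

    def test_render():
        # skip the whole test if the optional dependency is not installed
        docutils = pytest.importorskip("docutils")
        if docutils.__version__ < "0.7":
            # expected failure driven by runtime information
            pytest.xfail("known problem with docutils < 0.7 (assumed example)")
        assert docutils.__version__  # real checks would go here

Calling ``pytest.skip``, ``pytest.xfail`` or ``pytest.importorskip`` inside the test body lets the decision depend on information that is only available at runtime, whereas the decorator forms shown in ``xfail_demo.py`` evaluate their condition before the test runs.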