From 43d27ec7ed753bb9b6c447c94decbdd5fdcfcf4f Mon Sep 17 00:00:00 2001 From: Ronny Pfannschmidt Date: Sat, 6 Jun 2015 23:30:49 +0200 Subject: [PATCH] use regendoc normalization and regenerate docs --HG-- branch : regendoc-upgrade --- Makefile | 12 +- doc/en/assert.txt | 30 +-- doc/en/builtin.txt | 2 +- doc/en/capture.txt | 16 +- doc/en/doctest.txt | 8 +- doc/en/example/markers.txt | 160 ++++++++-------- doc/en/example/nonpython.txt | 32 ++-- doc/en/example/parametrize.txt | 75 ++++---- doc/en/example/pythoncollection.txt | 28 +-- doc/en/example/reportingdemo.txt | 226 +++++++++++----------- doc/en/example/simple.txt | 283 ++++++++++++++-------------- doc/en/example/special.txt | 2 +- doc/en/fixture.txt | 135 ++++++------- doc/en/getting-started.txt | 36 ++-- doc/en/parametrize.txt | 38 ++-- doc/en/skipping.txt | 10 +- doc/en/tmpdir.txt | 14 +- doc/en/unittest.txt | 20 +- doc/en/yieldfixture.txt | 2 +- requirements-docs.txt | 1 + 20 files changed, 572 insertions(+), 558 deletions(-) diff --git a/Makefile b/Makefile index ddf287418..b92a88977 100644 --- a/Makefile +++ b/Makefile @@ -1,6 +1,11 @@ # Set of targets useful for development/release process PYTHON = python2.7 PATH := $(PWD)/.env/bin:$(PATH) +REGENDOC_ARGS := \ + --normalize "/={8,} (.*) ={8,}/======= \1 ========/" \ + --normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \ + --normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \ + --normalize "@/tmp/pytest-\d+/@/tmp/pytest-NaN/@" # prepare virtual python environment .env: @@ -16,10 +21,11 @@ clean: # generate documentation docs: develop - find doc/en -name '*.txt' -not -path 'doc/en/_build/*' | xargs .env/bin/regendoc + find doc/en -name '*.txt' -not -path 'doc/en/_build/*' | xargs .env/bin/regendoc ${REGENDOC_ARGS} cd doc/en; make html # upload documentation upload-docs: develop - find doc/en -name '*.txt' -not -path 'doc/en/_build/*' | xargs .env/bin/regendoc --update - cd doc/en; make install + find doc/en -name '*.txt' -not -path 'doc/en/_build/*' | xargs .env/bin/regendoc ${REGENDOC_ARGS} --update + #cd doc/en; make install + diff --git a/doc/en/assert.txt b/doc/en/assert.txt index 0c07de0c1..aed43803f 100644 --- a/doc/en/assert.txt +++ b/doc/en/assert.txt @@ -25,15 +25,15 @@ to assert that your function returns a certain value. If this assertion fails you will see the return value of the function call:: $ py.test test_assert1.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-87, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_assert1.py F - ================================= FAILURES ================================= - ______________________________ test_function _______________________________ + ======= FAILURES ======== + _______ test_function ________ def test_function(): > assert f() == 4 @@ -41,7 +41,7 @@ you will see the return value of the function call:: E + where 3 = f() test_assert1.py:5: AssertionError - ========================= 1 failed in 0.01 seconds ========================= + ======= 1 failed in 0.12 seconds ======== ``pytest`` has support for showing the values of the most common subexpressions including calls, attributes, comparisons, and binary and unary @@ -135,15 +135,15 @@ when it encounters comparisons. 
For example:: if you run this module:: $ py.test test_assert2.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-87, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_assert2.py F - ================================= FAILURES ================================= - ___________________________ test_set_comparison ____________________________ + ======= FAILURES ======== + _______ test_set_comparison ________ def test_set_comparison(): set1 = set("1308") @@ -157,7 +157,7 @@ if you run this module:: E Use -v to get the full diff test_assert2.py:5: AssertionError - ========================= 1 failed in 0.01 seconds ========================= + ======= 1 failed in 0.12 seconds ======== Special comparisons are done for a number of cases: @@ -202,8 +202,8 @@ the conftest file:: $ py.test -q test_foocompare.py F - ================================= FAILURES ================================= - _______________________________ test_compare _______________________________ + ======= FAILURES ======== + _______ test_compare ________ def test_compare(): f1 = Foo(1) @@ -213,7 +213,7 @@ the conftest file:: E vals: 1 != 2 test_foocompare.py:8: AssertionError - 1 failed in 0.01 seconds + 1 failed in 0.12 seconds .. _assert-details: .. _`assert introspection`: diff --git a/doc/en/builtin.txt b/doc/en/builtin.txt index d3cfc8b0c..713e625db 100644 --- a/doc/en/builtin.txt +++ b/doc/en/builtin.txt @@ -115,4 +115,4 @@ You can ask for available builtin or project-custom directory. The returned object is a `py.path.local`_ path object. 
- in 0.00 seconds + in 0.12 seconds diff --git a/doc/en/capture.txt b/doc/en/capture.txt index a8c6e6c7d..5fbc8e50a 100644 --- a/doc/en/capture.txt +++ b/doc/en/capture.txt @@ -63,24 +63,24 @@ and running this module will show you precisely the output of the failing function and hide the other one:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-90, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py .F - ================================= FAILURES ================================= - ________________________________ test_func2 ________________________________ + ======= FAILURES ======== + _______ test_func2 ________ def test_func2(): > assert False E assert False test_module.py:9: AssertionError - -------------------------- Captured stdout setup --------------------------- - setting up - ==================== 1 failed, 1 passed in 0.01 seconds ==================== + ---------------------------- Captured stdout setup ----------------------------- + setting up + ======= 1 failed, 1 passed in 0.12 seconds ======== Accessing captured output from a test function --------------------------------------------------- diff --git a/doc/en/doctest.txt b/doc/en/doctest.txt index 1dbc8c3d4..73537a7d3 100644 --- a/doc/en/doctest.txt +++ b/doc/en/doctest.txt @@ -43,14 +43,14 @@ and another like this:: then you can just invoke ``py.test`` without command line options:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-96, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 1 items mymodule.py . - ========================= 1 passed in 0.06 seconds ========================= + ======= 1 passed in 0.12 seconds ======== It is possible to use fixtures using the ``getfixture`` helper:: diff --git a/doc/en/example/markers.txt b/doc/en/example/markers.txt index 5e5951e58..f001965ae 100644 --- a/doc/en/example/markers.txt +++ b/doc/en/example/markers.txt @@ -30,30 +30,30 @@ You can "mark" a test function with custom metadata like this:: You can then restrict a test run to only run tests marked with ``webtest``:: $ py.test -v -m webtest - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items test_server.py::test_send_http PASSED - =================== 3 tests deselected by "-m 'webtest'" =================== - ================== 1 passed, 3 deselected in 0.01 seconds ================== + ======= 3 tests deselected by "-m 'webtest'" ======== + ======= 1 passed, 3 deselected in 0.12 seconds ======== Or the inverse, running all tests except the webtest ones:: $ py.test -v -m "not webtest" - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_something_quick PASSED test_server.py::test_another PASSED test_server.py::TestClass::test_method PASSED - ================= 1 tests deselected by "-m 'not webtest'" ================= - ================== 3 passed, 1 deselected in 0.01 seconds ================== + ======= 1 tests deselected by "-m 'not webtest'" ======== + ======= 3 passed, 1 deselected in 0.12 seconds ======== Selecting tests based on their node ID -------------------------------------- @@ -63,39 +63,39 @@ arguments to select only specified tests. This makes it easy to select tests based on their module, class, method, or function name:: $ py.test -v test_server.py::TestClass::test_method - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 5 items test_server.py::TestClass::test_method PASSED - ========================= 1 passed in 0.01 seconds ========================= + ======= 1 passed in 0.12 seconds ======== You can also select on the class:: $ py.test -v test_server.py::TestClass - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::TestClass::test_method PASSED - ========================= 1 passed in 0.01 seconds ========================= + ======= 1 passed in 0.12 seconds ======== Or select multiple nodes:: $ py.test -v test_server.py::TestClass test_server.py::test_send_http - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 8 items test_server.py::TestClass::test_method PASSED test_server.py::test_send_http PASSED - ========================= 2 passed in 0.01 seconds ========================= + ======= 2 passed in 0.12 seconds ======== .. _node-id: @@ -124,44 +124,44 @@ exact match on markers that ``-m`` provides. This makes it easy to select tests based on their names:: $ py.test -v -k http # running with the above defined example module - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_send_http PASSED - ====================== 3 tests deselected by '-khttp' ====================== - ================== 1 passed, 3 deselected in 0.01 seconds ================== + ======= 3 tests deselected by '-khttp' ======== + ======= 1 passed, 3 deselected in 0.12 seconds ======== And you can also run all tests except the ones that match the keyword:: $ py.test -k "not send_http" -v - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_something_quick PASSED test_server.py::test_another PASSED test_server.py::TestClass::test_method PASSED - ================= 1 tests deselected by '-knot send_http' ================== - ================== 3 passed, 1 deselected in 0.01 seconds ================== + ======= 1 tests deselected by '-knot send_http' ======== + ======= 3 passed, 1 deselected in 0.12 seconds ======== Or to select "http" and "quick" tests:: $ py.test -k "http or quick" -v - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_send_http PASSED test_server.py::test_something_quick PASSED - ================= 2 tests deselected by '-khttp or quick' ================== - ================== 2 passed, 2 deselected in 0.01 seconds ================== + ======= 2 tests deselected by '-khttp or quick' ======== + ======= 2 passed, 2 deselected in 0.12 seconds ======== .. note:: @@ -201,9 +201,9 @@ You can ask which markers exist for your test suite - the list includes our just @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - @pytest.hookimpl(tryfirst=True): mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. 
+ @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - @pytest.hookimpl(trylast=True): mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. For an example on how to add and work with markers from a plugin, see @@ -341,26 +341,26 @@ and an example invocations specifying a different environment than what the test needs:: $ py.test -E stage2 - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_someenv.py s - ======================== 1 skipped in 0.01 seconds ========================= + ======= 1 skipped in 0.12 seconds ======== and here is one that specifies exactly the environment needed:: $ py.test -E stage1 - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_someenv.py . - ========================= 1 passed in 0.01 seconds ========================= + ======= 1 passed in 0.12 seconds ======== The ``--markers`` option always gives you a list of available markers:: @@ -375,9 +375,9 @@ The ``--markers`` option always gives you a list of available markers:: @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - @pytest.hookimpl(tryfirst=True): mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - @pytest.hookimpl(trylast=True): mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. Reading markers which were set from multiple places @@ -420,7 +420,7 @@ Let's run this without capturing output and see what we get:: glob args=('class',) kwargs={'x': 2} glob args=('module',) kwargs={'x': 1} . - 1 passed in 0.01 seconds + 1 passed in 0.12 seconds marking platform specific tests with pytest -------------------------------------------------------------- @@ -472,29 +472,29 @@ Let's do a little test file to show how this looks like:: then you will see two test skipped and two executed tests as expected:: $ py.test -rs # this option reports skip reasons - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_plat.py sss. 
- ========================= short test summary info ========================== - SKIP [3] /tmp/doc-exec-157/conftest.py:12: cannot run on platform linux + test_plat.py s.s. + ======= short test summary info ======== + SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux2 - =================== 1 passed, 3 skipped in 0.01 seconds ==================== + ======= 2 passed, 2 skipped in 0.12 seconds ======== Note that if you specify a platform via the marker-command line option like this:: $ py.test -m linux2 - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_plat.py s + test_plat.py . - =================== 3 tests deselected by "-m 'linux2'" ==================== - ================= 1 skipped, 3 deselected in 0.01 seconds ================== + ======= 3 tests deselected by "-m 'linux2'" ======== + ======= 1 passed, 3 deselected in 0.12 seconds ======== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -538,47 +538,47 @@ We want to dynamically define two markers and can do it in a We can now use the ``-m option`` to select one set:: $ py.test -m interface --tb=short - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_module.py FF - ================================= FAILURES ================================= - __________________________ test_interface_simple ___________________________ + ======= FAILURES ======== + _______ test_interface_simple ________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - __________________________ test_interface_complex __________________________ + _______ test_interface_complex ________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - ================== 2 tests deselected by "-m 'interface'" ================== - ================== 2 failed, 2 deselected in 0.02 seconds ================== + ======= 2 tests deselected by "-m 'interface'" ======== + ======= 2 failed, 2 deselected in 0.12 seconds ======== or to select both "event" and "interface" tests:: $ py.test -m "interface or event" --tb=short - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-157, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_module.py FFF - ================================= FAILURES ================================= - __________________________ test_interface_simple ___________________________ + ======= FAILURES ======== + _______ test_interface_simple ________ test_module.py:3: in test_interface_simple assert 0 E assert 0 - __________________________ test_interface_complex __________________________ + _______ test_interface_complex ________ test_module.py:6: in test_interface_complex assert 0 E assert 0 - ____________________________ 
test_event_simple _____________________________ + _______ test_event_simple ________ test_module.py:9: in test_event_simple assert 0 E assert 0 - ============= 1 tests deselected by "-m 'interface or event'" ============== - ================== 3 failed, 1 deselected in 0.02 seconds ================== + ======= 1 tests deselected by "-m 'interface or event'" ======== + ======= 3 failed, 1 deselected in 0.12 seconds ======== diff --git a/doc/en/example/nonpython.txt b/doc/en/example/nonpython.txt index dd344dfbf..49bc2c30b 100644 --- a/doc/en/example/nonpython.txt +++ b/doc/en/example/nonpython.txt @@ -26,19 +26,19 @@ and if you installed `PyYAML`_ or a compatible YAML-parser you can now execute the test specification:: nonpython $ py.test test_simple.yml - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 2 items test_simple.yml .F - ================================= FAILURES ================================= - ______________________________ usecase: hello ______________________________ + ======= FAILURES ======== + _______ usecase: hello ________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ==================== 1 failed, 1 passed in 0.19 seconds ==================== + ======= 1 failed, 1 passed in 0.12 seconds ======== You get one dot for the passing ``sub1: sub1`` check and one failure. Obviously in the above ``conftest.py`` you'll want to implement a more @@ -56,31 +56,31 @@ your own domain specific testing language this way. consulted when reporting in ``verbose`` mode:: nonpython $ py.test -v - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $PWD/doc/en, inifile: pytest.ini collecting ... collected 2 items test_simple.yml::ok PASSED test_simple.yml::hello FAILED - ================================= FAILURES ================================= - ______________________________ usecase: hello ______________________________ + ======= FAILURES ======== + _______ usecase: hello ________ usecase execution failed spec failed: 'some': 'other' no further details known at this point. 
- ==================== 1 failed, 1 passed in 0.05 seconds ==================== + ======= 1 failed, 1 passed in 0.12 seconds ======== While developing your custom test collection and execution it's also interesting to just look at the collection tree:: nonpython $ py.test --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 2 items - ============================= in 0.04 seconds ============================= + ======= in 0.12 seconds ======== diff --git a/doc/en/example/parametrize.txt b/doc/en/example/parametrize.txt index 623ef2192..d897cf762 100644 --- a/doc/en/example/parametrize.txt +++ b/doc/en/example/parametrize.txt @@ -46,15 +46,15 @@ This means that we only run 2 tests if we do not pass ``--all``:: $ py.test -q test_compute.py .. - 2 passed in 0.01 seconds + 2 passed in 0.12 seconds We run only two computations, so we see two dots. let's run the full monty:: $ py.test -q --all ....F - ================================= FAILURES ================================= - _____________________________ test_compute[4] ______________________________ + ======= FAILURES ======== + _______ test_compute[4] ________ param1 = 4 @@ -63,7 +63,7 @@ let's run the full monty:: E assert 4 < 4 test_compute.py:3: AssertionError - 1 failed, 4 passed in 0.02 seconds + 1 failed, 4 passed in 0.12 seconds As expected when running the full range of ``param1`` values we'll get an error on the last one. @@ -126,11 +126,11 @@ objects, they are still using the default pytest representation:: $ py.test test_time.py --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: - ============================= in 0.00 seconds ============================= + ======= in 0.12 seconds ======== ERROR: file not found: test_time.py A quick port of "testscenarios" @@ -170,22 +170,22 @@ only have to work a bit to construct the correct arguments for pytest's this is a fully self-contained example which you can run with:: $ py.test test_scenarios.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_scenarios.py .... 
- ========================= 4 passed in 0.02 seconds ========================= + ======= 4 passed in 0.12 seconds ======== If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: $ py.test --collect-only test_scenarios.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -195,7 +195,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== Note that we told ``metafunc.parametrize()`` that your scenario values should be considered class-scoped. With pytest-2.3 this leads to a @@ -248,24 +248,24 @@ creates a database object for the actual test invocations:: Let's first see how it looks like at collection time:: $ py.test test_backends.py --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== And then when we run the test:: $ py.test -q test_backends.py .F - ================================= FAILURES ================================= - _________________________ test_db_initialized[d2] __________________________ + ======= FAILURES ======== + _______ test_db_initialized[d2] ________ - db = + db = def test_db_initialized(db): # a dummy test @@ -274,7 +274,7 @@ And then when we run the test:: E Failed: deliberately failing for demo purposes test_backends.py:6: Failed - 1 failed, 1 passed in 0.01 seconds + 1 failed, 1 passed in 0.12 seconds The first invocation with ``db == "DB1"`` passed while the second with ``db == "DB2"`` failed. Our ``db`` fixture function has instantiated each of the DB values during the setup phase while the ``pytest_generate_tests`` generated two according calls to the ``test_db_initialized`` during the collection phase. @@ -318,17 +318,17 @@ argument sets to use for each test function. Let's run it:: $ py.test -q F.. - ================================= FAILURES ================================= - ________________________ TestClass.test_equals[2-1] ________________________ + ======= FAILURES ======== + _______ TestClass.test_equals[1-2] ________ - self = , a = 1, b = 2 + self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b E assert 1 == 2 test_parametrize.py:18: AssertionError - 1 failed, 2 passed in 0.02 seconds + 1 failed, 2 passed in 0.12 seconds Indirect parametrization with multiple fixtures -------------------------------------------------------------- @@ -347,8 +347,11 @@ is to be run with different sets of arguments for its three arguments: Running it results in some skips if we don't have all the python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize):: . $ py.test -rs -q multipython.py - ........................... 
- 27 passed in 4.14 seconds + ssssssssssss...ssssssssssss + ======= short test summary info ======== + SKIP [12] $PWD/doc/en/example/multipython.py:22: 'python3.3' not found + SKIP [12] $PWD/doc/en/example/multipython.py:22: 'python2.6' not found + 3 passed, 24 skipped in 0.12 seconds Indirect parametrization of optional implementations/imports -------------------------------------------------------------------- @@ -394,16 +397,16 @@ And finally a little test module:: If you run this with reporting for skips enabled:: $ py.test -rs test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-159, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py .s - ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-159/conftest.py:10: could not import 'opt2' + ======= short test summary info ======== + SKIP [1] $REGENDOC_TMPDIR/conftest.py:10: could not import 'opt2' - =================== 1 passed, 1 skipped in 0.01 seconds ==================== + ======= 1 passed, 1 skipped in 0.12 seconds ======== You'll see that we don't have a ``opt2`` module and thus the second test run of our ``test_func1`` was skipped. A few notes: diff --git a/doc/en/example/pythoncollection.txt b/doc/en/example/pythoncollection.txt index b8abdb262..c35d7e992 100644 --- a/doc/en/example/pythoncollection.txt +++ b/doc/en/example/pythoncollection.txt @@ -42,9 +42,9 @@ that match ``*_check``. For example, if we have:: then the test collection looks like this:: $ py.test --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-160, inifile: setup.cfg + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: setup.cfg collected 2 items @@ -52,7 +52,7 @@ then the test collection looks like this:: - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== .. note:: @@ -88,9 +88,9 @@ Finding out what is collected You can always peek at the collection tree without running tests like this:: . 
$ py.test --collect-only pythoncollection.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 3 items @@ -99,7 +99,7 @@ You can always peek at the collection tree without running tests like this:: - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== customizing test collection to find all .py files --------------------------------------------------------- @@ -142,12 +142,14 @@ then a pytest run on python2 will find the one test when run with a python2 interpreters and will leave out the setup.py file:: $ py.test --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-160, inifile: pytest.ini - collected 0 items + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini + collected 1 items + + - ============================= in 0.01 seconds ============================= + ======= in 0.12 seconds ======== If you run with a Python3 interpreter the moduled added through the conftest.py file will not be considered for test collection. diff --git a/doc/en/example/reportingdemo.txt b/doc/en/example/reportingdemo.txt index 6d62c4211..31c8738a5 100644 --- a/doc/en/example/reportingdemo.txt +++ b/doc/en/example/reportingdemo.txt @@ -12,15 +12,15 @@ get on the terminal - we are working on that): .. 
code-block:: python assertion $ py.test failure_demo.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 42 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF - ================================= FAILURES ================================= - ____________________________ test_generative[0] ____________________________ + ======= FAILURES ======== + _______ test_generative[0] ________ param1 = 3, param2 = 6 @@ -29,9 +29,9 @@ get on the terminal - we are working on that): E assert (3 * 2) < 6 failure_demo.py:15: AssertionError - _________________________ TestFailing.test_simple __________________________ + _______ TestFailing.test_simple ________ - self = + self = def test_simple(self): def f(): @@ -41,13 +41,13 @@ get on the terminal - we are working on that): > assert f() == g() E assert 42 == 43 - E + where 42 = .f at 0x7f65f2315510>() - E + and 43 = .g at 0x7f65f2323510>() + E + where 42 = () + E + and 43 = () failure_demo.py:28: AssertionError - ____________________ TestFailing.test_simple_multiline _____________________ + _______ TestFailing.test_simple_multiline ________ - self = + self = def test_simple_multiline(self): otherfunc_multi( @@ -55,7 +55,7 @@ get on the terminal - we are working on that): > 6*9) failure_demo.py:33: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 42, b = 54 @@ -65,21 +65,21 @@ get on the terminal - we are working on that): E assert 42 == 54 failure_demo.py:11: AssertionError - ___________________________ TestFailing.test_not ___________________________ + _______ TestFailing.test_not ________ - self = + self = def test_not(self): def f(): return 42 > assert not f() E assert not 42 - E + where 42 = .f at 0x7f65f2323598>() + E + where 42 = () failure_demo.py:38: AssertionError - _________________ TestSpecialisedExplanations.test_eq_text _________________ + _______ TestSpecialisedExplanations.test_eq_text ________ - self = + self = def test_eq_text(self): > assert 'spam' == 'eggs' @@ -88,9 +88,9 @@ get on the terminal - we are working on that): E + eggs failure_demo.py:42: AssertionError - _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ + _______ TestSpecialisedExplanations.test_eq_similar_text ________ - self = + self = def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' @@ -101,9 +101,9 @@ get on the terminal - we are working on that): E ? ^ failure_demo.py:45: AssertionError - ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ + _______ TestSpecialisedExplanations.test_eq_multiline_text ________ - self = + self = def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -114,9 +114,9 @@ get on the terminal - we are working on that): E bar failure_demo.py:48: AssertionError - ______________ TestSpecialisedExplanations.test_eq_long_text _______________ + _______ TestSpecialisedExplanations.test_eq_long_text ________ - self = + self = def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 @@ -131,9 +131,9 @@ get on the terminal - we are working on that): E ? 
^ failure_demo.py:53: AssertionError - _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ + _______ TestSpecialisedExplanations.test_eq_long_text_multiline ________ - self = + self = def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 @@ -155,9 +155,9 @@ get on the terminal - we are working on that): E 2 failure_demo.py:58: AssertionError - _________________ TestSpecialisedExplanations.test_eq_list _________________ + _______ TestSpecialisedExplanations.test_eq_list ________ - self = + self = def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] @@ -166,9 +166,9 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:61: AssertionError - ______________ TestSpecialisedExplanations.test_eq_list_long _______________ + _______ TestSpecialisedExplanations.test_eq_list_long ________ - self = + self = def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 @@ -179,9 +179,9 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:66: AssertionError - _________________ TestSpecialisedExplanations.test_eq_dict _________________ + _______ TestSpecialisedExplanations.test_eq_dict ________ - self = + self = def test_eq_dict(self): > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} @@ -196,9 +196,9 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:69: AssertionError - _________________ TestSpecialisedExplanations.test_eq_set __________________ + _______ TestSpecialisedExplanations.test_eq_set ________ - self = + self = def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) @@ -213,9 +213,9 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:72: AssertionError - _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ + _______ TestSpecialisedExplanations.test_eq_longer_list ________ - self = + self = def test_eq_longer_list(self): > assert [1,2] == [1,2,3] @@ -224,18 +224,18 @@ get on the terminal - we are working on that): E Use -v to get the full diff failure_demo.py:75: AssertionError - _________________ TestSpecialisedExplanations.test_in_list _________________ + _______ TestSpecialisedExplanations.test_in_list ________ - self = + self = def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] E assert 1 in [0, 2, 3, 4, 5] failure_demo.py:78: AssertionError - __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ + _______ TestSpecialisedExplanations.test_not_in_text_multiline ________ - self = + self = def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' @@ -251,9 +251,9 @@ get on the terminal - we are working on that): E tail failure_demo.py:82: AssertionError - ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ + _______ TestSpecialisedExplanations.test_not_in_text_single ________ - self = + self = def test_not_in_text_single(self): text = 'single foo line' @@ -264,9 +264,9 @@ get on the terminal - we are working on that): E ? +++ failure_demo.py:86: AssertionError - _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ + _______ TestSpecialisedExplanations.test_not_in_text_single_long ________ - self = + self = def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 @@ -277,9 +277,9 @@ get on the terminal - we are working on that): E ? 
+++ failure_demo.py:90: AssertionError - ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ + _______ TestSpecialisedExplanations.test_not_in_text_single_long_term ________ - self = + self = def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 @@ -290,7 +290,7 @@ get on the terminal - we are working on that): E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ failure_demo.py:94: AssertionError - ______________________________ test_attribute ______________________________ + _______ test_attribute ________ def test_attribute(): class Foo(object): @@ -298,21 +298,21 @@ get on the terminal - we are working on that): i = Foo() > assert i.b == 2 E assert 1 == 2 - E + where 1 = .Foo object at 0x7f65f1c814e0>.b + E + where 1 = .b failure_demo.py:101: AssertionError - _________________________ test_attribute_instance __________________________ + _______ test_attribute_instance ________ def test_attribute_instance(): class Foo(object): b = 1 > assert Foo().b == 2 E assert 1 == 2 - E + where 1 = .Foo object at 0x7f65f1c7f7f0>.b - E + where .Foo object at 0x7f65f1c7f7f0> = .Foo'>() + E + where 1 = .b + E + where = () failure_demo.py:107: AssertionError - __________________________ test_attribute_failure __________________________ + _______ test_attribute_failure ________ def test_attribute_failure(): class Foo(object): @@ -323,16 +323,16 @@ get on the terminal - we are working on that): > assert i.b == 2 failure_demo.py:116: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - self = .Foo object at 0x7f65f1c97dd8> + self = def _get_b(self): > raise Exception('Failed to get attrib') E Exception: Failed to get attrib failure_demo.py:113: Exception - _________________________ test_attribute_multiple __________________________ + _______ test_attribute_multiple ________ def test_attribute_multiple(): class Foo(object): @@ -341,57 +341,57 @@ get on the terminal - we are working on that): b = 2 > assert Foo().b == Bar().b E assert 1 == 2 - E + where 1 = .Foo object at 0x7f65f1c9b630>.b - E + where .Foo object at 0x7f65f1c9b630> = .Foo'>() - E + and 2 = .Bar object at 0x7f65f1c9b2b0>.b - E + where .Bar object at 0x7f65f1c9b2b0> = .Bar'>() + E + where 1 = .b + E + where = () + E + and 2 = .b + E + where = () failure_demo.py:124: AssertionError - __________________________ TestRaises.test_raises __________________________ + _______ TestRaises.test_raises ________ - self = + self = def test_raises(self): s = 'qwe' > raises(TypeError, "int(s)") failure_demo.py:133: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen /tmp/sandbox/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1075>:1: ValueError - ______________________ TestRaises.test_raises_doesnt _______________________ + <0-codegen $PWD/_pytest/python.py:1091>:1: ValueError + _______ TestRaises.test_raises_doesnt ________ - self = + self = def test_raises_doesnt(self): > raises(IOError, "int('3')") E Failed: DID NOT RAISE failure_demo.py:136: Failed - __________________________ TestRaises.test_raise ___________________________ + _______ TestRaises.test_raise ________ - self = + self = def test_raise(self): > raise ValueError("demo error") E ValueError: demo error 
failure_demo.py:139: ValueError - ________________________ TestRaises.test_tupleerror ________________________ + _______ TestRaises.test_tupleerror ________ - self = + self = def test_tupleerror(self): > a,b = [1] E ValueError: need more than 1 value to unpack failure_demo.py:142: ValueError - ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ + _______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ________ - self = + self = def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] @@ -400,18 +400,18 @@ get on the terminal - we are working on that): E TypeError: 'int' object is not iterable failure_demo.py:147: TypeError - --------------------------- Captured stdout call --------------------------- + ----------------------------- Captured stdout call ----------------------------- l is [1, 2, 3] - ________________________ TestRaises.test_some_error ________________________ + _______ TestRaises.test_some_error ________ - self = + self = def test_some_error(self): > if namenotexi: - E NameError: name 'namenotexi' is not defined + E NameError: global name 'namenotexi' is not defined failure_demo.py:150: NameError - ____________________ test_dynamic_compile_shows_nicely _____________________ + _______ test_dynamic_compile_shows_nicely ________ def test_dynamic_compile_shows_nicely(): src = 'def foo():\n assert 1 == 0\n' @@ -423,16 +423,16 @@ get on the terminal - we are working on that): > module.foo() failure_demo.py:165: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ def foo(): > assert 1 == 0 E assert 1 == 0 - <2-codegen 'abc-123' /tmp/sandbox/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError - ____________________ TestMoreErrors.test_complex_error _____________________ + <2-codegen 'abc-123' $PWD/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError + _______ TestMoreErrors.test_complex_error ________ - self = + self = def test_complex_error(self): def f(): @@ -442,10 +442,10 @@ get on the terminal - we are working on that): > somefunc(f(), g()) failure_demo.py:175: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:8: in somefunc otherfunc(x,y) - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ a = 44, b = 43 @@ -454,9 +454,9 @@ get on the terminal - we are working on that): E assert 44 == 43 failure_demo.py:5: AssertionError - ___________________ TestMoreErrors.test_z1_unpack_error ____________________ + _______ TestMoreErrors.test_z1_unpack_error ________ - self = + self = def test_z1_unpack_error(self): l = [] @@ -464,9 +464,9 @@ get on the terminal - we are working on that): E ValueError: need more than 0 values to unpack failure_demo.py:179: ValueError - ____________________ TestMoreErrors.test_z2_type_error _____________________ + _______ TestMoreErrors.test_z2_type_error ________ - self = + self = def test_z2_type_error(self): l = 3 @@ -474,21 +474,21 @@ get on the terminal - we are working on that): E TypeError: 'int' object is not iterable failure_demo.py:183: TypeError - ______________________ TestMoreErrors.test_startswith ______________________ + _______ TestMoreErrors.test_startswith ________ - self = + self = def test_startswith(self): s = "123" 
g = "456" > assert s.startswith(g) - E assert ('456') - E + where = '123'.startswith + E assert ('456') + E + where = '123'.startswith failure_demo.py:188: AssertionError - __________________ TestMoreErrors.test_startswith_nested ___________________ + _______ TestMoreErrors.test_startswith_nested ________ - self = + self = def test_startswith_nested(self): def f(): @@ -496,15 +496,15 @@ get on the terminal - we are working on that): def g(): return "456" > assert f().startswith(g()) - E assert ('456') - E + where = '123'.startswith - E + where '123' = .f at 0x7f65f1c32950>() - E + and '456' = .g at 0x7f65f1c32ea0>() + E assert ('456') + E + where = '123'.startswith + E + where '123' = () + E + and '456' = () failure_demo.py:195: AssertionError - _____________________ TestMoreErrors.test_global_func ______________________ + _______ TestMoreErrors.test_global_func ________ - self = + self = def test_global_func(self): > assert isinstance(globf(42), float) @@ -512,20 +512,20 @@ get on the terminal - we are working on that): E + where 43 = globf(42) failure_demo.py:198: AssertionError - _______________________ TestMoreErrors.test_instance _______________________ + _______ TestMoreErrors.test_instance ________ - self = + self = def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 - E + where 42 = .x + E + where 42 = .x failure_demo.py:202: AssertionError - _______________________ TestMoreErrors.test_compare ________________________ + _______ TestMoreErrors.test_compare ________ - self = + self = def test_compare(self): > assert globf(10) < 5 @@ -533,9 +533,9 @@ get on the terminal - we are working on that): E + where 11 = globf(10) failure_demo.py:205: AssertionError - _____________________ TestMoreErrors.test_try_finally ______________________ + _______ TestMoreErrors.test_try_finally ________ - self = + self = def test_try_finally(self): x = 1 @@ -544,9 +544,9 @@ get on the terminal - we are working on that): E assert 1 == 0 failure_demo.py:210: AssertionError - ___________________ TestCustomAssertMsg.test_single_line ___________________ + _______ TestCustomAssertMsg.test_single_line ________ - self = + self = def test_single_line(self): class A: @@ -555,12 +555,12 @@ get on the terminal - we are working on that): > assert A.a == b, "A.a appears not to be b" E AssertionError: A.a appears not to be b E assert 1 == 2 - E + where 1 = .A'>.a + E + where 1 = .a failure_demo.py:221: AssertionError - ____________________ TestCustomAssertMsg.test_multiline ____________________ + _______ TestCustomAssertMsg.test_multiline ________ - self = + self = def test_multiline(self): class A: @@ -572,12 +572,12 @@ get on the terminal - we are working on that): E or does not appear to be b E one of those E assert 1 == 2 - E + where 1 = .A'>.a + E + where 1 = .a failure_demo.py:227: AssertionError - ___________________ TestCustomAssertMsg.test_custom_repr ___________________ + _______ TestCustomAssertMsg.test_custom_repr ________ - self = + self = def test_custom_repr(self): class JSON: @@ -595,4 +595,4 @@ get on the terminal - we are working on that): E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a failure_demo.py:237: AssertionError - ======================== 42 failed in 0.35 seconds ========================= + ======= 42 failed in 0.12 seconds ======== diff --git a/doc/en/example/simple.txt b/doc/en/example/simple.txt index f41f4bb5b..34211d055 100644 --- a/doc/en/example/simple.txt +++ b/doc/en/example/simple.txt @@ -39,11 +39,11 @@ Let's run this without supplying our new option:: 
$ py.test -q test_sample.py F - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ - + ======= FAILURES ======== + _______ test_answer ________ + cmdopt = 'type1' - + def test_answer(cmdopt): if cmdopt == "type1": print ("first") @@ -51,21 +51,21 @@ Let's run this without supplying our new option:: print ("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError - --------------------------- Captured stdout call --------------------------- + ----------------------------- Captured stdout call ----------------------------- first - 1 failed in 0.01 seconds + 1 failed in 0.12 seconds And now with supplying a command line option:: $ py.test -q --cmdopt=type2 F - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ - + ======= FAILURES ======== + _______ test_answer ________ + cmdopt = 'type2' - + def test_answer(cmdopt): if cmdopt == "type1": print ("first") @@ -73,11 +73,11 @@ And now with supplying a command line option:: print ("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError - --------------------------- Captured stdout call --------------------------- + ----------------------------- Captured stdout call ----------------------------- second - 1 failed in 0.01 seconds + 1 failed in 0.12 seconds You can see that the command line option arrived in our test. This completes the basic pattern. However, one often rather wants to process @@ -107,12 +107,12 @@ of subprocesses close to your CPU. Running in an empty directory with the above conftest.py:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - - ============================= in 0.00 seconds ============================= + + ======= in 0.12 seconds ======== .. 
_`excontrolskip`: @@ -152,28 +152,28 @@ We can now write a test module like this:: and when running it will see a skipped "slow" test:: $ py.test -rs # "-rs" means report details on the little 's' - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s - ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-162/conftest.py:9: need --runslow option to run - - =================== 1 passed, 1 skipped in 0.01 seconds ==================== + ======= short test summary info ======== + SKIP [1] $REGENDOC_TMPDIR/conftest.py:9: need --runslow option to run + + ======= 1 passed, 1 skipped in 0.12 seconds ======== Or run it including the ``slow`` marked test:: $ py.test --runslow - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .. - - ========================= 2 passed in 0.01 seconds ========================= + + ======= 2 passed in 0.12 seconds ======== Writing well integrated assertion helpers -------------------------------------------------- @@ -203,15 +203,15 @@ Let's run our little function:: $ py.test -q test_checkconfig.py F - ================================= FAILURES ================================= - ______________________________ test_something ______________________________ - + ======= FAILURES ======== + _______ test_something ________ + def test_something(): > checkconfig(42) E Failed: not configured: 42 - + test_checkconfig.py:8: Failed - 1 failed in 0.02 seconds + 1 failed in 0.12 seconds Detect if running from within a pytest run -------------------------------------------------------------- @@ -258,13 +258,13 @@ It's easy to present extra information in a ``pytest`` run:: which will add the string to the test header accordingly:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 project deps: mylib-1.1 + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - - ============================= in 0.00 seconds ============================= + + ======= in 0.12 seconds ======== .. regendoc:wipe @@ -282,24 +282,24 @@ you present more information appropriately:: which will add info only when run with "--v":: $ py.test -v - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 info1: did you know that ... did you? + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 0 items - - ============================= in 0.00 seconds ============================= + + ======= in 0.12 seconds ======== and nothing when run plainly:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - - ============================= in 0.00 seconds ============================= + + ======= in 0.12 seconds ======== profiling test duration -------------------------- @@ -327,18 +327,18 @@ out which tests are the slowest. Let's make an artifical test suite:: Now we can profile which test functions execute the slowest:: $ py.test --durations=3 - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_some_are_slow.py ... - - ========================= slowest 3 test durations ========================= + + ======= slowest 3 test durations ======== 0.20s call test_some_are_slow.py::test_funcslow2 0.10s call test_some_are_slow.py::test_funcslow1 - 0.00s setup test_some_are_slow.py::test_funcslow2 - ========================= 3 passed in 0.31 seconds ========================= + 0.00s setup test_some_are_slow.py::test_funcfast + ======= 3 passed in 0.12 seconds ======== incremental testing - test steps --------------------------------------------------- @@ -389,27 +389,27 @@ tests in a class. Here is a test module example:: If we run this:: $ py.test -rx - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_step.py .Fx. - - ================================= FAILURES ================================= - ____________________ TestUserHandling.test_modification ____________________ - - self = - + + ======= FAILURES ======== + _______ TestUserHandling.test_modification ________ + + self = + def test_modification(self): > assert 0 E assert 0 - + test_step.py:9: AssertionError - ========================= short test summary info ========================== + ======= short test summary info ======== XFAIL test_step.py::TestUserHandling::()::test_deletion reason: previous test failed (test_modification) - ============== 1 failed, 2 passed, 1 xfailed in 0.02 seconds =============== + ======= 1 failed, 2 passed, 1 xfailed in 0.12 seconds ======== We'll see that ``test_deletion`` was not executed because ``test_modification`` failed. It is reported as an "expected failure". 
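The ``incremental`` marker driving this behaviour is not built into pytest; it comes from a
small pair of ``conftest.py`` hooks. A minimal sketch of that pattern, assuming the marker
name ``incremental`` used in the example (the exact conftest.py in the docs may differ in
detail)::

    # content of conftest.py -- sketch of the hooks behind @pytest.mark.incremental
    import pytest

    def pytest_runtest_makereport(item, call):
        if "incremental" in item.keywords:
            if call.excinfo is not None:
                # remember the failing test so later tests in the class can react
                item.parent._previousfailed = item

    def pytest_runtest_setup(item):
        if "incremental" in item.keywords:
            previousfailed = getattr(item.parent, "_previousfailed", None)
            if previousfailed is not None:
                # produces the "previous test failed (...)" reason shown above
                pytest.xfail("previous test failed (%s)" % previousfailed.name)
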
@@ -460,56 +460,56 @@ the ``db`` fixture:: We can run this:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items - + test_step.py .Fx. a/test_db.py F a/test_db2.py F b/test_error.py E - - ================================== ERRORS ================================== - _______________________ ERROR at setup of test_root ________________________ - file /tmp/doc-exec-162/b/test_error.py, line 1 + + ======= ERRORS ======== + _______ ERROR at setup of test_root ________ + file $REGENDOC_TMPDIR/b/test_error.py, line 1 def test_root(db): # no db here, will error out fixture 'db' not found - available fixtures: pytestconfig, capsys, recwarn, monkeypatch, tmpdir, capfd + available fixtures: pytestconfig, recwarn, monkeypatch, capfd, capsys, tmpdir use 'py.test --fixtures [testpath]' for help on them. - - /tmp/doc-exec-162/b/test_error.py:1 - ================================= FAILURES ================================= - ____________________ TestUserHandling.test_modification ____________________ - - self = - + + $REGENDOC_TMPDIR/b/test_error.py:1 + ======= FAILURES ======== + _______ TestUserHandling.test_modification ________ + + self = + def test_modification(self): > assert 0 E assert 0 - + test_step.py:9: AssertionError - _________________________________ test_a1 __________________________________ - - db = - + _______ test_a1 ________ + + db = + def test_a1(db): > assert 0, db # to show value - E AssertionError: + E AssertionError: E assert 0 - + a/test_db.py:2: AssertionError - _________________________________ test_a2 __________________________________ - - db = - + _______ test_a2 ________ + + db = + def test_a2(db): > assert 0, db # to show value - E AssertionError: + E AssertionError: E assert 0 - + a/test_db2.py:2: AssertionError - ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.05 seconds ========== + ======= 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ======== The two test modules in the ``a`` directory see the same ``db`` fixture instance while the one test in the sister-directory ``b`` doesn't see it. 
We could of course @@ -563,37 +563,36 @@ if you then have failing tests:: and run them:: $ py.test test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF - - ================================= FAILURES ================================= - ________________________________ test_fail1 ________________________________ - - tmpdir = local('/tmp/pytest-22/test_fail10') - + + ======= FAILURES ======== + _______ test_fail1 ________ + + tmpdir = local('/tmp/pytest-NaN/test_fail10') + def test_fail1(tmpdir): > assert 0 E assert 0 - + test_module.py:2: AssertionError - ________________________________ test_fail2 ________________________________ - + _______ test_fail2 ________ + def test_fail2(): > assert 0 E assert 0 - + test_module.py:4: AssertionError - ========================= 2 failed in 0.02 seconds ========================= + ======= 2 failed in 0.12 seconds ======== you will have a "failures" file which contains the failing test ids:: $ cat failures - test_module.py::test_fail1 (/tmp/pytest-22/test_fail10) - test_module.py::test_fail2 + cat: failures: No such file or directory Making test result information available in fixtures ----------------------------------------------------------- @@ -654,42 +653,42 @@ if you then have failing tests:: and run it:: $ py.test -s test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-162, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - - test_module.py Esetting up a test failed! test_module.py::test_setup_fails - Fexecuting test failed test_module.py::test_call_fails + + test_module.py E('setting up a test failed!', 'test_module.py::test_setup_fails') + F('executing test failed', 'test_module.py::test_call_fails') F - - ================================== ERRORS ================================== - ____________________ ERROR at setup of test_setup_fails ____________________ - + + ======= ERRORS ======== + _______ ERROR at setup of test_setup_fails ________ + @pytest.fixture def other(): > assert 0 E assert 0 - + test_module.py:6: AssertionError - ================================= FAILURES ================================= - _____________________________ test_call_fails ______________________________ - + ======= FAILURES ======== + _______ test_call_fails ________ + something = None - + def test_call_fails(something): > assert 0 E assert 0 - + test_module.py:12: AssertionError - ________________________________ test_fail2 ________________________________ - + _______ test_fail2 ________ + def test_fail2(): > assert 0 E assert 0 - + test_module.py:15: AssertionError - ==================== 2 failed, 1 error in 0.02 seconds ===================== + ======= 2 failed, 1 warnings, 1 error in 0.12 seconds ======== You'll see that the fixture finalizers could use the precise reporting information. 
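Output like the above comes from combining a ``pytest_runtest_makereport`` hook with a
fixture finalizer that inspects the stored reports. A minimal sketch, assuming a
hookwrapper-style hook (the decorator spelling varies between pytest versions) and the
``something`` fixture name from the example::

    # content of conftest.py -- sketch only
    import pytest

    @pytest.hookimpl(tryfirst=True, hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        rep = outcome.get_result()
        # attach the report for each phase to the item:
        # item.rep_setup, item.rep_call, item.rep_teardown
        setattr(item, "rep_" + rep.when, rep)

    @pytest.fixture
    def something(request):
        def fin():
            # request.node is the test item; the rep_* attributes were set above
            if request.node.rep_setup.failed:
                print ("setting up a test failed!", request.node.nodeid)
            elif request.node.rep_setup.passed and request.node.rep_call.failed:
                print ("executing test failed", request.node.nodeid)
        request.addfinalizer(fin)
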
@@ -744,4 +743,4 @@ This makes it convenient to execute your tests from within your frozen application, using standard ``py.test`` command-line options:: $ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/ - /bin/sh: 1: ./app_main: not found + /bin/sh: ./app_main: No such file or directory diff --git a/doc/en/example/special.txt b/doc/en/example/special.txt index ba58a1cd7..58e66d44e 100644 --- a/doc/en/example/special.txt +++ b/doc/en/example/special.txt @@ -69,4 +69,4 @@ If you run this without output capturing:: .test other .test_unit1 method called . - 4 passed in 0.03 seconds + 4 passed in 0.12 seconds diff --git a/doc/en/fixture.txt b/doc/en/fixture.txt index 4b2e97a0c..fabd8b139 100644 --- a/doc/en/fixture.txt +++ b/doc/en/fixture.txt @@ -74,17 +74,17 @@ will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>` marked ``smtp`` fixture function. Running the test looks like this:: $ py.test test_smtpsimple.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_smtpsimple.py F - ================================= FAILURES ================================= - ________________________________ test_ehlo _________________________________ + ======= FAILURES ======== + _______ test_ehlo ________ - smtp = + smtp = def test_ehlo(smtp): response, msg = smtp.ehlo() @@ -93,7 +93,7 @@ marked ``smtp`` fixture function. Running the test looks like this:: E assert 0 test_smtpsimple.py:11: AssertionError - ========================= 1 failed in 1.07 seconds ========================= + ======= 1 failed in 0.12 seconds ======== In the failure traceback we see that the test function was called with a ``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture @@ -192,28 +192,29 @@ We deliberately insert failing ``assert 0`` statements in order to inspect what is going on and can now run the tests:: $ py.test test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py FF - ================================= FAILURES ================================= - ________________________________ test_ehlo _________________________________ + ======= FAILURES ======== + _______ test_ehlo ________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - > assert "merlinux" in response[1] - E TypeError: Type str doesn't support the buffer API + assert "merlinux" in response[1] + > assert 0 # for demo purposes + E assert 0 - test_module.py:5: TypeError - ________________________________ test_noop _________________________________ + test_module.py:6: AssertionError + _______ test_noop ________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -222,7 +223,7 @@ inspect what is going on and can now run the tests:: E assert 0 test_module.py:11: AssertionError - ========================= 2 failed in 0.82 seconds ========================= + ======= 2 failed in 0.12 seconds ======== You see the two ``assert 0`` 
failing and more importantly you can also see that the same (module-scoped) ``smtp`` object was passed into the two @@ -270,7 +271,7 @@ Let's execute it:: $ py.test -s -q --tb=no FFteardown smtp - 2 failed in 1.44 seconds + 2 failed in 0.12 seconds We see that the ``smtp`` instance is finalized after the two tests finished execution. Note that if we decorated our fixture @@ -310,8 +311,9 @@ We use the ``request.module`` attribute to optionally obtain an again, nothing much has changed:: $ py.test -s -q --tb=no - FF - 2 failed in 0.62 seconds + FFteardown smtp + + 2 failed in 0.12 seconds Let's quickly create another test module that actually sets the server URL in its module namespace:: @@ -327,11 +329,11 @@ Running it:: $ py.test -qq --tb=short test_anothersmtp.py F - ================================= FAILURES ================================= - ______________________________ test_showhelo _______________________________ + ======= FAILURES ======== + _______ test_showhelo ________ test_anothersmtp.py:5: in test_showhelo assert 0, smtp.helo() - E AssertionError: (250, b'mail.python.org') + E AssertionError: (250, 'hq.merlinux.eu') E assert 0 voila! The ``smtp`` fixture function picked up our mail server name @@ -376,21 +378,22 @@ So let's just do another run:: $ py.test -q test_module.py FFFF - ================================= FAILURES ================================= - __________________________ test_ehlo[merlinux.eu] __________________________ + ======= FAILURES ======== + _______ test_ehlo[merlinux.eu] ________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - > assert "merlinux" in response[1] - E TypeError: Type str doesn't support the buffer API + assert "merlinux" in response[1] + > assert 0 # for demo purposes + E assert 0 - test_module.py:5: TypeError - __________________________ test_noop[merlinux.eu] __________________________ + test_module.py:6: AssertionError + _______ test_noop[merlinux.eu] ________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -399,22 +402,22 @@ So let's just do another run:: E assert 0 test_module.py:11: AssertionError - ________________________ test_ehlo[mail.python.org] ________________________ + _______ test_ehlo[mail.python.org] ________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 > assert "merlinux" in response[1] - E TypeError: Type str doesn't support the buffer API + E assert 'merlinux' in 'mail.python.org\nSIZE 51200000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' - test_module.py:5: TypeError - -------------------------- Captured stdout setup --------------------------- - finalizing - ________________________ test_noop[mail.python.org] ________________________ + test_module.py:5: AssertionError + ---------------------------- Captured stdout setup ----------------------------- + finalizing + _______ test_noop[mail.python.org] ________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -423,7 +426,7 @@ So let's just do another run:: E assert 0 test_module.py:11: AssertionError - 4 failed in 1.75 seconds + 4 failed in 0.12 seconds We see that our two test functions each ran twice, against the different ``smtp`` instances. Note also, that with the ``mail.python.org`` @@ -473,9 +476,9 @@ return ``None`` then pytest's auto-generated ID will be used. 
Running the above tests results in the following test IDs being used:: $ py.test --collect-only - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 6 items @@ -486,7 +489,7 @@ Running the above tests results in the following test IDs being used:: - ============================= in 0.02 seconds ============================= + ======= in 0.12 seconds ======== .. _`interdependent fixtures`: @@ -519,15 +522,15 @@ Here we declare an ``app`` fixture which receives the previously defined ``smtp`` fixture and instantiates an ``App`` object with it. Let's run it:: $ py.test -v test_appsetup.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items test_appsetup.py::test_smtp_exists[merlinux.eu] PASSED test_appsetup.py::test_smtp_exists[mail.python.org] PASSED - ========================= 2 passed in 1.09 seconds ========================= + ======= 2 passed in 0.12 seconds ======== Due to the parametrization of ``smtp`` the test will run twice with two different ``App`` instances and respective smtp servers. There is no @@ -584,31 +587,31 @@ to show the setup/teardown flow:: Let's run the tests in verbose mode and with looking at the print-output:: $ py.test -v -s test_module.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 -- /tmp/sandbox/pytest/.tox/regen/bin/python3.4 - rootdir: /tmp/doc-exec-98, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 -- $PWD/.env/bin/python2.7 + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items - test_module.py::test_0[1] test0 1 + test_module.py::test_0[1] (' test0', 1) PASSED - test_module.py::test_0[2] test0 2 + test_module.py::test_0[2] (' test0', 2) PASSED - test_module.py::test_1[mod1] create mod1 - test1 mod1 + test_module.py::test_1[mod1] ('create', 'mod1') + (' test1', 'mod1') PASSED - test_module.py::test_2[1-mod1] test2 1 mod1 + test_module.py::test_2[1-mod1] (' test2', 1, 'mod1') PASSED - test_module.py::test_2[2-mod1] test2 2 mod1 + test_module.py::test_2[2-mod1] (' test2', 2, 'mod1') PASSED - test_module.py::test_1[mod2] create mod2 - test1 mod2 + test_module.py::test_1[mod2] ('create', 'mod2') + (' test1', 'mod2') PASSED - test_module.py::test_2[1-mod2] test2 1 mod2 + test_module.py::test_2[1-mod2] (' test2', 1, 'mod2') PASSED - test_module.py::test_2[2-mod2] test2 2 mod2 + test_module.py::test_2[2-mod2] (' test2', 2, 'mod2') PASSED - ========================= 8 passed in 0.02 seconds ========================= + ======= 8 passed in 0.12 seconds ======== You can see that the parametrized module-scoped ``modarg`` resource caused an ordering of test execution that lead to the fewest possible "active" resources. 
The finalizer for the ``mod1`` parametrized resource was executed @@ -664,7 +667,7 @@ to verify our fixture is activated and the tests pass:: $ py.test -q .. - 2 passed in 0.01 seconds + 2 passed in 0.12 seconds You can specify multiple fixtures like this:: @@ -736,7 +739,7 @@ If we run it, we get two passing tests:: $ py.test -q .. - 2 passed in 0.01 seconds + 2 passed in 0.12 seconds Here is how autouse fixtures work in other scopes: diff --git a/doc/en/getting-started.txt b/doc/en/getting-started.txt index 5800bd340..0bc0fe484 100644 --- a/doc/en/getting-started.txt +++ b/doc/en/getting-started.txt @@ -27,7 +27,7 @@ Installation options:: To check your installation has installed the correct version:: $ py.test --version - This is pytest version 2.7.1, imported from /tmp/sandbox/pytest/.tox/regen/lib/python3.4/site-packages/pytest.py + This is pytest version 2.8.0.dev4, imported from $PWD/pytest.pyc If you get an error checkout :ref:`installation issues`. @@ -48,15 +48,15 @@ Let's create a first test file with a simple test function:: That's it. You can execute the test function now:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-101, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_sample.py F - ================================= FAILURES ================================= - _______________________________ test_answer ________________________________ + ======= FAILURES ======== + _______ test_answer ________ def test_answer(): > assert func(3) == 5 @@ -64,7 +64,7 @@ That's it. You can execute the test function now:: E + where 4 = func(3) test_sample.py:5: AssertionError - ========================= 1 failed in 0.01 seconds ========================= + ======= 1 failed in 0.12 seconds ======== ``pytest`` found the ``test_answer`` function by following :ref:`standard test discovery rules `, basically detecting the ``test_`` prefixes. We got a failure report because our little ``func(3)`` call did not return ``5``. @@ -98,7 +98,7 @@ Running it with, this time in "quiet" reporting mode:: $ py.test -q test_sysexit.py . - 1 passed in 0.01 seconds + 1 passed in 0.12 seconds .. todo:: For further ways to assert exceptions see the `raises` @@ -125,10 +125,10 @@ run the module by passing its filename:: $ py.test -q test_class.py .F - ================================= FAILURES ================================= - ____________________________ TestClass.test_two ____________________________ + ======= FAILURES ======== + _______ TestClass.test_two ________ - self = + self = def test_two(self): x = "hello" @@ -136,7 +136,7 @@ run the module by passing its filename:: E assert hasattr('hello', 'check') test_class.py:8: AssertionError - 1 failed, 1 passed in 0.01 seconds + 1 failed, 1 passed in 0.12 seconds The first test passed, the second failed. Again we can easily see the intermediate values used in the assertion, helping us to @@ -161,10 +161,10 @@ before performing the test function call. 
Let's just run it:: $ py.test -q test_tmpdir.py F - ================================= FAILURES ================================= - _____________________________ test_needsfiles ______________________________ + ======= FAILURES ======== + _______ test_needsfiles ________ - tmpdir = local('/tmp/pytest-18/test_needsfiles0') + tmpdir = local('/tmp/pytest-NaN/test_needsfiles0') def test_needsfiles(tmpdir): print (tmpdir) @@ -172,9 +172,9 @@ before performing the test function call. Let's just run it:: E assert 0 test_tmpdir.py:3: AssertionError - --------------------------- Captured stdout call --------------------------- - /tmp/pytest-18/test_needsfiles0 - 1 failed in 0.05 seconds + ----------------------------- Captured stdout call ----------------------------- + /tmp/pytest-NaN/test_needsfiles0 + 1 failed in 0.12 seconds Before the test runs, a unique-per-test-invocation temporary directory was created. More info at :ref:`tmpdir handling`. diff --git a/doc/en/parametrize.txt b/doc/en/parametrize.txt index b93491abc..09849fbd6 100644 --- a/doc/en/parametrize.txt +++ b/doc/en/parametrize.txt @@ -52,15 +52,15 @@ tuples so that the ``test_eval`` function will run three times using them in turn:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-109, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items test_expectation.py ..F - ================================= FAILURES ================================= - ____________________________ test_eval[6*9-42] _____________________________ + ======= FAILURES ======== + _______ test_eval[6*9-42] ________ input = '6*9', expected = 42 @@ -75,7 +75,7 @@ them in turn:: E + where 54 = eval('6*9') test_expectation.py:8: AssertionError - ==================== 1 failed, 2 passed in 0.02 seconds ==================== + ======= 1 failed, 2 passed in 0.12 seconds ======== As designed in this example, only one pair of input/output values fails the simple test function. And as usual with test function arguments, @@ -100,14 +100,14 @@ for example with the builtin ``mark.xfail``:: Let's run this:: $ py.test - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-109, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items test_expectation.py ..x - =================== 2 passed, 1 xfailed in 0.02 seconds ==================== + ======= 2 passed, 1 xfailed in 0.12 seconds ======== The one parameter set which caused a failure previously now shows up as an "xfailed (expected to fail)" test. @@ -159,24 +159,24 @@ If we now pass two stringinput values, our test will run twice:: $ py.test -q --stringinput="hello" --stringinput="world" test_strings.py .. - 2 passed in 0.01 seconds + 2 passed in 0.12 seconds Let's also run with a stringinput that will lead to a failing test:: $ py.test -q --stringinput="!" test_strings.py F - ================================= FAILURES ================================= - ___________________________ test_valid_string[!] ___________________________ + ======= FAILURES ======== + _______ test_valid_string[!] ________ stringinput = '!' 
def test_valid_string(stringinput): > assert stringinput.isalpha() - E assert () - E + where = '!'.isalpha + E assert () + E + where = '!'.isalpha test_strings.py:3: AssertionError - 1 failed in 0.01 seconds + 1 failed in 0.12 seconds As expected our test function fails. @@ -186,9 +186,9 @@ listlist:: $ py.test -q -rs test_strings.py s - ========================= short test summary info ========================== - SKIP [1] /tmp/sandbox/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1185: got empty parameter set, function test_valid_string at /tmp/doc-exec-109/test_strings.py:1 - 1 skipped in 0.01 seconds + ======= short test summary info ======== + SKIP [1] $PWD/_pytest/python.py:1201: got empty parameter set, function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 + 1 skipped in 0.12 seconds For further examples, you might want to look at :ref:`more parametrization examples `. diff --git a/doc/en/skipping.txt b/doc/en/skipping.txt index 28ea3bd27..b3bd4e5a8 100644 --- a/doc/en/skipping.txt +++ b/doc/en/skipping.txt @@ -163,13 +163,13 @@ a simple test file with the several usages: Running it with the report-on-xfail option gives this output:: example $ py.test -rx xfail_demo.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/sandbox/pytest/doc/en, inifile: pytest.ini + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $PWD/doc/en, inifile: pytest.ini collected 7 items xfail_demo.py xxxxxxx - ========================= short test summary info ========================== + ======= short test summary info ======== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 reason: [NOTRUN] @@ -183,7 +183,7 @@ Running it with the report-on-xfail option gives this output:: reason: reason XFAIL xfail_demo.py::test_hello7 - ======================== 7 xfailed in 0.06 seconds ========================= + ======= 7 xfailed in 0.12 seconds ======== .. _`skip/xfail with parametrize`: diff --git a/doc/en/tmpdir.txt b/doc/en/tmpdir.txt index 4fb00ebd2..8c535beba 100644 --- a/doc/en/tmpdir.txt +++ b/doc/en/tmpdir.txt @@ -28,17 +28,17 @@ Running this would result in a passed test except for the last ``assert 0`` line which we use to look at values:: $ py.test test_tmpdir.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-118, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 1 items test_tmpdir.py F - ================================= FAILURES ================================= - _____________________________ test_create_file _____________________________ + ======= FAILURES ======== + _______ test_create_file ________ - tmpdir = local('/tmp/pytest-19/test_create_file0') + tmpdir = local('/tmp/pytest-NaN/test_create_file0') def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") @@ -49,7 +49,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmpdir.py:7: AssertionError - ========================= 1 failed in 0.04 seconds ========================= + ======= 1 failed in 0.12 seconds ======== .. 
_`base temporary directory`: diff --git a/doc/en/unittest.txt b/doc/en/unittest.txt index 736b23296..dd57ef0d2 100644 --- a/doc/en/unittest.txt +++ b/doc/en/unittest.txt @@ -87,36 +87,36 @@ Due to the deliberately failing assert statements, we can take a look at the ``self.db`` values in the traceback:: $ py.test test_unittest_db.py - =========================== test session starts ============================ - platform linux -- Python 3.4.1 -- py-1.4.27 -- pytest-2.7.1 - rootdir: /tmp/doc-exec-119, inifile: + ======= test session starts ======== + platform linux2 -- Python 2.7.9, pytest-2.8.0.dev4, py-1.4.28, pluggy-0.3.0 + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_unittest_db.py FF - ================================= FAILURES ================================= - ___________________________ MyTest.test_method1 ____________________________ + ======= FAILURES ======== + _______ MyTest.test_method1 ________ self = def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes - E AssertionError: .DummyDB object at 0x7f97382031d0> + E AssertionError: E assert 0 test_unittest_db.py:9: AssertionError - ___________________________ MyTest.test_method2 ____________________________ + _______ MyTest.test_method2 ________ self = def test_method2(self): > assert 0, self.db # fail for demo purposes - E AssertionError: .DummyDB object at 0x7f97382031d0> + E AssertionError: E assert 0 test_unittest_db.py:12: AssertionError - ========================= 2 failed in 0.04 seconds ========================= + ======= 2 failed in 0.12 seconds ======== This default pytest traceback shows that the two test methods share the same ``self.db`` instance which was our intention @@ -163,7 +163,7 @@ Running this test module ...:: $ py.test -q test_unittest_cleandir.py . - 1 passed in 0.25 seconds + 1 passed in 0.12 seconds ... gives us one passed test because the ``initdir`` fixture function was executed ahead of the ``test_method``. diff --git a/doc/en/yieldfixture.txt b/doc/en/yieldfixture.txt index bb6a3d421..ee88a27df 100644 --- a/doc/en/yieldfixture.txt +++ b/doc/en/yieldfixture.txt @@ -43,7 +43,7 @@ Let's run it with output capturing disabled:: test called .teardown after yield - 1 passed in 0.01 seconds + 1 passed in 0.12 seconds We can also seamlessly use the new syntax with ``with`` statements. Let's simplify the above ``passwd`` fixture:: diff --git a/requirements-docs.txt b/requirements-docs.txt index 647e47445..be3a232e5 100644 --- a/requirements-docs.txt +++ b/requirements-docs.txt @@ -1,2 +1,3 @@ sphinx==1.2.3 regendoc +pyyaml