From 617a5fcf98d56d19779eace036555f17a8d08422 Mon Sep 17 00:00:00 2001
From: Bruno Oliveira
Date: Wed, 20 Jun 2018 00:29:58 +0000
Subject: [PATCH] Preparing release version 3.6.2

---
 CHANGELOG.rst | 46 ++++
 changelog/3525.trivial.rst | 1 -
 changelog/3545.trivial.rst | 1 -
 changelog/3549.doc.rst | 1 -
 changelog/3552.trivial.rst | 1 -
 changelog/3555.bugfix.rst | 1 -
 changelog/3563.bugfix.rst | 1 -
 changelog/3567.trivial.rst | 1 -
 changelog/3569.bugfix.rst | 1 -
 changelog/3583.bugfix.rst | 1 -
 doc/en/announce/index.rst | 1 +
 doc/en/announce/release-3.6.2.rst | 29 +++
 doc/en/assert.rst | 20 +-
 doc/en/builtin.rst | 28 +--
 doc/en/cache.rst | 58 ++---
 doc/en/capture.rst | 8 +-
 doc/en/doctest.rst | 4 +-
 doc/en/example/markers.rst | 92 +++----
 doc/en/example/nonpython.rst | 10 +-
 doc/en/example/parametrize.rst | 34 +--
 doc/en/example/pythoncollection.rst | 6 +-
 doc/en/example/reportingdemo.rst | 370 ++++++++++++++--------------
 doc/en/example/simple.rst | 102 ++++----
 doc/en/fixture.rst | 80 +++---
 doc/en/getting-started.rst | 20 +-
 doc/en/index.rst | 8 +-
 doc/en/parametrize.rst | 20 +-
 doc/en/skipping.rst | 6 +-
 doc/en/tmpdir.rst | 10 +-
 doc/en/unittest.rst | 16 +-
 doc/en/usage.rst | 2 +-
 doc/en/warnings.rst | 18 +-
 32 files changed, 532 insertions(+), 465 deletions(-)
 delete mode 100644 changelog/3525.trivial.rst
 delete mode 100644 changelog/3545.trivial.rst
 delete mode 100644 changelog/3549.doc.rst
 delete mode 100644 changelog/3552.trivial.rst
 delete mode 100644 changelog/3555.bugfix.rst
 delete mode 100644 changelog/3563.bugfix.rst
 delete mode 100644 changelog/3567.trivial.rst
 delete mode 100644 changelog/3569.bugfix.rst
 delete mode 100644 changelog/3583.bugfix.rst
 create mode 100644 doc/en/announce/release-3.6.2.rst

diff --git a/CHANGELOG.rst b/CHANGELOG.rst
index f61319db7..21a090414 100644
--- a/CHANGELOG.rst
+++ b/CHANGELOG.rst
@@ -8,6 +8,52 @@

 .. towncrier release notes start

+Pytest 3.6.2 (2018-06-20)
+=========================
+
+Bug Fixes
+---------
+
+- Fix regression in ``Node.add_marker`` by extracting the mark object of a
+  ``MarkDecorator``. (`#3555
+  <https://github.com/pytest-dev/pytest/issues/3555>`_)
+
+- Warnings without ``location`` were reported as ``None``. This is corrected to
+  now report ``<undetermined location>``. (`#3563
+  <https://github.com/pytest-dev/pytest/issues/3563>`_)
+
+- Continue to call finalizers in the stack when a finalizer in a former scope
+  raises an exception. (`#3569
+  <https://github.com/pytest-dev/pytest/issues/3569>`_)
+
+- Fix encoding error with `print` statements in doctests (`#3583
+  <https://github.com/pytest-dev/pytest/issues/3583>`_)
+
+
+Improved Documentation
+----------------------
+
+- Add documentation for the ``--strict`` flag. (`#3549
+  <https://github.com/pytest-dev/pytest/issues/3549>`_)
+
+
+Trivial/Internal Changes
+------------------------
+
+- Update old quotation style to parens in fixture.rst documentation. (`#3525
+  <https://github.com/pytest-dev/pytest/issues/3525>`_)
+
+- Improve display of hint about ``--fulltrace`` with ``KeyboardInterrupt``.
+  (`#3545 <https://github.com/pytest-dev/pytest/issues/3545>`_)
+
+- pytest's testsuite is no longer runnable through ``python setup.py test`` --
+  instead invoke ``pytest`` or ``tox`` directly. (`#3552
+  <https://github.com/pytest-dev/pytest/issues/3552>`_)
+
+- Fix typo in documentation (`#3567
+  <https://github.com/pytest-dev/pytest/issues/3567>`_)
+
+
 Pytest 3.6.1 (2018-06-05)
 =========================

diff --git a/changelog/3525.trivial.rst b/changelog/3525.trivial.rst
deleted file mode 100644
index 4607c3bd5..000000000
--- a/changelog/3525.trivial.rst
+++ /dev/null
@@ -1 +0,0 @@
-Update old quotation style to parens in fixture.rst documentation.
diff --git a/changelog/3545.trivial.rst b/changelog/3545.trivial.rst
deleted file mode 100644
index 0b98f78b1..000000000
--- a/changelog/3545.trivial.rst
+++ /dev/null
@@ -1 +0,0 @@
-Improve display of hint about ``--fulltrace`` with ``KeyboardInterrupt``.
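The ``Node.add_marker`` entry above (#3555) concerns code that passes a ``MarkDecorator`` such as ``pytest.mark.slow`` directly to ``add_marker``. A minimal sketch of that pattern, for context only -- the ``slow`` marker and the nodeid check are illustrative and not part of this commit::

    # conftest.py -- illustrative sketch, not part of this patch
    import pytest

    def pytest_collection_modifyitems(items):
        for item in items:
            if "slow" in item.nodeid:
                # add_marker accepts either a marker name as a string or a
                # MarkDecorator object such as pytest.mark.slow; passing the
                # MarkDecorator is the case whose regression #3555 fixes.
                item.add_marker(pytest.mark.slow)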
diff --git a/changelog/3549.doc.rst b/changelog/3549.doc.rst deleted file mode 100644 index 62bc4f9a5..000000000 --- a/changelog/3549.doc.rst +++ /dev/null @@ -1 +0,0 @@ -Add documentation for the ``--strict`` flag. diff --git a/changelog/3552.trivial.rst b/changelog/3552.trivial.rst deleted file mode 100644 index dc4fb8be5..000000000 --- a/changelog/3552.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -pytest's testsuite is no longer runnable through ``python setup.py test`` -- instead invoke ``pytest`` or ``tox`` directly. diff --git a/changelog/3555.bugfix.rst b/changelog/3555.bugfix.rst deleted file mode 100644 index b86012c6b..000000000 --- a/changelog/3555.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix regression in ``Node.add_marker`` by extracting the mark object of a ``MarkDecorator``. diff --git a/changelog/3563.bugfix.rst b/changelog/3563.bugfix.rst deleted file mode 100644 index 8ba24c5dd..000000000 --- a/changelog/3563.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Warnings without ``location`` were reported as ``None``. This is corrected to now report ````. diff --git a/changelog/3567.trivial.rst b/changelog/3567.trivial.rst deleted file mode 100644 index b0bd6fdd4..000000000 --- a/changelog/3567.trivial.rst +++ /dev/null @@ -1 +0,0 @@ -Fix typo in documentation diff --git a/changelog/3569.bugfix.rst b/changelog/3569.bugfix.rst deleted file mode 100644 index 586ecaf95..000000000 --- a/changelog/3569.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Continue to call finalizers in the stack when a finalizer in a former scope raises an exception. diff --git a/changelog/3583.bugfix.rst b/changelog/3583.bugfix.rst deleted file mode 100644 index cfa662580..000000000 --- a/changelog/3583.bugfix.rst +++ /dev/null @@ -1 +0,0 @@ -Fix encoding error with `print` statements in doctests diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index 09840708d..107fcd2ad 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -6,6 +6,7 @@ Release announcements :maxdepth: 2 + release-3.6.2 release-3.6.1 release-3.6.0 release-3.5.1 diff --git a/doc/en/announce/release-3.6.2.rst b/doc/en/announce/release-3.6.2.rst new file mode 100644 index 000000000..a1215f576 --- /dev/null +++ b/doc/en/announce/release-3.6.2.rst @@ -0,0 +1,29 @@ +pytest-3.6.2 +======================================= + +pytest 3.6.2 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Alan Velasco +* Alex Barbato +* Anthony Sottile +* Bartosz Cierocki +* Bruno Oliveira +* Daniel Hahler +* Guoqiang Zhang +* Hynek Schlawack +* John T. 
Wodder II +* Michael Käufl +* Ronny Pfannschmidt +* Samuel Dion-Girardeau + + +Happy testing, +The pytest Development Team diff --git a/doc/en/assert.rst b/doc/en/assert.rst index e0e9b9305..bcc0a28c9 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -29,17 +29,17 @@ you will see the return value of the function call:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_assert1.py F [100%] - + ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + def test_function(): > assert f() == 4 E assert 3 == 4 E + where 3 = f() - + test_assert1.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -172,12 +172,12 @@ if you run this module:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_assert2.py F [100%] - + ================================= FAILURES ================================= ___________________________ test_set_comparison ____________________________ - + def test_set_comparison(): set1 = set("1308") set2 = set("8035") @@ -188,7 +188,7 @@ if you run this module:: E Extra items in the right set: E '5' E Use -v to get the full diff - + test_assert2.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -241,14 +241,14 @@ the conftest file:: F [100%] ================================= FAILURES ================================= _______________________________ test_compare _______________________________ - + def test_compare(): f1 = Foo(1) f2 = Foo(2) > assert f1 == f2 E assert Comparing Foo instances: E vals: 1 != 2 - + test_foocompare.py:11: AssertionError 1 failed in 0.12 seconds diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index c2d23469b..f5e1fd155 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -17,13 +17,13 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a $ pytest -q --fixtures cache Return a cache object that can persist state between testing sessions. - + cache.get(key, default) cache.set(key, value) - + Keys must be a ``/`` separated value, where the first part is usually the name of your plugin or application to avoid clashes with other cache users. - + Values can be any object handled by the json stdlib module. capsys Enable capturing of writes to ``sys.stdout`` and ``sys.stderr`` and make @@ -49,9 +49,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a Fixture that returns a :py:class:`dict` that will be injected into the namespace of doctests. pytestconfig Session-scoped fixture that returns the :class:`_pytest.config.Config` object. - + Example:: - + def test_foo(pytestconfig): if pytestconfig.getoption("verbose"): ... @@ -61,9 +61,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a configured reporters, like JUnit XML. The fixture is callable with ``(name, value)``, with value being automatically xml-encoded. - + Example:: - + def test_function(record_property): record_property("example_key", 1) record_xml_property @@ -74,9 +74,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a automatically xml-encoded caplog Access and control log capturing. 
- + Captured logs are available through the following methods:: - + * caplog.text -> string containing formatted log output * caplog.records -> list of logging.LogRecord instances * caplog.record_tuples -> list of (logger_name, level, message) tuples @@ -84,7 +84,7 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a monkeypatch The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: - + monkeypatch.setattr(obj, name, value, raising=True) monkeypatch.delattr(obj, name, raising=True) monkeypatch.setitem(mapping, name, value) @@ -93,14 +93,14 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a monkeypatch.delenv(name, value, raising=True) monkeypatch.syspath_prepend(path) monkeypatch.chdir(path) - + All modifications will be undone after the requesting test function or fixture has finished. The ``raising`` parameter determines if a KeyError or AttributeError will be raised if the set/deletion operation has no target. recwarn Return a :class:`WarningsRecorder` instance that records all warnings emitted by test functions. - + See http://docs.python.org/library/warnings.html for information on warning categories. tmpdir_factory @@ -111,9 +111,9 @@ For information about fixtures, see :ref:`fixtures`. To see a complete list of a created as a sub directory of the base temporary directory. The returned object is a `py.path.local`_ path object. - + .. _`py.path.local`: https://py.readthedocs.io/en/latest/path.html - + no tests ran in 0.12 seconds You can also interactively ask for help, e.g. by typing on the Python interactive prompt something like:: diff --git a/doc/en/cache.rst b/doc/en/cache.rst index 37bcf7070..437109db0 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -49,26 +49,26 @@ If you run this for the first time you will see two failures:: .................F.......F........................ 
[100%] ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed 2 failed, 48 passed in 0.12 seconds @@ -80,31 +80,31 @@ If you then run it with ``--lf``:: rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items / 48 deselected run-last-failure: rerun previous 2 failures - + test_50.py FF [100%] - + ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed ================= 2 failed, 48 deselected in 0.12 seconds ================== @@ -121,31 +121,31 @@ of ``FF`` and dots):: rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items run-last-failure: rerun previous 2 failures first - + test_50.py FF................................................ [100%] - + ================================= FAILURES ================================= _______________________________ test_num[17] _______________________________ - + i = 17 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed _______________________________ test_num[25] _______________________________ - + i = 25 - + @pytest.mark.parametrize("i", range(50)) def test_num(i): if i in (17, 25): > pytest.fail("bad luck") E Failed: bad luck - + test_50.py:6: Failed =================== 2 failed, 48 passed in 0.12 seconds ==================== @@ -198,13 +198,13 @@ of the sleep:: F [100%] ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:14: AssertionError 1 failed in 0.12 seconds @@ -215,13 +215,13 @@ the cache and this will be quick:: F [100%] ================================= FAILURES ================================= ______________________________ test_function _______________________________ - + mydata = 42 - + def test_function(mydata): > assert mydata == 23 E assert 42 == 23 - + test_caching.py:14: AssertionError 1 failed in 0.12 seconds @@ -246,7 +246,7 @@ You can always peek at the content of the cache using the ['test_caching.py::test_function'] example/value contains: 42 - + ======================= no tests ran in 0.12 seconds ======================= Clearing Cache content diff --git a/doc/en/capture.rst b/doc/en/capture.rst index ab86fb55f..549845cc9 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -68,16 +68,16 @@ of the failing function and hide the other one:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, 
inifile: collected 2 items - + test_module.py .F [100%] - + ================================= FAILURES ================================= ________________________________ test_func2 ________________________________ - + def test_func2(): > assert False E assert False - + test_module.py:9: AssertionError -------------------------- Captured stdout setup --------------------------- setting up diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index 9488ee826..ac470d105 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -65,9 +65,9 @@ then you can just invoke ``pytest`` without command line options:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 1 item - + mymodule.py . [100%] - + ========================= 1 passed in 0.12 seconds ========================= It is possible to use fixtures using the ``getfixture`` helper:: diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index bf352bc81..176fdccdb 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -35,9 +35,9 @@ You can then restrict a test run to only run tests marked with ``webtest``:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 3 deselected - + test_server.py::test_send_http PASSED [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== Or the inverse, running all tests except the webtest ones:: @@ -48,11 +48,11 @@ Or the inverse, running all tests except the webtest ones:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 1 deselected - + test_server.py::test_something_quick PASSED [ 33%] test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - + ================== 3 passed, 1 deselected in 0.12 seconds ================== Selecting tests based on their node ID @@ -68,9 +68,9 @@ tests based on their module, class, method, or function name:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 1 item - + test_server.py::TestClass::test_method PASSED [100%] - + ========================= 1 passed in 0.12 seconds ========================= You can also select on the class:: @@ -81,9 +81,9 @@ You can also select on the class:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 1 item - + test_server.py::TestClass::test_method PASSED [100%] - + ========================= 1 passed in 0.12 seconds ========================= Or select multiple nodes:: @@ -94,10 +94,10 @@ Or select multiple nodes:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - + test_server.py::TestClass::test_method PASSED [ 50%] test_server.py::test_send_http PASSED [100%] - + ========================= 2 passed in 0.12 seconds ========================= .. _node-id: @@ -132,9 +132,9 @@ select tests based on their names:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 3 deselected - + test_server.py::test_send_http PASSED [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== And you can also run all tests except the ones that match the keyword:: @@ -145,11 +145,11 @@ And you can also run all tests except the ones that match the keyword:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... 
collected 4 items / 1 deselected - + test_server.py::test_something_quick PASSED [ 33%] test_server.py::test_another PASSED [ 66%] test_server.py::TestClass::test_method PASSED [100%] - + ================== 3 passed, 1 deselected in 0.12 seconds ================== Or to select "http" and "quick" tests:: @@ -160,10 +160,10 @@ Or to select "http" and "quick" tests:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items / 2 deselected - + test_server.py::test_send_http PASSED [ 50%] test_server.py::test_something_quick PASSED [100%] - + ================== 2 passed, 2 deselected in 0.12 seconds ================== .. note:: @@ -199,21 +199,21 @@ You can ask which markers exist for your test suite - the list includes our just $ pytest --markers @pytest.mark.webtest: mark a test as a webtest. - + @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. - + @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html - + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. - - @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - + + @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - + For an example on how to add and work with markers from a plugin, see :ref:`adding a custom marker from a plugin`. 
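The test module behind these marker runs only has its rendered output updated in this diff; a minimal sketch consistent with the node IDs shown above (function bodies assumed, left empty here) would be::

    # test_server.py -- sketch matching the node IDs in the runs above
    import pytest

    @pytest.mark.webtest
    def test_send_http():
        pass  # perform some webtest test for your app

    def test_something_quick():
        pass

    def test_another():
        pass

    class TestClass(object):
        def test_method(self):
            pass

With this layout, ``pytest -v -m webtest`` selects only ``test_send_http``, and ``pytest -v -m "not webtest"`` deselects it, matching the runs shown earlier in this file's diff.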
@@ -352,9 +352,9 @@ the test needs:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_someenv.py s [100%] - + ======================== 1 skipped in 0.12 seconds ========================= and here is one that specifies exactly the environment needed:: @@ -364,30 +364,30 @@ and here is one that specifies exactly the environment needed:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_someenv.py . [100%] - + ========================= 1 passed in 0.12 seconds ========================= The ``--markers`` option always gives you a list of available markers:: $ pytest --markers @pytest.mark.env(name): mark test to run only on named environment - + @pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test. - + @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html - + @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. - - @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures - + + @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures + @pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible. - + @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. - + .. _`passing callables to custom markers`: @@ -523,11 +523,11 @@ then you will see two tests skipped and two executed tests as expected:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_plat.py s.s. 
[100%] ========================= short test summary info ========================== SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux - + =================== 2 passed, 2 skipped in 0.12 seconds ==================== Note that if you specify a platform via the marker-command line option like this:: @@ -537,9 +537,9 @@ Note that if you specify a platform via the marker-command line option like this platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 3 deselected - + test_plat.py . [100%] - + ================== 1 passed, 3 deselected in 0.12 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -588,9 +588,9 @@ We can now use the ``-m option`` to select one set:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 2 deselected - + test_module.py FF [100%] - + ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple @@ -609,9 +609,9 @@ or to select both "event" and "interface" tests:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items / 1 deselected - + test_module.py FFF [100%] - + ================================= FAILURES ================================= __________________________ test_interface_simple ___________________________ test_module.py:3: in test_interface_simple diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 4f5adf63f..ca7b2c8df 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -30,9 +30,9 @@ now execute the test specification:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items - + test_simple.yml F. [100%] - + ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed @@ -63,10 +63,10 @@ consulted when reporting in ``verbose`` mode:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collecting ... 
collected 2 items - + test_simple.yml::hello FAILED [ 50%] test_simple.yml::ok PASSED [100%] - + ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ usecase execution failed @@ -87,5 +87,5 @@ interesting to just look at the collection tree:: - + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index 882700fec..49ffa7288 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -55,13 +55,13 @@ let's run the full monty:: ....F [100%] ================================= FAILURES ================================= _____________________________ test_compute[4] ______________________________ - + param1 = 4 - + def test_compute(param1): > assert param1 < 4 E assert 4 < 4 - + test_compute.py:3: AssertionError 1 failed, 4 passed in 0.12 seconds @@ -151,7 +151,7 @@ objects, they are still using the default pytest representation:: - + ======================= no tests ran in 0.12 seconds ======================= In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs @@ -198,9 +198,9 @@ this is a fully self-contained example which you can run with:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_scenarios.py .... [100%] - + ========================= 4 passed in 0.12 seconds ========================= If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function:: @@ -218,7 +218,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia - + ======================= no tests ran in 0.12 seconds ======================= Note that we told ``metafunc.parametrize()`` that your scenario values @@ -279,7 +279,7 @@ Let's first see how it looks like at collection time:: - + ======================= no tests ran in 0.12 seconds ======================= And then when we run the test:: @@ -288,15 +288,15 @@ And then when we run the test:: .F [100%] ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ - + db = - + def test_db_initialized(db): # a dummy test if db.__class__.__name__ == "DB2": > pytest.fail("deliberately failing for demo purposes") E Failed: deliberately failing for demo purposes - + test_backends.py:6: Failed 1 failed, 1 passed in 0.12 seconds @@ -339,7 +339,7 @@ The result of this test will be successful:: collected 1 item - + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -384,13 +384,13 @@ argument sets to use for each test function. Let's run it:: F.. 
[100%] ================================= FAILURES ================================= ________________________ TestClass.test_equals[1-2] ________________________ - + self = , a = 1, b = 2 - + def test_equals(self, a, b): > assert a == b E assert 1 == 2 - + test_parametrize.py:18: AssertionError 1 failed, 2 passed in 0.12 seconds @@ -462,11 +462,11 @@ If you run this with reporting for skips enabled:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s [100%] ========================= short test summary info ========================== SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' - + =================== 1 passed, 1 skipped in 0.12 seconds ==================== You'll see that we don't have an ``opt2`` module and thus the second test run diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 8e9d3ae62..58b4364b5 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -133,7 +133,7 @@ then the test collection looks like this:: - + ======================= no tests ran in 0.12 seconds ======================= .. note:: @@ -180,7 +180,7 @@ You can always peek at the collection tree without running tests like this:: - + ======================= no tests ran in 0.12 seconds ======================= .. _customizing-test-collection: @@ -243,5 +243,5 @@ file will be left out:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 9e9d65b6d..00aa3c842 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -14,83 +14,83 @@ get on the terminal - we are working on that):: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/assertion, inifile: collected 42 items - + failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF [100%] - + ================================= FAILURES ================================= ____________________________ test_generative[0] ____________________________ - + param1 = 3, param2 = 6 - + def test_generative(param1, param2): > assert param1 * 2 < param2 E assert (3 * 2) < 6 - + failure_demo.py:19: AssertionError _________________________ TestFailing.test_simple __________________________ - + self = - + def test_simple(self): - + def f(): return 42 - + def g(): return 43 - + > assert f() == g() E assert 42 == 43 E + where 42 = .f at 0xdeadbeef>() E + and 43 = .g at 0xdeadbeef>() - + failure_demo.py:37: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ - + self = - + def test_simple_multiline(self): > otherfunc_multi(42, 6 * 9) - - failure_demo.py:40: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:40: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + a = 42, b = 54 - + def otherfunc_multi(a, b): > assert a == b E assert 42 == 54 - + failure_demo.py:15: AssertionError ___________________________ TestFailing.test_not ___________________________ - + self = - + def test_not(self): - + def f(): return 42 - + > assert not f() E assert not 42 E + where 42 = .f at 0xdeadbeef>() - + failure_demo.py:47: AssertionError _________________ 
TestSpecialisedExplanations.test_eq_text _________________ - + self = - + def test_eq_text(self): > assert "spam" == "eggs" E AssertionError: assert 'spam' == 'eggs' E - spam E + eggs - + failure_demo.py:53: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ - + self = - + def test_eq_similar_text(self): > assert "foo 1 bar" == "foo 2 bar" E AssertionError: assert 'foo 1 bar' == 'foo 2 bar' @@ -98,12 +98,12 @@ get on the terminal - we are working on that):: E ? ^ E + foo 2 bar E ? ^ - + failure_demo.py:56: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ - + self = - + def test_eq_multiline_text(self): > assert "foo\nspam\nbar" == "foo\neggs\nbar" E AssertionError: assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -111,12 +111,12 @@ get on the terminal - we are working on that):: E - spam E + eggs E bar - + failure_demo.py:59: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ - + self = - + def test_eq_long_text(self): a = "1" * 100 + "a" + "2" * 100 b = "1" * 100 + "b" + "2" * 100 @@ -128,12 +128,12 @@ get on the terminal - we are working on that):: E ? ^ E + 1111111111b222222222 E ? ^ - + failure_demo.py:64: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ - + self = - + def test_eq_long_text_multiline(self): a = "1\n" * 100 + "a" + "2\n" * 100 b = "1\n" * 100 + "b" + "2\n" * 100 @@ -146,25 +146,25 @@ get on the terminal - we are working on that):: E 1 E 1 E 1... - E + E E ...Full output truncated (7 lines hidden), use '-vv' to show - + failure_demo.py:69: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ - + self = - + def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] E assert [0, 1, 2] == [0, 1, 3] E At index 2 diff: 2 != 3 E Use -v to get the full diff - + failure_demo.py:72: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ - + self = - + def test_eq_list_long(self): a = [0] * 100 + [1] + [3] * 100 b = [0] * 100 + [2] + [3] * 100 @@ -172,12 +172,12 @@ get on the terminal - we are working on that):: E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...] E At index 100 diff: 1 != 2 E Use -v to get the full diff - + failure_demo.py:77: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ - + self = - + def test_eq_dict(self): > assert {"a": 0, "b": 1, "c": 0} == {"a": 0, "b": 2, "d": 0} E AssertionError: assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} @@ -188,14 +188,14 @@ get on the terminal - we are working on that):: E {'c': 0} E Right contains more items: E {'d': 0}... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:80: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ - + self = - + def test_eq_set(self): > assert {0, 10, 11, 12} == {0, 20, 21} E AssertionError: assert {0, 10, 11, 12} == {0, 20, 21} @@ -206,34 +206,34 @@ get on the terminal - we are working on that):: E Extra items in the right set: E 20 E 21... 
- E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:83: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ - + self = - + def test_eq_longer_list(self): > assert [1, 2] == [1, 2, 3] E assert [1, 2] == [1, 2, 3] E Right contains more items, first extra item: 3 E Use -v to get the full diff - + failure_demo.py:86: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ - + self = - + def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] E assert 1 in [0, 2, 3, 4, 5] - + failure_demo.py:89: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ - + self = - + def test_not_in_text_multiline(self): text = "some multiline\ntext\nwhich\nincludes foo\nand a\ntail" > assert "foo" not in text @@ -245,14 +245,14 @@ get on the terminal - we are working on that):: E includes foo E ? +++ E and a... - E + E E ...Full output truncated (2 lines hidden), use '-vv' to show - + failure_demo.py:93: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ - + self = - + def test_not_in_text_single(self): text = "single foo line" > assert "foo" not in text @@ -260,172 +260,172 @@ get on the terminal - we are working on that):: E 'foo' is contained here: E single foo line E ? +++ - + failure_demo.py:97: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ - + self = - + def test_not_in_text_single_long(self): text = "head " * 50 + "foo " + "tail " * 20 > assert "foo" not in text E AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' E 'foo' is contained here: - E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail + E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? +++ - + failure_demo.py:101: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ - + self = - + def test_not_in_text_single_long_term(self): text = "head " * 50 + "f" * 70 + "tail " * 20 > assert "f" * 70 not in text E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' E 'ffffffffffffffffff...fffffffffffffffffff' is contained here: - E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail + E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? 
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ - + failure_demo.py:105: AssertionError ______________________________ test_attribute ______________________________ - + def test_attribute(): - + class Foo(object): b = 1 - + i = Foo() > assert i.b == 2 E assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b - + failure_demo.py:114: AssertionError _________________________ test_attribute_instance __________________________ - + def test_attribute_instance(): - + class Foo(object): b = 1 - + > assert Foo().b == 2 E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() - + failure_demo.py:122: AssertionError __________________________ test_attribute_failure __________________________ - + def test_attribute_failure(): - + class Foo(object): - + def _get_b(self): raise Exception("Failed to get attrib") - + b = property(_get_b) - + i = Foo() > assert i.b == 2 - - failure_demo.py:135: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:135: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + self = .Foo object at 0xdeadbeef> - + def _get_b(self): > raise Exception("Failed to get attrib") E Exception: Failed to get attrib - + failure_demo.py:130: Exception _________________________ test_attribute_multiple __________________________ - + def test_attribute_multiple(): - + class Foo(object): b = 1 - + class Bar(object): b = 2 - + > assert Foo().b == Bar().b E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() E + and 2 = .Bar object at 0xdeadbeef>.b E + where .Bar object at 0xdeadbeef> = .Bar'>() - + failure_demo.py:146: AssertionError __________________________ TestRaises.test_raises __________________________ - + self = - + def test_raises(self): s = "qwe" # NOQA > raises(TypeError, "int(s)") - - failure_demo.py:157: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:157: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - + <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:634>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ - + self = - + def test_raises_doesnt(self): > raises(IOError, "int('3')") E Failed: DID NOT RAISE - + failure_demo.py:160: Failed __________________________ TestRaises.test_raise ___________________________ - + self = - + def test_raise(self): > raise ValueError("demo error") E ValueError: demo error - + failure_demo.py:163: ValueError ________________________ TestRaises.test_tupleerror ________________________ - + self = - + def test_tupleerror(self): > a, b = [1] # NOQA E ValueError: not enough values to unpack (expected 2, got 1) - + failure_demo.py:166: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ - + self = - + def test_reinterpret_fails_with_print_for_the_fun_of_it(self): items = [1, 2, 3] print("items is %r" % items) > a, b = items.pop() E TypeError: 'int' object is not iterable - + failure_demo.py:171: TypeError --------------------------- Captured stdout call --------------------------- items is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ - + self = - + def test_some_error(self): > if namenotexi: # NOQA E NameError: name 'namenotexi' is 
not defined - + failure_demo.py:174: NameError ____________________ test_dynamic_compile_shows_nicely _____________________ - + def test_dynamic_compile_shows_nicely(): import imp import sys - + src = "def foo():\n assert 1 == 0\n" name = "abc-123" module = imp.new_module(name) @@ -433,66 +433,66 @@ get on the terminal - we are working on that):: py.builtin.exec_(code, module.__dict__) sys.modules[name] = module > module.foo() - - failure_demo.py:192: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + failure_demo.py:192: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def foo(): > assert 1 == 0 E AssertionError - + <2-codegen 'abc-123' $REGENDOC_TMPDIR/assertion/failure_demo.py:189>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ - + self = - + def test_complex_error(self): - + def f(): return 44 - + def g(): return 43 - + > somefunc(f(), g()) - - failure_demo.py:205: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + + failure_demo.py:205: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ failure_demo.py:11: in somefunc otherfunc(x, y) - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + a = 44, b = 43 - + def otherfunc(a, b): > assert a == b E assert 44 == 43 - + failure_demo.py:7: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ - + self = - + def test_z1_unpack_error(self): items = [] > a, b = items E ValueError: not enough values to unpack (expected 2, got 0) - + failure_demo.py:209: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ - + self = - + def test_z2_type_error(self): items = 3 > a, b = items E TypeError: 'int' object is not iterable - + failure_demo.py:213: TypeError ______________________ TestMoreErrors.test_startswith ______________________ - + self = - + def test_startswith(self): s = "123" g = "456" @@ -500,96 +500,96 @@ get on the terminal - we are working on that):: E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith - + failure_demo.py:218: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ - + self = - + def test_startswith_nested(self): - + def f(): return "123" - + def g(): return "456" - + > assert f().startswith(g()) E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith E + where '123' = .f at 0xdeadbeef>() E + and '456' = .g at 0xdeadbeef>() - + failure_demo.py:228: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - + self = - + def test_global_func(self): > assert isinstance(globf(42), float) E assert False E + where False = isinstance(43, float) E + where 43 = globf(42) - + failure_demo.py:231: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - + self = - + def test_instance(self): self.x = 6 * 7 > assert self.x != 42 E assert 42 != 42 E + where 42 = .x - + failure_demo.py:235: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - + self = - + def test_compare(self): > assert globf(10) < 5 E assert 11 < 5 E + where 11 = globf(10) - + failure_demo.py:238: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - + self = - + def 
test_try_finally(self): x = 1 try: > assert x == 0 E assert 1 == 0 - + failure_demo.py:243: AssertionError ___________________ TestCustomAssertMsg.test_single_line ___________________ - + self = - + def test_single_line(self): - + class A(object): a = 1 - + b = 2 > assert A.a == b, "A.a appears not to be b" E AssertionError: A.a appears not to be b E assert 1 == 2 E + where 1 = .A'>.a - + failure_demo.py:256: AssertionError ____________________ TestCustomAssertMsg.test_multiline ____________________ - + self = - + def test_multiline(self): - + class A(object): a = 1 - + b = 2 > assert ( A.a == b @@ -599,20 +599,20 @@ get on the terminal - we are working on that):: E one of those E assert 1 == 2 E + where 1 = .A'>.a - + failure_demo.py:264: AssertionError ___________________ TestCustomAssertMsg.test_custom_repr ___________________ - + self = - + def test_custom_repr(self): - + class JSON(object): a = 1 - + def __repr__(self): return "This is JSON\n{\n 'foo': 'bar'\n}" - + a = JSON() b = 2 > assert a.a == b, a @@ -622,12 +622,12 @@ get on the terminal - we are working on that):: E } E assert 1 == 2 E + where 1 = This is JSON\n{\n 'foo': 'bar'\n}.a - + failure_demo.py:278: AssertionError ============================= warnings summary ============================= - None + Metafunc.addcall is deprecated and scheduled to be removed in pytest 4.0. Please use Metafunc.parametrize instead. - + -- Docs: http://doc.pytest.org/en/latest/warnings.html ================== 42 failed, 1 warnings in 0.12 seconds =================== diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 180637ae9..b2fd8f8e8 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -49,9 +49,9 @@ Let's run this without supplying our new option:: F [100%] ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + cmdopt = 'type1' - + def test_answer(cmdopt): if cmdopt == "type1": print("first") @@ -59,7 +59,7 @@ Let's run this without supplying our new option:: print("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- first @@ -71,9 +71,9 @@ And now with supplying a command line option:: F [100%] ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + cmdopt = 'type2' - + def test_answer(cmdopt): if cmdopt == "type1": print("first") @@ -81,7 +81,7 @@ And now with supplying a command line option:: print("second") > assert 0 # to see what was printed E assert 0 - + test_sample.py:6: AssertionError --------------------------- Captured stdout call --------------------------- second @@ -124,7 +124,7 @@ directory with the above conftest.py:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= .. 
_`excontrolskip`: @@ -182,11 +182,11 @@ and when running it will see a skipped "slow" test:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .s [100%] ========================= short test summary info ========================== SKIP [1] test_module.py:8: need --runslow option to run - + =================== 1 passed, 1 skipped in 0.12 seconds ==================== Or run it including the ``slow`` marked test:: @@ -196,9 +196,9 @@ Or run it including the ``slow`` marked test:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py .. [100%] - + ========================= 2 passed in 0.12 seconds ========================= Writing well integrated assertion helpers @@ -236,11 +236,11 @@ Let's run our little function:: F [100%] ================================= FAILURES ================================= ______________________________ test_something ______________________________ - + def test_something(): > checkconfig(42) E Failed: not configured: 42 - + test_checkconfig.py:11: Failed 1 failed in 0.12 seconds @@ -335,7 +335,7 @@ which will add the string to the test header accordingly:: project deps: mylib-1.1 rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= .. regendoc:wipe @@ -363,7 +363,7 @@ which will add info only when run with "--v":: did you? rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= and nothing when run plainly:: @@ -373,7 +373,7 @@ and nothing when run plainly:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items - + ======================= no tests ran in 0.12 seconds ======================= profiling test duration @@ -410,9 +410,9 @@ Now we can profile which test functions execute the slowest:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_some_are_slow.py ... [100%] - + ========================= slowest 3 test durations ========================= 0.30s call test_some_are_slow.py::test_funcslow2 0.20s call test_some_are_slow.py::test_funcslow1 @@ -482,18 +482,18 @@ If we run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - + test_step.py .Fx. [100%] - + ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - + self = - + def test_modification(self): > assert 0 E assert 0 - + test_step.py:11: AssertionError ========================= short test summary info ========================== XFAIL test_step.py::TestUserHandling::()::test_deletion @@ -563,12 +563,12 @@ We can run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items - + test_step.py .Fx. 
[ 57%] a/test_db.py F [ 71%] a/test_db2.py F [ 85%] b/test_error.py E [100%] - + ================================== ERRORS ================================== _______________________ ERROR at setup of test_root ________________________ file $REGENDOC_TMPDIR/b/test_error.py, line 1 @@ -576,37 +576,37 @@ We can run this:: E fixture 'db' not found > available fixtures: cache, capfd, capfdbinary, caplog, capsys, capsysbinary, doctest_namespace, monkeypatch, pytestconfig, record_property, record_xml_attribute, record_xml_property, recwarn, tmpdir, tmpdir_factory > use 'pytest --fixtures [testpath]' for help on them. - + $REGENDOC_TMPDIR/b/test_error.py:1 ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - + self = - + def test_modification(self): > assert 0 E assert 0 - + test_step.py:11: AssertionError _________________________________ test_a1 __________________________________ - + db = - + def test_a1(db): > assert 0, db # to show value E AssertionError: E assert 0 - + a/test_db.py:2: AssertionError _________________________________ test_a2 __________________________________ - + db = - + def test_a2(db): > assert 0, db # to show value E AssertionError: E assert 0 - + a/test_db2.py:2: AssertionError ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.12 seconds ========== @@ -674,25 +674,25 @@ and run them:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF [100%] - + ================================= FAILURES ================================= ________________________________ test_fail1 ________________________________ - + tmpdir = local('PYTEST_TMPDIR/test_fail10') - + def test_fail1(tmpdir): > assert 0 E assert 0 - + test_module.py:2: AssertionError ________________________________ test_fail2 ________________________________ - + def test_fail2(): > assert 0 E assert 0 - + test_module.py:6: AssertionError ========================= 2 failed in 0.12 seconds ========================= @@ -773,36 +773,36 @@ and run it:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_module.py Esetting up a test failed! test_module.py::test_setup_fails Fexecuting test failed test_module.py::test_call_fails F - + ================================== ERRORS ================================== ____________________ ERROR at setup of test_setup_fails ____________________ - + @pytest.fixture def other(): > assert 0 E assert 0 - + test_module.py:7: AssertionError ================================= FAILURES ================================= _____________________________ test_call_fails ______________________________ - + something = None - + def test_call_fails(something): > assert 0 E assert 0 - + test_module.py:15: AssertionError ________________________________ test_fail2 ________________________________ - + def test_fail2(): > assert 0 E assert 0 - + test_module.py:19: AssertionError ==================== 2 failed, 1 error in 0.12 seconds ===================== diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index d4d386792..d236b480c 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -73,20 +73,20 @@ marked ``smtp`` fixture function. 
Running the test looks like this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_smtpsimple.py F [100%] - + ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_smtpsimple.py:11: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -209,32 +209,32 @@ inspect what is going on and can now run the tests:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_module.py FF [100%] - + ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes E assert 0 - + test_module.py:6: AssertionError ________________________________ test_noop _________________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ========================= 2 failed in 0.12 seconds ========================= @@ -337,7 +337,7 @@ Let's execute it:: $ pytest -s -q --tb=no FFteardown smtp - + 2 failed in 0.12 seconds We see that the ``smtp`` instance is finalized after the two @@ -446,7 +446,7 @@ again, nothing much has changed:: $ pytest -s -q --tb=no FFfinalizing (smtp.gmail.com) - + 2 failed in 0.12 seconds Let's quickly create another test module that actually sets the @@ -567,51 +567,51 @@ So let's just do another run:: FFFF [100%] ================================= FAILURES ================================= ________________________ test_ehlo[smtp.gmail.com] _________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg > assert 0 # for demo purposes E assert 0 - + test_module.py:6: AssertionError ________________________ test_noop[smtp.gmail.com] _________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ________________________ test_ehlo[mail.python.org] ________________________ - + smtp = - + def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 > assert b"smtp.gmail.com" in msg E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' - + test_module.py:5: AssertionError -------------------------- Captured stdout setup --------------------------- finalizing ________________________ test_noop[mail.python.org] ________________________ - + smtp = - + def test_noop(smtp): response, msg = smtp.noop() assert response == 250 > assert 0 # for demo purposes E assert 0 - + test_module.py:11: AssertionError ------------------------- Captured stdout teardown ------------------------- finalizing @@ -683,7 +683,7 @@ Running the above tests results in the following test IDs being used:: - + ======================= no tests ran in 0.12 seconds ======================= .. 
_`fixture-parametrize-marks`: @@ -713,11 +713,11 @@ Running this test will *skip* the invocation of ``data_set`` with value ``2``:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 3 items - + test_fixture_marks.py::test_data[0] PASSED [ 33%] test_fixture_marks.py::test_data[1] PASSED [ 66%] test_fixture_marks.py::test_data[2] SKIPPED [100%] - + =================== 2 passed, 1 skipped in 0.12 seconds ==================== .. _`interdependent fixtures`: @@ -756,10 +756,10 @@ Here we declare an ``app`` fixture which receives the previously defined cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items - + test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED [ 50%] test_appsetup.py::test_smtp_exists[mail.python.org] PASSED [100%] - + ========================= 2 passed in 0.12 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two @@ -825,26 +825,26 @@ Let's run the tests in verbose mode and with looking at the print-output:: cachedir: .pytest_cache rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items - + test_module.py::test_0[1] SETUP otherarg 1 RUN test0 with otherarg 1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_0[2] SETUP otherarg 2 RUN test0 with otherarg 2 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod1] SETUP modarg mod1 RUN test1 with modarg mod1 PASSED test_module.py::test_2[mod1-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod1 PASSED TEARDOWN otherarg 1 - + test_module.py::test_2[mod1-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod1 PASSED TEARDOWN otherarg 2 - + test_module.py::test_1[mod2] TEARDOWN modarg mod1 SETUP modarg mod2 RUN test1 with modarg mod2 @@ -852,13 +852,13 @@ Let's run the tests in verbose mode and with looking at the print-output:: test_module.py::test_2[mod2-1] SETUP otherarg 1 RUN test2 with otherarg 1 and modarg mod2 PASSED TEARDOWN otherarg 1 - + test_module.py::test_2[mod2-2] SETUP otherarg 2 RUN test2 with otherarg 2 and modarg mod2 PASSED TEARDOWN otherarg 2 TEARDOWN modarg mod2 - - + + ========================= 8 passed in 0.12 seconds ========================= You can see that the parametrized module-scoped ``modarg`` resource caused an diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index f2dbec5e9..aae0bf971 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -50,17 +50,17 @@ That’s it. You can now execute the test function:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_sample.py F [100%] - + ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + def test_answer(): > assert func(3) == 5 E assert 4 == 5 E + where 4 = func(3) - + test_sample.py:5: AssertionError ========================= 1 failed in 0.12 seconds ========================= @@ -117,15 +117,15 @@ Once you develop multiple tests, you may want to group them into a class. 
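The ``.F`` run below exercises a two-test class; a minimal sketch reconstructed from the traceback (the body of the passing ``test_one`` is an assumption)::

    # content of test_class.py (illustrative sketch)
    class TestClass(object):
        def test_one(self):
            x = "this"
            assert "h" in x  # assumed passing test

        def test_two(self):
            x = "hello"
            assert hasattr(x, "check")  # fails: str has no 'check' attribute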
pytest .F [100%] ================================= FAILURES ================================= ____________________________ TestClass.test_two ____________________________ - + self = - + def test_two(self): x = "hello" > assert hasattr(x, 'check') E AssertionError: assert False E + where False = hasattr('hello', 'check') - + test_class.py:8: AssertionError 1 failed, 1 passed in 0.12 seconds @@ -147,14 +147,14 @@ List the name ``tmpdir`` in the test function signature and ``pytest`` will look F [100%] ================================= FAILURES ================================= _____________________________ test_needsfiles ______________________________ - + tmpdir = local('PYTEST_TMPDIR/test_needsfiles0') - + def test_needsfiles(tmpdir): print (tmpdir) > assert 0 E assert 0 - + test_tmpdir.py:3: AssertionError --------------------------- Captured stdout call --------------------------- PYTEST_TMPDIR/test_needsfiles0 diff --git a/doc/en/index.rst b/doc/en/index.rst index 6a382e571..0539a1c55 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -29,17 +29,17 @@ To execute it:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_sample.py F [100%] - + ================================= FAILURES ================================= _______________________________ test_answer ________________________________ - + def test_answer(): > assert inc(3) == 5 E assert 4 == 5 E + where 4 = inc(3) - + test_sample.py:6: AssertionError ========================= 1 failed in 0.12 seconds ========================= diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index 693cf1913..f87f47d08 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -57,14 +57,14 @@ them in turn:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_expectation.py ..F [100%] - + ================================= FAILURES ================================= ____________________________ test_eval[6*9-42] _____________________________ - + test_input = '6*9', expected = 42 - + @pytest.mark.parametrize("test_input,expected", [ ("3+5", 8), ("2+4", 6), @@ -74,7 +74,7 @@ them in turn:: > assert eval(test_input) == expected E AssertionError: assert 54 == 42 E + where 54 = eval('6*9') - + test_expectation.py:8: AssertionError ==================== 1 failed, 2 passed in 0.12 seconds ==================== @@ -106,9 +106,9 @@ Let's run this:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items - + test_expectation.py ..x [100%] - + =================== 2 passed, 1 xfailed in 0.12 seconds ==================== The one parameter set which caused a failure previously now @@ -174,15 +174,15 @@ Let's also run with a stringinput that will lead to a failing test:: F [100%] ================================= FAILURES ================================= ___________________________ test_valid_string[!] ___________________________ - + stringinput = '!' 
- + def test_valid_string(stringinput): > assert stringinput.isalpha() E AssertionError: assert False E + where False = () E + where = '!'.isalpha - + test_strings.py:3: AssertionError 1 failed in 0.12 seconds diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index bfa6f3e75..a057d1d12 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -334,12 +334,12 @@ Running it with the report-on-xfail option gives this output:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR/example, inifile: collected 7 items - + xfail_demo.py xxxxxxx [100%] ========================= short test summary info ========================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 - reason: [NOTRUN] + reason: [NOTRUN] XFAIL xfail_demo.py::test_hello3 condition: hasattr(os, 'sep') XFAIL xfail_demo.py::test_hello4 @@ -349,7 +349,7 @@ Running it with the report-on-xfail option gives this output:: XFAIL xfail_demo.py::test_hello6 reason: reason XFAIL xfail_demo.py::test_hello7 - + ======================== 7 xfailed in 0.12 seconds ========================= .. _`skip/xfail with parametrize`: diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 421b4c898..0171e3168 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -32,14 +32,14 @@ Running this would result in a passed test except for the last platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_tmpdir.py F [100%] - + ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ - + tmpdir = local('PYTEST_TMPDIR/test_create_file0') - + def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") p.write("content") @@ -47,7 +47,7 @@ Running this would result in a passed test except for the last assert len(tmpdir.listdir()) == 1 > assert 0 E assert 0 - + test_tmpdir.py:7: AssertionError ========================= 1 failed in 0.12 seconds ========================= diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index 53192b346..ec9f466b9 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -130,30 +130,30 @@ the ``self.db`` values in the traceback:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items - + test_unittest_db.py FF [100%] - + ================================= FAILURES ================================= ___________________________ MyTest.test_method1 ____________________________ - + self = - + def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes E AssertionError: .DummyDB object at 0xdeadbeef> E assert 0 - + test_unittest_db.py:9: AssertionError ___________________________ MyTest.test_method2 ____________________________ - + self = - + def test_method2(self): > assert 0, self.db # fail for demo purposes E AssertionError: .DummyDB object at 0xdeadbeef> E assert 0 - + test_unittest_db.py:12: AssertionError ========================= 2 failed in 0.12 seconds ========================= diff --git a/doc/en/usage.rst b/doc/en/usage.rst index 25be54395..07077468a 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -502,7 +502,7 @@ hook was invoked:: $ python myinvoke.py . [100%]*** test run reporting finishing - + .. 
note:: diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst index df93a02b5..62f96ccf7 100644 --- a/doc/en/warnings.rst +++ b/doc/en/warnings.rst @@ -25,14 +25,14 @@ Running pytest now produces this output:: platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: collected 1 item - + test_show_warnings.py . [100%] - + ============================= warnings summary ============================= test_show_warnings.py::test_one $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2 warnings.warn(UserWarning("api v1, should use functions from v2")) - + -- Docs: http://doc.pytest.org/en/latest/warnings.html =================== 1 passed, 1 warnings in 0.12 seconds =================== @@ -45,17 +45,17 @@ them into errors:: F [100%] ================================= FAILURES ================================= _________________________________ test_one _________________________________ - + def test_one(): > assert api_v1() == 1 - - test_show_warnings.py:8: - _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - + + test_show_warnings.py:8: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + def api_v1(): > warnings.warn(UserWarning("api v1, should use functions from v2")) E UserWarning: api v1, should use functions from v2 - + test_show_warnings.py:4: UserWarning 1 failed in 0.12 seconds
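For reference, the module exercised in the two runs above can be reconstructed from the tracebacks: line 4 issues the warning and line 8 calls the API under test::

    import warnings

    def api_v1():
        warnings.warn(UserWarning("api v1, should use functions from v2"))
        return 1

    def test_one():
        assert api_v1() == 1

The first run only records the warning in the warnings summary; the second run, with warnings escalated to errors (for example via pytest's ``-W error::UserWarning`` option), turns the same warning into the failure shown.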