From 765b053984c71fdb350d6ebdc31beff5964e7b69 Mon Sep 17 00:00:00 2001 From: holger krekel Date: Tue, 20 Nov 2012 13:42:00 +0100 Subject: [PATCH] bump version, add announcement, regen docs --- CHANGELOG | 8 +-- _pytest/__init__.py | 2 +- doc/en/announce/index.txt | 1 + doc/en/announce/release-2.3.4.txt | 39 +++++++++++ doc/en/assert.txt | 4 +- doc/en/capture.txt | 4 +- doc/en/conf.py | 2 +- doc/en/doctest.txt | 4 +- doc/en/example/markers.txt | 52 +++++--------- doc/en/example/nonpython.txt | 10 +-- doc/en/example/parametrize.txt | 10 +-- doc/en/example/pythoncollection.txt | 8 +-- doc/en/example/reportingdemo.txt | 104 ++++++++++++++-------------- doc/en/example/simple.txt | 49 +++++++------ doc/en/fixture.txt | 30 ++++---- doc/en/getting-started.txt | 10 +-- doc/en/parametrize.txt | 8 +-- doc/en/skipping.txt | 2 +- doc/en/tmpdir.txt | 6 +- doc/en/unittest.txt | 6 +- setup.py | 2 +- tox.ini | 2 +- 22 files changed, 193 insertions(+), 170 deletions(-) create mode 100644 doc/en/announce/release-2.3.4.txt diff --git a/CHANGELOG b/CHANGELOG index b0a6accbd..83e8250a1 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,9 +1,9 @@ -Changes between 2.3.3 and 2.3.4.dev +Changes between 2.3.3 and 2.3.4 ----------------------------------- -- yielded tests will activate autouse-fixtures -- NOTE: yielded tests cannot use fixtures - if you need this - you may want to use the post-2.0 parametrize features, see +- yielded test functions will now have autouse-fixtures active but + cannot accept fixtures as funcargs - it's anyway recommended to + rather use the post-2.0 parametrize features instead of yield, see: http://pytest.org/latest/example/parametrize.html - fix autouse-issue where autouse-fixtures would not be discovered if defined in a a/conftest.py file and tests in a/tests/test_some.py diff --git a/_pytest/__init__.py b/_pytest/__init__.py index 5c84caa5f..7528dcc06 100644 --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.3.4.dev6' +__version__ = '2.4.6' diff --git a/doc/en/announce/index.txt b/doc/en/announce/index.txt index b03f6bd78..33072fc34 100644 --- a/doc/en/announce/index.txt +++ b/doc/en/announce/index.txt @@ -5,6 +5,7 @@ Release announcements .. toctree:: :maxdepth: 2 + release-2.3.4 release-2.3.3 release-2.3.2 release-2.3.1 diff --git a/doc/en/announce/release-2.3.4.txt b/doc/en/announce/release-2.3.4.txt new file mode 100644 index 000000000..6efbbcd77 --- /dev/null +++ b/doc/en/announce/release-2.3.4.txt @@ -0,0 +1,39 @@ +pytest-2.3.4: stabilization, more flexible selection via "-k expr" +=========================================================================== + +pytest-2.3.4 is a small stabilization release of the py.test tool +which offers uebersimple assertions, scalable fixture mechanisms +and deep customization for testing with Python. Particularly, +this release provides: + +- make "-k" option accept expressions the same as with "-m" so that one + can write: -k "name1 or name2" etc. This is a slight usage incompatibility + if you used special syntax like "TestClass.test_method" which you now + need to write as -k "TestClass and test_method" to match a certain + method in a certain test class.
+- allow to dynamically define markers via + item.keywords[...]=assignment integrating with "-m" option +- yielded test functions will now have autouse-fixtures active but + cannot accept fixtures as funcargs - it's anyway recommended to + rather use the post-2.0 parametrize features instead of yield, see: + http://pytest.org/latest/example/parametrize.html +- fix autouse-issue where autouse-fixtures would not be discovered + if defined in a a/conftest.py file and tests in a/tests/test_some.py +- fix issue226 - LIFO ordering for fixture teardowns +- fix issue224 - invocations with >256 char arguments now work +- fix issue91 - add/discuss package/directory level setups in example +- fixes related to autouse discovery and calling + +Thanks to Thomas Waldmann in particular for spotting autouse issues. + +See + + http://pytest.org/ + +for general information. To install or upgrade pytest: + + pip install -U pytest # or + easy_install -U pytest + +best, +holger krekel diff --git a/doc/en/assert.txt b/doc/en/assert.txt index b01e90e39..93f44a312 100644 --- a/doc/en/assert.txt +++ b/doc/en/assert.txt @@ -26,7 +26,7 @@ you will see the return value of the function call:: $ py.test test_assert1.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items test_assert1.py F @@ -110,7 +110,7 @@ if you run this module:: $ py.test test_assert2.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items test_assert2.py F diff --git a/doc/en/capture.txt b/doc/en/capture.txt index 4f25c118f..868efb3eb 100644 --- a/doc/en/capture.txt +++ b/doc/en/capture.txt @@ -64,7 +64,7 @@ of the failing function and hide the other one:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items test_module.py .F @@ -78,7 +78,7 @@ of the failing function and hide the other one:: test_module.py:9: AssertionError ----------------------------- Captured stdout ------------------------------ - setting up + setting up ==================== 1 failed, 1 passed in 0.01 seconds ==================== Accessing captured output from a test function diff --git a/doc/en/conf.py b/doc/en/conf.py index b78b0bef6..b15711d65 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -17,7 +17,7 @@ # # The full version, including alpha/beta/rc tags. # The short X.Y version. -version = release = "2.3.3.3" +version = release = "2.3.4" import sys, os diff --git a/doc/en/doctest.txt b/doc/en/doctest.txt index 438c1d1ca..53c0038a3 100644 --- a/doc/en/doctest.txt +++ b/doc/en/doctest.txt @@ -44,9 +44,9 @@ then you can just invoke ``py.test`` without command line options:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items mymodule.py . 
- ========================= 1 passed in 0.11 seconds ========================= + ========================= 1 passed in 0.02 seconds ========================= diff --git a/doc/en/example/markers.txt b/doc/en/example/markers.txt index 8bf353577..d9a211ecb 100644 --- a/doc/en/example/markers.txt +++ b/doc/en/example/markers.txt @@ -28,9 +28,7 @@ You can then restrict a test run to only run tests marked with ``webtest``:: $ py.test -v -m webtest =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 -- /home/hpk/venv/1/bin/python - cachedir: /tmp/doc-exec-196/.cache - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:3: test_send_http PASSED @@ -42,16 +40,14 @@ Or the inverse, running all tests except the webtest ones:: $ py.test -v -m "not webtest" =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 -- /home/hpk/venv/1/bin/python - cachedir: /tmp/doc-exec-196/.cache - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:6: test_something_quick PASSED test_server.py:8: test_another PASSED ================= 1 tests deselected by "-m 'not webtest'" ================= - ================== 2 passed, 1 deselected in 0.02 seconds ================== + ================== 2 passed, 1 deselected in 0.01 seconds ================== Using ``-k expr`` to select tests based on their name ------------------------------------------------------- @@ -65,23 +61,19 @@ select tests based on their names:: $ py.test -v -k http # running with the above defined example module =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 -- /home/hpk/venv/1/bin/python - cachedir: /tmp/doc-exec-196/.cache - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:3: test_send_http PASSED - =================== 2 tests deselected by '-ksend_http' ==================== + ====================== 2 tests deselected by '-khttp' ====================== ================== 1 passed, 2 deselected in 0.01 seconds ================== And you can also run all tests except the ones that match the keyword:: $ py.test -k "not send_http" -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 -- /home/hpk/venv/1/bin/python - cachedir: /tmp/doc-exec-196/.cache - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 3 items test_server.py:6: test_something_quick PASSED @@ -94,9 +86,7 @@ Or to select "http" and "quick" tests:: $ py.test -k "http or quick" -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 -- /home/hpk/venv/1/bin/python - cachedir: /tmp/doc-exec-196/.cache - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... 
collected 3 items test_server.py:3: test_send_http PASSED @@ -242,8 +232,7 @@ the test needs:: $ py.test -E stage2 =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items test_someenv.py s @@ -254,8 +243,7 @@ and here is one that specifies exactly the environment needed:: $ py.test -E stage1 =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items test_someenv.py . @@ -372,28 +360,26 @@ then you will see two test skipped and two executed tests as expected:: $ py.test -rs # this option reports skip reasons =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 4 items test_plat.py s.s. ========================= short test summary info ========================== - SKIP [2] /tmp/doc-exec-196/conftest.py:12: cannot run on platform linux2 + SKIP [2] /tmp/doc-exec-69/conftest.py:12: cannot run on platform linux2 - =================== 2 passed, 2 skipped in 0.02 seconds ==================== + =================== 2 passed, 2 skipped in 0.01 seconds ==================== Note that if you specify a platform via the marker-command line option like this:: $ py.test -m linux2 =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 4 items test_plat.py . =================== 3 tests deselected by "-m 'linux2'" ==================== - ================== 1 passed, 3 deselected in 0.02 seconds ================== + ================== 1 passed, 3 deselected in 0.01 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. 
@@ -438,8 +424,7 @@ We can now use the ``-m option`` to select one set:: $ py.test -m interface --tb=short =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 4 items test_module.py FF @@ -454,14 +439,13 @@ We can now use the ``-m option`` to select one set:: > assert 0 E assert 0 ================== 2 tests deselected by "-m 'interface'" ================== - ================== 2 failed, 2 deselected in 0.02 seconds ================== + ================== 2 failed, 2 deselected in 0.01 seconds ================== or to select both "event" and "interface" tests:: $ py.test -m "interface or event" --tb=short =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.4.dev3 - plugins: pep8, cache, xdist + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 4 items test_module.py FFF @@ -480,4 +464,4 @@ or to select both "event" and "interface" tests:: > assert 0 E assert 0 ============= 1 tests deselected by "-m 'interface or event'" ============== - ================== 3 failed, 1 deselected in 0.02 seconds ================== + ================== 3 failed, 1 deselected in 0.01 seconds ================== diff --git a/doc/en/example/nonpython.txt b/doc/en/example/nonpython.txt index 91b56d745..f2412f15f 100644 --- a/doc/en/example/nonpython.txt +++ b/doc/en/example/nonpython.txt @@ -27,7 +27,7 @@ now execute the test specification:: nonpython $ py.test test_simple.yml =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items test_simple.yml .F @@ -37,7 +37,7 @@ now execute the test specification:: usecase execution failed spec failed: 'some': 'other' no further details known at this point. - ==================== 1 failed, 1 passed in 0.05 seconds ==================== + ==================== 1 failed, 1 passed in 0.10 seconds ==================== You get one dot for the passing ``sub1: sub1`` check and one failure. Obviously in the above ``conftest.py`` you'll want to implement a more @@ -56,7 +56,7 @@ consulted when reporting in ``verbose`` mode:: nonpython $ py.test -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 2 items test_simple.yml:1: usecase: ok PASSED @@ -67,14 +67,14 @@ consulted when reporting in ``verbose`` mode:: usecase execution failed spec failed: 'some': 'other' no further details known at this point. 
- ==================== 1 failed, 1 passed in 0.05 seconds ==================== + ==================== 1 failed, 1 passed in 0.04 seconds ==================== While developing your custom test collection and execution it's also interesting to just look at the collection tree:: nonpython $ py.test --collectonly =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items diff --git a/doc/en/example/parametrize.txt b/doc/en/example/parametrize.txt index 7195baed2..c677219e8 100644 --- a/doc/en/example/parametrize.txt +++ b/doc/en/example/parametrize.txt @@ -104,7 +104,7 @@ this is a fully self-contained example which you can run with:: $ py.test test_scenarios.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 4 items test_scenarios.py .... @@ -116,7 +116,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia $ py.test --collectonly test_scenarios.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 4 items @@ -180,7 +180,7 @@ Let's first see how it looks like at collection time:: $ py.test test_backends.py --collectonly =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items @@ -195,7 +195,7 @@ And then when we run the test:: ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ - db = + db = def test_db_initialized(db): # a dummy test @@ -250,7 +250,7 @@ argument sets to use for each test function. Let's run it:: ================================= FAILURES ================================= ________________________ TestClass.test_equals[1-2] ________________________ - self = , a = 1, b = 2 + self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b diff --git a/doc/en/example/pythoncollection.txt b/doc/en/example/pythoncollection.txt index e16d0aa66..dc46432e9 100644 --- a/doc/en/example/pythoncollection.txt +++ b/doc/en/example/pythoncollection.txt @@ -43,7 +43,7 @@ then the test collection looks like this:: $ py.test --collectonly =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items @@ -82,7 +82,7 @@ You can always peek at the collection tree without running tests like this:: . 
$ py.test --collectonly pythoncollection.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 3 items @@ -91,7 +91,7 @@ You can always peek at the collection tree without running tests like this:: - ============================= in 0.01 seconds ============================= + ============================= in 0.00 seconds ============================= customizing test collection to find all .py files --------------------------------------------------------- @@ -135,7 +135,7 @@ interpreters and will leave out the setup.py file:: $ py.test --collectonly =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items diff --git a/doc/en/example/reportingdemo.txt b/doc/en/example/reportingdemo.txt index c95dd10e3..3407e8b5b 100644 --- a/doc/en/example/reportingdemo.txt +++ b/doc/en/example/reportingdemo.txt @@ -13,7 +13,7 @@ get on the terminal - we are working on that): assertion $ py.test failure_demo.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 39 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF @@ -30,7 +30,7 @@ get on the terminal - we are working on that): failure_demo.py:15: AssertionError _________________________ TestFailing.test_simple __________________________ - self = + self = def test_simple(self): def f(): @@ -40,13 +40,13 @@ get on the terminal - we are working on that): > assert f() == g() E assert 42 == 43 - E + where 42 = () - E + and 43 = () + E + where 42 = () + E + and 43 = () failure_demo.py:28: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ - self = + self = def test_simple_multiline(self): otherfunc_multi( @@ -66,19 +66,19 @@ get on the terminal - we are working on that): failure_demo.py:11: AssertionError ___________________________ TestFailing.test_not ___________________________ - self = + self = def test_not(self): def f(): return 42 > assert not f() E assert not 42 - E + where 42 = () + E + where 42 = () failure_demo.py:38: AssertionError _________________ TestSpecialisedExplanations.test_eq_text _________________ - self = + self = def test_eq_text(self): > assert 'spam' == 'eggs' @@ -89,7 +89,7 @@ get on the terminal - we are working on that): failure_demo.py:42: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ - self = + self = def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' @@ -102,7 +102,7 @@ get on the terminal - we are working on that): failure_demo.py:45: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ - self = + self = def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -115,7 +115,7 @@ get on the terminal - we are working on that): failure_demo.py:48: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ - self = + self = def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 @@ -132,7 +132,7 @@ get on the terminal - we are working on that): failure_demo.py:53: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ - self = + self = def test_eq_long_text_multiline(self): a = 
'1\n'*100 + 'a' + '2\n'*100 @@ -156,7 +156,7 @@ get on the terminal - we are working on that): failure_demo.py:58: AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ - self = + self = def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] @@ -166,7 +166,7 @@ get on the terminal - we are working on that): failure_demo.py:61: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ - self = + self = def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 @@ -178,7 +178,7 @@ get on the terminal - we are working on that): failure_demo.py:66: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ - self = + self = def test_eq_dict(self): > assert {'a': 0, 'b': 1} == {'a': 0, 'b': 2} @@ -191,7 +191,7 @@ get on the terminal - we are working on that): failure_demo.py:69: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ - self = + self = def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) @@ -207,7 +207,7 @@ get on the terminal - we are working on that): failure_demo.py:72: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ - self = + self = def test_eq_longer_list(self): > assert [1,2] == [1,2,3] @@ -217,7 +217,7 @@ get on the terminal - we are working on that): failure_demo.py:75: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ - self = + self = def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] @@ -226,7 +226,7 @@ get on the terminal - we are working on that): failure_demo.py:78: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ - self = + self = def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' @@ -244,7 +244,7 @@ get on the terminal - we are working on that): failure_demo.py:82: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ - self = + self = def test_not_in_text_single(self): text = 'single foo line' @@ -257,7 +257,7 @@ get on the terminal - we are working on that): failure_demo.py:86: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ - self = + self = def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 @@ -270,7 +270,7 @@ get on the terminal - we are working on that): failure_demo.py:90: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ - self = + self = def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 @@ -289,7 +289,7 @@ get on the terminal - we are working on that): i = Foo() > assert i.b == 2 E assert 1 == 2 - E + where 1 = .b + E + where 1 = .b failure_demo.py:101: AssertionError _________________________ test_attribute_instance __________________________ @@ -299,8 +299,8 @@ get on the terminal - we are working on that): b = 1 > assert Foo().b == 2 E assert 1 == 2 - E + where 1 = .b - E + where = () + E + where 1 = .b + E + where = () failure_demo.py:107: AssertionError __________________________ test_attribute_failure __________________________ @@ -316,7 +316,7 @@ get on the terminal - we are working on that): failure_demo.py:116: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - self = + self = def _get_b(self): > raise Exception('Failed to get attrib') @@ -332,15 +332,15 @@ get on the terminal - we are 
working on that): b = 2 > assert Foo().b == Bar().b E assert 1 == 2 - E + where 1 = .b - E + where = () - E + and 2 = .b - E + where = () + E + where 1 = .b + E + where = () + E + and 2 = .b + E + where = () failure_demo.py:124: AssertionError __________________________ TestRaises.test_raises __________________________ - self = + self = def test_raises(self): s = 'qwe' @@ -355,7 +355,7 @@ get on the terminal - we are working on that): <0-codegen /home/hpk/p/pytest/.tox/regen/lib/python2.7/site-packages/_pytest/python.py:851>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ - self = + self = def test_raises_doesnt(self): > raises(IOError, "int('3')") @@ -364,7 +364,7 @@ get on the terminal - we are working on that): failure_demo.py:136: Failed __________________________ TestRaises.test_raise ___________________________ - self = + self = def test_raise(self): > raise ValueError("demo error") @@ -373,7 +373,7 @@ get on the terminal - we are working on that): failure_demo.py:139: ValueError ________________________ TestRaises.test_tupleerror ________________________ - self = + self = def test_tupleerror(self): > a,b = [1] @@ -382,7 +382,7 @@ get on the terminal - we are working on that): failure_demo.py:142: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ - self = + self = def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] @@ -395,7 +395,7 @@ get on the terminal - we are working on that): l is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ - self = + self = def test_some_error(self): > if namenotexi: @@ -423,7 +423,7 @@ get on the terminal - we are working on that): <2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ - self = + self = def test_complex_error(self): def f(): @@ -452,7 +452,7 @@ get on the terminal - we are working on that): failure_demo.py:5: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ - self = + self = def test_z1_unpack_error(self): l = [] @@ -462,7 +462,7 @@ get on the terminal - we are working on that): failure_demo.py:179: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ - self = + self = def test_z2_type_error(self): l = 3 @@ -472,19 +472,19 @@ get on the terminal - we are working on that): failure_demo.py:183: TypeError ______________________ TestMoreErrors.test_startswith ______________________ - self = + self = def test_startswith(self): s = "123" g = "456" > assert s.startswith(g) - E assert ('456') - E + where = '123'.startswith + E assert ('456') + E + where = '123'.startswith failure_demo.py:188: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ - self = + self = def test_startswith_nested(self): def f(): @@ -492,15 +492,15 @@ get on the terminal - we are working on that): def g(): return "456" > assert f().startswith(g()) - E assert ('456') - E + where = '123'.startswith - E + where '123' = () - E + and '456' = () + E assert ('456') + E + where = '123'.startswith + E + where '123' = () + E + and '456' = () failure_demo.py:195: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - self = + self = def test_global_func(self): > assert isinstance(globf(42), float) @@ -510,18 +510,18 @@ get on the terminal - we are working on that): 
failure_demo.py:198: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - self = + self = def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 - E + where 42 = .x + E + where 42 = .x failure_demo.py:202: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - self = + self = def test_compare(self): > assert globf(10) < 5 @@ -531,7 +531,7 @@ get on the terminal - we are working on that): failure_demo.py:205: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - self = + self = def test_try_finally(self): x = 1 diff --git a/doc/en/example/simple.txt b/doc/en/example/simple.txt index 18f5f6328..59ef1c4d8 100644 --- a/doc/en/example/simple.txt +++ b/doc/en/example/simple.txt @@ -106,7 +106,7 @@ directory with the above conftest.py:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 0 items ============================= in 0.00 seconds ============================= @@ -150,12 +150,12 @@ and when running it will see a skipped "slow" test:: $ py.test -rs # "-rs" means report details on the little 's' =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items test_module.py .s ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-4/conftest.py:9: need --runslow option to run + SKIP [1] /tmp/doc-exec-74/conftest.py:9: need --runslow option to run =================== 1 passed, 1 skipped in 0.01 seconds ==================== @@ -163,7 +163,7 @@ Or run it including the ``slow`` marked test:: $ py.test --runslow =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items test_module.py .. @@ -253,7 +253,7 @@ which will add the string to the test header accordingly:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 project deps: mylib-1.1 collected 0 items @@ -276,7 +276,7 @@ which will add info only when run with "--v":: $ py.test -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 -- /home/hpk/venv/regen/bin/python2.7 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python info1: did you know that ... did you? collecting ... collected 0 items @@ -287,7 +287,7 @@ and nothing when run plainly:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 0 items ============================= in 0.00 seconds ============================= @@ -319,7 +319,7 @@ Now we can profile which test functions execute the slowest:: $ py.test --durations=3 =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 3 items test_some_are_slow.py ... 
@@ -380,7 +380,7 @@ If we run this:: $ py.test -rx =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 4 items test_step.py .Fx. @@ -388,7 +388,7 @@ If we run this:: ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - self = + self = def test_modification(self): > assert 0 @@ -398,7 +398,7 @@ If we run this:: ========================= short test summary info ========================== XFAIL test_step.py::TestUserHandling::()::test_deletion reason: previous test failed (test_modification) - ============== 1 failed, 2 passed, 1 xfailed in 0.02 seconds =============== + ============== 1 failed, 2 passed, 1 xfailed in 0.01 seconds =============== We'll see that ``test_deletion`` was not executed because ``test_modification`` failed. It is reported as an "expected failure". @@ -450,7 +450,7 @@ We can run this:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 7 items test_step.py .Fx. @@ -460,17 +460,17 @@ We can run this:: ================================== ERRORS ================================== _______________________ ERROR at setup of test_root ________________________ - file /tmp/doc-exec-4/b/test_error.py, line 1 + file /tmp/doc-exec-74/b/test_error.py, line 1 def test_root(db): # no db here, will error out fixture 'db' not found available fixtures: pytestconfig, recwarn, monkeypatch, capfd, capsys, tmpdir use 'py.test --fixtures [testpath]' for help on them. - /tmp/doc-exec-4/b/test_error.py:1 + /tmp/doc-exec-74/b/test_error.py:1 ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - self = + self = def test_modification(self): > assert 0 @@ -479,20 +479,20 @@ We can run this:: test_step.py:9: AssertionError _________________________________ test_a1 __________________________________ - db = + db = def test_a1(db): > assert 0, db # to show value - E AssertionError: + E AssertionError: a/test_db.py:2: AssertionError _________________________________ test_a2 __________________________________ - db = + db = def test_a2(db): > assert 0, db # to show value - E AssertionError: + E AssertionError: a/test_db2.py:2: AssertionError ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.03 seconds ========== @@ -550,7 +550,7 @@ and run them:: $ py.test test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items test_module.py FF @@ -558,7 +558,7 @@ and run them:: ================================= FAILURES ================================= ________________________________ test_fail1 ________________________________ - tmpdir = local('/tmp/pytest-6/test_fail10') + tmpdir = local('/tmp/pytest-376/test_fail10') def test_fail1(tmpdir): > assert 0 @@ -572,12 +572,12 @@ and run them:: E assert 0 test_module.py:4: AssertionError - ========================= 2 failed in 0.01 seconds ========================= + ========================= 2 failed in 0.02 seconds ========================= you will have a "failures" file which contains the failing test ids:: $ cat failures - 
test_module.py::test_fail1 (/tmp/pytest-6/test_fail10) + test_module.py::test_fail1 (/tmp/pytest-376/test_fail10) test_module.py::test_fail2 Making test result information available in fixtures @@ -640,7 +640,7 @@ and run it:: $ py.test -s test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 3 items test_module.py EFF @@ -675,7 +675,6 @@ and run it:: setting up a test failed! test_module.py::test_setup_fails executing test failed test_module.py::test_call_fails - You'll see that the fixture finalizers could use the precise reporting information. diff --git a/doc/en/fixture.txt b/doc/en/fixture.txt index 6b204239e..5311265a6 100644 --- a/doc/en/fixture.txt +++ b/doc/en/fixture.txt @@ -71,7 +71,7 @@ marked ``smtp`` fixture function. Running the test looks like this:: $ py.test test_smtpsimple.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items test_smtpsimple.py F @@ -79,7 +79,7 @@ marked ``smtp`` fixture function. Running the test looks like this:: ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - smtp = + smtp = def test_ehlo(smtp): response, msg = smtp.ehlo() @@ -89,7 +89,7 @@ marked ``smtp`` fixture function. Running the test looks like this:: E assert 0 test_smtpsimple.py:12: AssertionError - ========================= 1 failed in 0.26 seconds ========================= + ========================= 1 failed in 0.23 seconds ========================= In the failure traceback we see that the test function was called with a ``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture @@ -189,7 +189,7 @@ inspect what is going on and can now run the tests:: $ py.test test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items test_module.py FF @@ -197,7 +197,7 @@ inspect what is going on and can now run the tests:: ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() @@ -209,7 +209,7 @@ inspect what is going on and can now run the tests:: test_module.py:6: AssertionError ________________________________ test_noop _________________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -218,7 +218,7 @@ inspect what is going on and can now run the tests:: E assert 0 test_module.py:11: AssertionError - ========================= 2 failed in 0.22 seconds ========================= + ========================= 2 failed in 0.19 seconds ========================= You see the two ``assert 0`` failing and more importantly you can also see that the same (module-scoped) ``smtp`` object was passed into the two @@ -271,7 +271,7 @@ using it has executed:: $ py.test -s -q --tb=no FF - finalizing + finalizing We see that the ``smtp`` instance is finalized after the two tests using it tests executed. 
If we had specified ``scope='function'`` @@ -342,7 +342,7 @@ So let's just do another run:: ================================= FAILURES ================================= __________________________ test_ehlo[merlinux.eu] __________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() @@ -354,7 +354,7 @@ So let's just do another run:: test_module.py:6: AssertionError __________________________ test_noop[merlinux.eu] __________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -365,7 +365,7 @@ So let's just do another run:: test_module.py:11: AssertionError ________________________ test_ehlo[mail.python.org] ________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() @@ -376,7 +376,7 @@ So let's just do another run:: test_module.py:5: AssertionError ________________________ test_noop[mail.python.org] ________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -424,13 +424,13 @@ Here we declare an ``app`` fixture which receives the previously defined $ py.test -v test_appsetup.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 2 items test_appsetup.py:12: test_smtp_exists[merlinux.eu] PASSED test_appsetup.py:12: test_smtp_exists[mail.python.org] PASSED - ========================= 2 passed in 6.43 seconds ========================= + ========================= 2 passed in 5.82 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two different ``App`` instances and respective smtp servers. There is no @@ -489,7 +489,7 @@ Let's run the tests in verbose mode and with looking at the print-output:: $ py.test -v -s test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 -- /home/hpk/p/pytest/.tox/regen/bin/python collecting ... collected 8 items test_module.py:16: test_0[1] PASSED diff --git a/doc/en/getting-started.txt b/doc/en/getting-started.txt index 87d7ca75e..250f288d2 100644 --- a/doc/en/getting-started.txt +++ b/doc/en/getting-started.txt @@ -23,7 +23,7 @@ Installation options:: To check your installation has installed the correct version:: $ py.test --version - This is py.test version 2.3.3, imported from /home/hpk/p/pytest/.tox/regen/lib/python2.7/site-packages/pytest.pyc + This is py.test version 2.4.6, imported from /home/hpk/p/pytest/.tox/regen/lib/python2.7/site-packages/pytest.pyc If you get an error checkout :ref:`installation issues`. @@ -45,7 +45,7 @@ That's it. You can execute the test function now:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items test_sample.py F @@ -122,7 +122,7 @@ run the module by passing its filename:: ================================= FAILURES ================================= ____________________________ TestClass.test_two ____________________________ - self = + self = def test_two(self): x = "hello" @@ -157,7 +157,7 @@ before performing the test function call. 
Let's just run it:: ================================= FAILURES ================================= _____________________________ test_needsfiles ______________________________ - tmpdir = local('/tmp/pytest-780/test_needsfiles0') + tmpdir = local('/tmp/pytest-372/test_needsfiles0') def test_needsfiles(tmpdir): print tmpdir @@ -166,7 +166,7 @@ before performing the test function call. Let's just run it:: test_tmpdir.py:3: AssertionError ----------------------------- Captured stdout ------------------------------ - /tmp/pytest-780/test_needsfiles0 + /tmp/pytest-372/test_needsfiles0 Before the test runs, a unique-per-test-invocation temporary directory was created. More info at :ref:`tmpdir handling`. diff --git a/doc/en/parametrize.txt b/doc/en/parametrize.txt index e601fc2fd..f13bfa2c7 100644 --- a/doc/en/parametrize.txt +++ b/doc/en/parametrize.txt @@ -53,7 +53,7 @@ which will thus run three times:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 3 items test_expectation.py ..F @@ -135,8 +135,8 @@ Let's also run with a stringinput that will lead to a failing test:: def test_valid_string(stringinput): > assert stringinput.isalpha() - E assert () - E + where = '!'.isalpha + E assert () + E + where = '!'.isalpha test_strings.py:3: AssertionError @@ -149,7 +149,7 @@ listlist:: $ py.test -q -rs test_strings.py s ========================= short test summary info ========================== - SKIP [1] /home/hpk/p/pytest/.tox/regen/lib/python2.7/site-packages/_pytest/python.py:960: got empty parameter set, function test_valid_string at /tmp/doc-exec-103/test_strings.py:1 + SKIP [1] /home/hpk/p/pytest/.tox/regen/lib/python2.7/site-packages/_pytest/python.py:962: got empty parameter set, function test_valid_string at /tmp/doc-exec-36/test_strings.py:1 For further examples, you might want to look at :ref:`more parametrization examples `. 
diff --git a/doc/en/skipping.txt b/doc/en/skipping.txt index ba3d140e9..364e0777a 100644 --- a/doc/en/skipping.txt +++ b/doc/en/skipping.txt @@ -132,7 +132,7 @@ Running it with the report-on-xfail option gives this output:: example $ py.test -rx xfail_demo.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 6 items xfail_demo.py xxxxxx diff --git a/doc/en/tmpdir.txt b/doc/en/tmpdir.txt index f116477ae..fc1fe492b 100644 --- a/doc/en/tmpdir.txt +++ b/doc/en/tmpdir.txt @@ -29,7 +29,7 @@ Running this would result in a passed test except for the last $ py.test test_tmpdir.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 1 items test_tmpdir.py F @@ -37,7 +37,7 @@ Running this would result in a passed test except for the last ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ - tmpdir = local('/tmp/pytest-781/test_create_file0') + tmpdir = local('/tmp/pytest-373/test_create_file0') def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") @@ -48,7 +48,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmpdir.py:7: AssertionError - ========================= 1 failed in 0.04 seconds ========================= + ========================= 1 failed in 0.02 seconds ========================= .. _`base temporary directory`: diff --git a/doc/en/unittest.txt b/doc/en/unittest.txt index 225dc37be..fe83600d5 100644 --- a/doc/en/unittest.txt +++ b/doc/en/unittest.txt @@ -88,7 +88,7 @@ the ``self.db`` values in the traceback:: $ py.test test_unittest_db.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.3 -- pytest-2.3.3 + platform linux2 -- Python 2.7.3 -- pytest-2.4.6 collected 2 items test_unittest_db.py FF @@ -101,7 +101,7 @@ the ``self.db`` values in the traceback:: def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes - E AssertionError: + E AssertionError: test_unittest_db.py:9: AssertionError ___________________________ MyTest.test_method2 ____________________________ @@ -110,7 +110,7 @@ the ``self.db`` values in the traceback:: def test_method2(self): > assert 0, self.db # fail for demo purposes - E AssertionError: + E AssertionError: test_unittest_db.py:12: AssertionError ========================= 2 failed in 0.02 seconds ========================= diff --git a/setup.py b/setup.py index 4b5dcbb4a..1665a9bd6 100644 --- a/setup.py +++ b/setup.py @@ -48,7 +48,7 @@ def main(): name='pytest', description='py.test: simple powerful testing with Python', long_description = long_description, - version='2.3.4.dev6', + version='2.4.6', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff --git a/tox.ini b/tox.ini index 9f5823892..11df1bdc5 100644 --- a/tox.ini +++ b/tox.ini @@ -68,7 +68,7 @@ deps=:pypi:sphinx :pypi:PyYAML commands= rm -rf /tmp/doc-exec* - pip install pytest==2.3.3 + #pip install pytest==2.3.4 make regen [testenv:py31]