diff --git a/CHANGELOG b/CHANGELOG
index 5e63f3b13..bd3a746ce 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,4 +1,4 @@
-UNRELEASED
+2.5.2
-----------------------------------
- fix issue409 -- better interoperate with cx_freeze by not
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
index 2ec33245d..af129122f 100644
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,2 @@
#
-__version__ = '2.5.2.dev1'
+__version__ = '2.5.2'
diff --git a/doc/en/_templates/links.html b/doc/en/_templates/links.html
index 0b7f87e7b..102596a0d 100644
--- a/doc/en/_templates/links.html
+++ b/doc/en/_templates/links.html
@@ -4,6 +4,7 @@
Contribution Guide
pytest @ PyPI
pytest @ Bitbucket
+ 3rd party plugins (beta)
Issue Tracker
PDF Documentation
diff --git a/doc/en/announce/index.txt b/doc/en/announce/index.txt
index 0f0fc47d1..30de39011 100644
--- a/doc/en/announce/index.txt
+++ b/doc/en/announce/index.txt
@@ -5,6 +5,7 @@ Release announcements
.. toctree::
:maxdepth: 2
+ release-2.5.2
release-2.5.1
release-2.5.0
release-2.4.2
diff --git a/doc/en/announce/release-2.5.2.txt b/doc/en/announce/release-2.5.2.txt
new file mode 100644
index 000000000..9308ffdd6
--- /dev/null
+++ b/doc/en/announce/release-2.5.2.txt
@@ -0,0 +1,64 @@
+pytest-2.5.2: fixes
+===========================================================================
+
+pytest is a mature Python testing tool with more than 1000 tests
+against itself, passing on many different interpreters and platforms.
+
+The 2.5.2 release fixes a few bugs with two maybe-bugs remaining and
+actively being worked on (and waiting for the bug reporter's input).
+We also have a new contribution guide thanks to Piotr Banaszkiewicz
+and others.
+
+See docs at:
+
+ http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+ pip install -U pytest
+
+Thanks to the following people who contributed to this release:
+
+ Anatoly Bubenkov
+ Ronny Pfannschmidt
+ Floris Bruynooghe
+ Bruno Oliveira
+ Andreas Pelme
+ Jurko Gospodnetić
+ Piotr Banaszkiewicz
+ Simon Liedtke
+ lakka
+ Lukasz Balcerzak
+ Philippe Muller
+ Daniel Hahler
+
+have fun,
+holger krekel
+
+2.5.2
+-----------------------------------
+
+- fix issue409 -- better interoperate with cx_freeze by not
+ trying to import from collections.abc which causes problems
+ for py27/cx_freeze. Thanks Wolfgang L. for reporting and tracking it down.
+
+- fixed docs and code to use "pytest" instead of "py.test" almost everywhere.
+ Thanks Jurko Gospodnetic for the complete PR.
+
+- fix issue425: mention at end of "py.test -h" that --markers
+ and --fixtures work according to specified test path (or current dir)
+
+- fix issue413: exceptions with unicode attributes are now printed
+ correctly also on python2 and with pytest-xdist runs. (the fix
+ requires py-1.4.20)
+
+- copy, cleanup and integrate py.io capture
+ from pylib 1.4.20.dev2 (rev 13d9af95547e)
+
+- address issue416: clarify docs as to conftest.py loading semantics
+
+- fix issue429: comparing byte strings with non-ascii chars in assert
+  expressions now works better. Thanks Floris Bruynooghe.
+
+- make capfd/capsys.capture private, it's unused and shouldn't be exposed
+
diff --git a/doc/en/assert.txt b/doc/en/assert.txt
index d390947f5..52a0ad2bb 100644
--- a/doc/en/assert.txt
+++ b/doc/en/assert.txt
@@ -26,19 +26,19 @@ you will see the return value of the function call::
$ py.test test_assert1.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
-
+
test_assert1.py F
-
+
================================= FAILURES =================================
______________________________ test_function _______________________________
-
+
def test_function():
> assert f() == 4
E assert 3 == 4
E + where 3 = f()
-
+
test_assert1.py:5: AssertionError
========================= 1 failed in 0.01 seconds =========================
@@ -116,14 +116,14 @@ if you run this module::
$ py.test test_assert2.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
-
+
test_assert2.py F
-
+
================================= FAILURES =================================
___________________________ test_set_comparison ____________________________
-
+
def test_set_comparison():
set1 = set("1308")
set2 = set("8035")
@@ -133,7 +133,7 @@ if you run this module::
E '1'
E Extra items in the right set:
E '5'
-
+
test_assert2.py:5: AssertionError
========================= 1 failed in 0.01 seconds =========================
diff --git a/doc/en/capture.txt b/doc/en/capture.txt
index 0c1527ba2..19bb3ca13 100644
--- a/doc/en/capture.txt
+++ b/doc/en/capture.txt
@@ -64,21 +64,21 @@ of the failing function and hide the other one::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
test_module.py .F
-
+
================================= FAILURES =================================
________________________________ test_func2 ________________________________
-
+
def test_func2():
> assert False
E assert False
-
+
test_module.py:9: AssertionError
----------------------------- Captured stdout ------------------------------
- setting up
+ setting up
==================== 1 failed, 1 passed in 0.01 seconds ====================
Accessing captured output from a test function
diff --git a/doc/en/conf.py b/doc/en/conf.py
index 6437d5abb..e5ec18361 100644
--- a/doc/en/conf.py
+++ b/doc/en/conf.py
@@ -17,8 +17,8 @@
#
# The full version, including alpha/beta/rc tags.
# The short X.Y version.
-version = "2.5.1"
-release = "2.5.1"
+version = "2.5.2"
+release = "2.5.2"
import sys, os
diff --git a/doc/en/contents.txt b/doc/en/contents.txt
index 94a772abf..46ab21472 100644
--- a/doc/en/contents.txt
+++ b/doc/en/contents.txt
@@ -14,6 +14,7 @@ Full pytest documentation
overview
apiref
plugins
+ plugins_index/index
example/index
talks
contributing
diff --git a/doc/en/doctest.txt b/doc/en/doctest.txt
index 2d29e57de..15d9d4497 100644
--- a/doc/en/doctest.txt
+++ b/doc/en/doctest.txt
@@ -44,7 +44,7 @@ then you can just invoke ``py.test`` without command line options::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
mymodule.py .
diff --git a/doc/en/example/markers.txt b/doc/en/example/markers.txt
index 09b21f178..52322a6fa 100644
--- a/doc/en/example/markers.txt
+++ b/doc/en/example/markers.txt
@@ -28,11 +28,11 @@ You can then restrict a test run to only run tests marked with ``webtest``::
$ py.test -v -m webtest
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 3 items
-
+
test_server.py:3: test_send_http PASSED
-
+
=================== 2 tests deselected by "-m 'webtest'" ===================
================== 1 passed, 2 deselected in 0.01 seconds ==================
@@ -40,12 +40,12 @@ Or the inverse, running all tests except the webtest ones::
$ py.test -v -m "not webtest"
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 3 items
-
+
test_server.py:6: test_something_quick PASSED
test_server.py:8: test_another PASSED
-
+
================= 1 tests deselected by "-m 'not webtest'" =================
================== 2 passed, 1 deselected in 0.01 seconds ==================
@@ -61,11 +61,11 @@ select tests based on their names::
$ py.test -v -k http # running with the above defined example module
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 3 items
-
+
test_server.py:3: test_send_http PASSED
-
+
====================== 2 tests deselected by '-khttp' ======================
================== 1 passed, 2 deselected in 0.01 seconds ==================
@@ -73,12 +73,12 @@ And you can also run all tests except the ones that match the keyword::
$ py.test -k "not send_http" -v
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 3 items
-
+
test_server.py:6: test_something_quick PASSED
test_server.py:8: test_another PASSED
-
+
================= 1 tests deselected by '-knot send_http' ==================
================== 2 passed, 1 deselected in 0.01 seconds ==================
@@ -86,12 +86,12 @@ Or to select "http" and "quick" tests::
$ py.test -k "http or quick" -v
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 3 items
-
+
test_server.py:3: test_send_http PASSED
test_server.py:6: test_something_quick PASSED
-
+
================= 1 tests deselected by '-khttp or quick' ==================
================== 2 passed, 1 deselected in 0.01 seconds ==================
@@ -124,19 +124,19 @@ You can ask which markers exist for your test suite - the list includes our just
$ py.test --markers
@pytest.mark.webtest: mark a test as a webtest.
-
+
@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
-
+
@pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html
-
+
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
-
- @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
-
+
+ @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
+
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
-
+
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
-
+
For an example on how to add and work with markers from a plugin, see
:ref:`adding a custom marker from a plugin`.
@@ -266,41 +266,41 @@ the test needs::
$ py.test -E stage2
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
-
+
test_someenv.py s
-
+
======================== 1 skipped in 0.01 seconds =========================
and here is one that specifies exactly the environment needed::
$ py.test -E stage1
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
-
+
test_someenv.py .
-
+
========================= 1 passed in 0.01 seconds =========================
The ``--markers`` option always gives you a list of available markers::
$ py.test --markers
@pytest.mark.env(name): mark test to run only on named environment
-
+
@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
-
+
@pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html
-
+
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
-
- @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
-
+
+ @pytest.mark.usefixtures(fixturename1, fixturename2, ...): mark tests as needing all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures
+
@pytest.mark.tryfirst: mark a hook implementation function such that the plugin machinery will try to call it first/as early as possible.
-
+
@pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible.
-
+
Reading markers which were set from multiple places
----------------------------------------------------
@@ -395,24 +395,24 @@ then you will see two test skipped and two executed tests as expected::
$ py.test -rs # this option reports skip reasons
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 4 items
-
+
test_plat.py s.s.
========================= short test summary info ==========================
- SKIP [2] /tmp/doc-exec-63/conftest.py:12: cannot run on platform linux2
-
+ SKIP [2] /tmp/doc-exec-65/conftest.py:12: cannot run on platform linux2
+
=================== 2 passed, 2 skipped in 0.01 seconds ====================
Note that if you specify a platform via the marker-command line option like this::
$ py.test -m linux2
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 4 items
-
+
test_plat.py .
-
+
=================== 3 tests deselected by "-m 'linux2'" ====================
================== 1 passed, 3 deselected in 0.01 seconds ==================
@@ -459,11 +459,11 @@ We can now use the ``-m option`` to select one set::
$ py.test -m interface --tb=short
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 4 items
-
+
test_module.py FF
-
+
================================= FAILURES =================================
__________________________ test_interface_simple ___________________________
test_module.py:3: in test_interface_simple
@@ -480,11 +480,11 @@ or to select both "event" and "interface" tests::
$ py.test -m "interface or event" --tb=short
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 4 items
-
+
test_module.py FFF
-
+
================================= FAILURES =================================
__________________________ test_interface_simple ___________________________
test_module.py:3: in test_interface_simple
diff --git a/doc/en/example/nonpython.txt b/doc/en/example/nonpython.txt
index fe6563e27..13b5f94c0 100644
--- a/doc/en/example/nonpython.txt
+++ b/doc/en/example/nonpython.txt
@@ -27,10 +27,10 @@ now execute the test specification::
nonpython $ py.test test_simple.yml
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
- test_simple.yml .F
+ test_simple.yml F.
================================= FAILURES =================================
______________________________ usecase: hello ______________________________
@@ -56,11 +56,11 @@ consulted when reporting in ``verbose`` mode::
nonpython $ py.test -v
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items
- test_simple.yml:1: usecase: ok PASSED
test_simple.yml:1: usecase: hello FAILED
+ test_simple.yml:1: usecase: ok PASSED
================================= FAILURES =================================
______________________________ usecase: hello ______________________________
@@ -74,10 +74,10 @@ interesting to just look at the collection tree::
nonpython $ py.test --collect-only
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
============================= in 0.02 seconds =============================
diff --git a/doc/en/example/parametrize.txt b/doc/en/example/parametrize.txt
index c7b1249af..ebecd9fa4 100644
--- a/doc/en/example/parametrize.txt
+++ b/doc/en/example/parametrize.txt
@@ -106,11 +106,11 @@ this is a fully self-contained example which you can run with::
$ py.test test_scenarios.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 4 items
-
+
test_scenarios.py ....
-
+
========================= 4 passed in 0.01 seconds =========================
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
@@ -118,7 +118,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
$ py.test --collect-only test_scenarios.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 4 items
@@ -127,7 +127,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia
-
+
============================= in 0.01 seconds =============================
Note that we told ``metafunc.parametrize()`` that your scenario values
@@ -182,12 +182,12 @@ Let's first see how it looks like at collection time::
$ py.test test_backends.py --collect-only
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
============================= in 0.00 seconds =============================
And then when we run the test::
@@ -196,15 +196,15 @@ And then when we run the test::
.F
================================= FAILURES =================================
_________________________ test_db_initialized[d2] __________________________
-
- db =
-
+
+ db =
+
def test_db_initialized(db):
# a dummy test
if db.__class__.__name__ == "DB2":
> pytest.fail("deliberately failing for demo purposes")
E Failed: deliberately failing for demo purposes
-
+
test_backends.py:6: Failed
1 failed, 1 passed in 0.01 seconds
@@ -251,14 +251,14 @@ argument sets to use for each test function. Let's run it::
$ py.test -q
F..
================================= FAILURES =================================
- ________________________ TestClass.test_equals[2-1] ________________________
-
- self = , a = 1, b = 2
-
+ ________________________ TestClass.test_equals[1-2] ________________________
+
+ self = , a = 1, b = 2
+
def test_equals(self, a, b):
> assert a == b
E assert 1 == 2
-
+
test_parametrize.py:18: AssertionError
1 failed, 2 passed in 0.01 seconds
@@ -281,8 +281,8 @@ Running it results in some skips if we don't have all the python interpreters in
. $ py.test -rs -q multipython.py
............sss............sss............sss............ssssssssssssssssss
========================= short test summary info ==========================
- SKIP [27] /home/hpk/p/pytest/doc/en/example/multipython.py:21: 'python2.8' not found
- 48 passed, 27 skipped in 1.34 seconds
+ SKIP [27] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.8' not found
+ 48 passed, 27 skipped in 1.30 seconds
Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------
@@ -329,13 +329,13 @@ If you run this with reporting for skips enabled::
$ py.test -rs test_module.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
test_module.py .s
========================= short test summary info ==========================
- SKIP [1] /tmp/doc-exec-65/conftest.py:10: could not import 'opt2'
-
+ SKIP [1] /tmp/doc-exec-67/conftest.py:10: could not import 'opt2'
+
=================== 1 passed, 1 skipped in 0.01 seconds ====================
You'll see that we don't have a ``opt2`` module and thus the second test run
diff --git a/doc/en/example/pythoncollection.txt b/doc/en/example/pythoncollection.txt
index 4a23d39dc..c18ad7400 100644
--- a/doc/en/example/pythoncollection.txt
+++ b/doc/en/example/pythoncollection.txt
@@ -43,14 +43,14 @@ then the test collection looks like this::
$ py.test --collect-only
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
============================= in 0.01 seconds =============================
.. note::
@@ -88,7 +88,7 @@ You can always peek at the collection tree without running tests like this::
. $ py.test --collect-only pythoncollection.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 3 items
@@ -96,7 +96,7 @@ You can always peek at the collection tree without running tests like this::
-
+
============================= in 0.01 seconds =============================
customizing test collection to find all .py files
@@ -141,11 +141,11 @@ interpreters and will leave out the setup.py file::
$ py.test --collect-only
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
-
+
============================= in 0.01 seconds =============================
If you run with a Python3 interpreter the moduled added through the conftest.py file will not be considered for test collection.
diff --git a/doc/en/example/reportingdemo.txt b/doc/en/example/reportingdemo.txt
index c9b2fe4bb..f3bf23447 100644
--- a/doc/en/example/reportingdemo.txt
+++ b/doc/en/example/reportingdemo.txt
@@ -13,84 +13,84 @@ get on the terminal - we are working on that):
assertion $ py.test failure_demo.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 39 items
-
+
failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF
-
+
================================= FAILURES =================================
____________________________ test_generative[0] ____________________________
-
+
param1 = 3, param2 = 6
-
+
def test_generative(param1, param2):
> assert param1 * 2 < param2
E assert (3 * 2) < 6
-
+
failure_demo.py:15: AssertionError
_________________________ TestFailing.test_simple __________________________
-
- self =
-
+
+ self =
+
def test_simple(self):
def f():
return 42
def g():
return 43
-
+
> assert f() == g()
E assert 42 == 43
- E + where 42 = ()
- E + and 43 = ()
-
+ E + where 42 = ()
+ E + and 43 = ()
+
failure_demo.py:28: AssertionError
____________________ TestFailing.test_simple_multiline _____________________
-
- self =
-
+
+ self =
+
def test_simple_multiline(self):
otherfunc_multi(
42,
> 6*9)
-
- failure_demo.py:33:
- _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
+
+ failure_demo.py:33:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
a = 42, b = 54
-
+
def otherfunc_multi(a,b):
> assert (a ==
b)
E assert 42 == 54
-
+
failure_demo.py:11: AssertionError
___________________________ TestFailing.test_not ___________________________
-
- self =
-
+
+ self =
+
def test_not(self):
def f():
return 42
> assert not f()
E assert not 42
- E + where 42 = ()
-
+ E + where 42 = ()
+
failure_demo.py:38: AssertionError
_________________ TestSpecialisedExplanations.test_eq_text _________________
-
- self =
-
+
+ self =
+
def test_eq_text(self):
> assert 'spam' == 'eggs'
E assert 'spam' == 'eggs'
E - spam
E + eggs
-
+
failure_demo.py:42: AssertionError
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________
-
- self =
-
+
+ self =
+
def test_eq_similar_text(self):
> assert 'foo 1 bar' == 'foo 2 bar'
E assert 'foo 1 bar' == 'foo 2 bar'
@@ -98,12 +98,12 @@ get on the terminal - we are working on that):
E ? ^
E + foo 2 bar
E ? ^
-
+
failure_demo.py:45: AssertionError
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________
-
- self =
-
+
+ self =
+
def test_eq_multiline_text(self):
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
E assert 'foo\nspam\nbar' == 'foo\neggs\nbar'
@@ -111,12 +111,12 @@ get on the terminal - we are working on that):
E - spam
E + eggs
E bar
-
+
failure_demo.py:48: AssertionError
______________ TestSpecialisedExplanations.test_eq_long_text _______________
-
- self =
-
+
+ self =
+
def test_eq_long_text(self):
a = '1'*100 + 'a' + '2'*100
b = '1'*100 + 'b' + '2'*100
@@ -128,12 +128,12 @@ get on the terminal - we are working on that):
E ? ^
E + 1111111111b222222222
E ? ^
-
+
failure_demo.py:53: AssertionError
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________
-
- self =
-
+
+ self =
+
def test_eq_long_text_multiline(self):
a = '1\n'*100 + 'a' + '2\n'*100
b = '1\n'*100 + 'b' + '2\n'*100
@@ -152,34 +152,34 @@ get on the terminal - we are working on that):
E 2
E 2
E 2
-
+
failure_demo.py:58: AssertionError
_________________ TestSpecialisedExplanations.test_eq_list _________________
-
- self =
-
+
+ self =
+
def test_eq_list(self):
> assert [0, 1, 2] == [0, 1, 3]
E assert [0, 1, 2] == [0, 1, 3]
E At index 2 diff: 2 != 3
-
+
failure_demo.py:61: AssertionError
______________ TestSpecialisedExplanations.test_eq_list_long _______________
-
- self =
-
+
+ self =
+
def test_eq_list_long(self):
a = [0]*100 + [1] + [3]*100
b = [0]*100 + [2] + [3]*100
> assert a == b
E assert [0, 0, 0, 0, 0, 0, ...] == [0, 0, 0, 0, 0, 0, ...]
E At index 100 diff: 1 != 2
-
+
failure_demo.py:66: AssertionError
_________________ TestSpecialisedExplanations.test_eq_dict _________________
-
- self =
-
+
+ self =
+
def test_eq_dict(self):
> assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
E assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}
@@ -190,12 +190,12 @@ get on the terminal - we are working on that):
E {'c': 0}
E Right contains more items:
E {'d': 0}
-
+
failure_demo.py:69: AssertionError
_________________ TestSpecialisedExplanations.test_eq_set __________________
-
- self =
-
+
+ self =
+
def test_eq_set(self):
> assert set([0, 10, 11, 12]) == set([0, 20, 21])
E assert set([0, 10, 11, 12]) == set([0, 20, 21])
@@ -206,31 +206,31 @@ get on the terminal - we are working on that):
E Extra items in the right set:
E 20
E 21
-
+
failure_demo.py:72: AssertionError
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________
-
- self =
-
+
+ self =
+
def test_eq_longer_list(self):
> assert [1,2] == [1,2,3]
E assert [1, 2] == [1, 2, 3]
E Right contains more items, first extra item: 3
-
+
failure_demo.py:75: AssertionError
_________________ TestSpecialisedExplanations.test_in_list _________________
-
- self =
-
+
+ self =
+
def test_in_list(self):
> assert 1 in [0, 2, 3, 4, 5]
E assert 1 in [0, 2, 3, 4, 5]
-
+
failure_demo.py:78: AssertionError
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________
-
- self =
-
+
+ self =
+
def test_not_in_text_multiline(self):
text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'
> assert 'foo' not in text
@@ -243,12 +243,12 @@ get on the terminal - we are working on that):
E ? +++
E and a
E tail
-
+
failure_demo.py:82: AssertionError
___________ TestSpecialisedExplanations.test_not_in_text_single ____________
-
- self =
-
+
+ self =
+
def test_not_in_text_single(self):
text = 'single foo line'
> assert 'foo' not in text
@@ -256,58 +256,58 @@ get on the terminal - we are working on that):
E 'foo' is contained here:
E single foo line
E ? +++
-
+
failure_demo.py:86: AssertionError
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________
-
- self =
-
+
+ self =
+
def test_not_in_text_single_long(self):
text = 'head ' * 50 + 'foo ' + 'tail ' * 20
> assert 'foo' not in text
E assert 'foo' not in 'head head head head hea...ail tail tail tail tail '
E 'foo' is contained here:
- E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
+ E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? +++
-
+
failure_demo.py:90: AssertionError
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______
-
- self =
-
+
+ self =
+
def test_not_in_text_single_long_term(self):
text = 'head ' * 50 + 'f'*70 + 'tail ' * 20
> assert 'f'*70 not in text
E assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail '
E 'ffffffffffffffffff...fffffffffffffffffff' is contained here:
- E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
+ E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail
E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
-
+
failure_demo.py:94: AssertionError
______________________________ test_attribute ______________________________
-
+
def test_attribute():
class Foo(object):
b = 1
i = Foo()
> assert i.b == 2
E assert 1 == 2
- E + where 1 = .b
-
+ E + where 1 = .b
+
failure_demo.py:101: AssertionError
_________________________ test_attribute_instance __________________________
-
+
def test_attribute_instance():
class Foo(object):
b = 1
> assert Foo().b == 2
E assert 1 == 2
- E + where 1 = .b
- E + where = ()
-
+ E + where 1 = .b
+ E + where = ()
+
failure_demo.py:107: AssertionError
__________________________ test_attribute_failure __________________________
-
+
def test_attribute_failure():
class Foo(object):
def _get_b(self):
@@ -315,19 +315,19 @@ get on the terminal - we are working on that):
b = property(_get_b)
i = Foo()
> assert i.b == 2
-
- failure_demo.py:116:
- _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
- self =
-
+
+ failure_demo.py:116:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
+ self =
+
def _get_b(self):
> raise Exception('Failed to get attrib')
E Exception: Failed to get attrib
-
+
failure_demo.py:113: Exception
_________________________ test_attribute_multiple __________________________
-
+
def test_attribute_multiple():
class Foo(object):
b = 1
@@ -335,78 +335,78 @@ get on the terminal - we are working on that):
b = 2
> assert Foo().b == Bar().b
E assert 1 == 2
- E + where 1 = .b
- E + where = ()
- E + and 2 = .b
- E + where = ()
-
+ E + where 1 = .b
+ E + where = ()
+ E + and 2 = .b
+ E + where = ()
+
failure_demo.py:124: AssertionError
__________________________ TestRaises.test_raises __________________________
-
- self =
-
+
+ self =
+
def test_raises(self):
s = 'qwe'
> raises(TypeError, "int(s)")
-
- failure_demo.py:133:
- _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
+
+ failure_demo.py:133:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
> int(s)
E ValueError: invalid literal for int() with base 10: 'qwe'
-
- <0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:983>:1: ValueError
+
+ <0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:999>:1: ValueError
______________________ TestRaises.test_raises_doesnt _______________________
-
- self =
-
+
+ self =
+
def test_raises_doesnt(self):
> raises(IOError, "int('3')")
E Failed: DID NOT RAISE
-
+
failure_demo.py:136: Failed
__________________________ TestRaises.test_raise ___________________________
-
- self =
-
+
+ self =
+
def test_raise(self):
> raise ValueError("demo error")
E ValueError: demo error
-
+
failure_demo.py:139: ValueError
________________________ TestRaises.test_tupleerror ________________________
-
- self =
-
+
+ self =
+
def test_tupleerror(self):
> a,b = [1]
E ValueError: need more than 1 value to unpack
-
+
failure_demo.py:142: ValueError
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______
-
- self =
-
+
+ self =
+
def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
l = [1,2,3]
print ("l is %r" % l)
> a,b = l.pop()
E TypeError: 'int' object is not iterable
-
+
failure_demo.py:147: TypeError
----------------------------- Captured stdout ------------------------------
l is [1, 2, 3]
________________________ TestRaises.test_some_error ________________________
-
- self =
-
+
+ self =
+
def test_some_error(self):
> if namenotexi:
E NameError: global name 'namenotexi' is not defined
-
+
failure_demo.py:150: NameError
____________________ test_dynamic_compile_shows_nicely _____________________
-
+
def test_dynamic_compile_shows_nicely():
src = 'def foo():\n assert 1 == 0\n'
name = 'abc-123'
@@ -415,132 +415,132 @@ get on the terminal - we are working on that):
py.builtin.exec_(code, module.__dict__)
py.std.sys.modules[name] = module
> module.foo()
-
- failure_demo.py:165:
- _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
+
+ failure_demo.py:165:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
def foo():
> assert 1 == 0
E assert 1 == 0
-
+
<2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError
____________________ TestMoreErrors.test_complex_error _____________________
-
- self =
-
+
+ self =
+
def test_complex_error(self):
def f():
return 44
def g():
return 43
> somefunc(f(), g())
-
- failure_demo.py:175:
- _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
+
+ failure_demo.py:175:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
x = 44, y = 43
-
+
def somefunc(x,y):
> otherfunc(x,y)
-
- failure_demo.py:8:
- _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
-
+
+ failure_demo.py:8:
+ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
a = 44, b = 43
-
+
def otherfunc(a,b):
> assert a==b
E assert 44 == 43
-
+
failure_demo.py:5: AssertionError
___________________ TestMoreErrors.test_z1_unpack_error ____________________
-
- self =
-
+
+ self =
+
def test_z1_unpack_error(self):
l = []
> a,b = l
E ValueError: need more than 0 values to unpack
-
+
failure_demo.py:179: ValueError
____________________ TestMoreErrors.test_z2_type_error _____________________
-
- self =
-
+
+ self =
+
def test_z2_type_error(self):
l = 3
> a,b = l
E TypeError: 'int' object is not iterable
-
+
failure_demo.py:183: TypeError
______________________ TestMoreErrors.test_startswith ______________________
-
- self =
-
+
+ self =
+
def test_startswith(self):
s = "123"
g = "456"
> assert s.startswith(g)
- E assert ('456')
- E + where = '123'.startswith
-
+ E assert ('456')
+ E + where = '123'.startswith
+
failure_demo.py:188: AssertionError
__________________ TestMoreErrors.test_startswith_nested ___________________
-
- self =
-
+
+ self =
+
def test_startswith_nested(self):
def f():
return "123"
def g():
return "456"
> assert f().startswith(g())
- E assert ('456')
- E + where = '123'.startswith
- E + where '123' = ()
- E + and '456' = ()
-
+ E assert ('456')
+ E + where = '123'.startswith
+ E + where '123' = ()
+ E + and '456' = ()
+
failure_demo.py:195: AssertionError
_____________________ TestMoreErrors.test_global_func ______________________
-
- self =
-
+
+ self =
+
def test_global_func(self):
> assert isinstance(globf(42), float)
E assert isinstance(43, float)
E + where 43 = globf(42)
-
+
failure_demo.py:198: AssertionError
_______________________ TestMoreErrors.test_instance _______________________
-
- self =
-
+
+ self =
+
def test_instance(self):
self.x = 6*7
> assert self.x != 42
E assert 42 != 42
- E + where 42 = .x
-
+ E + where 42 = .x
+
failure_demo.py:202: AssertionError
_______________________ TestMoreErrors.test_compare ________________________
-
- self =
-
+
+ self =
+
def test_compare(self):
> assert globf(10) < 5
E assert 11 < 5
E + where 11 = globf(10)
-
+
failure_demo.py:205: AssertionError
_____________________ TestMoreErrors.test_try_finally ______________________
-
- self =
-
+
+ self =
+
def test_try_finally(self):
x = 1
try:
> assert x == 0
E assert 1 == 0
-
+
failure_demo.py:210: AssertionError
======================== 39 failed in 0.20 seconds =========================
diff --git a/doc/en/example/simple.txt b/doc/en/example/simple.txt
index cb4c757ac..4198054ed 100644
--- a/doc/en/example/simple.txt
+++ b/doc/en/example/simple.txt
@@ -108,9 +108,9 @@ directory with the above conftest.py::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 0 items
-
+
============================= in 0.00 seconds =============================
.. _`excontrolskip`:
@@ -152,24 +152,24 @@ and when running it will see a skipped "slow" test::
$ py.test -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
test_module.py .s
========================= short test summary info ==========================
- SKIP [1] /tmp/doc-exec-68/conftest.py:9: need --runslow option to run
-
+ SKIP [1] /tmp/doc-exec-70/conftest.py:9: need --runslow option to run
+
=================== 1 passed, 1 skipped in 0.01 seconds ====================
Or run it including the ``slow`` marked test::
$ py.test --runslow
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
test_module.py ..
-
+
========================= 2 passed in 0.01 seconds =========================
Writing well integrated assertion helpers
@@ -256,10 +256,10 @@ which will add the string to the test header accordingly::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
project deps: mylib-1.1
collected 0 items
-
+
============================= in 0.00 seconds =============================
.. regendoc:wipe
@@ -279,20 +279,20 @@ which will add info only when run with "--v"::
$ py.test -v
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
info1: did you know that ...
did you?
collecting ... collected 0 items
-
+
============================= in 0.00 seconds =============================
and nothing when run plainly::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 0 items
-
+
============================= in 0.00 seconds =============================
profiling test duration
@@ -322,11 +322,11 @@ Now we can profile which test functions execute the slowest::
$ py.test --durations=3
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 3 items
-
+
test_some_are_slow.py ...
-
+
========================= slowest 3 test durations =========================
0.20s call test_some_are_slow.py::test_funcslow2
0.10s call test_some_are_slow.py::test_funcslow1
@@ -383,20 +383,20 @@ If we run this::
$ py.test -rx
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 4 items
-
+
test_step.py .Fx.
-
+
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________
-
- self =
-
+
+ self =
+
def test_modification(self):
> assert 0
E assert 0
-
+
test_step.py:9: AssertionError
========================= short test summary info ==========================
XFAIL test_step.py::TestUserHandling::()::test_deletion
@@ -453,50 +453,50 @@ We can run this::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 7 items
-
+
test_step.py .Fx.
a/test_db.py F
a/test_db2.py F
b/test_error.py E
-
+
================================== ERRORS ==================================
_______________________ ERROR at setup of test_root ________________________
- file /tmp/doc-exec-68/b/test_error.py, line 1
+ file /tmp/doc-exec-70/b/test_error.py, line 1
def test_root(db): # no db here, will error out
fixture 'db' not found
- available fixtures: recwarn, capfd, pytestconfig, capsys, tmpdir, monkeypatch
+ available fixtures: pytestconfig, capfd, monkeypatch, capsys, recwarn, tmpdir
use 'py.test --fixtures [testpath]' for help on them.
-
- /tmp/doc-exec-68/b/test_error.py:1
+
+ /tmp/doc-exec-70/b/test_error.py:1
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________
-
- self =
-
+
+ self =
+
def test_modification(self):
> assert 0
E assert 0
-
+
test_step.py:9: AssertionError
_________________________________ test_a1 __________________________________
-
- db =
-
+
+ db =
+
def test_a1(db):
> assert 0, db # to show value
- E AssertionError:
-
+ E AssertionError:
+
a/test_db.py:2: AssertionError
_________________________________ test_a2 __________________________________
-
- db =
-
+
+ db =
+
def test_a2(db):
> assert 0, db # to show value
- E AssertionError:
-
+ E AssertionError:
+
a/test_db2.py:2: AssertionError
========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.03 seconds ==========
@@ -553,34 +553,34 @@ and run them::
$ py.test test_module.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
test_module.py FF
-
+
================================= FAILURES =================================
________________________________ test_fail1 ________________________________
-
- tmpdir = local('/tmp/pytest-42/test_fail10')
-
+
+ tmpdir = local('/tmp/pytest-1012/test_fail10')
+
def test_fail1(tmpdir):
> assert 0
E assert 0
-
+
test_module.py:2: AssertionError
________________________________ test_fail2 ________________________________
-
+
def test_fail2():
> assert 0
E assert 0
-
+
test_module.py:4: AssertionError
========================= 2 failed in 0.01 seconds =========================
you will have a "failures" file which contains the failing test ids::
$ cat failures
- test_module.py::test_fail1 (/tmp/pytest-42/test_fail10)
+ test_module.py::test_fail1 (/tmp/pytest-1012/test_fail10)
test_module.py::test_fail2
Making test result information available in fixtures
@@ -643,38 +643,38 @@ and run it::
$ py.test -s test_module.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 3 items
-
+
test_module.py Esetting up a test failed! test_module.py::test_setup_fails
Fexecuting test failed test_module.py::test_call_fails
F
-
+
================================== ERRORS ==================================
____________________ ERROR at setup of test_setup_fails ____________________
-
+
@pytest.fixture
def other():
> assert 0
E assert 0
-
+
test_module.py:6: AssertionError
================================= FAILURES =================================
_____________________________ test_call_fails ______________________________
-
+
something = None
-
+
def test_call_fails(something):
> assert 0
E assert 0
-
+
test_module.py:12: AssertionError
________________________________ test_fail2 ________________________________
-
+
def test_fail2():
> assert 0
E assert 0
-
+
test_module.py:15: AssertionError
==================== 2 failed, 1 error in 0.01 seconds =====================
diff --git a/doc/en/fixture.txt b/doc/en/fixture.txt
index ba05431a1..7ab780100 100644
--- a/doc/en/fixture.txt
+++ b/doc/en/fixture.txt
@@ -76,23 +76,23 @@ marked ``smtp`` fixture function. Running the test looks like this::
$ py.test test_smtpsimple.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
-
+
test_smtpsimple.py F
-
+
================================= FAILURES =================================
________________________________ test_ehlo _________________________________
-
- smtp =
-
+
+ smtp =
+
def test_ehlo(smtp):
response, msg = smtp.ehlo()
assert response == 250
assert "merlinux" in msg
> assert 0 # for demo purposes
E assert 0
-
+
test_smtpsimple.py:12: AssertionError
========================= 1 failed in 0.21 seconds =========================
@@ -194,36 +194,36 @@ inspect what is going on and can now run the tests::
$ py.test test_module.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
-
+
test_module.py FF
-
+
================================= FAILURES =================================
________________________________ test_ehlo _________________________________
-
- smtp =
-
+
+ smtp =
+
def test_ehlo(smtp):
response = smtp.ehlo()
assert response[0] == 250
assert "merlinux" in response[1]
> assert 0 # for demo purposes
E assert 0
-
+
test_module.py:6: AssertionError
________________________________ test_noop _________________________________
-
- smtp =
-
+
+ smtp =
+
def test_noop(smtp):
response = smtp.noop()
assert response[0] == 250
> assert 0 # for demo purposes
E assert 0
-
+
test_module.py:11: AssertionError
- ========================= 2 failed in 0.17 seconds =========================
+ ========================= 2 failed in 0.23 seconds =========================
You see the two ``assert 0`` failing and more importantly you can also see
that the same (module-scoped) ``smtp`` object was passed into the two
@@ -270,8 +270,8 @@ Let's execute it::
$ py.test -s -q --tb=no
FFteardown smtp
-
- 2 failed in 0.17 seconds
+
+ 2 failed in 0.21 seconds
We see that the ``smtp`` instance is finalized after the two
tests finished execution. Note that if we decorated our fixture
@@ -312,7 +312,7 @@ again, nothing much has changed::
$ py.test -s -q --tb=no
FF
- 2 failed in 0.21 seconds
+ 2 failed in 0.59 seconds
Let's quickly create another test module that actually sets the
server URL in its module namespace::
@@ -378,53 +378,53 @@ So let's just do another run::
FFFF
================================= FAILURES =================================
__________________________ test_ehlo[merlinux.eu] __________________________
-
- smtp =
-
+
+ smtp =
+
def test_ehlo(smtp):
response = smtp.ehlo()
assert response[0] == 250
assert "merlinux" in response[1]
> assert 0 # for demo purposes
E assert 0
-
+
test_module.py:6: AssertionError
__________________________ test_noop[merlinux.eu] __________________________
-
- smtp =
-
+
+ smtp =
+
def test_noop(smtp):
response = smtp.noop()
assert response[0] == 250
> assert 0 # for demo purposes
E assert 0
-
+
test_module.py:11: AssertionError
________________________ test_ehlo[mail.python.org] ________________________
-
- smtp =
-
+
+ smtp =
+
def test_ehlo(smtp):
response = smtp.ehlo()
assert response[0] == 250
> assert "merlinux" in response[1]
E assert 'merlinux' in 'mail.python.org\nSIZE 25600000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN'
-
+
test_module.py:5: AssertionError
----------------------------- Captured stdout ------------------------------
- finalizing
+ finalizing
________________________ test_noop[mail.python.org] ________________________
-
- smtp =
-
+
+ smtp =
+
def test_noop(smtp):
response = smtp.noop()
assert response[0] == 250
> assert 0 # for demo purposes
E assert 0
-
+
test_module.py:11: AssertionError
- 4 failed in 6.58 seconds
+ 4 failed in 6.06 seconds
We see that our two test functions each ran twice, against the different
``smtp`` instances. Note also, that with the ``mail.python.org``
@@ -464,13 +464,13 @@ Here we declare an ``app`` fixture which receives the previously defined
$ py.test -v test_appsetup.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 2 items
-
+
test_appsetup.py:12: test_smtp_exists[merlinux.eu] PASSED
test_appsetup.py:12: test_smtp_exists[mail.python.org] PASSED
-
- ========================= 2 passed in 5.95 seconds =========================
+
+ ========================= 2 passed in 6.42 seconds =========================
Due to the parametrization of ``smtp`` the test will run twice with two
different ``App`` instances and respective smtp servers. There is no
@@ -528,9 +528,9 @@ Let's run the tests in verbose mode and with looking at the print-output::
$ py.test -v -s test_module.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1 -- /home/hpk/p/pytest/.tox/regen/bin/python
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2 -- /home/hpk/p/pytest/.tox/regen/bin/python
collecting ... collected 8 items
-
+
test_module.py:15: test_0[1] test0 1
PASSED
test_module.py:15: test_0[2] test0 2
@@ -549,7 +549,7 @@ Let's run the tests in verbose mode and with looking at the print-output::
PASSED
test_module.py:19: test_2[2-mod2] test2 2 mod2
PASSED
-
+
========================= 8 passed in 0.01 seconds =========================
You can see that the parametrized module-scoped ``modarg`` resource caused
diff --git a/doc/en/getting-started.txt b/doc/en/getting-started.txt
index 8a4c0d961..adeb8569a 100644
--- a/doc/en/getting-started.txt
+++ b/doc/en/getting-started.txt
@@ -23,7 +23,7 @@ Installation options::
To check your installation has installed the correct version::
$ py.test --version
- This is pytest version 2.5.1, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc
+ This is pytest version 2.5.2, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc
If you get an error checkout :ref:`installation issues`.
@@ -45,19 +45,19 @@ That's it. You can execute the test function now::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
-
+
test_sample.py F
-
+
================================= FAILURES =================================
_______________________________ test_answer ________________________________
-
+
def test_answer():
> assert func(3) == 5
E assert 4 == 5
E + where 4 = func(3)
-
+
test_sample.py:5: AssertionError
========================= 1 failed in 0.01 seconds =========================
@@ -93,7 +93,7 @@ Running it with, this time in "quiet" reporting mode::
$ py.test -q test_sysexit.py
.
- 1 passed in 0.00 seconds
+ 1 passed in 0.01 seconds
.. todo:: For further ways to assert exceptions see the `raises`
@@ -122,14 +122,14 @@ run the module by passing its filename::
.F
================================= FAILURES =================================
____________________________ TestClass.test_two ____________________________
-
- self =
-
+
+ self =
+
def test_two(self):
x = "hello"
> assert hasattr(x, 'check')
E assert hasattr('hello', 'check')
-
+
test_class.py:8: AssertionError
1 failed, 1 passed in 0.01 seconds
@@ -158,18 +158,18 @@ before performing the test function call. Let's just run it::
F
================================= FAILURES =================================
_____________________________ test_needsfiles ______________________________
-
- tmpdir = local('/tmp/pytest-38/test_needsfiles0')
-
+
+ tmpdir = local('/tmp/pytest-1008/test_needsfiles0')
+
def test_needsfiles(tmpdir):
print tmpdir
> assert 0
E assert 0
-
+
test_tmpdir.py:3: AssertionError
----------------------------- Captured stdout ------------------------------
- /tmp/pytest-38/test_needsfiles0
- 1 failed in 0.04 seconds
+ /tmp/pytest-1008/test_needsfiles0
+ 1 failed in 0.01 seconds
Before the test runs, a unique-per-test-invocation temporary directory
was created. More info at :ref:`tmpdir handling`.
diff --git a/doc/en/parametrize.txt b/doc/en/parametrize.txt
index b4ea40985..6e5e0fc66 100644
--- a/doc/en/parametrize.txt
+++ b/doc/en/parametrize.txt
@@ -53,16 +53,16 @@ them in turn::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 3 items
-
+
test_expectation.py ..F
-
+
================================= FAILURES =================================
____________________________ test_eval[6*9-42] _____________________________
-
+
input = '6*9', expected = 42
-
+
@pytest.mark.parametrize("input,expected", [
("3+5", 8),
("2+4", 6),
@@ -72,7 +72,7 @@ them in turn::
> assert eval(input) == expected
E assert 54 == 42
E + where 54 = eval('6*9')
-
+
test_expectation.py:8: AssertionError
==================== 1 failed, 2 passed in 0.01 seconds ====================
@@ -100,11 +100,11 @@ Let's run this::
$ py.test
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 3 items
-
+
test_expectation.py ..x
-
+
=================== 2 passed, 1 xfailed in 0.01 seconds ====================
The one parameter set which caused a failure previously now
@@ -165,14 +165,14 @@ Let's also run with a stringinput that will lead to a failing test::
F
================================= FAILURES =================================
___________________________ test_valid_string[!] ___________________________
-
+
stringinput = '!'
-
+
def test_valid_string(stringinput):
> assert stringinput.isalpha()
- E assert ()
- E + where = '!'.isalpha
-
+ E assert ()
+ E + where = '!'.isalpha
+
test_strings.py:3: AssertionError
1 failed in 0.01 seconds
@@ -185,7 +185,7 @@ listlist::
$ py.test -q -rs test_strings.py
s
========================= short test summary info ==========================
- SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:1094: got empty parameter set, function test_valid_string at /tmp/doc-exec-24/test_strings.py:1
+ SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:1110: got empty parameter set, function test_valid_string at /tmp/doc-exec-24/test_strings.py:1
1 skipped in 0.01 seconds
For further examples, you might want to look at :ref:`more
diff --git a/doc/en/plugins.txt b/doc/en/plugins.txt
index 0f612cceb..9ef703288 100644
--- a/doc/en/plugins.txt
+++ b/doc/en/plugins.txt
@@ -64,7 +64,9 @@ tool, for example::
pip uninstall pytest-NAME
If a plugin is installed, ``pytest`` automatically finds and integrates it,
-there is no need to activate it. Here is a initial list of known plugins:
+there is no need to activate it. We have a :doc:`beta page listing
+all 3rd party plugins and their status ` and here
+is a little annotated list for some popular plugins:
.. _`django`: https://www.djangoproject.com/
diff --git a/doc/en/plugins_index/plugins_index.txt b/doc/en/plugins_index/index.txt
similarity index 100%
rename from doc/en/plugins_index/plugins_index.txt
rename to doc/en/plugins_index/index.txt
diff --git a/doc/en/plugins_index/plugins_index.py b/doc/en/plugins_index/plugins_index.py
index 74a6be32a..447809eec 100644
--- a/doc/en/plugins_index/plugins_index.py
+++ b/doc/en/plugins_index/plugins_index.py
@@ -1,5 +1,5 @@
"""
-Script to generate the file `plugins_index.txt` with information about
+Script to generate the file `index.txt` with information about
pytest plugins taken directly from a live PyPI server.
Also includes plugin compatibility between different python and pytest versions,
@@ -34,9 +34,9 @@ def get_proxy(url):
def iter_plugins(client, search='pytest-'):
"""
Returns an iterator of (name, version) from PyPI.
-
+
:param client: ServerProxy
- :param search: package names to search for
+ :param search: package names to search for
"""
for plug_data in client.search({'name': search}):
yield plug_data['name'], plug_data['version']
@@ -58,11 +58,11 @@ def obtain_plugins_table(plugins, client):
"""
Returns information to populate a table of plugins, their versions,
authors, etc.
-
+
The returned information is a list of columns of `ColumnData`
namedtuples(text, link). Link can be None if the text for that column
should not be linked to anything.
-
+
:param plugins: list of (name, version)
:param client: ServerProxy
"""
@@ -141,7 +141,7 @@ def obtain_override_repositories():
def generate_plugins_index_from_table(filename, headers, rows):
"""
Generates a RST file with the table data given.
-
+
:param filename: output filename
:param headers: see `obtain_plugins_table`
:param rows: see `obtain_plugins_table`
@@ -168,14 +168,14 @@ def generate_plugins_index_from_table(filename, headers, rows):
return ' '.join(char * length for length in column_lengths)
with open(filename, 'w') as f:
- # write welcome
+ # write welcome
print('.. _plugins_index:', file=f)
print(file=f)
print('List of Third-Party Plugins', file=f)
print('===========================', file=f)
print(file=f)
- # table
+ # table
print(get_row_limiter('='), file=f)
formatted_headers = [
'{0:^{fill}}'.format(header, fill=column_lengths[i])
@@ -200,7 +200,7 @@ def generate_plugins_index(client, filename):
"""
Generates an RST file with a table of the latest pytest plugins found in
PyPI.
-
+
:param client: ServerProxy
:param filename: output filename
"""
@@ -214,7 +214,7 @@ def main(argv):
Script entry point. Configures an option parser and calls the appropriate
internal function.
"""
- filename = os.path.join(os.path.dirname(__file__), 'plugins_index.txt')
+ filename = os.path.join(os.path.dirname(__file__), 'index.txt')
url = 'http://pypi.python.org/pypi'
parser = OptionParser(
diff --git a/doc/en/skipping.txt b/doc/en/skipping.txt
index fd2f2d952..7d6573f56 100644
--- a/doc/en/skipping.txt
+++ b/doc/en/skipping.txt
@@ -159,14 +159,14 @@ Running it with the report-on-xfail option gives this output::
example $ py.test -rx xfail_demo.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 6 items
-
+
xfail_demo.py xxxxxx
========================= short test summary info ==========================
XFAIL xfail_demo.py::test_hello
XFAIL xfail_demo.py::test_hello2
- reason: [NOTRUN]
+ reason: [NOTRUN]
XFAIL xfail_demo.py::test_hello3
condition: hasattr(os, 'sep')
XFAIL xfail_demo.py::test_hello4
@@ -175,7 +175,7 @@ Running it with the report-on-xfail option gives this output::
condition: pytest.__version__[0] != "17"
XFAIL xfail_demo.py::test_hello6
reason: reason
-
+
======================== 6 xfailed in 0.04 seconds =========================
.. _`skip/xfail with parametrize`:
diff --git a/doc/en/tmpdir.txt b/doc/en/tmpdir.txt
index f7ac60fef..899858567 100644
--- a/doc/en/tmpdir.txt
+++ b/doc/en/tmpdir.txt
@@ -29,7 +29,7 @@ Running this would result in a passed test except for the last
$ py.test test_tmpdir.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 1 items
test_tmpdir.py F
@@ -37,7 +37,7 @@ Running this would result in a passed test except for the last
================================= FAILURES =================================
_____________________________ test_create_file _____________________________
- tmpdir = local('/tmp/pytest-39/test_create_file0')
+ tmpdir = local('/tmp/pytest-1009/test_create_file0')
def test_create_file(tmpdir):
p = tmpdir.mkdir("sub").join("hello.txt")
diff --git a/doc/en/unittest.txt b/doc/en/unittest.txt
index 7256f3de3..df1dcb17a 100644
--- a/doc/en/unittest.txt
+++ b/doc/en/unittest.txt
@@ -88,7 +88,7 @@ the ``self.db`` values in the traceback::
$ py.test test_unittest_db.py
=========================== test session starts ============================
- platform linux2 -- Python 2.7.3 -- pytest-2.5.1
+ platform linux2 -- Python 2.7.3 -- py-1.4.20 -- pytest-2.5.2
collected 2 items
test_unittest_db.py FF
@@ -101,7 +101,7 @@ the ``self.db`` values in the traceback::
def test_method1(self):
assert hasattr(self, "db")
> assert 0, self.db # fail for demo purposes
- E AssertionError:
+ E AssertionError:
test_unittest_db.py:9: AssertionError
___________________________ MyTest.test_method2 ____________________________
@@ -110,7 +110,7 @@ the ``self.db`` values in the traceback::
def test_method2(self):
> assert 0, self.db # fail for demo purposes
- E AssertionError:
+ E AssertionError:
test_unittest_db.py:12: AssertionError
========================= 2 failed in 0.01 seconds =========================
diff --git a/setup.py b/setup.py
index 06b81912a..4b47e826c 100644
--- a/setup.py
+++ b/setup.py
@@ -27,7 +27,7 @@ def main():
name='pytest',
description='pytest: simple powerful testing with Python',
long_description = long_description,
- version='2.5.2.dev1',
+ version='2.5.2',
url='http://pytest.org',
license='MIT license',
platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
diff --git a/testing/test_capture.py b/testing/test_capture.py
index e0634d929..c80cb97d5 100644
--- a/testing/test_capture.py
+++ b/testing/test_capture.py
@@ -545,6 +545,7 @@ def test_capture_early_option_parsing(testdir):
@pytest.mark.xfail(sys.version_info >= (3, 0), reason='encoding issues')
+@pytest.mark.xfail(sys.version_info < (2, 6), reason='test not run on py25')
def test_capture_binary_output(testdir):
testdir.makepyfile(r"""
import pytest