Merge pull request #1884 from pytest-dev/master

merge master into features
Bruno Oliveira, 2016-08-30 18:47:57 -03:00 (committed by GitHub)
commit 9d2149d9c0
40 changed files with 310 additions and 385 deletions

View File

@@ -26,6 +26,7 @@ env:
  - TESTENV=py27-nobyte
  - TESTENV=doctesting
  - TESTENV=freeze
  - TESTENV=docs

script: tox --recreate -e $TESTENV

View File

@@ -5,6 +5,7 @@ Contributors include::
Abdeali JK
Abhijeet Kasurde
Ahn Ki-Wook
Alexei Kozlenok
Anatoly Bubenkoff
Andreas Zeidler
@@ -69,6 +70,7 @@ Javier Domingo Cansino
Javier Romero
John Towler
Jon Sonesen
Jordan Guymon
Joshua Bronson
Jurko Gospodnetić
Justyna Janczyszyn

View File

@@ -3,11 +3,22 @@
*
*

* Improve error message when passing non-string ids to ``pytest.mark.parametrize`` (`#1857`_).
  Thanks `@okken`_ for the report and `@nicoddemus`_ for the PR.

* Add ``buffer`` attribute to stdin stub class ``pytest.capture.DontReadFromInput``.
  Thanks `@joguSD`_ for the PR.

* Fix ``UnicodeEncodeError`` when a string comparison involving unicode fails (`#1864`_).
  Thanks `@AiOO`_ for the PR.

*
*

.. _@joguSD: https://github.com/joguSD
.. _@AiOO: https://github.com/AiOO

.. _#1857: https://github.com/pytest-dev/pytest/issues/1857
.. _#1864: https://github.com/pytest-dev/pytest/issues/1864
3.0.2.dev

View File

@@ -35,7 +35,6 @@ To execute it::
$ pytest
======= test session starts ========
platform linux -- Python 3.4.3, pytest-2.8.5, py-1.4.31, pluggy-0.3.1
collected 1 items
test_sample.py F
@@ -52,7 +51,7 @@ To execute it::
======= 1 failed in 0.12 seconds ========
Due to ``py.test``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://docs.pytest.org/en/latest/getting-started.html#our-first-test-run>`_ for more examples.
Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://docs.pytest.org/en/latest/getting-started.html#our-first-test-run>`_ for more examples.
Features
@@ -64,7 +63,7 @@ Features
<http://docs.pytest.org/en/latest/goodpractices.html#python-test-discovery>`_
of test modules and functions;
- `Modular fixtures <http://docs.pytest.org/en/latest/fixture.html>`_ for
- `Modular fixtures <http://docs.pytest.org/en/latest/fixture.html>`_ for
managing small or parametrized long-lived test resources;
- Can run `unittest <http://docs.pytest.org/en/latest/unittest.html>`_ (or trial),

View File

@@ -354,7 +354,7 @@ class ExceptionInfo(object):
            if exprinfo is None and isinstance(tup[1], AssertionError):
                exprinfo = getattr(tup[1], 'msg', None)
                if exprinfo is None:
                    exprinfo = str(tup[1])
                    exprinfo = py._builtin._totext(tup[1])
            if exprinfo and exprinfo.startswith('assert '):
                self._striptext = 'AssertionError: '
        self._excinfo = tup
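
A sketch of the Python 2 failure this hunk avoids (``py._builtin._totext`` is the text-type shim already used above; the sample message is hypothetical)::

    # Python 2 illustration
    msg = u'유니코드 assertion message'
    str(msg)                  # raises UnicodeEncodeError: 'ascii' codec can't encode characters
    py._builtin._totext(msg)  # keeps the message as text on both Python 2 and 3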

View File

@@ -455,6 +455,13 @@ class DontReadFromInput:
    def close(self):
        pass

    @property
    def buffer(self):
        if sys.version_info >= (3,0):
            return self
        else:
            raise AttributeError('redirected stdin has no attribute buffer')


def _readline_workaround():
    """

View File

@@ -14,7 +14,7 @@ def pytest_addoption(parser):
    group._addoption(
        '--pdbcls', dest="usepdb_cls", metavar="modulename:classname",
        help="start a custom interactive Python debugger on errors. "
             "For example: --pdbcls=IPython.core.debugger:Pdb")
             "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb")


def pytest_namespace():
    return {'set_trace': pytestPDB().set_trace}
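
For example, to get IPython's terminal debugger on failures, matching the updated help text (assuming IPython is installed)::

    pytest --pdb --pdbcls=IPython.terminal.debugger:TerminalPdb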

View File

@@ -711,31 +711,26 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
    They help to inspect a test function and to generate tests according to
    test configuration or values specified in the class or module where a
    test function is defined.

    :ivar fixturenames: set of fixture names required by the test function
    :ivar function: underlying python test function
    :ivar cls: class object where the test function is defined in or ``None``.
    :ivar module: the module object where the test function is defined in.
    :ivar config: access to the :class:`_pytest.config.Config` object for the
        test session.
    :ivar funcargnames:
        .. deprecated:: 2.3
            Use ``fixturenames`` instead.
    """
    def __init__(self, function, fixtureinfo, config, cls=None, module=None):
        #: access to the :class:`_pytest.config.Config` object for the test session
        self.config = config
        #: the module object where the test function is defined in.
        self.module = module
        #: underlying python test function
        self.function = function
        #: set of fixture names required by the test function
        self.fixturenames = fixtureinfo.names_closure
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs
        #: class object where the test function is defined in or ``None``.
        self.cls = cls
        self._calls = []
        self._ids = py.builtin.set()
        self._arg2fixturedefs = fixtureinfo.name2fixturedefs

    def parametrize(self, argnames, argvalues, indirect=False, ids=None,
                    scope=None):
@@ -778,6 +773,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
        """
        from _pytest.fixtures import scopes
        from _pytest.mark import extract_argvalue
        from py.io import saferepr

        unwrapped_argvalues = []
        newkeywords = []
@@ -831,9 +827,14 @@ class Metafunc(fixtures.FuncargnamesCompatAttr):
        if callable(ids):
            idfn = ids
            ids = None
        if ids and len(ids) != len(argvalues):
            raise ValueError('%d tests specified with %d ids' %(
                len(argvalues), len(ids)))
        if ids:
            if len(ids) != len(argvalues):
                raise ValueError('%d tests specified with %d ids' %(
                    len(argvalues), len(ids)))
            for id_value in ids:
                if id_value is not None and not isinstance(id_value, str):
                    msg = 'ids must be list of strings, found: %s (type: %s)'
                    raise ValueError(msg % (saferepr(id_value), type(id_value).__name__))
        ids = idmaker(argnames, argvalues, idfn, ids, self.config)
        newcalls = []
        for callspec in self._calls or [CallSpec2(self)]:
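
A sketch of a parametrization the new check rejects (hypothetical test)::

    import pytest

    @pytest.mark.parametrize('x', [1, 2], ids=[1, 2])   # ids are ints, not strings
    def test_x(x):
        pass

    # collection now fails with:
    #   ValueError: ids must be list of strings, found: 1 (type: int)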

View File

@@ -10,7 +10,7 @@ environment:
  # builds timing out in AppVeyor
  - TOXENV: "linting,py26,py27,py33,py34,py35,pypy"
  - TOXENV: "py27-pexpect,py27-xdist,py27-trial,py35-pexpect,py35-xdist,py35-trial"
  - TOXENV: "py27-nobyte,doctesting,freeze"
  - TOXENV: "py27-nobyte,doctesting,freeze,docs"

install:
  - echo Installed Pythons

View File

@@ -1,3 +1,7 @@
:orphan:
.. warnings about this file not being included in any toctree will be suppressed by :orphan:
April 2015 is "adopt pytest month"
=============================================

View File

@@ -63,3 +63,5 @@ The py.test Development Team
.. _#649: https://github.com/pytest-dev/pytest/issues/649
.. _@asottile: https://github.com/asottile
.. _@nicoddemus: https://github.com/nicoddemus
.. _@tomviner: https://github.com/tomviner

View File

@@ -69,5 +69,10 @@ The py.test Development Team
.. _#1496: https://github.com/pytest-dev/pytest/issue/1496
.. _#1524: https://github.com/pytest-dev/pytest/issue/1524
.. _@prusse-martin: https://github.com/prusse-martin
.. _@astraw38: https://github.com/astraw38
.. _@hackebrot: https://github.com/hackebrot
.. _@omarkohl: https://github.com/omarkohl
.. _@pquentin: https://github.com/pquentin
.. _@prusse-martin: https://github.com/prusse-martin
.. _@RonnyPfannschmidt: https://github.com/RonnyPfannschmidt
.. _@tomviner: https://github.com/tomviner

View File

@@ -39,8 +39,6 @@ Over 20 participants took part from 4 continents, including employees
from Splunk, Personalkollen, Cobe.io, FanDuel and Dolby. Some newcomers
mixed with developers who have worked on pytest since its beginning, and
of course everyone in between.
Ana Ribeiro, Brazil
Ronny Pfannschmidt, Germany
Sprint organisation, schedule

View File

@@ -208,6 +208,7 @@ It is possible to add your own detailed explanations by implementing
the ``pytest_assertrepr_compare`` hook.

.. autofunction:: _pytest.hookspec.pytest_assertrepr_compare
   :noindex:
As an example consider adding the following hook in a conftest.py which
provides an alternative explanation for ``Foo`` objects::
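
The hook body itself lies outside this hunk; a sketch reconstructed from the pytest docs (assumes a ``Foo`` class with a ``val`` attribute defined in ``test_foocompare.py``)::

    # content of conftest.py
    from test_foocompare import Foo

    def pytest_assertrepr_compare(op, left, right):
        if isinstance(left, Foo) and isinstance(right, Foo) and op == "==":
            return ['Comparing Foo instances:',
                    '   vals: %s != %s' % (left.val, right.val)]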

View File

@@ -58,13 +58,13 @@ Fixtures and requests
To mark a fixture function:
.. autofunction:: _pytest.python.fixture
.. autofunction:: _pytest.fixtures.fixture
Tutorial at :ref:`fixtures`.
The ``request`` object that can be used from fixture functions.
.. autoclass:: _pytest.python.FixtureRequest()
.. autoclass:: _pytest.fixtures.FixtureRequest()
   :members:

View File

@@ -127,7 +127,7 @@ html_theme_options = {
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
html_title = None
html_title = 'pytest documentation'
# A shorter title for the navigation bar. Default is the same as html_title.
html_short_title = "pytest-%s" % release
@@ -144,7 +144,7 @@ html_favicon = "img/pytest1favi.ico"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.

View File

@@ -10,32 +10,51 @@ Full pytest documentation
.. toctree::
   :maxdepth: 2

   overview
   apiref
   example/index
   getting-started
   usage
   assert
   builtin
   fixture
   monkeypatch
   tmpdir
   capture
   recwarn
   doctest
   mark
   skipping
   parametrize
   cache
   plugins
   unittest
   nose
   xunit_setup
   plugins
   writing_plugins
   example/index
   goodpractices
   customize
   bash-completion
   backwards-compatibility
   license
   contributing
   talks
   projects
   faq
   contact

.. only:: html

   .. toctree::
      :maxdepth: 1

      funcarg_compare
      announce/index

.. only:: html

   .. toctree::
      :hidden:
      :maxdepth: 1

      changelog

View File

@@ -16,6 +16,7 @@ from docstrings in all python modules (including regular
python test modules)::

    pytest --doctest-modules

You can make these changes permanent in your project by
putting them into a pytest.ini file like this:
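
The ini file itself is outside this hunk; a minimal sketch::

    # content of pytest.ini
    [pytest]
    addopts = --doctest-modules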

View File

@@ -7,11 +7,9 @@ Demo of Python failure reports with pytest
Here is a nice run of several tens of failures
and how ``pytest`` presents things (unfortunately
not showing the nice colors here in the HTML that you
get on the terminal - we are working on that):
get on the terminal - we are working on that)::
.. code-block:: python
assertion $ pytest failure_demo.py
$ pytest failure_demo.py
======= test session starts ========
platform linux -- Python 3.5.2, pytest-3.0.1, py-1.4.31, pluggy-0.3.1
rootdir: $REGENDOC_TMPDIR/assertion, inifile:

View File

@@ -1,5 +1,4 @@
.. highlightlang:: python
Basic patterns and examples
==========================================================
@@ -10,7 +9,9 @@ Pass different values to a test function, depending on command line options
.. regendoc:wipe
Suppose we want to write a test that depends on a command line option.
Here is a basic pattern to achieve this::
Here is a basic pattern to achieve this:
.. code-block:: python
# content of test_sample.py
def test_answer(cmdopt):
@@ -22,7 +23,9 @@ Here is a basic pattern to achieve this::
For this to work we need to add a command line option and
provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`::
provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`:
.. code-block:: python
# content of conftest.py
import pytest
@@ -91,7 +94,9 @@ Dynamically adding command line options
Through :confval:`addopts` you can statically add command line
options for your project. You can also dynamically modify
the command line arguments before they get processed::
the command line arguments before they get processed:
.. code-block:: python
# content of conftest.py
import sys
@@ -101,7 +106,7 @@ the command line arguments before they get processed::
num = max(multiprocessing.cpu_count() / 2, 1)
args[:] = ["-n", str(num)] + args
If you have the :ref:`xdist plugin <xdist>` installed
If you have the `xdist plugin <https://pypi.python.org/pypi/pytest-xdist>`_ installed
you will now always perform test runs using a number
of subprocesses close to your CPU. Running in an empty
directory with the above conftest.py::
@@ -122,7 +127,9 @@ Control skipping of tests according to command line option
.. regendoc:wipe
Here is a ``conftest.py`` file adding a ``--runslow`` command
line option to control skipping of ``slow`` marked tests::
line option to control skipping of ``slow`` marked tests:
.. code-block:: python
# content of conftest.py
@@ -131,10 +138,11 @@ line option to control skipping of ``slow`` marked tests::
parser.addoption("--runslow", action="store_true",
help="run slow tests")
We can now write a test module like this::
We can now write a test module like this:
.. code-block:: python
# content of test_module.py
import pytest
@@ -187,7 +195,9 @@ If you have a test helper function called from a test you can
use the ``pytest.fail`` marker to fail a test with a certain message.
The test support function will not show up in the traceback if you
set the ``__tracebackhide__`` option somewhere in the helper function.
Example::
Example:
.. code-block:: python
# content of test_checkconfig.py
import pytest
@@ -218,7 +228,9 @@ Let's run our little function::
If you only want to hide certain exceptions, you can set ``__tracebackhide__``
to a callable which gets the ``ExceptionInfo`` object. You can for example use
this to make sure unexpected exception types aren't hidden::
this to make sure unexpected exception types aren't hidden:
.. code-block:: python
import operator
import pytest
@@ -246,7 +258,9 @@ Detect if running from within a pytest run
Usually it is a bad idea to make application code
behave differently if called from a test. But if you
absolutely must find out if your application code is
running from a test you can do something like this::
running from a test you can do something like this:
.. code-block:: python
# content of conftest.py
@@ -257,7 +271,9 @@ running from a test you can do something like this::
def pytest_unconfigure(config):
del sys._called_from_test
and then check for the ``sys._called_from_test`` flag::
and then check for the ``sys._called_from_test`` flag:
.. code-block:: python
if hasattr(sys, '_called_from_test'):
# called from within a test run
@@ -273,7 +289,9 @@ Adding info to test report header
.. regendoc:wipe
It's easy to present extra information in a ``pytest`` run::
It's easy to present extra information in a ``pytest`` run:
.. code-block:: python
# content of conftest.py
@@ -293,10 +311,11 @@ which will add the string to the test header accordingly::
.. regendoc:wipe
You can also return a list of strings which will be considered as several
lines of information. You can of course also make the amount of reporting
information on e.g. the value of ``config.getoption('verbose')`` so that
you present more information appropriately::
It is also possible to return a list of strings which will be considered as several
lines of information. You may consider ``config.getoption('verbose')`` in order to
display more information if applicable:
.. code-block:: python
# content of conftest.py
@@ -335,10 +354,11 @@ profiling test duration
.. versionadded: 2.2
If you have a slow running large test suite you might want to find
out which tests are the slowest. Let's make an artificial test suite::
out which tests are the slowest. Let's make an artificial test suite:
.. code-block:: python
# content of test_some_are_slow.py
import time
def test_funcfast():
@@ -375,7 +395,9 @@ Sometimes you may have a testing situation which consists of a series
of test steps. If one step fails it makes no sense to execute further
steps as they are all expected to fail anyway and their tracebacks
add no insight. Here is a simple ``conftest.py`` file which introduces
an ``incremental`` marker which is to be used on classes::
an ``incremental`` marker which is to be used on classes:
.. code-block:: python
# content of conftest.py
@@ -394,7 +416,9 @@ an ``incremental`` marker which is to be used on classes::
pytest.xfail("previous test failed (%s)" %previousfailed.name)
These two hook implementations work together to abort incremental-marked
tests in a class. Here is a test module example::
tests in a class. Here is a test module example:
.. code-block:: python
# content of test_step.py
@@ -452,7 +476,9 @@ concept. It's however recommended to have explicit fixture references in your
tests or test classes rather than relying on implicitly executing
setup/teardown functions, especially if they are far away from the actual tests.
Here is an example for making a ``db`` fixture available in a directory::
Here is an example for making a ``db`` fixture available in a directory:
.. code-block:: python
# content of a/conftest.py
import pytest
@@ -464,20 +490,26 @@ Here is an example for making a ``db`` fixture available in a directory::
def db():
return DB()
and then a test module in that directory::
and then a test module in that directory:
.. code-block:: python
# content of a/test_db.py
def test_a1(db):
assert 0, db # to show value
another test module::
another test module:
.. code-block:: python
# content of a/test_db2.py
def test_a2(db):
assert 0, db # to show value
and then a module in a sister directory which will not see
the ``db`` fixture::
the ``db`` fixture:
.. code-block:: python
# content of b/test_error.py
def test_root(db): # no db here, will error out
@@ -553,7 +585,9 @@ environment you can implement a hook that gets called when the test
"report" object is about to be created. Here we write out all failing
test calls and also access a fixture (if it was used by the test) in
case you want to query/look at it during your post processing. In our
case we just write some information out to a ``failures`` file::
case we just write some information out to a ``failures`` file:
.. code-block:: python
# content of conftest.py
@@ -579,7 +613,9 @@ case we just write some information out to a ``failures`` file::
f.write(rep.nodeid + extra + "\n")
if you then have failing tests::
if you then have failing tests:
.. code-block:: python
# content of test_module.py
def test_fail1(tmpdir):
@@ -628,7 +664,9 @@ Making test result information available in fixtures
.. regendoc:wipe
If you want to make test result reports available in fixture finalizers
here is a little example implemented via a local plugin::
here is a little example implemented via a local plugin:
.. code-block:: python
# content of conftest.py
@@ -658,7 +696,9 @@ here is a little example implemented via a local plugin::
print ("executing test failed", request.node.nodeid)
if you then have failing tests::
if you then have failing tests:
.. code-block:: python
# content of test_module.py

View File

@@ -1,8 +0,0 @@
What users say:

`py.test is pretty much the best thing ever`_ (Alex Gaynor)

.. _`py.test is pretty much the best thing ever`:
   http://twitter.com/#!/alex_gaynor/status/22389410366

View File

@@ -1,3 +1,4 @@
:orphan:
.. _`funcargcompare`:

View File

@@ -19,17 +19,15 @@ Installation and Getting Started
Installation
----------------------------------------

Installation options::
Installation::

    pip install -U pytest # or
    easy_install -U pytest
    pip install -U pytest

To check that you have installed the correct version::

    $ pytest --version
    This is pytest version 3.0.1, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py

If you get an error, check out :ref:`installation issues`.
.. _`simpletest`:
@@ -196,7 +194,6 @@ Here are a few suggestions where to go next:
* :ref:`cmdline` for command line invocation examples
* :ref:`good practices <goodpractices>` for virtualenv, test layout
* :ref:`fixtures` for providing a functional baseline to your tests
* :ref:`apiref` for documentation and examples on using ``pytest``
* :ref:`plugins` managing and writing plugins
.. include:: links.inc

View File

@@ -1,59 +1,89 @@
:orphan:
.. _features:
pytest: helps you write better programs
=============================================
**a mature full-featured Python testing tool**
- runs on Posix/Windows, Python 2.6, 2.7 and 3.3-3.5, PyPy and (possibly still) Jython-2.5.1
- free and open source software, distributed under the terms of the :ref:`MIT license <license>`
- **well tested** with more than a thousand tests against itself
- **strict backward compatibility policy** for safe pytest upgrades
- :ref:`comprehensive online <toc>` and `PDF documentation <https://media.readthedocs.org/pdf/pytest/latest/pytest.pdf>`_
- many :ref:`third party plugins <extplugins>` and :ref:`builtin helpers <pytest helpers>`,
- used in :ref:`many small and large projects and organisations <projects>`
- comes with many :ref:`tested examples <examples>`
**provides easy no-boilerplate testing**
- makes it :ref:`easy to get started <getstarted>`,
has many :ref:`usage options <usage>`
- :ref:`assert with the assert statement`
- helpful :ref:`traceback and failing assertion reporting <tbreportdemo>`
- :ref:`print debugging <printdebugging>` and :ref:`the
capturing of standard output during test execution <captures>`
**scales from simple unit to complex functional testing**
- :ref:`modular parametrizeable fixtures <fixture>` (new in 2.3,
continuously improved)
- :ref:`parametrized test functions <parametrized test functions>`
- :ref:`mark`
- :ref:`skipping` (improved in 2.4)
- :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
- :ref:`continuously re-run failing tests <looponfailing>`
- :doc:`cache`
- flexible :ref:`Python test discovery`
**integrates with other testing methods and tools**:
- multi-paradigm: pytest can run ``nose``, ``unittest`` and
``doctest`` style test suites, including running testcases made for
Django and trial
- supports :ref:`good integration practices <goodpractices>`
- supports extended :ref:`xUnit style setup <xunitsetup>`
- supports domain-specific :ref:`non-python tests`
- supports generating `test coverage reports
<https://pypi.python.org/pypi/pytest-cov>`_
- supports :pep:`8` compliant coding styles in tests
**extensive plugin and customization system**:
- all collection, reporting, running aspects are delegated to hook functions
- customizations can be per-directory, per-project or per PyPI released plugin
- it is easy to add command line options or customize existing behaviour
- :ref:`easy to write your own plugins <writing-plugins>`
=======================================
.. _`easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
The ``pytest`` framework makes it easy to write small tests, yet
scales to support complex functional testing for applications and libraries.
An example of a simple test:
.. code-block:: python
# content of test_sample.py
def func(x):
return x + 1
def test_answer():
assert func(3) == 5
To execute it::
$ pytest
======= test session starts ========
collected 1 items
test_sample.py F
======= FAILURES ========
_______ test_answer ________
def test_answer():
> assert func(3) == 5
E assert 4 == 5
E + where 4 = func(3)
test_sample.py:5: AssertionError
======= 1 failed in 0.12 seconds ========
Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used.
See :ref:`Getting Started <getstarted>` for more examples.
Features
--------
- Detailed info on failing :ref:`assert statements <assert>` (no need to remember ``self.assert*`` names);
- :ref:`Auto-discovery <test discovery>` of test modules and functions;
- :ref:`Modular fixtures <fixture>` for managing small or parametrized long-lived test resources;
- Can run :ref:`unittest <unittest>` (including trial) and :ref:`nose <noseintegration>` test suites out of the box;
- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested);
- Rich plugin architecture, with over 150+ :ref:`external plugins <extplugins>` and thriving community;
Documentation
-------------
Please see :ref:`Contents <toc>` for full documentation, including installation, tutorials and PDF documents.
Bugs/Requests
-------------
Please use the `GitHub issue tracker <https://github.com/pytest-dev/pytest/issues>`_ to submit bugs or request features.
Changelog
---------
Consult the :ref:`Changelog <changelog>` page for fixes and enhancements of each version.
License
-------
Copyright Holger Krekel and others, 2004-2016.
Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
.. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE

View File

@@ -54,32 +54,11 @@ This autouse fixture will be executed for each test function and it
will delete the method ``request.session.Session.request``
so that any attempts within tests to create http requests will fail.
example: setting an environment variable for the test session
-------------------------------------------------------------
If you would like for an environment variable to be
configured for the entire test session, you can add this to your
top-level ``conftest.py`` file:
.. code-block:: python

    # content of conftest.py
    @pytest.fixture(scope='session', autouse=True)
    def enable_debugging(monkeypatch):
        monkeypatch.setenv("DEBUGGING_VERBOSITY", "4")
This auto-use fixture will set the ``DEBUGGING_VERBOSITY`` environment variable for
the entire test session.
Note that the ability to use a ``monkeypatch`` fixture from a ``session``-scoped
fixture was added in pytest-3.0.
Method reference of the monkeypatch fixture
-------------------------------------------

.. autoclass:: MonkeyPatch
   :members: setattr, replace, delattr, setitem, delitem, setenv, delenv, syspath_prepend, chdir, undo
``monkeypatch.setattr/delattr/delitem/delenv()`` all
by default raise an Exception if the target does not exist.
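
A short sketch of that default and the documented ``raising`` escape hatch::

    def test_cleanup(monkeypatch):
        # monkeypatch.delenv('MISSING_VAR') would raise KeyError here;
        # passing raising=False makes the deletion a no-op if the target is absent:
        monkeypatch.delenv('MISSING_VAR', raising=False)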

View File

@@ -1,3 +1,5 @@
.. _`noseintegration`:
Running tests written for nose
=======================================

View File

@@ -1,13 +0,0 @@
==================================================
Getting started basics
==================================================

.. toctree::
   :maxdepth: 2

   getting-started
   usage
   goodpractices
   projects
   faq

View File

@@ -215,6 +215,7 @@ The **metafunc** object
.. currentmodule:: _pytest.python

.. autoclass:: Metafunc
   :members:

.. automethod:: Metafunc.parametrize
.. automethod:: Metafunc.addcall(funcargs=None,id=_notexists,param=_notexists)

View File

@@ -37,7 +37,7 @@ Here is a little annotated list for some popular plugins:
to distribute tests to CPUs and remote hosts, to run in boxed
mode which allows to survive segmentation faults, to run in
looponfailing mode, automatically re-running failing tests
on file changes, see also :ref:`xdist`
on file changes.
* `pytest-instafail <http://pypi.python.org/pypi/pytest-instafail>`_:
to report failures while the test run is happening.
@@ -144,7 +144,7 @@ in the `pytest repository <https://github.com/pytest-dev/pytest>`_.
_pytest.monkeypatch
_pytest.nose
_pytest.pastebin
_pytest.pdb
_pytest.debugging
_pytest.pytester
_pytest.python
_pytest.recwarn

View File

@@ -1,3 +1,5 @@
:orphan:
=========================
Parametrize with fixtures
=========================

View File

@@ -1,3 +1,4 @@
.. _assertwarnings:
Asserting Warnings
=====================================================

View File

@@ -1,5 +0,0 @@
pytest development status
================================
https://travis-ci.org/pytest-dev/pytest

View File

@@ -1,5 +1,6 @@
.. _`unittest.TestCase`:
.. _`unittest`:
Support for unittest.TestCase / Integration of fixtures
=====================================================================

View File

@@ -460,7 +460,7 @@ Hooks are usually declared as do-nothing functions that contain only
documentation describing when the hook will be called and what return values
are expected.
For an example, see `newhooks.py`_ from :ref:`xdist`.
For an example, see `newhooks.py`_ from `xdist <https://github.com/pytest-dev/pytest-xdist>`_.
.. _`newhooks.py`: https://github.com/pytest-dev/pytest-xdist/blob/974bd566c599dc6a9ea291838c6f226197208b46/xdist/newhooks.py
@@ -623,7 +623,7 @@ Reference of objects involved in hooks
   :members:
   :show-inheritance:

.. autoclass:: _pytest.python.FixtureDef()
.. autoclass:: _pytest.fixtures.FixtureDef()
   :members:
   :show-inheritance:

@@ -644,7 +644,7 @@ Reference of objects involved in hooks
   :undoc-members:
   :show-inheritance:

.. autoclass:: pluggy.PluginManager()
.. autoclass:: _pytest.vendored_packages.pluggy.PluginManager()
   :members:

.. currentmodule:: _pytest.pytester

View File

@@ -1,197 +0,0 @@
.. _`xdist`:
xdist: pytest distributed testing plugin
===============================================================
The `pytest-xdist`_ plugin extends ``pytest`` with some unique
test execution modes:
* Looponfail: run your tests repeatedly in a subprocess. After each
run, ``pytest`` waits until a file in your project changes and then
re-runs the previously failing tests. This is repeated until all
tests pass. At this point a full run is again performed.
* multiprocess Load-balancing: if you have multiple CPUs or hosts you can use
them for a combined test run. This allows you to speed up
development or to use special resources of remote machines.
* Multi-Platform coverage: you can specify different Python interpreters
or different platforms and run tests in parallel on all of them.
Before running tests remotely, ``pytest`` efficiently "rsyncs" your
program source code to the remote place. All test results
are reported back and displayed to your local terminal.
You may specify different Python versions and interpreters.
Installation of xdist plugin
------------------------------
Install the plugin with::
easy_install pytest-xdist
# or
pip install pytest-xdist
or use the package in develop/in-place mode with
a checkout of the `pytest-xdist repository`_ ::
python setup.py develop
Usage examples
---------------------
.. _`xdistcpu`:
Speed up test runs by sending tests to multiple CPUs
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
To send tests to multiple CPUs, type::
pytest -n NUM
Especially for longer running tests or tests requiring
a lot of I/O this can lead to considerable speed ups.
Running tests in a Python subprocess
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
To instantiate a Python-2.7 subprocess and send tests to it, you may type::
pytest -d --tx popen//python=python2.7
This will start a subprocess which is run with the "python2.7"
Python interpreter, found in your system binary lookup path.
If you prefix the --tx option value like this::
pytest -d --tx 3*popen//python=python2.7
then three subprocesses will be created and the tests
will be distributed to them and run simultaneously.
.. _looponfailing:
Running tests in looponfailing mode
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
For refactoring a project with a medium or large test suite
you can use the looponfailing mode. Simply add the ``-f`` option::
pytest -f
and ``pytest`` will run your tests. Assuming you have failures it will then
wait for file changes and re-run the failing test set. File changes are detected by looking at ``looponfailroots`` root directories and all of their contents (recursively). If the default for this value does not work for you, you
can change it in your project by setting a configuration option::
# content of a pytest.ini or tox.ini file
[pytest]
looponfailroots = mypkg testdir
This would lead to only looking for file changes in the respective directories, specified relative to the ini-file's directory.
Sending tests to remote SSH accounts
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Suppose you have a package ``mypkg`` which contains some
tests that you can successfully run locally. And you also
have a ssh-reachable machine ``myhost``. Then
you can ad-hoc distribute your tests by typing::
pytest -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg
This will synchronize your ``mypkg`` package directory
with a remote ssh account and then collect and run your
tests at the remote side.
You can specify multiple ``--rsyncdir`` directories
to be sent to the remote side.
.. XXX CHECK
**NOTE:** For ``pytest`` to collect and send tests correctly
you not only need to make sure all code and tests
directories are rsynced, but that any test (sub) directory
also has an ``__init__.py`` file because internally
``pytest`` references tests as a fully qualified python
module path. **You will otherwise get strange errors**
during setup of the remote side.
Sending tests to remote Socket Servers
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Download the single-module `socketserver.py`_ Python program
and run it like this::
python socketserver.py
It will tell you that it starts listening on the default
port. You can now on your home machine specify this
new socket host with something like this::
pytest -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
.. _`atonce`:
Running tests on many platforms at once
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The basic command to run tests on multiple platforms is::
pytest --dist=each --tx=spec1 --tx=spec2
If you specify a windows host, an OSX host and a Linux
environment this command will send each test to all
platforms - and report back failures from all platforms
at once. The specifications strings use the `xspec syntax`_.
.. _`xspec syntax`: http://codespeak.net/execnet/basics.html#xspec
.. _`socketserver.py`: http://bitbucket.org/hpk42/execnet/raw/2af991418160/execnet/script/socketserver.py
.. _`execnet`: http://codespeak.net/execnet
Specifying test exec environments in an ini file
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
pytest (since version 2.0) supports ini-style configuration.
For example, you could make running with three subprocesses your default::
[pytest]
addopts = -n3
You can also add default environments like this::
[pytest]
addopts = --tx ssh=myhost//python=python2.7 --tx ssh=myhost//python=python2.6
and then just type::
pytest --dist=each
to run tests in each of the environments.
Specifying "rsync" dirs in an ini-file
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
In a ``pytest.ini`` or ``tox.ini`` file in your root project directory
you may specify directories to include or to exclude in synchronisation::
[pytest]
rsyncdirs = . mypkg helperpkg
rsyncignore = .hg
These directory specifications are relative to the directory
where the configuration file was found.
.. _`pytest-xdist`: http://pypi.python.org/pypi/pytest-xdist
.. _`pytest-xdist repository`: http://bitbucket.org/pytest-dev/pytest-xdist
.. _`pytest`: http://pytest.org

View File

@@ -1,3 +1,5 @@
:orphan:
.. _yieldfixture:
"yield_fixture" functions

View File

@@ -916,6 +916,18 @@ class TestMetafuncFunctional:
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(['* 1 skipped *'])

    def test_parametrized_ids_invalid_type(self, testdir):
        """Tests parametrized with ids as non-strings (#1857)."""
        testdir.makepyfile('''
            import pytest

            @pytest.mark.parametrize("x, expected", [(10, 20), (40, 80)], ids=(None, 2))
            def test_ids_numbers(x,expected):
                assert x * 2 == expected
        ''')
        result = testdir.runpytest()
        result.stdout.fnmatch_lines(['*ids must be list of strings, found: 2 (type: int)*'])

    def test_parametrize_with_identical_ids_get_unique_names(self, testdir):
        testdir.makepyfile("""
            import pytest

View File

@@ -816,3 +816,12 @@ def test_assert_indirect_tuple_no_warning(testdir):
    result = testdir.runpytest('-rw')
    output = '\n'.join(result.stdout.lines)
    assert 'WR1' not in output


def test_assert_with_unicode(monkeypatch, testdir):
    testdir.makepyfile(u"""
        # -*- coding: utf-8 -*-
        def test_unicode():
            assert u'유니코드' == u'Unicode'
    """)
    result = testdir.runpytest()
    result.stdout.fnmatch_lines(['*AssertionError*'])

View File

@@ -662,6 +662,28 @@ def test_dontreadfrominput():
    f.close() # just for completeness


@pytest.mark.skipif('sys.version_info < (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python3():
    from _pytest.capture import DontReadFromInput
    f = DontReadFromInput()
    fb = f.buffer
    assert not fb.isatty()
    pytest.raises(IOError, fb.read)
    pytest.raises(IOError, fb.readlines)
    pytest.raises(IOError, iter, fb)
    pytest.raises(ValueError, fb.fileno)
    f.close() # just for completeness


@pytest.mark.skipif('sys.version_info >= (3,)', reason='python2 has no buffer')
def test_dontreadfrominput_buffer_python2():
    from _pytest.capture import DontReadFromInput
    f = DontReadFromInput()
    with pytest.raises(AttributeError):
        f.buffer
    f.close() # just for completeness


@pytest.yield_fixture
def tmpfile(testdir):
    f = testdir.makepyfile("").open('wb+')

tox.ini
View File

@@ -5,7 +5,7 @@ distshare={homedir}/.tox/distshare
envlist=
    linting,py26,py27,py33,py34,py35,pypy,
    {py27,py35}-{pexpect,xdist,trial},
    py27-nobyte,doctesting,freeze
    py27-nobyte,doctesting,freeze,docs

[testenv]
commands= pytest --lsof -rfsxX {posargs:testing}

@@ -93,15 +93,15 @@ commands=
commands=pytest --doctest-modules _pytest
deps=

[testenv:doc]
[testenv:docs]
basepython=python
changedir=doc/en
deps=sphinx
     PyYAML
deps=
    sphinx
    PyYAML
commands=
    make clean
    make html
    sphinx-build -W -b html . _build

[testenv:doctesting]
basepython = python
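
With the env renamed to ``docs``, the documentation builds locally the same way CI runs it (assuming tox is installed)::

    tox -e docs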