Merge remote-tracking branch 'upstream/features' into warnings-displayed-by-default
# Conflicts:
#	CHANGELOG.rst
#	testing/test_terminal.py
commit 1266ebec83
@@ -4,5 +4,5 @@ Here's a quick checklist in what to include:

- [ ] Include a detailed description of the bug or suggestion
- [ ] `pip list` of the virtual environment you are using
- [ ] py.test and operating system versions
- [ ] pytest and operating system versions
- [ ] Minimal example if possible

@@ -33,3 +33,4 @@ env/
.coverage
.ropeproject
.idea
.hypothesis
AUTHORS (16 lines changed)
@@ -3,6 +3,7 @@ merlinux GmbH, Germany, office at merlinux eu

Contributors include::

Abdeali JK
Abhijeet Kasurde
Alexei Kozlenok
Anatoly Bubenkoff

@@ -10,6 +11,7 @@ Andreas Zeidler
Andy Freeland
Andrzej Ostrowski
Anthon van der Neut
Antony Lee
Armin Rigo
Aron Curzon
Aviv Palivoda

@@ -31,6 +33,7 @@ Christopher Gilling
Daniel Grana
Daniel Hahler
Daniel Nuri
Danielle Jenkins
Dave Hunt
David Díaz-Barquero
David Mohr

@@ -62,6 +65,7 @@ John Towler
Joshua Bronson
Jurko Gospodnetić
Katarzyna Jachim
Kale Kundert
Kevin Cox
Lee Kamentsky
Lukas Bednar

@@ -74,26 +78,38 @@ Martijn Faassen
Martin Prusse
Martin K. Scherer
Matt Bachmann
Matt Williams
Michael Aquilina
Michael Birtwell
Michael Droettboom
Mike Lundy
Nicolas Delaby
Oleg Pidsadnyi
Oliver Bestwalter
Omar Kohl
Pieter Mulder
Piotr Banaszkiewicz
Punyashloka Biswal
Quentin Pradet
Ralf Schmitt
Raphael Pierzina
Roman Bolshakov
Ronny Pfannschmidt
Ross Lawley
Russel Winder
Ryan Wooden
Samuele Pedroni
Steffen Allner
Stephan Obermann
Tareq Alayan
Ted Xiao
Simon Gomizelj
Stefano Taschini
Stefan Farmbauer
Thomas Grainger
Tom Viner
Trevor Bekolay
Vasily Kuznetsov
Wouter van Ackooy
Bernard Pratz
Stefan Zimmermann
CHANGELOG.rst (327 lines changed)

@@ -1,68 +1,316 @@
2.9.3.dev
=========
3.0.0.dev1
==========

**Bug Fixes**
**Incompatible changes**

* Text documents without any doctests no longer appear as "skipped".
  Thanks `@graingert`_ for reporting and providing a full PR (`#1580`_).
A number of incompatible changes were made in this release, with the intent of removing features deprecated for a long
time or changing existing behaviors in order to make them less surprising/more useful.

* Fix internal error issue when ``method`` argument is missing for
  ``teardown_method()``. Fixes (`#1605`_).
* The following deprecated commandline options were removed:

* Fix exception visualization in case the current working directory (CWD) gets
  deleted during testing. Fixes (`#1235`_). Thanks `@bukzor`_ for reporting. PR by
  `@marscher`_. Thanks `@nicoddemus`_ for his help.
  * ``--genscript``: no longer supported;
  * ``--no-assert``: use ``--assert=plain`` instead;
  * ``--nomagic``: use ``--assert=plain`` instead;
  * ``--report``: use ``-r`` instead;

* Ensure that a module within a namespace package can be found when it
  is specified on the command line together with the ``--pyargs``
  option. Thanks to `@taschini`_ for the PR (`#1597`_).
  Thanks to `@RedBeardCode`_ for the PR (`#1664`_).

* Raise helpful failure message, when requesting parametrized fixture at runtime,
  e.g. with ``request.getfuncargvalue``. BACKWARD INCOMPAT: Previously these params
  were simply never defined. So a fixture decorated like ``@pytest.fixture(params=[0, 1, 2])``
  only ran once. Now a failure is raised. Fixes (`#460`_). Thanks to
  `@nikratio`_ for bug report, `@RedBeardCode`_ and `@tomviner`_ for PR.
* ImportErrors in plugins now are a fatal error instead of issuing a
  pytest warning (`#1479`_). Thanks to `@The-Compiler`_ for the PR.

* Create correct diff for strings ending with newlines. Fixes (`#1553`_).
  Thanks `@Vogtinator`_ for reporting. Thanks to `@RedBeardCode`_ and
  `@tomviner`_ for PR.
* Removed support code for Python 3 versions < 3.3 (`#1627`_).

* Rename ``getfuncargvalue`` to ``getfixturevalue``. ``getfuncargvalue`` is
  deprecated but still present. Thanks to `@RedBeardCode`_ and `@tomviner`_
  for PR (`#1626`_).
* Removed all ``py.test-X*`` entry points. The versioned, suffixed entry points
  were never documented and were a leftover from a pre-virtualenv era. These entry
  points also created broken entry points in wheels, so removing them also
  removes a source of confusion for users (`#1632`_).
  Thanks `@obestwalter`_ for the PR.

* Always include full assertion explanation. The previous behaviour was hiding
  sub-expressions that happened to be False, assuming this was redundant information.
  Thanks `@bagerard`_ for reporting (`#1503`_). Thanks to `@davehunt`_ and
  `@tomviner`_ for PR.
* ``pytest.skip()`` now raises an error when used to decorate a test function,
  as opposed to its original intent (to imperatively skip a test inside a test function). Previously
  this usage would cause the entire module to be skipped (`#607`_).
  Thanks `@omarkohl`_ for the complete PR (`#1519`_).

* Exit tests if a collection error occurs. A poll indicated most users will hit CTRL-C
  anyway as soon as they see collection errors, so pytest might as well make that the default behavior (`#1421`_).
  A ``--continue-on-collection-errors`` option has been added to restore the previous behaviour.
  Thanks `@olegpidsadnyi`_ and `@omarkohl`_ for the complete PR (`#1628`_).

* Renamed the pytest ``pdb`` module (plugin) into ``debugging`` to avoid clashes with the builtin ``pdb`` module.

* Raise a helpful failure message when requesting a parametrized fixture at runtime,
  e.g. with ``request.getfixturevalue``. Previously these parameters were simply
  never defined, so a fixture decorated like ``@pytest.fixture(params=[0, 1, 2])``
  only ran once (`#460`_).
  Thanks to `@nikratio`_ for the bug report, `@RedBeardCode`_ and `@tomviner`_ for the PR.

* ``_pytest.monkeypatch.monkeypatch`` class has been renamed to ``_pytest.monkeypatch.MonkeyPatch``
  so it doesn't conflict with the ``monkeypatch`` fixture.

*

*

*

*

**New Features**

* Support nose-style ``__test__`` attribute on methods of classes,
  including unittest-style Classes. If set to ``False``, the test will not be
  collected.

* New ``doctest_namespace`` fixture for injecting names into the
  namespace in which doctests run.
  Thanks `@milliams`_ for the complete PR (`#1428`_).
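
  For instance, a ``conftest.py`` along these lines might inject a name that
  doctests can then use directly (a sketch only; the ``np`` name and module
  layout are illustrative)::

      # conftest.py
      import numpy
      import pytest

      @pytest.fixture(autouse=True)
      def add_np(doctest_namespace):
          # every collected doctest can now refer to ``np`` without importing it
          doctest_namespace["np"] = numpy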

* New ``name`` argument to the ``pytest.fixture`` decorator which allows a custom name
  for a fixture (to solve the funcarg-shadowing-fixture problem).
  Thanks `@novas0x2a`_ for the complete PR (`#1444`_).
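
  A short sketch of the intent (the fixture and test names here are illustrative)::

      import pytest

      @pytest.fixture(name="db")
      def db_fixture():
          # requested as ``db`` even though the function has a different name,
          # so the function name no longer shadows the fixture name
          return {"connected": True}

      def test_db(db):
          assert db["connected"]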

* New ``approx()`` function for easily comparing floating-point numbers in
  tests.
  Thanks `@kalekundert`_ for the complete PR (`#1441`_).
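
  For example (the values are illustrative; ``approx`` applies a relative
  tolerance by default)::

      from pytest import approx

      def test_sum():
          # avoids the classic 0.1 + 0.2 != 0.3 floating-point surprise
          assert 0.1 + 0.2 == approx(0.3)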

* Ability to add global properties in the final xunit output file by accessing
  the internal ``junitxml`` plugin (experimental).
  Thanks `@tareqalayan`_ for the complete PR (`#1454`_).

* New ``ExceptionInfo.match()`` method to match a regular expression on the
  string representation of an exception (`#372`_).
  Thanks `@omarkohl`_ for the complete PR (`#1502`_).
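
  Typical use inside a test might look like this (the function and message are
  illustrative)::

      import pytest

      def divide(a, b):
          if b == 0:
              raise ValueError("division by zero")
          return a / b

      def test_divide_by_zero():
          with pytest.raises(ValueError) as excinfo:
              divide(1, 0)
          # match() itself raises AssertionError if the pattern is not found
          excinfo.match(r"division by zero")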

* ``__tracebackhide__`` can now also be set to a callable which can then decide
  whether to filter the traceback based on the ``ExceptionInfo`` object passed
  to it. Thanks `@The-Compiler`_ for the complete PR (`#1526`_).
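
  A minimal sketch of a helper that hides its own frame except for one
  exception type (the helper name and the chosen exception are illustrative)::

      def check_equal(value, expected):
          # hide this frame from the traceback unless the failure is a TypeError
          __tracebackhide__ = lambda excinfo: not excinfo.errisinstance(TypeError)
          assert value == expected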

* New ``pytest_make_parametrize_id(config, val)`` hook which can be used by plugins to provide
  friendly strings for custom types.
  Thanks `@palaviv`_ for the PR.
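
  A plugin or ``conftest.py`` could implement it roughly like this (the
  ``Money`` class is an illustrative stand-in for any custom type)::

      # conftest.py
      class Money(object):
          def __init__(self, amount, currency):
              self.amount, self.currency = amount, currency

      def pytest_make_parametrize_id(config, val):
          if isinstance(val, Money):
              return "{0}{1}".format(val.currency, val.amount)
          return None  # fall back to pytest's default id generation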

* ``capsys`` and ``capfd`` now have a ``disabled()`` context-manager method, which
  can be used to temporarily disable capture within a test.
  Thanks `@nicoddemus`_ for the PR.
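
  For example (the printed text is illustrative)::

      def test_disabling_capturing(capsys):
          print("this output is captured")
          with capsys.disabled():
              print("output not captured, going directly to sys.stdout")
          print("this output is also captured")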

* New cli flag ``--fixtures-per-test``: shows which fixtures are being used
  for each selected test item. Shows the docstrings of fixtures by default.
  Can also show where fixtures are defined if combined with ``-v``.
  Thanks `@hackebrot`_ for the PR.

* Introduce ``pytest`` command as recommended entry point. Note that ``py.test``
  still works and is not scheduled for removal. Closes proposal
  `#1629`_. Thanks `@obestwalter`_ and `@davehunt`_ for the complete PR
  (`#1633`_).

* New cli flags:

  + ``--setup-plan``: performs normal collection and reports
    the potential setup and teardown, and does not execute any fixtures and tests;
  + ``--setup-only``: performs normal collection, executes setup and teardown of
    fixtures and reports them;
  + ``--setup-show``: performs normal test execution and additionally shows
    setup and teardown of fixtures;

  Thanks `@d6e`_, `@kvas-it`_, `@sallner`_ and `@omarkohl`_ for the PRs.

* New cli flag ``--override-ini``/``-o``: overrides values from the ini file.
  For example: ``"-o xfail_strict=True"``.
  Thanks `@blueyed`_ and `@fengxx`_ for the PR.

* New hooks:

  + ``pytest_fixture_setup(fixturedef, request)``: executes fixture setup;
  + ``pytest_fixture_post_finalizer(fixturedef)``: called after the fixture's
    finalizer and has access to the fixture's result cache.

  Thanks `@d6e`_, `@sallner`_.

* Issue warnings for asserts whose test is a tuple literal. Such asserts will
  never fail because tuples are always truthy and are usually a mistake
  (see `#1562`_). Thanks `@kvas-it`_ for the PR.
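
  The typical mistake being caught looks like this (illustrative)::

      def test_always_passes():
          result = 2
          # intended ``assert result == 3, "wrong result"`` but written as a
          # tuple, which is always truthy, so the assert can never fail --
          # pytest now warns about it
          assert (result == 3, "wrong result")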

* Allow passing a custom debugger class (e.g. ``--pdbcls=IPython.core.debugger:Pdb``).
  Thanks to `@anntzer`_ for the PR.

*

*

*

*

**Changes**

* Fixtures marked with ``@pytest.fixture`` can now use ``yield`` statements exactly like
  those marked with the ``@pytest.yield_fixture`` decorator. This change renders
  ``@pytest.yield_fixture`` deprecated and makes ``@pytest.fixture`` with ``yield`` statements
  the preferred way to write teardown code (`#1461`_).
  Thanks `@csaftoiu`_ for bringing this to attention and `@nicoddemus`_ for the PR.
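
  A sketch of the now-preferred style (the resource names are illustrative)::

      import pytest

      @pytest.fixture
      def connection():
          conn = {"open": True}   # setup
          yield conn              # the test runs here
          conn["open"] = False    # teardown runs after the test finishes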

* Explicitly passed parametrize ids do not get escaped to ascii (`#1351`_).
  Thanks `@ceridwen`_ for the PR.

* Parametrize ids can accept ``None`` as a specific test id, in which case the
  automatically generated id for that argument will be used.
  Thanks `@palaviv`_ for the complete PR (`#1468`_).
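
  For example (the parameter values are illustrative)::

      import pytest

      @pytest.mark.parametrize("value, expected", [
          (2, 4),
          (3, 9),
      ], ids=["two-squared", None])  # the second case keeps its auto-generated id
      def test_square(value, expected):
          assert value ** 2 == expected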

* Improved automatic id generation selection in case of duplicate ids in
  parametrize.
  Thanks `@palaviv`_ for the complete PR (`#1474`_).

* The pytest warnings summary is now shown by default. Added a new flag
  ``--disable-pytest-warnings`` to explicitly disable the warnings summary (`#1668`_).

* Renamed the pytest ``pdb`` module (plugin) into ``debugging``.
* Make ImportError during collection more explicit by reminding
  the user to check the name of the test module/package(s) (`#1426`_).
  Thanks `@omarkohl`_ for the complete PR (`#1520`_).

* Add ``build/`` and ``dist/`` to the default ``--norecursedirs`` list. Thanks
  `@mikofski`_ for the report and `@tomviner`_ for the PR (`#1544`_).

* ``pytest.raises`` in the context manager form accepts a custom
  ``message`` to raise when no exception occurred.
  Thanks `@palaviv`_ for the complete PR (`#1616`_).
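
  Roughly like this (the message text is illustrative)::

      import pytest

      def test_parsing_rejects_garbage():
          # if no ValueError is raised, pytest fails with the custom message
          with pytest.raises(ValueError, message="Expected ValueError from parsing"):
              int("not a number")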

* ``conftest.py`` files now benefit from assertion rewriting; previously it
  was only available for test modules. Thanks `@flub`_, `@sober7`_ and
  `@nicoddemus`_ for the PR (`#1619`_).

* Text documents without any doctests no longer appear as "skipped".
  Thanks `@graingert`_ for reporting and providing a full PR (`#1580`_).

* Ensure that a module within a namespace package can be found when it
  is specified on the command line together with the ``--pyargs``
  option. Thanks to `@taschini`_ for the PR (`#1597`_).

* Always include full assertion explanation during assertion rewriting. The previous behaviour was hiding
  sub-expressions that happened to be ``False``, assuming this was redundant information.
  Thanks `@bagerard`_ for reporting (`#1503`_). Thanks to `@davehunt`_ and
  `@tomviner`_ for the PR.

* ``OptionGroup.addoption()`` now checks if option names were already
  added before, to make it easier to track down issues like `#1618`_.
  Before, you only got exceptions later from the ``argparse`` library,
  giving no clue about the actual reason for double-added options.

* ``yield``-based tests are considered deprecated and will be removed in pytest-4.0.
  Thanks `@nicoddemus`_ for the PR.

* Using the ``pytest_funcarg__`` prefix to declare fixtures is considered deprecated and will be
  removed in pytest-4.0 (`#1684`_).
  Thanks `@nicoddemus`_ for the PR.

* Rename ``getfuncargvalue`` to ``getfixturevalue``. ``getfuncargvalue`` is
  still present but is now considered deprecated. Thanks to `@RedBeardCode`_ and `@tomviner`_
  for the PR (`#1626`_).

*

*

.. _#1580: https://github.com/pytest-dev/pytest/pull/1580
.. _#1605: https://github.com/pytest-dev/pytest/issues/1605
.. _#1597: https://github.com/pytest-dev/pytest/pull/1597
*

**Bug Fixes**

* Parametrize now correctly handles duplicated test ids.

* Fix internal error issue when the ``method`` argument is missing for
  ``teardown_method()`` (`#1605`_).

* Renamed the pytest ``pdb`` module (plugin) into ``debugging``.

* Fix exception visualization in case the current working directory (CWD) gets
  deleted during testing (`#1235`_). Thanks `@bukzor`_ for reporting. PR by
  `@marscher`_.

* Improve test output for logical expression with brackets (`#925`_).
  Thanks `@DRMacIver`_ for reporting and `@RedBeardCode`_ for the PR.

* Create correct diff for strings ending with newlines (`#1553`_).
  Thanks `@Vogtinator`_ for reporting and `@RedBeardCode`_ and
  `@tomviner`_ for the PR.

*

*

*

*

*

.. _#372: https://github.com/pytest-dev/pytest/issues/372
.. _#460: https://github.com/pytest-dev/pytest/pull/460
.. _#1553: https://github.com/pytest-dev/pytest/issues/1553
.. _#1626: https://github.com/pytest-dev/pytest/pull/1626
.. _#607: https://github.com/pytest-dev/pytest/issues/607
.. _#925: https://github.com/pytest-dev/pytest/issues/925
.. _#1235: https://github.com/pytest-dev/pytest/issues/1235
.. _#1351: https://github.com/pytest-dev/pytest/issues/1351
.. _#1421: https://github.com/pytest-dev/pytest/issues/1421
.. _#1426: https://github.com/pytest-dev/pytest/issues/1426
.. _#1428: https://github.com/pytest-dev/pytest/pull/1428
.. _#1441: https://github.com/pytest-dev/pytest/pull/1441
.. _#1444: https://github.com/pytest-dev/pytest/pull/1444
.. _#1454: https://github.com/pytest-dev/pytest/pull/1454
.. _#1461: https://github.com/pytest-dev/pytest/pull/1461
.. _#1468: https://github.com/pytest-dev/pytest/pull/1468
.. _#1474: https://github.com/pytest-dev/pytest/pull/1474
.. _#1479: https://github.com/pytest-dev/pytest/issues/1479
.. _#1502: https://github.com/pytest-dev/pytest/pull/1502
.. _#1503: https://github.com/pytest-dev/pytest/issues/1503
.. _#1519: https://github.com/pytest-dev/pytest/pull/1519
.. _#1520: https://github.com/pytest-dev/pytest/pull/1520
.. _#1526: https://github.com/pytest-dev/pytest/pull/1526
.. _#1544: https://github.com/pytest-dev/pytest/issues/1544
.. _#1553: https://github.com/pytest-dev/pytest/issues/1553
.. _#1562: https://github.com/pytest-dev/pytest/issues/1562
.. _#1580: https://github.com/pytest-dev/pytest/pull/1580
.. _#1597: https://github.com/pytest-dev/pytest/pull/1597
.. _#1605: https://github.com/pytest-dev/pytest/issues/1605
.. _#1616: https://github.com/pytest-dev/pytest/pull/1616
.. _#1618: https://github.com/pytest-dev/pytest/issues/1618
.. _#1619: https://github.com/pytest-dev/pytest/issues/1619
.. _#1626: https://github.com/pytest-dev/pytest/pull/1626
.. _#1668: https://github.com/pytest-dev/pytest/issues/1668
.. _#1627: https://github.com/pytest-dev/pytest/pull/1627
.. _#1628: https://github.com/pytest-dev/pytest/pull/1628
.. _#1629: https://github.com/pytest-dev/pytest/issues/1629
.. _#1632: https://github.com/pytest-dev/pytest/issues/1632
.. _#1633: https://github.com/pytest-dev/pytest/pull/1633
.. _#1664: https://github.com/pytest-dev/pytest/pull/1664
.. _#1684: https://github.com/pytest-dev/pytest/pull/1684

.. _@graingert: https://github.com/graingert
.. _@taschini: https://github.com/taschini
.. _@nikratio: https://github.com/nikratio
.. _@DRMacIver: https://github.com/DRMacIver
.. _@RedBeardCode: https://github.com/RedBeardCode
.. _@Vogtinator: https://github.com/Vogtinator
.. _@anntzer: https://github.com/anntzer
.. _@bagerard: https://github.com/bagerard
.. _@blueyed: https://github.com/blueyed
.. _@ceridwen: https://github.com/ceridwen
.. _@csaftoiu: https://github.com/csaftoiu
.. _@d6e: https://github.com/d6e
.. _@davehunt: https://github.com/davehunt
.. _@fengxx: https://github.com/fengxx
.. _@flub: https://github.com/flub
.. _@graingert: https://github.com/graingert
.. _@kalekundert: https://github.com/kalekundert
.. _@kvas-it: https://github.com/kvas-it
.. _@marscher: https://github.com/marscher
.. _@mikofski: https://github.com/mikofski
.. _@milliams: https://github.com/milliams
.. _@nikratio: https://github.com/nikratio
.. _@novas0x2a: https://github.com/novas0x2a
.. _@obestwalter: https://github.com/obestwalter
.. _@olegpidsadnyi: https://github.com/olegpidsadnyi
.. _@omarkohl: https://github.com/omarkohl
.. _@palaviv: https://github.com/palaviv
.. _@sallner: https://github.com/sallner
.. _@sober7: https://github.com/sober7
.. _@tareqalayan: https://github.com/tareqalayan
.. _@taschini: https://github.com/taschini

2.9.2
=====

@@ -128,11 +376,13 @@

* Fix (`#649`_): parametrized test nodes cannot be specified to run on the command line.

* Fix (`#138`_): better reporting for python 3.3+ chained exceptions

.. _#1437: https://github.com/pytest-dev/pytest/issues/1437
.. _#469: https://github.com/pytest-dev/pytest/issues/469
.. _#1431: https://github.com/pytest-dev/pytest/pull/1431
.. _#649: https://github.com/pytest-dev/pytest/issues/649
.. _#138: https://github.com/pytest-dev/pytest/issues/138

.. _@asottile: https://github.com/asottile

@@ -254,7 +504,6 @@

.. _@RonnyPfannschmidt: https://github.com/RonnyPfannschmidt
.. _@rabbbit: https://github.com/rabbbit
.. _@hackebrot: https://github.com/hackebrot
.. _@omarkohl: https://github.com/omarkohl
.. _@pquentin: https://github.com/pquentin

2.8.7

@@ -120,6 +120,8 @@ the following:

- an issue tracker for bug reports and enhancement requests.

- a `changelog <http://keepachangelog.com/>`_

If no contributor strongly objects and two agree, the repository can then be
transferred to the ``pytest-dev`` organisation.
README.rst (12 lines changed)

@@ -17,7 +17,7 @@
    :target: https://ci.appveyor.com/project/pytestbot/pytest

The ``pytest`` framework makes it easy to write small tests, yet
scales to support complex functional testing for applications and libraries.

An example of a simple test:

@@ -33,9 +33,9 @@ An example of a simple test:

To execute it::

    $ py.test
    $ pytest
    ======= test session starts ========
    platform linux -- Python 3.4.3, pytest-2.8.5, py-1.4.31, pluggy-0.3.1
    collected 1 items

    test_sample.py F

@@ -51,8 +51,8 @@ To execute it::

    test_sample.py:5: AssertionError
    ======= 1 failed in 0.12 seconds ========

Due to ``py.test``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://pytest.org/latest/getting-started.html#our-first-test-run>`_ for more examples.
Due to ``pytest``'s detailed assertion introspection, only plain ``assert`` statements are used. See `getting-started <http://pytest.org/latest/getting-started.html#our-first-test-run>`_ for more examples.

Features
--------

@@ -69,7 +69,7 @@ Features
- Can run `unittest <http://pytest.org/latest/unittest.html>`_ (or trial),
  `nose <http://pytest.org/latest/nose.html>`_ test suites out of the box;

- Python2.6+, Python3.2+, PyPy-2.3, Jython-2.5 (untested);
- Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested);

- Rich plugin architecture, with over 150+ `external plugins <http://pytest.org/latest/plugins.html#installing-external-plugins-searching>`_ and thriving community;

@@ -1,2 +1,2 @@
#
__version__ = '2.9.3.dev0'
__version__ = '2.10.0.dev1'
@ -1,5 +1,6 @@
|
|||
import sys
|
||||
from inspect import CO_VARARGS, CO_VARKEYWORDS
|
||||
import re
|
||||
|
||||
import py
|
||||
builtin_repr = repr
|
||||
|
@ -142,7 +143,8 @@ class TracebackEntry(object):
|
|||
_repr_style = None
|
||||
exprinfo = None
|
||||
|
||||
def __init__(self, rawentry):
|
||||
def __init__(self, rawentry, excinfo=None):
|
||||
self._excinfo = excinfo
|
||||
self._rawentry = rawentry
|
||||
self.lineno = rawentry.tb_lineno - 1
|
||||
|
||||
|
@ -223,16 +225,24 @@ class TracebackEntry(object):
|
|||
""" return True if the current frame has a var __tracebackhide__
|
||||
resolving to True
|
||||
|
||||
If __tracebackhide__ is a callable, it gets called with the
|
||||
ExceptionInfo instance and can decide whether to hide the traceback.
|
||||
|
||||
mostly for internal use
|
||||
"""
|
||||
try:
|
||||
return self.frame.f_locals['__tracebackhide__']
|
||||
tbh = self.frame.f_locals['__tracebackhide__']
|
||||
except KeyError:
|
||||
try:
|
||||
return self.frame.f_globals['__tracebackhide__']
|
||||
tbh = self.frame.f_globals['__tracebackhide__']
|
||||
except KeyError:
|
||||
return False
|
||||
|
||||
if py.builtin.callable(tbh):
|
||||
return tbh(self._excinfo)
|
||||
else:
|
||||
return tbh
|
||||
|
||||
def __str__(self):
|
||||
try:
|
||||
fn = str(self.path)
|
||||
|
@ -256,12 +266,13 @@ class Traceback(list):
|
|||
access to Traceback entries.
|
||||
"""
|
||||
Entry = TracebackEntry
|
||||
def __init__(self, tb):
|
||||
""" initialize from given python traceback object. """
|
||||
def __init__(self, tb, excinfo=None):
|
||||
""" initialize from given python traceback object and ExceptionInfo """
|
||||
self._excinfo = excinfo
|
||||
if hasattr(tb, 'tb_next'):
|
||||
def f(cur):
|
||||
while cur is not None:
|
||||
yield self.Entry(cur)
|
||||
yield self.Entry(cur, excinfo=excinfo)
|
||||
cur = cur.tb_next
|
||||
list.__init__(self, f(tb))
|
||||
else:
|
||||
|
@ -285,7 +296,7 @@ class Traceback(list):
|
|||
not codepath.relto(excludepath)) and
|
||||
(lineno is None or x.lineno == lineno) and
|
||||
(firstlineno is None or x.frame.code.firstlineno == firstlineno)):
|
||||
return Traceback(x._rawentry)
|
||||
return Traceback(x._rawentry, self._excinfo)
|
||||
return self
|
||||
|
||||
def __getitem__(self, key):
|
||||
|
@ -304,7 +315,7 @@ class Traceback(list):
|
|||
by default this removes all the TracebackEntries which are hidden
|
||||
(see ishidden() above)
|
||||
"""
|
||||
return Traceback(filter(fn, self))
|
||||
return Traceback(filter(fn, self), self._excinfo)
|
||||
|
||||
def getcrashentry(self):
|
||||
""" return last non-hidden traceback entry that lead
|
||||
|
@ -368,7 +379,7 @@ class ExceptionInfo(object):
|
|||
#: the exception type name
|
||||
self.typename = self.type.__name__
|
||||
#: the exception traceback (_pytest._code.Traceback instance)
|
||||
self.traceback = _pytest._code.Traceback(self.tb)
|
||||
self.traceback = _pytest._code.Traceback(self.tb, excinfo=self)
|
||||
|
||||
def __repr__(self):
|
||||
return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
|
||||
|
@ -430,6 +441,19 @@ class ExceptionInfo(object):
|
|||
loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
|
||||
return unicode(loc)
|
||||
|
||||
def match(self, regexp):
|
||||
"""
|
||||
Match the regular expression 'regexp' on the string representation of
|
||||
the exception. If it matches then True is returned (so that it is
|
||||
possible to write 'assert excinfo.match()'). If it doesn't match an
|
||||
AssertionError is raised.
|
||||
"""
|
||||
__tracebackhide__ = True
|
||||
if not re.search(regexp, str(self.value)):
|
||||
assert 0, "Pattern '{0!s}' not found in '{1!s}'".format(
|
||||
regexp, self.value)
|
||||
return True
|
||||
|
||||
|
||||
class FormattedExcinfo(object):
|
||||
""" presenting information about failing Functions and Generators. """
|
||||
|
@ -596,12 +620,36 @@ class FormattedExcinfo(object):
|
|||
break
|
||||
return ReprTraceback(entries, extraline, style=self.style)
|
||||
|
||||
def repr_excinfo(self, excinfo):
|
||||
reprtraceback = self.repr_traceback(excinfo)
|
||||
reprcrash = excinfo._getreprcrash()
|
||||
return ReprExceptionInfo(reprtraceback, reprcrash)
|
||||
|
||||
class TerminalRepr:
|
||||
def repr_excinfo(self, excinfo):
|
||||
if sys.version_info[0] < 3:
|
||||
reprtraceback = self.repr_traceback(excinfo)
|
||||
reprcrash = excinfo._getreprcrash()
|
||||
|
||||
return ReprExceptionInfo(reprtraceback, reprcrash)
|
||||
else:
|
||||
repr_chain = []
|
||||
e = excinfo.value
|
||||
descr = None
|
||||
while e is not None:
|
||||
reprtraceback = self.repr_traceback(excinfo)
|
||||
reprcrash = excinfo._getreprcrash()
|
||||
repr_chain += [(reprtraceback, reprcrash, descr)]
|
||||
if e.__cause__ is not None:
|
||||
e = e.__cause__
|
||||
excinfo = ExceptionInfo((type(e), e, e.__traceback__))
|
||||
descr = 'The above exception was the direct cause of the following exception:'
|
||||
elif e.__context__ is not None:
|
||||
e = e.__context__
|
||||
excinfo = ExceptionInfo((type(e), e, e.__traceback__))
|
||||
descr = 'During handling of the above exception, another exception occurred:'
|
||||
else:
|
||||
e = None
|
||||
repr_chain.reverse()
|
||||
return ExceptionChainRepr(repr_chain)
|
||||
|
||||
|
||||
class TerminalRepr(object):
|
||||
def __str__(self):
|
||||
s = self.__unicode__()
|
||||
if sys.version_info[0] < 3:
|
||||
|
@ -620,21 +668,47 @@ class TerminalRepr:
|
|||
return "<%s instance at %0x>" %(self.__class__, id(self))
|
||||
|
||||
|
||||
class ReprExceptionInfo(TerminalRepr):
|
||||
def __init__(self, reprtraceback, reprcrash):
|
||||
self.reprtraceback = reprtraceback
|
||||
self.reprcrash = reprcrash
|
||||
class ExceptionRepr(TerminalRepr):
|
||||
def __init__(self):
|
||||
self.sections = []
|
||||
|
||||
def addsection(self, name, content, sep="-"):
|
||||
self.sections.append((name, content, sep))
|
||||
|
||||
def toterminal(self, tw):
|
||||
self.reprtraceback.toterminal(tw)
|
||||
for name, content, sep in self.sections:
|
||||
tw.sep(sep, name)
|
||||
tw.line(content)
|
||||
|
||||
|
||||
class ExceptionChainRepr(ExceptionRepr):
|
||||
def __init__(self, chain):
|
||||
super(ExceptionChainRepr, self).__init__()
|
||||
self.chain = chain
|
||||
# reprcrash and reprtraceback of the outermost (the newest) exception
|
||||
# in the chain
|
||||
self.reprtraceback = chain[-1][0]
|
||||
self.reprcrash = chain[-1][1]
|
||||
|
||||
def toterminal(self, tw):
|
||||
for element in self.chain:
|
||||
element[0].toterminal(tw)
|
||||
if element[2] is not None:
|
||||
tw.line("")
|
||||
tw.line(element[2], yellow=True)
|
||||
super(ExceptionChainRepr, self).toterminal(tw)
|
||||
|
||||
|
||||
class ReprExceptionInfo(ExceptionRepr):
|
||||
def __init__(self, reprtraceback, reprcrash):
|
||||
super(ReprExceptionInfo, self).__init__()
|
||||
self.reprtraceback = reprtraceback
|
||||
self.reprcrash = reprcrash
|
||||
|
||||
def toterminal(self, tw):
|
||||
self.reprtraceback.toterminal(tw)
|
||||
super(ReprExceptionInfo, self).toterminal(tw)
|
||||
|
||||
class ReprTraceback(TerminalRepr):
|
||||
entrysep = "_ "
|
||||
|
||||
|
|
|
@ -4,7 +4,9 @@ support for presenting detailed information in failing assertions.
|
|||
import py
|
||||
import os
|
||||
import sys
|
||||
from _pytest.monkeypatch import monkeypatch
|
||||
|
||||
from _pytest.config import hookimpl
|
||||
from _pytest.monkeypatch import MonkeyPatch
|
||||
from _pytest.assertion import util
|
||||
|
||||
|
||||
|
@ -23,15 +25,6 @@ def pytest_addoption(parser):
|
|||
'rewrite' (the default) rewrites assert
|
||||
statements in test modules on import to
|
||||
provide assert expression information. """)
|
||||
group.addoption('--no-assert',
|
||||
action="store_true",
|
||||
default=False,
|
||||
dest="noassert",
|
||||
help="DEPRECATED equivalent to --assert=plain")
|
||||
group.addoption('--nomagic', '--no-magic',
|
||||
action="store_true",
|
||||
default=False,
|
||||
help="DEPRECATED equivalent to --assert=plain")
|
||||
|
||||
|
||||
class AssertionState:
|
||||
|
@ -42,10 +35,10 @@ class AssertionState:
|
|||
self.trace = config.trace.root.get("assertion")
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
mode = config.getvalue("assertmode")
|
||||
if config.getvalue("noassert") or config.getvalue("nomagic"):
|
||||
mode = "plain"
|
||||
@hookimpl(tryfirst=True)
|
||||
def pytest_load_initial_conftests(early_config, parser, args):
|
||||
ns, ns_unknown_args = parser.parse_known_and_unknown_args(args)
|
||||
mode = ns.assertmode
|
||||
if mode == "rewrite":
|
||||
try:
|
||||
import ast # noqa
|
||||
|
@ -57,25 +50,29 @@ def pytest_configure(config):
|
|||
if (sys.platform.startswith('java') or
|
||||
sys.version_info[:3] == (2, 6, 0)):
|
||||
mode = "reinterp"
|
||||
|
||||
early_config._assertstate = AssertionState(early_config, mode)
|
||||
warn_about_missing_assertion(mode, early_config.pluginmanager)
|
||||
|
||||
if mode != "plain":
|
||||
_load_modules(mode)
|
||||
m = monkeypatch()
|
||||
config._cleanup.append(m.undo)
|
||||
m = MonkeyPatch()
|
||||
early_config._cleanup.append(m.undo)
|
||||
m.setattr(py.builtin.builtins, 'AssertionError',
|
||||
reinterpret.AssertionError) # noqa
|
||||
|
||||
hook = None
|
||||
if mode == "rewrite":
|
||||
hook = rewrite.AssertionRewritingHook() # noqa
|
||||
hook = rewrite.AssertionRewritingHook(early_config) # noqa
|
||||
sys.meta_path.insert(0, hook)
|
||||
warn_about_missing_assertion(mode)
|
||||
config._assertstate = AssertionState(config, mode)
|
||||
config._assertstate.hook = hook
|
||||
config._assertstate.trace("configured with mode set to %r" % (mode,))
|
||||
|
||||
early_config._assertstate.hook = hook
|
||||
early_config._assertstate.trace("configured with mode set to %r" % (mode,))
|
||||
def undo():
|
||||
hook = config._assertstate.hook
|
||||
hook = early_config._assertstate.hook
|
||||
if hook is not None and hook in sys.meta_path:
|
||||
sys.meta_path.remove(hook)
|
||||
config.add_cleanup(undo)
|
||||
early_config.add_cleanup(undo)
|
||||
|
||||
|
||||
def pytest_collection(session):
|
||||
|
@ -154,7 +151,7 @@ def _load_modules(mode):
|
|||
from _pytest.assertion import rewrite # noqa
|
||||
|
||||
|
||||
def warn_about_missing_assertion(mode):
|
||||
def warn_about_missing_assertion(mode, pluginmanager):
|
||||
try:
|
||||
assert False
|
||||
except AssertionError:
|
||||
|
@ -166,10 +163,18 @@ def warn_about_missing_assertion(mode):
|
|||
else:
|
||||
specifically = "failing tests may report as passing"
|
||||
|
||||
sys.stderr.write("WARNING: " + specifically +
|
||||
" because assert statements are not executed "
|
||||
"by the underlying Python interpreter "
|
||||
"(are you using python -O?)\n")
|
||||
# temporarily disable capture so we can print our warning
|
||||
capman = pluginmanager.getplugin('capturemanager')
|
||||
try:
|
||||
out, err = capman.suspendcapture()
|
||||
sys.stderr.write("WARNING: " + specifically +
|
||||
" because assert statements are not executed "
|
||||
"by the underlying Python interpreter "
|
||||
"(are you using python -O?)\n")
|
||||
finally:
|
||||
capman.resumecapture()
|
||||
sys.stdout.write(out)
|
||||
sys.stderr.write(err)
|
||||
|
||||
|
||||
# Expose this plugin's implementation for the pytest_assertrepr_compare hook
|
||||
|
|
|
@ -1,6 +1,7 @@
|
|||
"""Rewrite assertion AST to produce nice error messages"""
|
||||
|
||||
import ast
|
||||
import _ast
|
||||
import errno
|
||||
import itertools
|
||||
import imp
|
||||
|
@ -44,20 +45,18 @@ else:
|
|||
class AssertionRewritingHook(object):
|
||||
"""PEP302 Import hook which rewrites asserts."""
|
||||
|
||||
def __init__(self):
|
||||
def __init__(self, config):
|
||||
self.config = config
|
||||
self.fnpats = config.getini("python_files")
|
||||
self.session = None
|
||||
self.modules = {}
|
||||
self._register_with_pkg_resources()
|
||||
|
||||
def set_session(self, session):
|
||||
self.fnpats = session.config.getini("python_files")
|
||||
self.session = session
|
||||
|
||||
def find_module(self, name, path=None):
|
||||
if self.session is None:
|
||||
return None
|
||||
sess = self.session
|
||||
state = sess.config._assertstate
|
||||
state = self.config._assertstate
|
||||
state.trace("find_module called for: %s" % name)
|
||||
names = name.rsplit(".", 1)
|
||||
lastname = names[-1]
|
||||
|
@ -86,24 +85,11 @@ class AssertionRewritingHook(object):
|
|||
return None
|
||||
else:
|
||||
fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
|
||||
|
||||
fn_pypath = py.path.local(fn)
|
||||
# Is this a test file?
|
||||
if not sess.isinitpath(fn):
|
||||
# We have to be very careful here because imports in this code can
|
||||
# trigger a cycle.
|
||||
self.session = None
|
||||
try:
|
||||
for pat in self.fnpats:
|
||||
if fn_pypath.fnmatch(pat):
|
||||
state.trace("matched test file %r" % (fn,))
|
||||
break
|
||||
else:
|
||||
return None
|
||||
finally:
|
||||
self.session = sess
|
||||
else:
|
||||
state.trace("matched test file (was specified on cmdline): %r" %
|
||||
(fn,))
|
||||
if not self._should_rewrite(fn_pypath, state):
|
||||
return None
|
||||
|
||||
# The requested module looks like a test file, so rewrite it. This is
|
||||
# the most magical part of the process: load the source, rewrite the
|
||||
# asserts, and load the rewritten source. We also cache the rewritten
|
||||
|
@ -140,7 +126,7 @@ class AssertionRewritingHook(object):
|
|||
co = _read_pyc(fn_pypath, pyc, state.trace)
|
||||
if co is None:
|
||||
state.trace("rewriting %r" % (fn,))
|
||||
source_stat, co = _rewrite_test(state, fn_pypath)
|
||||
source_stat, co = _rewrite_test(self.config, fn_pypath)
|
||||
if co is None:
|
||||
# Probably a SyntaxError in the test.
|
||||
return None
|
||||
|
@ -151,6 +137,32 @@ class AssertionRewritingHook(object):
|
|||
self.modules[name] = co, pyc
|
||||
return self
|
||||
|
||||
def _should_rewrite(self, fn_pypath, state):
|
||||
# always rewrite conftest files
|
||||
fn = str(fn_pypath)
|
||||
if fn_pypath.basename == 'conftest.py':
|
||||
state.trace("rewriting conftest file: %r" % (fn,))
|
||||
return True
|
||||
elif self.session is not None:
|
||||
if self.session.isinitpath(fn):
|
||||
state.trace("matched test file (was specified on cmdline): %r" %
|
||||
(fn,))
|
||||
return True
|
||||
else:
|
||||
# modules not passed explicitly on the command line are only
|
||||
# rewritten if they match the naming convention for test files
|
||||
session = self.session # avoid a cycle here
|
||||
self.session = None
|
||||
try:
|
||||
for pat in self.fnpats:
|
||||
if fn_pypath.fnmatch(pat):
|
||||
state.trace("matched test file %r" % (fn,))
|
||||
return True
|
||||
finally:
|
||||
self.session = session
|
||||
del session
|
||||
return False
|
||||
|
||||
def load_module(self, name):
|
||||
# If there is an existing module object named 'fullname' in
|
||||
# sys.modules, the loader must use that existing module. (Otherwise,
|
||||
|
@ -241,8 +253,9 @@ N = "\n".encode("utf-8")
|
|||
cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
|
||||
BOM_UTF8 = '\xef\xbb\xbf'
|
||||
|
||||
def _rewrite_test(state, fn):
|
||||
def _rewrite_test(config, fn):
|
||||
"""Try to read and rewrite *fn* and return the code object."""
|
||||
state = config._assertstate
|
||||
try:
|
||||
stat = fn.stat()
|
||||
source = fn.read("rb")
|
||||
|
@ -287,7 +300,7 @@ def _rewrite_test(state, fn):
|
|||
# Let this pop up again in the real import.
|
||||
state.trace("failed to parse: %r" % (fn,))
|
||||
return None, None
|
||||
rewrite_asserts(tree)
|
||||
rewrite_asserts(tree, fn, config)
|
||||
try:
|
||||
co = compile(tree, fn.strpath, "exec")
|
||||
except SyntaxError:
|
||||
|
@ -343,9 +356,9 @@ def _read_pyc(source, pyc, trace=lambda x: None):
|
|||
return co
|
||||
|
||||
|
||||
def rewrite_asserts(mod):
|
||||
def rewrite_asserts(mod, module_path=None, config=None):
|
||||
"""Rewrite the assert statements in mod."""
|
||||
AssertionRewriter().run(mod)
|
||||
AssertionRewriter(module_path, config).run(mod)
|
||||
|
||||
|
||||
def _saferepr(obj):
|
||||
|
@ -532,6 +545,11 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
|
||||
"""
|
||||
|
||||
def __init__(self, module_path, config):
|
||||
super(AssertionRewriter, self).__init__()
|
||||
self.module_path = module_path
|
||||
self.config = config
|
||||
|
||||
def run(self, mod):
|
||||
"""Find all assert statements in *mod* and rewrite them."""
|
||||
if not mod.body:
|
||||
|
@ -672,6 +690,10 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
the expression is false.
|
||||
|
||||
"""
|
||||
if isinstance(assert_.test, ast.Tuple) and self.config is not None:
|
||||
fslocation = (self.module_path, assert_.lineno)
|
||||
self.config.warn('R1', 'assertion is always true, perhaps '
|
||||
'remove parentheses?', fslocation=fslocation)
|
||||
self.statements = []
|
||||
self.variables = []
|
||||
self.variable_counter = itertools.count()
|
||||
|
@ -855,6 +877,8 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
def visit_Compare(self, comp):
|
||||
self.push_format_context()
|
||||
left_res, left_expl = self.visit(comp.left)
|
||||
if isinstance(comp.left, (_ast.Compare, _ast.BoolOp)):
|
||||
left_expl = "({0})".format(left_expl)
|
||||
res_variables = [self.variable() for i in range(len(comp.ops))]
|
||||
load_names = [ast.Name(v, ast.Load()) for v in res_variables]
|
||||
store_names = [ast.Name(v, ast.Store()) for v in res_variables]
|
||||
|
@ -864,6 +888,8 @@ class AssertionRewriter(ast.NodeVisitor):
|
|||
results = [left_res]
|
||||
for i, op, next_operand in it:
|
||||
next_res, next_expl = self.visit(next_operand)
|
||||
if isinstance(next_operand, (_ast.Compare, _ast.BoolOp)):
|
||||
next_expl = "({0})".format(next_expl)
|
||||
results.append(next_res)
|
||||
sym = binop_map[op.__class__]
|
||||
syms.append(ast.Str(sym))
|
||||
|
|
|
@ -4,6 +4,7 @@ per-test stdout/stderr capturing mechanism.
|
|||
"""
|
||||
from __future__ import with_statement
|
||||
|
||||
import contextlib
|
||||
import sys
|
||||
import os
|
||||
from tempfile import TemporaryFile
|
||||
|
@ -146,8 +147,8 @@ class CaptureManager:
|
|||
def pytest_internalerror(self, excinfo):
|
||||
self.reset_capturings()
|
||||
|
||||
def suspendcapture_item(self, item, when):
|
||||
out, err = self.suspendcapture()
|
||||
def suspendcapture_item(self, item, when, in_=False):
|
||||
out, err = self.suspendcapture(in_=in_)
|
||||
item.add_report_section(when, "stdout", out)
|
||||
item.add_report_section(when, "stderr", err)
|
||||
|
||||
|
@ -162,7 +163,7 @@ def capsys(request):
|
|||
"""
|
||||
if "capfd" in request._funcargs:
|
||||
raise request.raiseerror(error_capsysfderror)
|
||||
request.node._capfuncarg = c = CaptureFixture(SysCapture)
|
||||
request.node._capfuncarg = c = CaptureFixture(SysCapture, request)
|
||||
return c
|
||||
|
||||
@pytest.fixture
|
||||
|
@ -175,17 +176,18 @@ def capfd(request):
|
|||
request.raiseerror(error_capsysfderror)
|
||||
if not hasattr(os, 'dup'):
|
||||
pytest.skip("capfd funcarg needs os.dup")
|
||||
request.node._capfuncarg = c = CaptureFixture(FDCapture)
|
||||
request.node._capfuncarg = c = CaptureFixture(FDCapture, request)
|
||||
return c
|
||||
|
||||
|
||||
class CaptureFixture:
|
||||
def __init__(self, captureclass):
|
||||
def __init__(self, captureclass, request):
|
||||
self.captureclass = captureclass
|
||||
self.request = request
|
||||
|
||||
def _start(self):
|
||||
self._capture = MultiCapture(out=True, err=True, in_=False,
|
||||
Capture=self.captureclass)
|
||||
Capture=self.captureclass)
|
||||
self._capture.start_capturing()
|
||||
|
||||
def close(self):
|
||||
|
@ -200,6 +202,15 @@ class CaptureFixture:
|
|||
except AttributeError:
|
||||
return self._outerr
|
||||
|
||||
@contextlib.contextmanager
|
||||
def disabled(self):
|
||||
capmanager = self.request.config.pluginmanager.getplugin('capturemanager')
|
||||
capmanager.suspendcapture_item(self.request.node, "call", in_=True)
|
||||
try:
|
||||
yield
|
||||
finally:
|
||||
capmanager.resumecapture()
|
||||
|
||||
|
||||
def safe_text_dupfile(f, mode, default_encoding="UTF8"):
|
||||
""" return a open text file object that's a duplicate of f on the
|
||||
|
@ -452,7 +463,7 @@ def _readline_workaround():
|
|||
|
||||
Pdb uses readline support where available--when not running from the Python
|
||||
prompt, the readline module is not imported until running the pdb REPL. If
|
||||
running py.test with the --pdb option this means the readline module is not
|
||||
running pytest with the --pdb option this means the readline module is not
|
||||
imported until after I/O capture has been started.
|
||||
|
||||
This is a problem for pyreadline, which is often used to implement readline
|
||||
|
|
|
@ -0,0 +1,203 @@
|
|||
"""
|
||||
python version compatibility code
|
||||
"""
|
||||
import sys
|
||||
import inspect
|
||||
import types
|
||||
import re
|
||||
import functools
|
||||
|
||||
import py
|
||||
|
||||
import _pytest
|
||||
|
||||
|
||||
|
||||
try:
|
||||
import enum
|
||||
except ImportError: # pragma: no cover
|
||||
# Only available in Python 3.4+ or as a backport
|
||||
enum = None
|
||||
|
||||
_PY3 = sys.version_info > (3, 0)
|
||||
_PY2 = not _PY3
|
||||
|
||||
|
||||
NoneType = type(None)
|
||||
NOTSET = object()
|
||||
|
||||
if hasattr(inspect, 'signature'):
|
||||
def _format_args(func):
|
||||
return str(inspect.signature(func))
|
||||
else:
|
||||
def _format_args(func):
|
||||
return inspect.formatargspec(*inspect.getargspec(func))
|
||||
|
||||
isfunction = inspect.isfunction
|
||||
isclass = inspect.isclass
|
||||
# used to work around a python2 exception info leak
|
||||
exc_clear = getattr(sys, 'exc_clear', lambda: None)
|
||||
# The type of re.compile objects is not exposed in Python.
|
||||
REGEX_TYPE = type(re.compile(''))
|
||||
|
||||
|
||||
def is_generator(func):
|
||||
try:
|
||||
return _pytest._code.getrawcode(func).co_flags & 32 # generator function
|
||||
except AttributeError: # builtin functions have no bytecode
|
||||
# assume them to not be generators
|
||||
return False
|
||||
|
||||
|
||||
def getlocation(function, curdir):
|
||||
import inspect
|
||||
fn = py.path.local(inspect.getfile(function))
|
||||
lineno = py.builtin._getcode(function).co_firstlineno
|
||||
if fn.relto(curdir):
|
||||
fn = fn.relto(curdir)
|
||||
return "%s:%d" %(fn, lineno+1)
|
||||
|
||||
|
||||
def num_mock_patch_args(function):
|
||||
""" return number of arguments used up by mock arguments (if any) """
|
||||
patchings = getattr(function, "patchings", None)
|
||||
if not patchings:
|
||||
return 0
|
||||
mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
|
||||
if mock is not None:
|
||||
return len([p for p in patchings
|
||||
if not p.attribute_name and p.new is mock.DEFAULT])
|
||||
return len(patchings)
|
||||
|
||||
|
||||
def getfuncargnames(function, startindex=None):
|
||||
# XXX merge with main.py's varnames
|
||||
#assert not isclass(function)
|
||||
realfunction = function
|
||||
while hasattr(realfunction, "__wrapped__"):
|
||||
realfunction = realfunction.__wrapped__
|
||||
if startindex is None:
|
||||
startindex = inspect.ismethod(function) and 1 or 0
|
||||
if realfunction != function:
|
||||
startindex += num_mock_patch_args(function)
|
||||
function = realfunction
|
||||
if isinstance(function, functools.partial):
|
||||
argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
|
||||
partial = function
|
||||
argnames = argnames[len(partial.args):]
|
||||
if partial.keywords:
|
||||
for kw in partial.keywords:
|
||||
argnames.remove(kw)
|
||||
else:
|
||||
argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
|
||||
defaults = getattr(function, 'func_defaults',
|
||||
getattr(function, '__defaults__', None)) or ()
|
||||
numdefaults = len(defaults)
|
||||
if numdefaults:
|
||||
return tuple(argnames[startindex:-numdefaults])
|
||||
return tuple(argnames[startindex:])
|
||||
|
||||
|
||||
|
||||
if sys.version_info[:2] == (2, 6):
|
||||
def isclass(object):
|
||||
""" Return true if the object is a class. Overrides inspect.isclass for
|
||||
python 2.6 because it will return True for objects which always return
|
||||
something on __getattr__ calls (see #1035).
|
||||
Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
|
||||
"""
|
||||
return isinstance(object, (type, types.ClassType))
|
||||
|
||||
|
||||
if _PY3:
|
||||
import codecs
|
||||
|
||||
STRING_TYPES = bytes, str
|
||||
|
||||
def _escape_strings(val):
|
||||
"""If val is pure ascii, returns it as a str(). Otherwise, escapes
|
||||
bytes objects into a sequence of escaped bytes:
|
||||
|
||||
b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
|
||||
|
||||
and escapes unicode objects into a sequence of escaped unicode
|
||||
ids, e.g.:
|
||||
|
||||
'4\\nV\\U00043efa\\x0eMXWB\\x1e\\u3028\\u15fd\\xcd\\U0007d944'
|
||||
|
||||
note:
|
||||
the obvious "v.decode('unicode-escape')" will return
|
||||
valid utf-8 unicode if it finds them in bytes, but we
|
||||
want to return escaped bytes for any byte, even if they match
|
||||
a utf-8 string.
|
||||
|
||||
"""
|
||||
if isinstance(val, bytes):
|
||||
if val:
|
||||
# source: http://goo.gl/bGsnwC
|
||||
encoded_bytes, _ = codecs.escape_encode(val)
|
||||
return encoded_bytes.decode('ascii')
|
||||
else:
|
||||
# empty bytes crashes codecs.escape_encode (#1087)
|
||||
return ''
|
||||
else:
|
||||
return val.encode('unicode_escape').decode('ascii')
|
||||
else:
|
||||
STRING_TYPES = bytes, str, unicode
|
||||
|
||||
def _escape_strings(val):
|
||||
"""In py2 bytes and str are the same type, so return if it's a bytes
|
||||
object, return it unchanged if it is a full ascii string,
|
||||
otherwise escape it into its binary form.
|
||||
|
||||
If it's a unicode string, change the unicode characters into
|
||||
unicode escapes.
|
||||
|
||||
"""
|
||||
if isinstance(val, bytes):
|
||||
try:
|
||||
return val.encode('ascii')
|
||||
except UnicodeDecodeError:
|
||||
return val.encode('string-escape')
|
||||
else:
|
||||
return val.encode('unicode-escape')
|
||||
|
||||
|
||||
def get_real_func(obj):
|
||||
""" gets the real function object of the (possibly) wrapped object by
|
||||
functools.wraps or functools.partial.
|
||||
"""
|
||||
while hasattr(obj, "__wrapped__"):
|
||||
obj = obj.__wrapped__
|
||||
if isinstance(obj, functools.partial):
|
||||
obj = obj.func
|
||||
return obj
|
||||
|
||||
def getfslineno(obj):
|
||||
# xxx let decorators etc specify a sane ordering
|
||||
obj = get_real_func(obj)
|
||||
if hasattr(obj, 'place_as'):
|
||||
obj = obj.place_as
|
||||
fslineno = _pytest._code.getfslineno(obj)
|
||||
assert isinstance(fslineno[1], int), obj
|
||||
return fslineno
|
||||
|
||||
def getimfunc(func):
|
||||
try:
|
||||
return func.__func__
|
||||
except AttributeError:
|
||||
try:
|
||||
return func.im_func
|
||||
except AttributeError:
|
||||
return func
|
||||
|
||||
def safe_getattr(object, name, default):
|
||||
""" Like getattr but return default upon any Exception.
|
||||
|
||||
Attribute access can potentially fail for 'evil' Python objects.
|
||||
See issue214
|
||||
"""
|
||||
try:
|
||||
return getattr(object, name, default)
|
||||
except Exception:
|
||||
return default
|
|
@ -63,9 +63,10 @@ class UsageError(Exception):
|
|||
_preinit = []
|
||||
|
||||
default_plugins = (
|
||||
"mark main terminal runner python debugging unittest capture skipping "
|
||||
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
|
||||
"junitxml resultlog doctest cacheprovider").split()
|
||||
"mark main terminal runner python fixtures debugging unittest capture skipping "
|
||||
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion "
|
||||
"junitxml resultlog doctest cacheprovider freeze_support "
|
||||
"setuponly setupplan").split()
|
||||
|
||||
builtin_plugins = set(default_plugins)
|
||||
builtin_plugins.add("pytester")
|
||||
|
@ -655,20 +656,17 @@ class Argument:
|
|||
self._long_opts.append(opt)
|
||||
|
||||
def __repr__(self):
|
||||
retval = 'Argument('
|
||||
args = []
|
||||
if self._short_opts:
|
||||
retval += '_short_opts: ' + repr(self._short_opts) + ', '
|
||||
args += ['_short_opts: ' + repr(self._short_opts)]
|
||||
if self._long_opts:
|
||||
retval += '_long_opts: ' + repr(self._long_opts) + ', '
|
||||
retval += 'dest: ' + repr(self.dest) + ', '
|
||||
args += ['_long_opts: ' + repr(self._long_opts)]
|
||||
args += ['dest: ' + repr(self.dest)]
|
||||
if hasattr(self, 'type'):
|
||||
retval += 'type: ' + repr(self.type) + ', '
|
||||
args += ['type: ' + repr(self.type)]
|
||||
if hasattr(self, 'default'):
|
||||
retval += 'default: ' + repr(self.default) + ', '
|
||||
if retval[-2:] == ', ': # always long enough to test ("Argument(" )
|
||||
retval = retval[:-2]
|
||||
retval += ')'
|
||||
return retval
|
||||
args += ['default: ' + repr(self.default)]
|
||||
return 'Argument({0})'.format(', '.join(args))
|
||||
|
||||
|
||||
class OptionGroup:
|
||||
|
@ -686,6 +684,10 @@ class OptionGroup:
|
|||
results in help showing '--two-words' only, but --twowords gets
|
||||
accepted **and** the automatic destination is in args.twowords
|
||||
"""
|
||||
conflict = set(optnames).intersection(
|
||||
name for opt in self.options for name in opt.names())
|
||||
if conflict:
|
||||
raise ValueError("option names %s already added" % conflict)
|
||||
option = Argument(*optnames, **attrs)
|
||||
self._addoption_instance(option, shortupper=False)
|
||||
|
||||
|
@ -923,10 +925,7 @@ class Config(object):
|
|||
args[:] = self.getini("addopts") + args
|
||||
self._checkversion()
|
||||
self.pluginmanager.consider_preparse(args)
|
||||
try:
|
||||
self.pluginmanager.load_setuptools_entrypoints("pytest11")
|
||||
except ImportError as e:
|
||||
self.warn("I2", "could not load setuptools entry import: %s" % (e,))
|
||||
self.pluginmanager.load_setuptools_entrypoints("pytest11")
|
||||
self.pluginmanager.consider_env()
|
||||
self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
|
||||
if self.known_args_namespace.confcutdir is None and self.inifile:
|
||||
|
@ -999,14 +998,16 @@ class Config(object):
|
|||
description, type, default = self._parser._inidict[name]
|
||||
except KeyError:
|
||||
raise ValueError("unknown configuration value: %r" %(name,))
|
||||
try:
|
||||
value = self.inicfg[name]
|
||||
except KeyError:
|
||||
if default is not None:
|
||||
return default
|
||||
if type is None:
|
||||
return ''
|
||||
return []
|
||||
value = self._get_override_ini_value(name)
|
||||
if value is None:
|
||||
try:
|
||||
value = self.inicfg[name]
|
||||
except KeyError:
|
||||
if default is not None:
|
||||
return default
|
||||
if type is None:
|
||||
return ''
|
||||
return []
|
||||
if type == "pathlist":
|
||||
dp = py.path.local(self.inicfg.config.path).dirpath()
|
||||
l = []
|
||||
|
@ -1037,6 +1038,20 @@ class Config(object):
|
|||
l.append(relroot)
|
||||
return l
|
||||
|
||||
def _get_override_ini_value(self, name):
|
||||
value = None
|
||||
# override_ini is a list of list, to support both -o foo1=bar1 foo2=bar2 and
|
||||
# and -o foo1=bar1 -o foo2=bar2 options
|
||||
# always use the last item if multiple value set for same ini-name,
|
||||
# e.g. -o foo=bar1 -o foo=bar2 will set foo to bar2
|
||||
if self.getoption("override_ini", None):
|
||||
for ini_config_list in self.option.override_ini:
|
||||
for ini_config in ini_config_list:
|
||||
(key, user_ini_value) = ini_config.split("=", 1)
|
||||
if key == name:
|
||||
value = user_ini_value
|
||||
return value
|
||||
|
||||
def getoption(self, name, default=notset, skip=False):
|
||||
""" return command line option value.
|
||||
|
||||
|
|
|
@ -8,21 +8,33 @@ import pytest

def pytest_addoption(parser):
    group = parser.getgroup("general")
    group._addoption('--pdb',
        action="store_true", dest="usepdb", default=False,
        help="start the interactive Python debugger on errors.")
    group._addoption(
        '--pdb', dest="usepdb", action="store_true",
        help="start the interactive Python debugger on errors.")
    group._addoption(
        '--pdbcls', dest="usepdb_cls", metavar="modulename:classname",
        help="start a custom interactive Python debugger on errors. "
             "For example: --pdbcls=IPython.core.debugger:Pdb")

def pytest_namespace():
    return {'set_trace': pytestPDB().set_trace}

def pytest_configure(config):
    if config.getvalue("usepdb"):
    if config.getvalue("usepdb") or config.getvalue("usepdb_cls"):
        config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
    if config.getvalue("usepdb_cls"):
        modname, classname = config.getvalue("usepdb_cls").split(":")
        __import__(modname)
        pdb_cls = getattr(sys.modules[modname], classname)
    else:
        pdb_cls = pdb.Pdb
    pytestPDB._pdb_cls = pdb_cls

    old = (pdb.set_trace, pytestPDB._pluginmanager)
    def fin():
        pdb.set_trace, pytestPDB._pluginmanager = old
        pytestPDB._config = None
        pytestPDB._pdb_cls = pdb.Pdb
    pdb.set_trace = pytest.set_trace
    pytestPDB._pluginmanager = config.pluginmanager
    pytestPDB._config = config

@ -32,6 +44,7 @@ class pytestPDB:
    """ Pseudo PDB that defers to the real pdb. """
    _pluginmanager = None
    _config = None
    _pdb_cls = pdb.Pdb

    def set_trace(self):
        """ invoke PDB set_trace debugging, dropping any IO capturing. """

@ -45,7 +58,7 @@ class pytestPDB:
            tw.line()
            tw.sep(">", "PDB set_trace (IO-capturing turned off)")
            self._pluginmanager.hook.pytest_enter_pdb(config=self._config)
        pdb.Pdb().set_trace(frame)
        self._pdb_cls().set_trace(frame)


class PdbInvoke:

@ -98,7 +111,7 @@ def _find_last_non_hidden_frame(stack):


def post_mortem(t):
    class Pdb(pdb.Pdb):
    class Pdb(pytestPDB._pdb_cls):
        def get_stack(self, f, t):
            stack, i = pdb.Pdb.get_stack(self, f, t)
            if f is None:
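For context, the ``set_trace`` entry registered via ``pytest_namespace`` above is what tests call to drop into the configured debugger class; a minimal hedged sketch (the ``compute_answer`` helper is made up for illustration):

    import pytest

    def compute_answer():
        return 42

    def test_debugging_entry_point():
        answer = compute_answer()
        pytest.set_trace()   # invokes pytestPDB.set_trace(), honoring --pdbcls
        assert answer == 42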
@ -5,7 +5,7 @@ import traceback

import pytest
from _pytest._code.code import TerminalRepr, ReprFileLocation, ExceptionInfo
from _pytest.python import FixtureRequest
from _pytest.fixtures import FixtureRequest


@ -71,6 +71,8 @@ class DoctestItem(pytest.Item):
        if self.dtest is not None:
            self.fixture_request = _setup_fixtures(self)
            globs = dict(getfixture=self.fixture_request.getfixturevalue)
            for name, value in self.fixture_request.getfuncargvalue('doctest_namespace').items():
                globs[name] = value
            self.dtest.globs.update(globs)

    def runtest(self):

@ -157,6 +159,7 @@ class DoctestTextfile(pytest.Module):
        name = self.fspath.basename
        globs = {'__name__': '__main__'}

        optionflags = get_optionflags(self)
        runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
                                     checker=_get_checker())

@ -286,3 +289,11 @@ def _get_allow_bytes_flag():
    """
    import doctest
    return doctest.register_optionflag('ALLOW_BYTES')


@pytest.fixture(scope='session')
def doctest_namespace():
    """
    Inject names into the doctest namespace.
    """
    return dict()
File diff suppressed because it is too large
@ -0,0 +1,45 @@
"""
Provides a function to report all internal modules for using freezing tools
pytest
"""

def pytest_namespace():
    return {'freeze_includes': freeze_includes}


def freeze_includes():
    """
    Returns a list of module names used by py.test that should be
    included by cx_freeze.
    """
    import py
    import _pytest
    result = list(_iter_all_modules(py))
    result += list(_iter_all_modules(_pytest))
    return result


def _iter_all_modules(package, prefix=''):
    """
    Iterates over the names of all modules that can be found in the given
    package, recursively.
    Example:
        _iter_all_modules(_pytest) ->
            ['_pytest.assertion.newinterpret',
             '_pytest.capture',
             '_pytest.core',
             ...
            ]
    """
    import os
    import pkgutil
    if type(package) is not str:
        path, prefix = package.__path__[0], package.__name__ + '.'
    else:
        path = package
    for _, name, is_package in pkgutil.iter_modules([path]):
        if is_package:
            for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
                yield prefix + m
        else:
            yield prefix + name
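For context, ``freeze_includes()`` is exposed as ``pytest.freeze_includes`` through the ``pytest_namespace`` hook above. A hedged sketch of consuming it from a ``cx_Freeze`` ``setup.py`` (the application name and script are placeholders):

    # setup.py (sketch)
    from cx_Freeze import setup, Executable
    import pytest

    setup(
        name="myapp",
        options={"build_exe": {"includes": pytest.freeze_includes()}},
        executables=[Executable("myapp.py")],
    )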
@ -1,132 +0,0 @@
|
|||
""" (deprecated) generate a single-file self-contained version of pytest """
|
||||
import os
|
||||
import sys
|
||||
import pkgutil
|
||||
|
||||
import py
|
||||
import _pytest
|
||||
|
||||
|
||||
|
||||
def find_toplevel(name):
|
||||
for syspath in sys.path:
|
||||
base = py.path.local(syspath)
|
||||
lib = base/name
|
||||
if lib.check(dir=1):
|
||||
return lib
|
||||
mod = base.join("%s.py" % name)
|
||||
if mod.check(file=1):
|
||||
return mod
|
||||
raise LookupError(name)
|
||||
|
||||
def pkgname(toplevel, rootpath, path):
|
||||
parts = path.parts()[len(rootpath.parts()):]
|
||||
return '.'.join([toplevel] + [x.purebasename for x in parts])
|
||||
|
||||
def pkg_to_mapping(name):
|
||||
toplevel = find_toplevel(name)
|
||||
name2src = {}
|
||||
if toplevel.check(file=1): # module
|
||||
name2src[toplevel.purebasename] = toplevel.read()
|
||||
else: # package
|
||||
for pyfile in toplevel.visit('*.py'):
|
||||
pkg = pkgname(name, toplevel, pyfile)
|
||||
name2src[pkg] = pyfile.read()
|
||||
# with wheels py source code might be not be installed
|
||||
# and the resulting genscript is useless, just bail out.
|
||||
assert name2src, "no source code found for %r at %r" %(name, toplevel)
|
||||
return name2src
|
||||
|
||||
def compress_mapping(mapping):
|
||||
import base64, pickle, zlib
|
||||
data = pickle.dumps(mapping, 2)
|
||||
data = zlib.compress(data, 9)
|
||||
data = base64.encodestring(data)
|
||||
data = data.decode('ascii')
|
||||
return data
|
||||
|
||||
|
||||
def compress_packages(names):
|
||||
mapping = {}
|
||||
for name in names:
|
||||
mapping.update(pkg_to_mapping(name))
|
||||
return compress_mapping(mapping)
|
||||
|
||||
def generate_script(entry, packages):
|
||||
data = compress_packages(packages)
|
||||
tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py')
|
||||
exe = tmpl.read()
|
||||
exe = exe.replace('@SOURCES@', data)
|
||||
exe = exe.replace('@ENTRY@', entry)
|
||||
return exe
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("debugconfig")
|
||||
group.addoption("--genscript", action="store", default=None,
|
||||
dest="genscript", metavar="path",
|
||||
help="create standalone pytest script at given target path.")
|
||||
|
||||
def pytest_cmdline_main(config):
|
||||
import _pytest.config
|
||||
genscript = config.getvalue("genscript")
|
||||
if genscript:
|
||||
tw = _pytest.config.create_terminal_writer(config)
|
||||
tw.line("WARNING: usage of genscript is deprecated.",
|
||||
red=True)
|
||||
deps = ['py', '_pytest', 'pytest'] # pluggy is vendored
|
||||
if sys.version_info < (2,7):
|
||||
deps.append("argparse")
|
||||
tw.line("generated script will run on python2.6-python3.3++")
|
||||
else:
|
||||
tw.line("WARNING: generated script will not run on python2.6 "
|
||||
"due to 'argparse' dependency. Use python2.6 "
|
||||
"to generate a python2.6 compatible script", red=True)
|
||||
script = generate_script(
|
||||
'import pytest; raise SystemExit(pytest.cmdline.main())',
|
||||
deps,
|
||||
)
|
||||
genscript = py.path.local(genscript)
|
||||
genscript.write(script)
|
||||
tw.line("generated pytest standalone script: %s" % genscript,
|
||||
bold=True)
|
||||
return 0
|
||||
|
||||
|
||||
def pytest_namespace():
|
||||
return {'freeze_includes': freeze_includes}
|
||||
|
||||
|
||||
def freeze_includes():
|
||||
"""
|
||||
Returns a list of module names used by py.test that should be
|
||||
included by cx_freeze.
|
||||
"""
|
||||
result = list(_iter_all_modules(py))
|
||||
result += list(_iter_all_modules(_pytest))
|
||||
return result
|
||||
|
||||
|
||||
def _iter_all_modules(package, prefix=''):
|
||||
"""
|
||||
Iterates over the names of all modules that can be found in the given
|
||||
package, recursively.
|
||||
|
||||
Example:
|
||||
_iter_all_modules(_pytest) ->
|
||||
['_pytest.assertion.newinterpret',
|
||||
'_pytest.capture',
|
||||
'_pytest.core',
|
||||
...
|
||||
]
|
||||
"""
|
||||
if type(package) is not str:
|
||||
path, prefix = package.__path__[0], package.__name__ + '.'
|
||||
else:
|
||||
path = package
|
||||
for _, name, is_package in pkgutil.iter_modules([path]):
|
||||
if is_package:
|
||||
for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
|
||||
yield prefix + m
|
||||
else:
|
||||
yield prefix + name
|
|
@ -20,6 +20,10 @@ def pytest_addoption(parser):
    group.addoption('--debug',
               action="store_true", dest="debug", default=False,
               help="store internal tracing debug information in 'pytestdebug.log'.")
    # support for "--override-ini ININAME=INIVALUE" to override values from the ini file
    # Example: '-o xfail_strict=True'.
    group._addoption('-o', '--override-ini', nargs='*', dest="override_ini", action="append",
                     help="overrides ini values which do not have a separate command-line flag")


@pytest.hookimpl(hookwrapper=True)

@ -92,8 +96,8 @@ def showhelp(config):
    tw.line()
    tw.line()

    tw.line("to see available markers type: py.test --markers")
    tw.line("to see available fixtures type: py.test --fixtures")
    tw.line("to see available markers type: pytest --markers")
    tw.line("to see available fixtures type: pytest --fixtures")
    tw.line("(shown according to specified file_or_dir or current dir "
            "if not specified)")
@ -34,7 +34,7 @@ def pytest_addoption(parser):
    .. note::

        This function should be implemented only in plugins or ``conftest.py``
        files situated at the tests root directory due to how py.test
        files situated at the tests root directory due to how pytest
        :ref:`discovers plugins during startup <pluginorder>`.

    :arg parser: To add command line options, call

@ -156,6 +156,12 @@ def pytest_pyfunc_call(pyfuncitem):
def pytest_generate_tests(metafunc):
    """ generate (multiple) parametrized calls to a test function."""

@hookspec(firstresult=True)
def pytest_make_parametrize_id(config, val):
    """Return a user-friendly string representation of the given ``val`` that will be used
    by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``.
    """

# -------------------------------------------------------------------------
# generic runtest related hooks
# -------------------------------------------------------------------------
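For context, a minimal sketch of implementing the new ``pytest_make_parametrize_id`` hook in a ``conftest.py`` (the ``Fruit`` class is a made-up example type, not part of pytest):

    # content of conftest.py (sketch)
    class Fruit:
        def __init__(self, name):
            self.name = name

    def pytest_make_parametrize_id(config, val):
        if isinstance(val, Fruit):
            return val.name   # becomes the id shown for this parametrized value
        return None           # fall back to pytest's default id generation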
@ -212,6 +218,19 @@ def pytest_runtest_logreport(report):
    """ process a test setup/call/teardown report relating to
    the respective phase of executing a test. """

# -------------------------------------------------------------------------
# Fixture related hooks
# -------------------------------------------------------------------------

@hookspec(firstresult=True)
def pytest_fixture_setup(fixturedef, request):
    """ performs fixture setup execution. """

def pytest_fixture_post_finalizer(fixturedef):
    """ called after fixture teardown, but before the cache is cleared so
    the fixture result cache ``fixturedef.cached_result`` can
    still be accessed."""

# -------------------------------------------------------------------------
# test session related hooks
# -------------------------------------------------------------------------
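For context, the two new fixture hooks above can be implemented from a plugin or ``conftest.py``; a hedged sketch that merely logs fixture lifecycles (the print output is illustrative only):

    # content of conftest.py (sketch)
    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_fixture_setup(fixturedef, request):
        print("SETUP    %s" % fixturedef.argname)
        yield

    def pytest_fixture_post_finalizer(fixturedef):
        # per the hook contract above, fixturedef.cached_result has not been cleared yet
        print("TEARDOWN %s" % fixturedef.argname)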
@ -265,6 +265,7 @@ class LogXML(object):
|
|||
], 0)
|
||||
self.node_reporters = {} # nodeid -> _NodeReporter
|
||||
self.node_reporters_ordered = []
|
||||
self.global_properties = []
|
||||
|
||||
def finalize(self, report):
|
||||
nodeid = getattr(report, 'nodeid', report)
|
||||
|
@ -284,9 +285,12 @@ class LogXML(object):
|
|||
if key in self.node_reporters:
|
||||
# TODO: breaks for --dist=each
|
||||
return self.node_reporters[key]
|
||||
|
||||
reporter = _NodeReporter(nodeid, self)
|
||||
|
||||
self.node_reporters[key] = reporter
|
||||
self.node_reporters_ordered.append(reporter)
|
||||
|
||||
return reporter
|
||||
|
||||
def add_stats(self, key):
|
||||
|
@ -372,7 +376,9 @@ class LogXML(object):
|
|||
numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped']
|
||||
|
||||
logfile.write('<?xml version="1.0" encoding="utf-8"?>')
|
||||
|
||||
logfile.write(Junit.testsuite(
|
||||
self._get_global_properties_node(),
|
||||
[x.to_xml() for x in self.node_reporters_ordered],
|
||||
name="pytest",
|
||||
errors=self.stats['error'],
|
||||
|
@ -385,3 +391,18 @@ class LogXML(object):
|
|||
def pytest_terminal_summary(self, terminalreporter):
|
||||
terminalreporter.write_sep("-",
|
||||
"generated xml file: %s" % (self.logfile))
|
||||
|
||||
def add_global_property(self, name, value):
|
||||
self.global_properties.append((str(name), bin_xml_escape(value)))
|
||||
|
||||
def _get_global_properties_node(self):
|
||||
"""Return a Junit node containing custom properties, if any.
|
||||
"""
|
||||
if self.global_properties:
|
||||
return Junit.properties(
|
||||
[
|
||||
Junit.property(name=name, value=value)
|
||||
for name, value in self.global_properties
|
||||
]
|
||||
)
|
||||
return ''
|
||||
|
|
|
@ -25,7 +25,7 @@ EXIT_NOTESTSCOLLECTED = 5
|
|||
|
||||
def pytest_addoption(parser):
|
||||
parser.addini("norecursedirs", "directory patterns to avoid for recursion",
|
||||
type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
|
||||
type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg'])
|
||||
parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
|
||||
type="args", default=[])
|
||||
#parser.addini("dirpatterns",
|
||||
|
@ -44,6 +44,9 @@ def pytest_addoption(parser):
|
|||
help="run pytest in strict mode, warnings become errors.")
|
||||
group._addoption("-c", metavar="file", type=str, dest="inifilename",
|
||||
help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
|
||||
group._addoption("--continue-on-collection-errors", action="store_true",
|
||||
default=False, dest="continue_on_collection_errors",
|
||||
help="Force test execution even if collection errors occur.")
|
||||
|
||||
group = parser.getgroup("collect", "collection")
|
||||
group.addoption('--collectonly', '--collect-only', action="store_true",
|
||||
|
@ -129,6 +132,11 @@ def pytest_collection(session):
|
|||
return session.perform_collect()
|
||||
|
||||
def pytest_runtestloop(session):
|
||||
if (session.testsfailed and
|
||||
not session.config.option.continue_on_collection_errors):
|
||||
raise session.Interrupted(
|
||||
"%d errors during collection" % session.testsfailed)
|
||||
|
||||
if session.config.option.collectonly:
|
||||
return True
|
||||
|
||||
|
|
|
@ -5,11 +5,14 @@ import re
|
|||
|
||||
from py.builtin import _basestring
|
||||
|
||||
import pytest
|
||||
|
||||
RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
|
||||
|
||||
|
||||
def pytest_funcarg__monkeypatch(request):
|
||||
"""The returned ``monkeypatch`` funcarg provides these
|
||||
@pytest.fixture
|
||||
def monkeypatch(request):
|
||||
"""The returned ``monkeypatch`` fixture provides these
|
||||
helper methods to modify objects, dictionaries or os.environ::
|
||||
|
||||
monkeypatch.setattr(obj, name, value, raising=True)
|
||||
|
@ -26,7 +29,7 @@ def pytest_funcarg__monkeypatch(request):
|
|||
parameter determines if a KeyError or AttributeError
|
||||
will be raised if the set/deletion operation has no target.
|
||||
"""
|
||||
mpatch = monkeypatch()
|
||||
mpatch = MonkeyPatch()
|
||||
request.addfinalizer(mpatch.undo)
|
||||
return mpatch
|
||||
|
||||
|
@ -93,7 +96,7 @@ class Notset:
|
|||
notset = Notset()
|
||||
|
||||
|
||||
class monkeypatch:
|
||||
class MonkeyPatch:
|
||||
""" Object keeping a record of setattr/item/env/syspath changes. """
|
||||
|
||||
def __init__(self):
|
||||
|
|
|
@ -321,7 +321,8 @@ def linecomp(request):
|
|||
return LineComp()
|
||||
|
||||
|
||||
def pytest_funcarg__LineMatcher(request):
|
||||
@pytest.fixture(name='LineMatcher')
|
||||
def LineMatcher_fixture(request):
|
||||
return LineMatcher
|
||||
|
||||
|
||||
|
@ -377,10 +378,10 @@ class RunResult:
|
|||
|
||||
|
||||
class Testdir:
|
||||
"""Temporary test directory with tools to test/run py.test itself.
|
||||
"""Temporary test directory with tools to test/run pytest itself.
|
||||
|
||||
This is based on the ``tmpdir`` fixture but provides a number of
|
||||
methods which aid with testing py.test itself. Unless
|
||||
methods which aid with testing pytest itself. Unless
|
||||
:py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
|
||||
current working directory.
|
||||
|
||||
|
@ -591,7 +592,7 @@ class Testdir:
|
|||
"""Return the collection node of a file.
|
||||
|
||||
This is like :py:meth:`getnode` but uses
|
||||
:py:meth:`parseconfigure` to create the (configured) py.test
|
||||
:py:meth:`parseconfigure` to create the (configured) pytest
|
||||
Config instance.
|
||||
|
||||
:param path: A :py:class:`py.path.local` instance of the file.
|
||||
|
@ -659,7 +660,7 @@ class Testdir:
|
|||
:py:class:`HookRecorder` instance.
|
||||
|
||||
This runs the :py:func:`pytest.main` function to run all of
|
||||
py.test inside the test process itself like
|
||||
pytest inside the test process itself like
|
||||
:py:meth:`inline_run`. However the return value is a tuple of
|
||||
the collection items and a :py:class:`HookRecorder` instance.
|
||||
|
||||
|
@ -672,7 +673,7 @@ class Testdir:
|
|||
"""Run ``pytest.main()`` in-process, returning a HookRecorder.
|
||||
|
||||
This runs the :py:func:`pytest.main` function to run all of
|
||||
py.test inside the test process itself. This means it can
|
||||
pytest inside the test process itself. This means it can
|
||||
return a :py:class:`HookRecorder` instance which gives more
|
||||
detailed results from the run than can be done by matching
|
||||
stdout/stderr from :py:meth:`runpytest`.
|
||||
|
@ -758,9 +759,9 @@ class Testdir:
|
|||
return args
|
||||
|
||||
def parseconfig(self, *args):
|
||||
"""Return a new py.test Config instance from given commandline args.
|
||||
"""Return a new pytest Config instance from given commandline args.
|
||||
|
||||
This invokes the py.test bootstrapping code in _pytest.config
|
||||
This invokes the pytest bootstrapping code in _pytest.config
|
||||
to create a new :py:class:`_pytest.core.PluginManager` and
|
||||
call the pytest_cmdline_parse hook to create new
|
||||
:py:class:`_pytest.config.Config` instance.
|
||||
|
@ -780,7 +781,7 @@ class Testdir:
|
|||
return config
|
||||
|
||||
def parseconfigure(self, *args):
|
||||
"""Return a new py.test configured Config instance.
|
||||
"""Return a new pytest configured Config instance.
|
||||
|
||||
This returns a new :py:class:`_pytest.config.Config` instance
|
||||
like :py:meth:`parseconfig`, but also calls the
|
||||
|
@ -795,7 +796,7 @@ class Testdir:
|
|||
def getitem(self, source, funcname="test_func"):
|
||||
"""Return the test item for a test function.
|
||||
|
||||
This writes the source to a python file and runs py.test's
|
||||
This writes the source to a python file and runs pytest's
|
||||
collection on the resulting module, returning the test item
|
||||
for the requested function name.
|
||||
|
||||
|
@ -815,7 +816,7 @@ class Testdir:
|
|||
def getitems(self, source):
|
||||
"""Return all test items collected from the module.
|
||||
|
||||
This writes the source to a python file and runs py.test's
|
||||
This writes the source to a python file and runs pytest's
|
||||
collection on the resulting module, returning all test items
|
||||
contained within.
|
||||
|
||||
|
@ -827,7 +828,7 @@ class Testdir:
|
|||
"""Return the module collection node for ``source``.
|
||||
|
||||
This writes ``source`` to a file using :py:meth:`makepyfile`
|
||||
and then runs the py.test collection on it, returning the
|
||||
and then runs the pytest collection on it, returning the
|
||||
collection node for the test module.
|
||||
|
||||
:param source: The source code of the module to collect.
|
||||
|
@ -927,7 +928,7 @@ class Testdir:
|
|||
|
||||
def _getpytestargs(self):
|
||||
# we cannot use "(sys.executable,script)"
|
||||
# because on windows the script is e.g. a py.test.exe
|
||||
# because on windows the script is e.g. a pytest.exe
|
||||
return (sys.executable, _pytest_fullpath,) # noqa
|
||||
|
||||
def runpython(self, script):
|
||||
|
@ -942,7 +943,7 @@ class Testdir:
|
|||
return self.run(sys.executable, "-c", command)
|
||||
|
||||
def runpytest_subprocess(self, *args, **kwargs):
|
||||
"""Run py.test as a subprocess with given arguments.
|
||||
"""Run pytest as a subprocess with given arguments.
|
||||
|
||||
Any plugins added to the :py:attr:`plugins` list will be added
|
||||
using the ``-p`` command line option. Additionally
|
||||
|
@ -970,9 +971,9 @@ class Testdir:
|
|||
return self.run(*args)
|
||||
|
||||
def spawn_pytest(self, string, expect_timeout=10.0):
|
||||
"""Run py.test using pexpect.
|
||||
"""Run pytest using pexpect.
|
||||
|
||||
This makes sure to use the right py.test and sets up the
|
||||
This makes sure to use the right pytest and sets up the
|
||||
temporary directory locations.
|
||||
|
||||
The pexpect child is returned.
|
||||
|
|
_pytest/python.py (1678 lines): file diff suppressed because it is too large
@ -73,7 +73,10 @@ def runtestprotocol(item, log=True, nextitem=None):
|
|||
rep = call_and_report(item, "setup", log)
|
||||
reports = [rep]
|
||||
if rep.passed:
|
||||
reports.append(call_and_report(item, "call", log))
|
||||
if item.config.option.setupshow:
|
||||
show_test_item(item)
|
||||
if not item.config.option.setuponly:
|
||||
reports.append(call_and_report(item, "call", log))
|
||||
reports.append(call_and_report(item, "teardown", log,
|
||||
nextitem=nextitem))
|
||||
# after all teardown hooks have been called
|
||||
|
@ -83,6 +86,16 @@ def runtestprotocol(item, log=True, nextitem=None):
|
|||
item.funcargs = None
|
||||
return reports
|
||||
|
||||
def show_test_item(item):
|
||||
"""Show test function, parameters and the fixtures of the test item."""
|
||||
tw = item.config.get_terminal_writer()
|
||||
tw.line()
|
||||
tw.write(' ' * 8)
|
||||
tw.write(item._nodeid)
|
||||
used_fixtures = sorted(item._fixtureinfo.name2fixturedefs.keys())
|
||||
if used_fixtures:
|
||||
tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures)))
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
item.session._setupstate.prepare(item)
|
||||
|
||||
|
@ -494,9 +507,13 @@ def importorskip(modname, minversion=None):
|
|||
"""
|
||||
__tracebackhide__ = True
|
||||
compile(modname, '', 'eval') # to catch syntaxerrors
|
||||
should_skip = False
|
||||
try:
|
||||
__import__(modname)
|
||||
except ImportError:
|
||||
# Do not raise chained exception here(#1485)
|
||||
should_skip = True
|
||||
if should_skip:
|
||||
skip("could not import %r" %(modname,))
|
||||
mod = sys.modules[modname]
|
||||
if minversion is None:
|
||||
|
|
|
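For context, a typical use of ``importorskip`` at module level looks like the following hedged sketch (``docutils`` is just an example of an optional dependency):

    import pytest

    # Skip every test in this module cleanly when the optional dependency is missing.
    docutils = pytest.importorskip("docutils", minversion="0.3")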
@ -0,0 +1,72 @@
|
|||
import pytest
|
||||
import sys
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("debugconfig")
|
||||
group.addoption('--setuponly', '--setup-only', action="store_true",
|
||||
help="only setup fixtures, don't execute the tests.")
|
||||
group.addoption('--setupshow', '--setup-show', action="store_true",
|
||||
help="show setup fixtures while executing the tests.")
|
||||
|
||||
|
||||
@pytest.hookimpl(hookwrapper=True)
|
||||
def pytest_fixture_setup(fixturedef, request):
|
||||
yield
|
||||
config = request.config
|
||||
if config.option.setupshow:
|
||||
if hasattr(request, 'param'):
|
||||
# Save the fixture parameter so ._show_fixture_action() can
|
||||
# display it now and during the teardown (in .finish()).
|
||||
if fixturedef.ids:
|
||||
if callable(fixturedef.ids):
|
||||
fixturedef.cached_param = fixturedef.ids(request.param)
|
||||
else:
|
||||
fixturedef.cached_param = fixturedef.ids[
|
||||
request.param_index]
|
||||
else:
|
||||
fixturedef.cached_param = request.param
|
||||
_show_fixture_action(fixturedef, 'SETUP')
|
||||
|
||||
|
||||
def pytest_fixture_post_finalizer(fixturedef):
|
||||
if hasattr(fixturedef, "cached_result"):
|
||||
config = fixturedef._fixturemanager.config
|
||||
if config.option.setupshow:
|
||||
_show_fixture_action(fixturedef, 'TEARDOWN')
|
||||
if hasattr(fixturedef, "cached_param"):
|
||||
del fixturedef.cached_param
|
||||
|
||||
|
||||
def _show_fixture_action(fixturedef, msg):
|
||||
config = fixturedef._fixturemanager.config
|
||||
capman = config.pluginmanager.getplugin('capturemanager')
|
||||
if capman:
|
||||
out, err = capman.suspendcapture()
|
||||
|
||||
tw = config.get_terminal_writer()
|
||||
tw.line()
|
||||
tw.write(' ' * 2 * fixturedef.scopenum)
|
||||
tw.write('{step} {scope} {fixture}'.format(
|
||||
step=msg.ljust(8), # align the output to TEARDOWN
|
||||
scope=fixturedef.scope[0].upper(),
|
||||
fixture=fixturedef.argname))
|
||||
|
||||
if msg == 'SETUP':
|
||||
deps = sorted(arg for arg in fixturedef.argnames if arg != 'request')
|
||||
if deps:
|
||||
tw.write(' (fixtures used: {0})'.format(', '.join(deps)))
|
||||
|
||||
if hasattr(fixturedef, 'cached_param'):
|
||||
tw.write('[{0}]'.format(fixturedef.cached_param))
|
||||
|
||||
if capman:
|
||||
capman.resumecapture()
|
||||
sys.stdout.write(out)
|
||||
sys.stderr.write(err)
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
|
||||
def pytest_cmdline_main(config):
|
||||
if config.option.setuponly:
|
||||
config.option.setupshow = True
|
|
@ -0,0 +1,23 @@
|
|||
import pytest
|
||||
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("debugconfig")
|
||||
group.addoption('--setupplan', '--setup-plan', action="store_true",
|
||||
help="show what fixtures and tests would be executed but "
|
||||
"don't execute anything.")
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
|
||||
def pytest_fixture_setup(fixturedef, request):
|
||||
# Will return a dummy fixture if the setupplan option is provided.
|
||||
if request.config.option.setupplan:
|
||||
fixturedef.cached_result = (None, None, None)
|
||||
return fixturedef.cached_result
|
||||
|
||||
|
||||
@pytest.hookimpl(tryfirst=True)
|
||||
def pytest_cmdline_main(config):
|
||||
if config.option.setupplan:
|
||||
config.option.setuponly = True
|
||||
config.option.setupshow = True
|
|
@ -1,89 +0,0 @@
|
|||
#! /usr/bin/env python
|
||||
|
||||
# Hi There!
|
||||
# You may be wondering what this giant blob of binary data here is, you might
|
||||
# even be worried that we're up to something nefarious (good for you for being
|
||||
# paranoid!). This is a base64 encoding of a zip file, this zip file contains
|
||||
# a fully functional basic pytest script.
|
||||
#
|
||||
# Pytest is a thing that tests packages, pytest itself is a package that some-
|
||||
# one might want to install, especially if they're looking to run tests inside
|
||||
# some package they want to install. Pytest has a lot of code to collect and
|
||||
# execute tests, and other such sort of "tribal knowledge" that has been en-
|
||||
# coded in its code base. Because of this we basically include a basic copy
|
||||
# of pytest inside this blob. We do this because it let's you as a maintainer
|
||||
# or application developer who wants people who don't deal with python much to
|
||||
# easily run tests without installing the complete pytest package.
|
||||
#
|
||||
# If you're wondering how this is created: you can create it yourself if you
|
||||
# have a complete pytest installation by using this command on the command-
|
||||
# line: ``py.test --genscript=runtests.py``.
|
||||
|
||||
sources = """
|
||||
@SOURCES@"""
|
||||
|
||||
import sys
|
||||
import base64
|
||||
import zlib
|
||||
|
||||
class DictImporter(object):
|
||||
def __init__(self, sources):
|
||||
self.sources = sources
|
||||
|
||||
def find_module(self, fullname, path=None):
|
||||
if fullname == "argparse" and sys.version_info >= (2,7):
|
||||
# we were generated with <python2.7 (which pulls in argparse)
|
||||
# but we are running now on a stdlib which has it, so use that.
|
||||
return None
|
||||
if fullname in self.sources:
|
||||
return self
|
||||
if fullname + '.__init__' in self.sources:
|
||||
return self
|
||||
return None
|
||||
|
||||
def load_module(self, fullname):
|
||||
# print "load_module:", fullname
|
||||
from types import ModuleType
|
||||
try:
|
||||
s = self.sources[fullname]
|
||||
is_pkg = False
|
||||
except KeyError:
|
||||
s = self.sources[fullname + '.__init__']
|
||||
is_pkg = True
|
||||
|
||||
co = compile(s, fullname, 'exec')
|
||||
module = sys.modules.setdefault(fullname, ModuleType(fullname))
|
||||
module.__file__ = "%s/%s" % (__file__, fullname)
|
||||
module.__loader__ = self
|
||||
if is_pkg:
|
||||
module.__path__ = [fullname]
|
||||
|
||||
do_exec(co, module.__dict__) # noqa
|
||||
return sys.modules[fullname]
|
||||
|
||||
def get_source(self, name):
|
||||
res = self.sources.get(name)
|
||||
if res is None:
|
||||
res = self.sources.get(name + '.__init__')
|
||||
return res
|
||||
|
||||
if __name__ == "__main__":
|
||||
try:
|
||||
import pkg_resources # noqa
|
||||
except ImportError:
|
||||
sys.stderr.write("ERROR: setuptools not installed\n")
|
||||
sys.exit(2)
|
||||
if sys.version_info >= (3, 0):
|
||||
exec("def do_exec(co, loc): exec(co, loc)\n")
|
||||
import pickle
|
||||
sources = sources.encode("ascii") # ensure bytes
|
||||
sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
|
||||
else:
|
||||
import cPickle as pickle
|
||||
exec("def do_exec(co, loc): exec co in loc\n")
|
||||
sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
|
||||
|
||||
importer = DictImporter(sources)
|
||||
sys.meta_path.insert(0, importer)
|
||||
entry = "@ENTRY@"
|
||||
do_exec(entry, locals()) # noqa
|
|
@ -32,9 +32,6 @@ def pytest_addoption(parser):
|
|||
group._addoption('-l', '--showlocals',
|
||||
action="store_true", dest="showlocals", default=False,
|
||||
help="show locals in tracebacks (disabled by default).")
|
||||
group._addoption('--report',
|
||||
action="store", dest="report", default=None, metavar="opts",
|
||||
help="(deprecated, use -r)")
|
||||
group._addoption('--tb', metavar="style",
|
||||
action="store", dest="tbstyle", default='auto',
|
||||
choices=['auto', 'long', 'short', 'no', 'line', 'native'],
|
||||
|
@ -59,17 +56,6 @@ def pytest_configure(config):
|
|||
|
||||
def getreportopt(config):
|
||||
reportopts = ""
|
||||
optvalue = config.option.report
|
||||
if optvalue:
|
||||
py.builtin.print_("DEPRECATED: use -r instead of --report option.",
|
||||
file=sys.stderr)
|
||||
if optvalue:
|
||||
for setting in optvalue.split(","):
|
||||
setting = setting.strip()
|
||||
if setting == "skipped":
|
||||
reportopts += "s"
|
||||
elif setting == "xfailed":
|
||||
reportopts += "x"
|
||||
reportchars = config.option.reportchars
|
||||
if not config.option.disablepytestwarnings and 'w' not in reportchars:
|
||||
reportchars += 'w'
|
||||
|
|
|
@ -3,7 +3,7 @@ import re
|
|||
|
||||
import pytest
|
||||
import py
|
||||
from _pytest.monkeypatch import monkeypatch
|
||||
from _pytest.monkeypatch import MonkeyPatch
|
||||
|
||||
|
||||
class TempdirFactory:
|
||||
|
@ -92,7 +92,7 @@ def pytest_configure(config):
|
|||
available at pytest_configure time, but ideally should be moved entirely
|
||||
to the tmpdir_factory session fixture.
|
||||
"""
|
||||
mp = monkeypatch()
|
||||
mp = MonkeyPatch()
|
||||
t = TempdirFactory(config)
|
||||
config._cleanup.extend([mp.undo, t.finish])
|
||||
mp.setattr(config, '_tmpdirhandler', t, raising=False)
|
||||
|
|
|
@ -50,6 +50,8 @@ class UnitTestCase(pytest.Class):
|
|||
foundsomething = False
|
||||
for name in loader.getTestCaseNames(self.obj):
|
||||
x = getattr(self.obj, name)
|
||||
if not getattr(x, '__test__', True):
|
||||
continue
|
||||
funcobj = getattr(x, 'im_func', x)
|
||||
transfer_markers(funcobj, cls, module)
|
||||
yield TestCaseFunction(name, parent=self)
|
||||
|
|
|
@ -5,6 +5,13 @@ environment:
|
|||
# using pytestbot account as detailed here:
|
||||
# https://www.appveyor.com/docs/build-configuration#secure-variables
|
||||
|
||||
matrix:
|
||||
# create multiple jobs to execute a set of tox runs on each; this is to workaround having
|
||||
# builds timing out in AppVeyor
|
||||
- TOXENV: "linting,py26,py27,py33,py34,py35,pypy"
|
||||
- TOXENV: "py27-pexpect,py27-xdist,py27-trial,py35-pexpect,py35-xdist,py35-trial"
|
||||
- TOXENV: "py27-nobyte,doctesting,py27-cxfreeze"
|
||||
|
||||
install:
|
||||
- echo Installed Pythons
|
||||
- dir c:\Python*
|
||||
|
|
|
@ -1,19 +1,5 @@
|
|||
{% extends "!layout.html" %}
|
||||
{% block header %}
|
||||
<div align="center" xmlns="http://www.w3.org/1999/html" style="background-color: lightgreen; padding: .5em">
|
||||
<h4>
|
||||
Want to help improve pytest? Please
|
||||
<a href="https://www.indiegogo.com/projects/python-testing-sprint-mid-2016#/">
|
||||
contribute to
|
||||
</a>
|
||||
or
|
||||
<a href="announce/sprint2016.html">
|
||||
join
|
||||
</a>
|
||||
our upcoming sprint in June 2016!
|
||||
|
||||
</h4>
|
||||
</div>
|
||||
{{super()}}
|
||||
{% endblock %}
|
||||
{% block footer %}
|
||||
|
|
|
@ -1,10 +1,5 @@
|
|||
<h3>Useful Links</h3>
|
||||
<ul>
|
||||
<li>
|
||||
<a href="https://www.indiegogo.com/projects/python-testing-sprint-mid-2016#/">
|
||||
<b>Sprint funding campaign</b>
|
||||
</a>
|
||||
</li>
|
||||
<li><a href="{{ pathto('index') }}">The pytest Website</a></li>
|
||||
<li><a href="{{ pathto('contributing') }}">Contribution Guide</a></li>
|
||||
<li><a href="https://pypi.python.org/pypi/pytest">pytest @ PyPI</a></li>
|
||||
|
|
|
@ -4,9 +4,9 @@ python testing sprint June 20th-26th 2016
|
|||
.. image:: ../img/freiburg2.jpg
|
||||
:width: 400
|
||||
|
||||
The pytest core group is heading towards the biggest sprint
|
||||
in its history, to take place in the black forest town Freiburg
|
||||
in Germany. As of February 2016 we have started a `funding
|
||||
The pytest core group held the biggest sprint
|
||||
in its history in June 2016, taking place in the black forest town Freiburg
|
||||
in Germany. In February 2016 we started a `funding
|
||||
campaign on Indiegogo to cover expenses
|
||||
<http://igg.me/at/pytest-sprint/x/4034848>`_ The page also mentions
|
||||
some preliminary topics:
|
||||
|
@ -35,71 +35,32 @@ some preliminary topics:
|
|||
Participants
|
||||
--------------
|
||||
|
||||
Here are preliminary participants who said they are likely to come,
|
||||
given some expenses funding::
|
||||
|
||||
Anatoly Bubenkoff, Netherlands
|
||||
Andreas Pelme, Personalkollen, Sweden
|
||||
Anthony Wang, Splunk, US
|
||||
Brianna Laugher, Australia
|
||||
Bruno Oliveira, Brazil
|
||||
Danielle Jenkins, Splunk, US
|
||||
Dave Hunt, UK
|
||||
Florian Bruhin, Switzerland
|
||||
Floris Bruynooghe, Cobe.io, UK
|
||||
Holger Krekel, merlinux, Germany
|
||||
Oliver Bestwalter, Avira, Germany
|
||||
Omar Kohl, Germany
|
||||
Raphael Pierzina, FanDuel, UK
|
||||
Tom Viner, UK
|
||||
|
||||
<your name here?>
|
||||
|
||||
Other contributors and experienced newcomers are invited to join as well
|
||||
but please send a mail to the pytest-dev mailing list if you intend to
|
||||
do so somewhat soon, also how much funding you need if so. And if you
|
||||
are working for a company and using pytest heavily you are welcome to
|
||||
join and we encourage your company to provide some funding for the
|
||||
sprint. They may see it, and rightfully so, as a very cheap and deep
|
||||
training which brings you together with the experts in the field :)
|
||||
Over 20 participants took part from 4 continents, including employees
|
||||
from Splunk, Personalkollen, Cobe.io, FanDuel and Dolby. Some newcomers
|
||||
mixed with developers who have worked on pytest since its beginning, and
|
||||
of course everyone in between.
|
||||
Ana Ribeiro, Brazil
|
||||
Ronny Pfannschmidt, Germany
|
||||
|
||||
|
||||
Sprint organisation, schedule
|
||||
-------------------------------
|
||||
|
||||
tentative schedule:
|
||||
People arrived in Freiburg on the 19th, with sprint development taking
|
||||
place on 20th, 21st, 22nd, 24th and 25th. On the 23rd we took a break
|
||||
day for some hot hiking in the Black Forest.
|
||||
|
||||
- 19/20th arrival in Freiburg
|
||||
- 20th social get together, initial hacking
|
||||
- 21/22th full sprint days
|
||||
- 23rd break day, hiking
|
||||
- 24/25th full sprint days
|
||||
- 26th departure
|
||||
Sprint activity was organised heavily around pairing, with plenty of group
|
||||
discussions to take advantage of the high bandwidth, and lightning talks
|
||||
as well.
|
||||
|
||||
We might adjust according to weather to make sure that if
|
||||
we do some hiking or excursion we'll have good weather.
|
||||
Freiburg is one of the sunniest places in Germany so
|
||||
it shouldn't be too much of a constraint.
|
||||
|
||||
|
||||
Accomodation
|
||||
----------------
|
||||
|
||||
We'll see to arrange for renting a flat with multiple
|
||||
beds/rooms. Hotels are usually below 100 per night.
|
||||
The earlier we book the better.
|
||||
|
||||
Money / funding
|
||||
---------------
|
||||
|
||||
The Indiegogo campaign asks for 11000 USD which should cover
|
||||
the costs for flights and accomodation, renting a sprint place
|
||||
and maybe a bit of food as well.
|
||||
|
||||
If your organisation wants to support the sprint but prefers
|
||||
to give money according to an invoice, get in contact with
|
||||
holger at http://merlinux.eu who can invoice your organisation
|
||||
properly.
|
||||
The Indiegogo campaign aimed for 11000 USD and in the end raised over
|
||||
12000, to reimburse travel costs, pay for a sprint venue and catering.
|
||||
|
||||
If we have excess money we'll use for further sprint/travel
|
||||
funding for pytest/tox contributors.
|
||||
Excess money is reserved for further sprint/travel funding for pytest/tox
|
||||
contributors.
|
||||
|
|
|
@ -24,7 +24,7 @@ following::
|
|||
to assert that your function returns a certain value. If this assertion fails
|
||||
you will see the return value of the function call::
|
||||
|
||||
$ py.test test_assert1.py
|
||||
$ pytest test_assert1.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -85,6 +85,15 @@ and if you need to have access to the actual exception info you may use::
|
|||
the actual exception raised. The main attributes of interest are
|
||||
``.type``, ``.value`` and ``.traceback``.
|
||||
|
||||
.. versionchanged:: 3.0
|
||||
|
||||
In the context manager form you may use the keyword argument
|
||||
``message`` to specify a custom failure message::
|
||||
|
||||
>>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
|
||||
... pass
|
||||
... Failed: Expecting ZeroDivisionError
|
||||
|
||||
If you want to write test code that works on Python 2.4 as well,
|
||||
you may also use two other ways to test for an expected exception::
|
||||
|
||||
|
@ -110,6 +119,24 @@ exceptions your own code is deliberately raising, whereas using
|
|||
like documenting unfixed bugs (where the test describes what "should" happen)
|
||||
or bugs in dependencies.
|
||||
|
||||
If you want to test that a regular expression matches on the string
|
||||
representation of an exception (like the ``TestCase.assertRaisesRegexp`` method
|
||||
from ``unittest``) you can use the ``ExceptionInfo.match`` method::
|
||||
|
||||
import pytest
|
||||
|
||||
def myfunc():
|
||||
raise ValueError("Exception 123 raised")
|
||||
|
||||
def test_match():
|
||||
with pytest.raises(ValueError) as excinfo:
|
||||
myfunc()
|
||||
excinfo.match(r'.* 123 .*')
|
||||
|
||||
The regexp parameter of the ``match`` method is matched with the ``re.search``
|
||||
function. So in the above example ``excinfo.match('123')`` would have worked as
|
||||
well.
|
||||
|
||||
|
||||
.. _`assertwarns`:
|
||||
|
||||
|
@ -141,7 +168,7 @@ when it encounters comparisons. For example::
|
|||
|
||||
if you run this module::
|
||||
|
||||
$ py.test test_assert2.py
|
||||
$ pytest test_assert2.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -210,7 +237,7 @@ now, given this test module::
|
|||
you can run the test module and get the custom output defined in
|
||||
the conftest file::
|
||||
|
||||
$ py.test -q test_foocompare.py
|
||||
$ pytest -q test_foocompare.py
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_compare ________
|
||||
|
@ -287,3 +314,6 @@ For further information, Benjamin Peterson wrote up `Behind the scenes of pytest
|
|||
.. versionchanged:: 2.1
|
||||
Introduce the ``--assert`` option. Deprecate ``--no-assert`` and
|
||||
``--nomagic``.
|
||||
|
||||
.. versionchanged:: 3.0
|
||||
Removes the ``--no-assert`` and ``--nomagic`` options.
|
||||
|
|
|
@ -18,11 +18,11 @@ For global activation of all argcomplete enabled python applications run::
|
|||
|
||||
For permanent (but not global) ``pytest`` activation, use::
|
||||
|
||||
register-python-argcomplete py.test >> ~/.bashrc
|
||||
register-python-argcomplete pytest >> ~/.bashrc
|
||||
|
||||
For one-time activation of argcomplete for ``pytest`` only, use::
|
||||
|
||||
eval "$(register-python-argcomplete py.test)"
|
||||
eval "$(register-python-argcomplete pytest)"
|
||||
|
||||
|
||||
|
||||
|
|
|
@ -35,6 +35,11 @@ Examples at :ref:`assertraises`.
|
|||
|
||||
.. autofunction:: deprecated_call
|
||||
|
||||
Comparing floating point numbers
|
||||
--------------------------------
|
||||
|
||||
.. autoclass:: approx
|
||||
|
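For context, a minimal sketch of using the new ``approx`` helper for tolerant floating point comparison:

    from pytest import approx

    def test_sum_is_close():
        assert 0.1 + 0.2 == approx(0.3)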
||||
Raising a specific test outcome
|
||||
--------------------------------------
|
||||
|
||||
|
@ -48,7 +53,7 @@ you can rather use declarative marks, see :ref:`skipping`.
|
|||
.. autofunction:: _pytest.skipping.xfail
|
||||
.. autofunction:: _pytest.runner.exit
|
||||
|
||||
fixtures and requests
|
||||
Fixtures and requests
|
||||
-----------------------------------------------------
|
||||
|
||||
To mark a fixture function:
|
||||
|
@ -72,7 +77,7 @@ Builtin fixtures/function arguments
|
|||
You can ask for available builtin or project-custom
|
||||
:ref:`fixtures <fixtures>` by typing::
|
||||
|
||||
$ py.test -q --fixtures
|
||||
$ pytest -q --fixtures
|
||||
cache
|
||||
Return a cache object that can persist state between testing sessions.
|
||||
|
||||
|
|
|
@ -15,7 +15,7 @@ Usage
|
|||
---------
|
||||
|
||||
The plugin provides two command line options to rerun failures from the
|
||||
last ``py.test`` invocation:
|
||||
last ``pytest`` invocation:
|
||||
|
||||
* ``--lf``, ``--last-failed`` - to only re-run the failures.
|
||||
* ``--ff``, ``--failed-first`` - to run the failures first and then the rest of
|
||||
|
@ -25,7 +25,7 @@ For cleanup (usually not needed), a ``--cache-clear`` option allows to remove
|
|||
all cross-session cache contents ahead of a test run.
|
||||
|
||||
Other plugins may access the `config.cache`_ object to set/get
|
||||
**json encodable** values between ``py.test`` invocations.
|
||||
**json encodable** values between ``pytest`` invocations.
|
||||
|
||||
.. note::
|
||||
|
||||
|
@ -49,7 +49,7 @@ First, let's create 50 test invocation of which only 2 fail::
|
|||
|
||||
If you run this for the first time you will see two failures::
|
||||
|
||||
$ py.test -q
|
||||
$ pytest -q
|
||||
.................F.......F........................
|
||||
======= FAILURES ========
|
||||
_______ test_num[17] ________
|
||||
|
@ -78,7 +78,7 @@ If you run this for the first time you will see two failures::
|
|||
|
||||
If you then run it with ``--lf``::
|
||||
|
||||
$ py.test --lf
|
||||
$ pytest --lf
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
run-last-failure: rerun last 2 failures
|
||||
|
@ -119,7 +119,7 @@ Now, if you run with the ``--ff`` option, all tests will be run but the first
|
|||
previous failures will be executed first (as can be seen from the series
|
||||
of ``FF`` and dots)::
|
||||
|
||||
$ py.test --ff
|
||||
$ pytest --ff
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
run-last-failure: rerun last 2 failures first
|
||||
|
@ -163,7 +163,7 @@ The new config.cache object
|
|||
Plugins or conftest.py support code can get a cached value using the
|
||||
pytest ``config`` object. Here is a basic example plugin which
|
||||
implements a :ref:`fixture` which re-uses previously created state
|
||||
across py.test invocations::
|
||||
across pytest invocations::
|
||||
|
||||
# content of test_caching.py
|
||||
import pytest
|
||||
|
@ -184,7 +184,7 @@ across py.test invocations::
|
|||
If you run this command once, it will take a while because
|
||||
of the sleep::
|
||||
|
||||
$ py.test -q
|
||||
$ pytest -q
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_function ________
|
||||
|
@ -201,7 +201,7 @@ of the sleep::
|
|||
If you run it a second time the value will be retrieved from
|
||||
the cache and this will be quick::
|
||||
|
||||
$ py.test -q
|
||||
$ pytest -q
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_function ________
|
||||
|
@ -250,7 +250,7 @@ Clearing Cache content
|
|||
You can instruct pytest to clear all cache files and values
|
||||
by adding the ``--cache-clear`` option like this::
|
||||
|
||||
py.test --cache-clear
|
||||
pytest --cache-clear
|
||||
|
||||
This is recommended for invocations from Continuous Integration
|
||||
servers where isolation and correctness is more important
|
||||
|
|
|
@ -36,9 +36,9 @@ There are two ways in which ``pytest`` can perform capturing:
|
|||
|
||||
You can influence output capturing mechanisms from the command line::
|
||||
|
||||
py.test -s # disable all capturing
|
||||
py.test --capture=sys # replace sys.stdout/stderr with in-mem files
|
||||
py.test --capture=fd # also point filedescriptors 1 and 2 to temp file
|
||||
pytest -s # disable all capturing
|
||||
pytest --capture=sys # replace sys.stdout/stderr with in-mem files
|
||||
pytest --capture=fd # also point filedescriptors 1 and 2 to temp file
|
||||
|
||||
.. _printdebugging:
|
||||
|
||||
|
@ -62,7 +62,7 @@ is that you can use print statements for debugging::
|
|||
and running this module will show you precisely the output
|
||||
of the failing function and hide the other one::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -115,4 +115,19 @@ same interface but allows to also capture output from
|
|||
libraries or subprocesses that directly write to operating
|
||||
system level output streams (FD1 and FD2).
|
||||
|
||||
|
||||
.. versionadded:: 3.0
|
||||
|
||||
To temporarily disable capture within a test, both ``capsys``
|
||||
and ``capfd`` have a ``disabled()`` method that can be used
|
||||
as a context manager, disabling capture inside the ``with`` block:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
def test_disabling_capturing(capsys):
|
||||
print('this output is captured')
|
||||
with capsys.disabled():
|
||||
print('output not captured, going directly to sys.stdout')
|
||||
print('this output is also captured')
|
||||
|
||||
.. include:: links.inc
|
||||
|
|
|
@ -7,7 +7,7 @@ Command line options and configuration file settings
|
|||
You can get help on command line options and values in INI-style
|
||||
configurations files by using the general help option::
|
||||
|
||||
py.test -h # prints options _and_ config file settings
|
||||
pytest -h # prints options _and_ config file settings
|
||||
|
||||
This will display command line and configuration file settings
|
||||
which were registered by installed plugins.
|
||||
|
@ -62,7 +62,7 @@ per-testrun information.
|
|||
|
||||
Example::
|
||||
|
||||
py.test path/to/testdir path/other/
|
||||
pytest path/to/testdir path/other/
|
||||
|
||||
will determine the common ancestor as ``path`` and then
|
||||
check for ini-files as follows::
|
||||
|
@ -126,9 +126,9 @@ Builtin configuration file options
|
|||
[pytest]
|
||||
addopts = --maxfail=2 -rf # exit after 2 failures, report fail info
|
||||
|
||||
issuing ``py.test test_hello.py`` actually means::
|
||||
issuing ``pytest test_hello.py`` actually means::
|
||||
|
||||
py.test --maxfail=2 -rf test_hello.py
|
||||
pytest --maxfail=2 -rf test_hello.py
|
||||
|
||||
Default is to add no options.
|
||||
|
||||
|
@ -144,7 +144,7 @@ Builtin configuration file options
|
|||
[seq] matches any character in seq
|
||||
[!seq] matches any char not in seq
|
||||
|
||||
Default patterns are ``'.*', 'CVS', '_darcs', '{arch}', '*.egg'``.
|
||||
Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg'``.
|
||||
Setting a ``norecursedirs`` replaces the default. Here is an example of
|
||||
how to avoid certain directories:
|
||||
|
||||
|
@ -218,7 +218,7 @@ Builtin configuration file options
|
|||
.. confval:: doctest_optionflags
|
||||
|
||||
One or more doctest flag names from the standard ``doctest`` module.
|
||||
:doc:`See how py.test handles doctests <doctest>`.
|
||||
:doc:`See how pytest handles doctests <doctest>`.
|
||||
|
||||
.. confval:: confcutdir
|
||||
|
||||
|
|
|
@ -6,7 +6,7 @@ By default all files matching the ``test*.txt`` pattern will
|
|||
be run through the python standard ``doctest`` module. You
|
||||
can change the pattern by issuing::
|
||||
|
||||
py.test --doctest-glob='*.rst'
|
||||
pytest --doctest-glob='*.rst'
|
||||
|
||||
on the command line. Since version ``2.9``, ``--doctest-glob``
|
||||
can be given multiple times in the command-line.
|
||||
|
@ -15,7 +15,7 @@ You can also trigger running of doctests
|
|||
from docstrings in all python modules (including regular
|
||||
python test modules)::
|
||||
|
||||
py.test --doctest-modules
|
||||
pytest --doctest-modules
|
||||
|
||||
You can make these changes permanent in your project by
|
||||
putting them into a pytest.ini file like this:
|
||||
|
@ -45,9 +45,9 @@ and another like this::
|
|||
"""
|
||||
return 42
|
||||
|
||||
then you can just invoke ``py.test`` without command line options::
|
||||
then you can just invoke ``pytest`` without command line options::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
|
@ -68,7 +68,7 @@ Also, :ref:`usefixtures` and :ref:`autouse` fixtures are supported
|
|||
when executing text doctest files.
|
||||
|
||||
The standard ``doctest`` module provides some setting flags to configure the
|
||||
strictness of doctest tests. In py.test You can enable those flags those flags
|
||||
strictness of doctest tests. In pytest you can enable those flags
|
||||
using the configuration file. To make pytest ignore trailing whitespaces and
|
||||
ignore lengthy exception stack traces you can just write:
|
||||
|
||||
|
@ -77,7 +77,7 @@ ignore lengthy exception stack traces you can just write:
|
|||
[pytest]
|
||||
doctest_optionflags= NORMALIZE_WHITESPACE IGNORE_EXCEPTION_DETAIL
|
||||
|
||||
py.test also introduces new options to allow doctests to run in Python 2 and
|
||||
pytest also introduces new options to allow doctests to run in Python 2 and
|
||||
Python 3 unchanged:
|
||||
|
||||
* ``ALLOW_UNICODE``: when enabled, the ``u`` prefix is stripped from unicode
|
||||
|
@ -102,4 +102,31 @@ itself::
|
|||
>>> get_unicode_greeting() # doctest: +ALLOW_UNICODE
|
||||
'Hello'
|
||||
|
||||
The 'doctest_namespace' fixture
|
||||
-------------------------------
|
||||
|
||||
.. versionadded:: 3.0
|
||||
|
||||
The ``doctest_namespace`` fixture can be used to inject items into the
|
||||
namespace in which your doctests run. It is intended to be used within
|
||||
your own fixtures to provide the tests that use them with context.
|
||||
|
||||
``doctest_namespace`` is a standard ``dict`` object into which you
|
||||
place the objects you want to appear in the doctest namespace::
|
||||
|
||||
# content of conftest.py
|
||||
import numpy
|
||||
@pytest.fixture(autouse=True)
|
||||
def add_np(doctest_namespace):
|
||||
doctest_namespace['np'] = numpy
|
||||
|
||||
which can then be used in your doctests directly::
|
||||
|
||||
# content of numpy.py
|
||||
def arange():
|
||||
"""
|
||||
>>> a = np.arange(10)
|
||||
>>> len(a)
|
||||
10
|
||||
"""
|
||||
pass
|
||||
|
|
|
@ -4,8 +4,8 @@ import pytest
|
|||
@pytest.fixture("session")
|
||||
def setup(request):
|
||||
setup = CostlySetup()
|
||||
request.addfinalizer(setup.finalize)
|
||||
return setup
|
||||
yield setup
|
||||
setup.finalize()
|
||||
|
||||
class CostlySetup:
|
||||
def __init__(self):
|
||||
|
|
|
@ -29,7 +29,7 @@ You can "mark" a test function with custom metadata like this::
|
|||
|
||||
You can then restrict a test run to only run tests marked with ``webtest``::
|
||||
|
||||
$ py.test -v -m webtest
|
||||
$ pytest -v -m webtest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -43,7 +43,7 @@ You can then restrict a test run to only run tests marked with ``webtest``::
|
|||
|
||||
Or the inverse, running all tests except the webtest ones::
|
||||
|
||||
$ py.test -v -m "not webtest"
|
||||
$ pytest -v -m "not webtest"
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -64,7 +64,7 @@ You can provide one or more :ref:`node IDs <node-id>` as positional
|
|||
arguments to select only specified tests. This makes it easy to select
|
||||
tests based on their module, class, method, or function name::
|
||||
|
||||
$ py.test -v test_server.py::TestClass::test_method
|
||||
$ pytest -v test_server.py::TestClass::test_method
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -77,7 +77,7 @@ tests based on their module, class, method, or function name::
|
|||
|
||||
You can also select on the class::
|
||||
|
||||
$ py.test -v test_server.py::TestClass
|
||||
$ pytest -v test_server.py::TestClass
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -90,7 +90,7 @@ You can also select on the class::
|
|||
|
||||
Or select multiple nodes::
|
||||
|
||||
$ py.test -v test_server.py::TestClass test_server.py::test_send_http
|
||||
$ pytest -v test_server.py::TestClass test_server.py::test_send_http
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -115,8 +115,8 @@ Or select multiple nodes::
|
|||
``module.py::function[param]``.
|
||||
|
||||
Node IDs for failing tests are displayed in the test summary info
|
||||
when running py.test with the ``-rf`` option. You can also
|
||||
construct Node IDs from the output of ``py.test --collectonly``.
|
||||
when running pytest with the ``-rf`` option. You can also
|
||||
construct Node IDs from the output of ``pytest --collectonly``.
|
||||
|
||||
Using ``-k expr`` to select tests based on their name
|
||||
-------------------------------------------------------
|
||||
|
@ -128,7 +128,7 @@ which implements a substring match on the test names instead of the
|
|||
exact match on markers that ``-m`` provides. This makes it easy to
|
||||
select tests based on their names::
|
||||
|
||||
$ py.test -v -k http # running with the above defined example module
|
||||
$ pytest -v -k http # running with the above defined example module
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -142,7 +142,7 @@ select tests based on their names::
|
|||
|
||||
And you can also run all tests except the ones that match the keyword::
|
||||
|
||||
$ py.test -k "not send_http" -v
|
||||
$ pytest -k "not send_http" -v
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -158,7 +158,7 @@ And you can also run all tests except the ones that match the keyword::
|
|||
|
||||
Or to select "http" and "quick" tests::
|
||||
|
||||
$ py.test -k "http or quick" -v
|
||||
$ pytest -k "http or quick" -v
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -198,7 +198,7 @@ Registering markers for your test suite is simple::
|
|||
|
||||
You can ask which markers exist for your test suite - the list includes our just defined ``webtest`` markers::
|
||||
|
||||
$ py.test --markers
|
||||
$ pytest --markers
|
||||
@pytest.mark.webtest: mark a test as a webtest.
|
||||
|
||||
@pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test.
|
||||
|
@ -225,7 +225,7 @@ For an example on how to add and work with markers from a plugin, see
|
|||
|
||||
* there is one place in your test suite defining your markers
|
||||
|
||||
* asking for existing markers via ``py.test --markers`` gives good output
|
||||
* asking for existing markers via ``pytest --markers`` gives good output
|
||||
|
||||
* typos in function markers are treated as an error if you use
|
||||
the ``--strict`` option. Future versions of ``pytest`` are probably
|
||||
|
@ -350,7 +350,7 @@ A test file using this local plugin::
|
|||
and an example invocation specifying a different environment than what
|
||||
the test needs::
|
||||
|
||||
$ py.test -E stage2
|
||||
$ pytest -E stage2
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -362,7 +362,7 @@ the test needs::
|
|||
|
||||
and here is one that specifies exactly the environment needed::
|
||||
|
||||
$ py.test -E stage1
|
||||
$ pytest -E stage1
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -374,7 +374,7 @@ and here is one that specifies exactly the environment needed::
|
|||
|
||||
The ``--markers`` option always gives you a list of available markers::
|
||||
|
||||
$ py.test --markers
|
||||
$ pytest --markers
|
||||
@pytest.mark.env(name): mark test to run only on named environment
|
||||
|
||||
@pytest.mark.skip(reason=None): skip the given test function with an optional reason. Example: skip(reason="no way of currently testing this") skips the test.
|
||||
|
@ -427,7 +427,7 @@ test function. From a conftest file we can read it like this::
|
|||
|
||||
Let's run this without capturing output and see what we get::
|
||||
|
||||
$ py.test -q -s
|
||||
$ pytest -q -s
|
||||
glob args=('function',) kwargs={'x': 3}
|
||||
glob args=('class',) kwargs={'x': 2}
|
||||
glob args=('module',) kwargs={'x': 1}
|
||||
|
@ -483,7 +483,7 @@ Let's do a little test file to show how this looks like::
|
|||
|
||||
then you will see two test skipped and two executed tests as expected::
|
||||
|
||||
$ py.test -rs # this option reports skip reasons
|
||||
$ pytest -rs # this option reports skip reasons
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -497,7 +497,7 @@ then you will see two test skipped and two executed tests as expected::
|
|||
|
||||
Note that if you specify a platform via the marker-command line option like this::
|
||||
|
||||
$ py.test -m linux2
|
||||
$ pytest -m linux2
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -549,7 +549,7 @@ We want to dynamically define two markers and can do it in a
|
|||
|
||||
We can now use the ``-m option`` to select one set::
|
||||
|
||||
$ py.test -m interface --tb=short
|
||||
$ pytest -m interface --tb=short
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -571,7 +571,7 @@ We can now use the ``-m option`` to select one set::
|
|||
|
||||
or to select both "event" and "interface" tests::
|
||||
|
||||
$ py.test -m "interface or event" --tb=short
|
||||
$ pytest -m "interface or event" --tb=short
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
|
|
@ -25,7 +25,7 @@ You can create a simple example file:
|
|||
and if you installed `PyYAML`_ or a compatible YAML-parser you can
|
||||
now execute the test specification::
|
||||
|
||||
nonpython $ py.test test_simple.yml
|
||||
nonpython $ pytest test_simple.yml
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
|
||||
|
@ -57,7 +57,7 @@ your own domain specific testing language this way.
|
|||
``reportinfo()`` is used for representing the test location and is also
|
||||
consulted when reporting in ``verbose`` mode::
|
||||
|
||||
nonpython $ py.test -v
|
||||
nonpython $ pytest -v
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -79,7 +79,7 @@ consulted when reporting in ``verbose`` mode::
|
|||
While developing your custom test collection and execution it's also
|
||||
interesting to just look at the collection tree::
|
||||
|
||||
nonpython $ py.test --collect-only
|
||||
nonpython $ pytest --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR/nonpython, inifile:
|
||||
|
|
|
@ -44,14 +44,14 @@ Now we add a test configuration like this::
|
|||
|
||||
This means that we only run 2 tests if we do not pass ``--all``::
|
||||
|
||||
$ py.test -q test_compute.py
|
||||
$ pytest -q test_compute.py
|
||||
..
|
||||
2 passed in 0.12 seconds
|
||||
|
||||
We run only two computations, so we see two dots.
|
||||
Let's run the full monty::
|
||||
|
||||
$ py.test -q --all
|
||||
$ pytest -q --all
|
||||
....F
|
||||
======= FAILURES ========
|
||||
_______ test_compute[4] ________
|
||||
|
@ -128,7 +128,7 @@ label generated by ``idfn``, but because we didn't generate a label for ``timede
|
|||
objects, they are still using the default pytest representation::
|
||||
|
||||
|
||||
$ py.test test_time.py --collect-only
|
||||
$ pytest test_time.py --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -179,7 +179,7 @@ only have to work a bit to construct the correct arguments for pytest's
|
|||
|
||||
this is a fully self-contained example which you can run with::
|
||||
|
||||
$ py.test test_scenarios.py
|
||||
$ pytest test_scenarios.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -192,7 +192,7 @@ this is a fully self-contained example which you can run with::
|
|||
If you just collect tests you'll also nicely see 'advanced' and 'basic' as variants for the test function::
|
||||
|
||||
|
||||
$ py.test --collect-only test_scenarios.py
|
||||
$ pytest --collect-only test_scenarios.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -257,7 +257,7 @@ creates a database object for the actual test invocations::
|
|||
|
||||
Let's first see how it looks like at collection time::
|
||||
|
||||
$ py.test test_backends.py --collect-only
|
||||
$ pytest test_backends.py --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -270,7 +270,7 @@ Let's first see how it looks like at collection time::
|
|||
|
||||
And then when we run the test::
|
||||
|
||||
$ py.test -q test_backends.py
|
||||
$ pytest -q test_backends.py
|
||||
.F
|
||||
======= FAILURES ========
|
||||
_______ test_db_initialized[d2] ________
|
||||
|
@ -318,7 +318,7 @@ will be passed to respective fixture function::
|
|||
|
||||
The result of this test will be successful::
|
||||
|
||||
$ py.test test_indirect_list.py --collect-only
|
||||
$ pytest test_indirect_list.py --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -366,7 +366,7 @@ parametrizer`_ but in a lot less code::
|
|||
Our test generator looks up a class-level definition which specifies which
|
||||
argument sets to use for each test function. Let's run it::
|
||||
|
||||
$ py.test -q
|
||||
$ pytest -q
|
||||
F..
|
||||
======= FAILURES ========
|
||||
_______ TestClass.test_equals[1-2] ________
|
||||
|
@ -396,7 +396,7 @@ is to be run with different sets of arguments for its three arguments:
|
|||
|
||||
Running it results in some skips if we don't have all the Python interpreters installed and otherwise runs all combinations (5 interpreters times 5 interpreters times 3 objects to serialize/deserialize)::
|
||||
|
||||
. $ py.test -rs -q multipython.py
|
||||
. $ pytest -rs -q multipython.py
|
||||
...........................
|
||||
27 passed in 0.12 seconds
|
||||
|
||||
|
@ -443,7 +443,7 @@ And finally a little test module::
|
|||
|
||||
If you run this with reporting for skips enabled::
|
||||
|
||||
$ py.test -rs test_module.py
|
||||
$ pytest -rs test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
|
||||
# run this with $ py.test --collect-only test_collectonly.py
|
||||
# run this with $ pytest --collect-only test_collectonly.py
|
||||
#
|
||||
def test_function():
|
||||
pass
|
||||
|
|
|
@ -80,7 +80,7 @@ that match ``*_check``. For example, if we have::
|
|||
|
||||
then the test collection looks like this::
|
||||
|
||||
$ py.test --collect-only
|
||||
$ pytest --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: setup.cfg
|
||||
|
@ -107,7 +107,7 @@ interpreting arguments as python package names, deriving
|
|||
their file system path and then running the test. For
|
||||
example if you have unittest2 installed you can type::
|
||||
|
||||
py.test --pyargs unittest2.test.test_skipping -q
|
||||
pytest --pyargs unittest2.test.test_skipping -q
|
||||
|
||||
which would run the respective test module. Like with
|
||||
other options, through an ini-file and the :confval:`addopts` option you
|
||||
|
@ -117,7 +117,7 @@ can make this change more permanently::
|
|||
[pytest]
|
||||
addopts = --pyargs
|
||||
|
||||
Now a simple invocation of ``py.test NAME`` will check
|
||||
Now a simple invocation of ``pytest NAME`` will check
|
||||
if NAME exists as an importable package/module and otherwise
|
||||
treat it as a filesystem path.
|
||||
|
||||
|
@ -126,7 +126,7 @@ Finding out what is collected
|
|||
|
||||
You can always peek at the collection tree without running tests like this::
|
||||
|
||||
. $ py.test --collect-only pythoncollection.py
|
||||
. $ pytest --collect-only pythoncollection.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
|
@ -180,7 +180,7 @@ and a setup.py dummy file like this::
|
|||
then a pytest run on Python2 will find the one test and will leave out the
|
||||
setup.py file::
|
||||
|
||||
#$ py.test --collect-only
|
||||
#$ pytest --collect-only
|
||||
====== test session starts ======
|
||||
platform linux2 -- Python 2.7.10, pytest-2.9.1, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
|
@ -193,7 +193,7 @@ setup.py file::
|
|||
If you run with a Python3 interpreter both the one test and the setup.py file
|
||||
will be left out::
|
||||
|
||||
$ py.test --collect-only
|
||||
$ pytest --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini
|
||||
|
|
|
@ -11,7 +11,7 @@ get on the terminal - we are working on that):
|
|||
|
||||
.. code-block:: python
|
||||
|
||||
assertion $ py.test failure_demo.py
|
||||
assertion $ pytest failure_demo.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR/assertion, inifile:
|
||||
|
|
|
@ -37,7 +37,7 @@ provide the ``cmdopt`` through a :ref:`fixture function <fixture function>`::
|
|||
|
||||
Let's run this without supplying our new option::
|
||||
|
||||
$ py.test -q test_sample.py
|
||||
$ pytest -q test_sample.py
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_answer ________
|
||||
|
@ -59,7 +59,7 @@ Let's run this without supplying our new option::
|
|||
|
||||
And now with supplying a command line option::
|
||||
|
||||
$ py.test -q --cmdopt=type2
|
||||
$ pytest -q --cmdopt=type2
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_answer ________
|
||||
|
@ -106,7 +106,7 @@ you will now always perform test runs using a number
|
|||
of subprocesses close to your CPU. Running in an empty
|
||||
directory with the above conftest.py::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -154,7 +154,7 @@ We can now write a test module like this::
|
|||
|
||||
and when running it you will see a skipped "slow" test::
|
||||
|
||||
$ py.test -rs # "-rs" means report details on the little 's'
|
||||
$ pytest -rs # "-rs" means report details on the little 's'
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -168,7 +168,7 @@ and when running it will see a skipped "slow" test::
|
|||
|
||||
Or run it including the ``slow`` marked test::
|
||||
|
||||
$ py.test --runslow
|
||||
$ pytest --runslow
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -204,7 +204,7 @@ of tracebacks: the ``checkconfig`` function will not be shown
|
|||
unless the ``--full-trace`` command line option is specified.
|
||||
Let's run our little function::
|
||||
|
||||
$ py.test -q test_checkconfig.py
|
||||
$ pytest -q test_checkconfig.py
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_something ________
|
||||
|
@ -216,6 +216,28 @@ Let's run our little function::
|
|||
test_checkconfig.py:8: Failed
|
||||
1 failed in 0.12 seconds
|
||||
|
||||
If you only want to hide certain exceptions, you can set ``__tracebackhide__``
to a callable which gets the ``ExceptionInfo`` object. You can for example use
this to make sure unexpected exception types aren't hidden::

    import operator
    import pytest

    class ConfigException(Exception):
        pass

    def checkconfig(x):
        __tracebackhide__ = operator.methodcaller('errisinstance', ConfigException)
        if not hasattr(x, "config"):
            raise ConfigException("not configured: %s" % (x,))

    def test_something():
        checkconfig(42)

This will avoid hiding the exception traceback on unrelated exceptions (i.e.
bugs in assertion helpers).
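
For readers who find ``operator.methodcaller`` a bit opaque, the same hook can
be written with a plain callable. This is a minimal sketch (not part of the
original text) that assumes the ``ConfigException`` class from the snippet
above; the callable receives the ``ExceptionInfo`` object and returns ``True``
to hide the frame::

    def checkconfig(x):
        # hide this frame only for ConfigException; unrelated bugs stay visible
        __tracebackhide__ = lambda excinfo: excinfo.errisinstance(ConfigException)
        if not hasattr(x, "config"):
            raise ConfigException("not configured: %s" % (x,))
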
Detect if running from within a pytest run
|
||||
--------------------------------------------------------------
|
||||
|
||||
|
@ -260,7 +282,7 @@ It's easy to present extra information in a ``pytest`` run::
|
|||
|
||||
which will add the string to the test header accordingly::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
project deps: mylib-1.1
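
The extra "project deps" line above comes from a ``pytest_report_header`` hook
in a ``conftest.py`` that is not shown in this hunk; it presumably looks
roughly like this minimal sketch::

    # content of conftest.py (sketch, not the elided original)
    def pytest_report_header(config):
        # any string returned here is added to the test session header
        return "project deps: mylib-1.1"
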
|
||||
|
@ -284,7 +306,7 @@ you present more information appropriately::
|
|||
|
||||
which will add info only when run with "-v"::
|
||||
|
||||
$ py.test -v
|
||||
$ pytest -v
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -297,7 +319,7 @@ which will add info only when run with "--v"::
|
|||
|
||||
and nothing when run plainly::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -330,7 +352,7 @@ out which tests are the slowest. Let's make an artificial test suite::
|
|||
|
||||
Now we can profile which test functions execute the slowest::
|
||||
|
||||
$ py.test --durations=3
|
||||
$ pytest --durations=3
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -392,7 +414,7 @@ tests in a class. Here is a test module example::
|
|||
|
||||
If we run this::
|
||||
|
||||
$ py.test -rx
|
||||
$ pytest -rx
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -463,7 +485,7 @@ the ``db`` fixture::
|
|||
|
||||
We can run this::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -480,7 +502,7 @@ We can run this::
|
|||
def test_root(db): # no db here, will error out
|
||||
fixture 'db' not found
|
||||
available fixtures: tmpdir_factory, cache, tmpdir, pytestconfig, recwarn, monkeypatch, capfd, record_xml_property, capsys
|
||||
use 'py.test --fixtures [testpath]' for help on them.
|
||||
use 'pytest --fixtures [testpath]' for help on them.
|
||||
|
||||
$REGENDOC_TMPDIR/b/test_error.py:1
|
||||
======= FAILURES ========
|
||||
|
@ -567,7 +589,7 @@ if you then have failing tests::
|
|||
|
||||
and run them::
|
||||
|
||||
$ py.test test_module.py
|
||||
$ pytest test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -626,15 +648,14 @@ here is a little example implemented via a local plugin::
|
|||
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
def fin():
|
||||
# request.node is an "item" because we use the default
|
||||
# "function" scope
|
||||
if request.node.rep_setup.failed:
|
||||
print ("setting up a test failed!", request.node.nodeid)
|
||||
elif request.node.rep_setup.passed:
|
||||
if request.node.rep_call.failed:
|
||||
print ("executing test failed", request.node.nodeid)
|
||||
request.addfinalizer(fin)
|
||||
yield
|
||||
# request.node is an "item" because we use the default
|
||||
# "function" scope
|
||||
if request.node.rep_setup.failed:
|
||||
print ("setting up a test failed!", request.node.nodeid)
|
||||
elif request.node.rep_setup.passed:
|
||||
if request.node.rep_call.failed:
|
||||
print ("executing test failed", request.node.nodeid)
|
||||
|
||||
|
||||
if you then have failing tests::
|
||||
|
@ -658,7 +679,7 @@ if you then have failing tests::
|
|||
|
||||
and run it::
|
||||
|
||||
$ py.test -s test_module.py
|
||||
$ pytest -s test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -746,6 +767,6 @@ over to ``pytest`` instead. For example::
|
|||
...
|
||||
|
||||
This makes it convenient to execute your tests from within your frozen
|
||||
application, using standard ``py.test`` command-line options::
|
||||
application, using standard ``pytest`` command-line options::
|
||||
|
||||
./app_main --pytest --verbose --tb=long --junitxml=results.xml test-suite/
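
The dispatching code inside the frozen application is elided above; a minimal
sketch of how such an entry point might hand the remaining arguments over to
pytest (hypothetical file name ``app_main.py``, not taken from the original
example)::

    # content of app_main.py (sketch)
    import sys

    if len(sys.argv) > 1 and sys.argv[1] == "--pytest":
        import pytest
        sys.exit(pytest.main(sys.argv[2:]))
    else:
        # hypothetical normal startup of the frozen application would go here
        print("running the application")
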
|
||||
|
|
|
@ -59,7 +59,7 @@ will be called ahead of running any tests::
|
|||
|
||||
If you run this without output capturing::
|
||||
|
||||
$ py.test -q -s test_module.py
|
||||
$ pytest -q -s test_module.py
|
||||
callattr_ahead_of_alltests called
|
||||
callme called!
|
||||
callme other called
|
||||
|
|
|
@ -81,18 +81,17 @@ You can also turn off all assertion interaction using the
|
|||
.. _`py/__init__.py`: http://bitbucket.org/hpk42/py-trunk/src/trunk/py/__init__.py
|
||||
|
||||
|
||||
Why a ``py.test`` instead of a ``pytest`` command?
|
||||
++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
Why can I use both ``pytest`` and ``py.test`` commands?
|
||||
+++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
Some of the reasons are historic, others are practical. ``pytest``
|
||||
used to be part of the ``py`` package which provided several developer
|
||||
utilities, all starting with ``py.<TAB>``, thus providing nice
|
||||
TAB-completion. If
|
||||
you install ``pip install pycmd`` you get these tools from a separate
|
||||
package. These days the command line tool could be called ``pytest``
|
||||
but since many people have gotten used to the old name and there
|
||||
is another tool named "pytest" we just decided to stick with
|
||||
``py.test`` for now.
|
||||
pytest used to be part of the py package, which provided several developer
|
||||
utilities, all starting with ``py.<TAB>``, thus providing nice TAB-completion.
|
||||
If you run ``pip install pycmd`` you get these tools from a separate
|
||||
package. Once ``pytest`` became a separate package, the ``py.test`` name was
|
||||
retained to avoid a naming conflict with another tool. This conflict was
|
||||
eventually resolved, and the ``pytest`` command was therefore introduced. In
|
||||
future versions of pytest, we may deprecate and later remove the ``py.test``
|
||||
command to avoid perpetuating the confusion.
|
||||
|
||||
pytest fixtures, parametrized tests
|
||||
-------------------------------------------------------
|
||||
|
|
|
@ -34,11 +34,6 @@ both styles, moving incrementally from classic to new style, as you
|
|||
prefer. You can also start out from existing :ref:`unittest.TestCase
|
||||
style <unittest.TestCase>` or :ref:`nose based <nosestyle>` projects.
|
||||
|
||||
.. note::
|
||||
|
||||
pytest-2.4 introduced an additional :ref:`yield fixture mechanism
|
||||
<yieldfixture>` for easier context manager integration and more linear
|
||||
writing of teardown code.
|
||||
|
||||
.. _`funcargs`:
|
||||
.. _`funcarg mechanism`:
|
||||
|
@ -73,7 +68,7 @@ Here, the ``test_ehlo`` needs the ``smtp`` fixture value. pytest
|
|||
will discover and call the :py:func:`@pytest.fixture <_pytest.python.fixture>`
|
||||
marked ``smtp`` fixture function. Running the test looks like this::
|
||||
|
||||
$ py.test test_smtpsimple.py
|
||||
$ pytest test_smtpsimple.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -118,7 +113,7 @@ with a list of available function arguments.
|
|||
|
||||
You can always issue::
|
||||
|
||||
py.test --fixtures test_simplefactory.py
|
||||
pytest --fixtures test_simplefactory.py
|
||||
|
||||
to see available fixtures.
|
||||
|
||||
|
@ -191,7 +186,7 @@ function (in or below the directory where ``conftest.py`` is located)::
|
|||
We deliberately insert failing ``assert 0`` statements in order to
|
||||
inspect what is going on and can now run the tests::
|
||||
|
||||
$ py.test test_module.py
|
||||
$ pytest test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -247,9 +242,8 @@ Fixture finalization / executing teardown code
|
|||
-------------------------------------------------------------
|
||||
|
||||
pytest supports execution of fixture specific finalization code
|
||||
when the fixture goes out of scope. By accepting a ``request`` object
|
||||
into your fixture function you can call its ``request.addfinalizer`` one
|
||||
or multiple times::
|
||||
when the fixture goes out of scope. By using a ``yield`` statement instead of ``return``, all
|
||||
the code after the *yield* statement serves as the teardown code::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
|
@ -259,18 +253,16 @@ or multiple times::
|
|||
@pytest.fixture(scope="module")
|
||||
def smtp(request):
|
||||
smtp = smtplib.SMTP("smtp.gmail.com")
|
||||
def fin():
|
||||
print ("teardown smtp")
|
||||
smtp.close()
|
||||
request.addfinalizer(fin)
|
||||
return smtp # provide the fixture value
|
||||
yield smtp # provide the fixture value
|
||||
print("teardown smtp")
|
||||
smtp.close()
|
||||
|
||||
The ``fin`` function will execute when the last test using
|
||||
the fixture in the module has finished execution.
|
||||
The ``print`` and ``smtp.close()`` statements will execute when the last test using
|
||||
the fixture in the module has finished execution, regardless of the exception status of the tests.
|
||||
|
||||
Let's execute it::
|
||||
|
||||
$ py.test -s -q --tb=no
|
||||
$ pytest -s -q --tb=no
|
||||
FFteardown smtp
|
||||
|
||||
2 failed in 0.12 seconds
|
||||
|
@ -282,14 +274,55 @@ occur around each single test. In either case the test
|
|||
module itself does not need to change or know about these details
|
||||
of fixture setup.
|
||||
|
||||
Note that we can also seamlessly use the ``yield`` syntax with ``with`` statements::
|
||||
|
||||
Finalization/teardown with yield fixtures
|
||||
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
|
||||
# content of test_yield2.py
|
||||
|
||||
Another alternative to the *request.addfinalizer()* method is to use *yield
|
||||
fixtures*. All the code after the *yield* statement serves as the teardown
|
||||
code. See the :ref:`yield fixture documentation <yieldfixture>`.
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def passwd():
|
||||
with open("/etc/passwd") as f:
|
||||
yield f.readlines()
|
||||
|
||||
def test_has_lines(passwd):
|
||||
assert len(passwd) >= 1
|
||||
|
||||
The file ``f`` will be closed after the test finished execution
|
||||
because the Python ``file`` object supports finalization when
|
||||
the ``with`` statement ends.
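
The same pattern works for resources that are not context managers; here is a
small sketch (assumed, not part of the original docs) that drives setup and
teardown of a temporary directory from a single yield fixture::

    import shutil
    import tempfile

    import pytest

    @pytest.fixture
    def tmp_workdir():
        path = tempfile.mkdtemp()   # setup
        yield path                  # the test runs here
        shutil.rmtree(path)         # teardown, runs after the test finished
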
.. note::
|
||||
Prior to version 2.10, in order to use a ``yield`` statement to execute teardown code one
had to mark a fixture using the ``yield_fixture`` decorator. From 2.10 onward, normal
fixtures can use ``yield`` directly, so the ``yield_fixture`` decorator is no longer needed
and is considered deprecated.
|
||||
|
||||
.. note::
|
||||
As a historical note, another way to write teardown code is
by accepting a ``request`` object into your fixture function and calling its
``request.addfinalizer`` one or multiple times::
|
||||
|
||||
# content of conftest.py
|
||||
|
||||
import smtplib
|
||||
import pytest
|
||||
|
||||
@pytest.fixture(scope="module")
|
||||
def smtp(request):
|
||||
smtp = smtplib.SMTP("smtp.gmail.com")
|
||||
def fin():
|
||||
print ("teardown smtp")
|
||||
smtp.close()
|
||||
request.addfinalizer(fin)
|
||||
return smtp # provide the fixture value
|
||||
|
||||
The ``fin`` function will execute when the last test using
|
||||
the fixture in the module has finished execution.
|
||||
|
||||
This method is still fully supported, but ``yield`` is recommended from 2.10 onward because
|
||||
it is considered simpler and better describes the natural code flow.
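
Because ``request.addfinalizer`` may be called one or multiple times, several
independent finalizers can be registered; a sketch (assumed, reusing the smtp
example) where finalizers run in reverse order of registration::

    import smtplib
    import pytest

    @pytest.fixture(scope="module")
    def smtp(request):
        smtp = smtplib.SMTP("smtp.gmail.com")
        def report():
            print ("teardown smtp")
        request.addfinalizer(report)      # runs second
        request.addfinalizer(smtp.close)  # runs first (registered last)
        return smtp
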
.. _`request-context`:
|
||||
|
||||
|
@ -309,18 +342,15 @@ read an optional server URL from the test module which uses our fixture::
|
|||
def smtp(request):
|
||||
server = getattr(request.module, "smtpserver", "smtp.gmail.com")
|
||||
smtp = smtplib.SMTP(server)
|
||||
|
||||
def fin():
|
||||
print ("finalizing %s (%s)" % (smtp, server))
|
||||
smtp.close()
|
||||
request.addfinalizer(fin)
|
||||
return smtp
|
||||
yield smtp
|
||||
print ("finalizing %s (%s)" % (smtp, server))
|
||||
smtp.close()
|
||||
|
||||
We use the ``request.module`` attribute to optionally obtain an
|
||||
``smtpserver`` attribute from the test module. If we just execute
|
||||
again, nothing much has changed::
|
||||
|
||||
$ py.test -s -q --tb=no
|
||||
$ pytest -s -q --tb=no
|
||||
FFfinalizing <smtplib.SMTP object at 0xdeadbeef> (smtp.gmail.com)
|
||||
|
||||
2 failed in 0.12 seconds
|
||||
|
@ -337,7 +367,7 @@ server URL in its module namespace::
|
|||
|
||||
Running it::
|
||||
|
||||
$ py.test -qq --tb=short test_anothersmtp.py
|
||||
$ pytest -qq --tb=short test_anothersmtp.py
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_showhelo ________
|
||||
|
@ -351,7 +381,7 @@ from the module namespace.
|
|||
|
||||
.. _`fixture-parametrize`:
|
||||
|
||||
Parametrizing a fixture
|
||||
Parametrizing fixtures
|
||||
-----------------------------------------------------------------
|
||||
|
||||
Fixture functions can be parametrized in which case they will be called
|
||||
|
@ -374,11 +404,9 @@ through the special :py:class:`request <FixtureRequest>` object::
|
|||
params=["smtp.gmail.com", "mail.python.org"])
|
||||
def smtp(request):
|
||||
smtp = smtplib.SMTP(request.param)
|
||||
def fin():
|
||||
print ("finalizing %s" % smtp)
|
||||
smtp.close()
|
||||
request.addfinalizer(fin)
|
||||
return smtp
|
||||
yield smtp
|
||||
print ("finalizing %s" % smtp)
|
||||
smtp.close()
|
||||
|
||||
The main change is the declaration of ``params`` with
|
||||
:py:func:`@pytest.fixture <_pytest.python.fixture>`, a list of values
|
||||
|
@ -386,7 +414,7 @@ for each of which the fixture function will execute and can access
|
|||
a value via ``request.param``. No test function code needs to change.
|
||||
So let's just do another run::
|
||||
|
||||
$ py.test -q test_module.py
|
||||
$ pytest -q test_module.py
|
||||
FFFF
|
||||
======= FAILURES ========
|
||||
_______ test_ehlo[smtp.gmail.com] ________
|
||||
|
@ -486,7 +514,7 @@ return ``None`` then pytest's auto-generated ID will be used.
|
|||
|
||||
Running the above tests results in the following test IDs being used::
|
||||
|
||||
$ py.test --collect-only
|
||||
$ pytest --collect-only
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -537,7 +565,7 @@ and instantiate an object ``app`` where we stick the already defined
|
|||
Here we declare an ``app`` fixture which receives the previously defined
|
||||
``smtp`` fixture and instantiates an ``App`` object with it. Let's run it::
|
||||
|
||||
$ py.test -v test_appsetup.py
|
||||
$ pytest -v test_appsetup.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -586,19 +614,15 @@ to show the setup/teardown flow::
|
|||
def modarg(request):
|
||||
param = request.param
|
||||
print (" SETUP modarg %s" % param)
|
||||
def fin():
|
||||
print (" TEARDOWN modarg %s" % param)
|
||||
request.addfinalizer(fin)
|
||||
return param
|
||||
yield param
|
||||
print (" TEARDOWN modarg %s" % param)
|
||||
|
||||
@pytest.fixture(scope="function", params=[1,2])
|
||||
def otherarg(request):
|
||||
param = request.param
|
||||
print (" SETUP otherarg %s" % param)
|
||||
def fin():
|
||||
print (" TEARDOWN otherarg %s" % param)
|
||||
request.addfinalizer(fin)
|
||||
return param
|
||||
yield param
|
||||
print (" TEARDOWN otherarg %s" % param)
|
||||
|
||||
def test_0(otherarg):
|
||||
print (" RUN test0 with otherarg %s" % otherarg)
|
||||
|
@ -610,7 +634,7 @@ to show the setup/teardown flow::
|
|||
|
||||
Let's run the tests in verbose mode and with looking at the print-output::
|
||||
|
||||
$ py.test -v -s test_module.py
|
||||
$ pytest -v -s test_module.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1 -- $PYTHON_PREFIX/bin/python3.5
|
||||
cachedir: .cache
|
||||
|
@ -712,7 +736,7 @@ will be required for the execution of each test method, just as if
|
|||
you specified a "cleandir" function argument to each of them. Let's run it
|
||||
to verify our fixture is activated and the tests pass::
|
||||
|
||||
$ py.test -q
|
||||
$ pytest -q
|
||||
..
|
||||
2 passed in 0.12 seconds
|
||||
|
||||
|
@ -777,7 +801,8 @@ self-contained implementation of this idea::
|
|||
@pytest.fixture(autouse=True)
|
||||
def transact(self, request, db):
|
||||
db.begin(request.function.__name__)
|
||||
request.addfinalizer(db.rollback)
|
||||
yield
|
||||
db.rollback()
|
||||
|
||||
def test_method1(self, db):
|
||||
assert db.intransaction == ["test_method1"]
|
||||
|
@ -792,12 +817,16 @@ class-level ``usefixtures`` decorator.
|
|||
|
||||
If we run it, we get two passing tests::
|
||||
|
||||
$ py.test -q
|
||||
$ pytest -q
|
||||
..
|
||||
2 passed in 0.12 seconds
|
||||
|
||||
Here is how autouse fixtures work in other scopes:
|
||||
|
||||
- autouse fixtures obey the ``scope=`` keyword-argument: if an autouse fixture
  has ``scope='session'`` it will only be run once, no matter where it is
  defined. ``scope='class'`` means it will be run once per class, etc. (see the
  sketch after this list)
|
||||
|
||||
- if an autouse fixture is defined in a test module, all its test
|
||||
functions automatically use it.
|
|
|||
into a conftest.py file **without** using ``autouse``::
|
||||
|
||||
# content of conftest.py
|
||||
@pytest.fixture()
|
||||
@pytest.fixture
|
||||
def transact(self, request, db):
|
||||
db.begin()
|
||||
request.addfinalizer(db.rollback)
|
||||
yield
|
||||
db.rollback()
|
||||
|
||||
and then e.g. have a TestClass using it by declaring the need::
|
||||
|
||||
|
|
|
@ -172,17 +172,17 @@ to do this with parametrization as ``pytest_runtest_setup()`` is called
|
|||
during test execution and parametrization happens at collection time.
|
||||
|
||||
It follows that pytest_configure/session/runtest_setup are often not
|
||||
appropriate for implementing common fixture needs. Therefore,
|
||||
appropriate for implementing common fixture needs. Therefore,
|
||||
pytest-2.3 introduces :ref:`autouse fixtures` which fully
|
||||
integrate with the generic :ref:`fixture mechanism <fixture>`
|
||||
integrate with the generic :ref:`fixture mechanism <fixture>`
|
||||
and obsolete many prior uses of pytest hooks.
|
||||
|
||||
funcargs/fixture discovery now happens at collection time
|
||||
---------------------------------------------------------------------
|
||||
|
||||
pytest-2.3 takes care to discover fixture/funcarg factories
|
||||
at collection time. This is more efficient especially for large test suites.
|
||||
Moreover, a call to "py.test --collect-only" should be able to in the future
|
||||
Since pytest-2.3, discovery of fixture/funcarg factories is taken care of
|
||||
at collection time. This is more efficient especially for large test suites.
|
||||
Moreover, a call to "pytest --collect-only" should in the future be able to
show a lot of setup-information and thus present a nice method to get an
overview of fixture management in your project.
|
||||
|
||||
|
|
|
@ -26,7 +26,7 @@ Installation options::
|
|||
|
||||
To check that you have installed the correct version::
|
||||
|
||||
$ py.test --version
|
||||
$ pytest --version
|
||||
This is pytest version 2.9.2, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py
|
||||
|
||||
If you get an error checkout :ref:`installation issues`.
|
||||
|
@ -47,7 +47,7 @@ Let's create a first test file with a simple test function::
|
|||
|
||||
That's it. You can execute the test function now::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -102,7 +102,7 @@ use the ``raises`` helper::
|
|||
|
||||
Running it, this time in "quiet" reporting mode::
|
||||
|
||||
$ py.test -q test_sysexit.py
|
||||
$ pytest -q test_sysexit.py
|
||||
.
|
||||
1 passed in 0.12 seconds
|
||||
|
||||
|
@ -127,7 +127,7 @@ The two tests are found because of the standard :ref:`test discovery`.
|
|||
There is no need to subclass anything. We can simply
|
||||
run the module by passing its filename::
|
||||
|
||||
$ py.test -q test_class.py
|
||||
$ pytest -q test_class.py
|
||||
.F
|
||||
======= FAILURES ========
|
||||
_______ TestClass.test_two ________
|
||||
|
@ -163,7 +163,7 @@ We list the name ``tmpdir`` in the test function signature and
|
|||
``pytest`` will lookup and call a fixture factory to create the resource
|
||||
before performing the test function call. Let's just run it::
|
||||
|
||||
$ py.test -q test_tmpdir.py
|
||||
$ pytest -q test_tmpdir.py
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_needsfiles ________
|
||||
|
@ -185,7 +185,7 @@ was created. More info at :ref:`tmpdir handling`.
|
|||
|
||||
You can find out what kind of builtin :ref:`fixtures` exist by typing::
|
||||
|
||||
py.test --fixtures # shows builtin and custom fixtures
|
||||
pytest --fixtures # shows builtin and custom fixtures
|
||||
|
||||
Where to go next
|
||||
-------------------------------------
|
||||
|
@ -193,45 +193,9 @@ Where to go next
|
|||
Here are a few suggestions where to go next:
|
||||
|
||||
* :ref:`cmdline` for command line invocation examples
|
||||
* :ref:`good practices <goodpractices>` for virtualenv, test layout, genscript support
|
||||
* :ref:`good practices <goodpractices>` for virtualenv, test layout
|
||||
* :ref:`fixtures` for providing a functional baseline to your tests
|
||||
* :ref:`apiref` for documentation and examples on using ``pytest``
|
||||
* :ref:`plugins` managing and writing plugins
|
||||
|
||||
.. _`installation issues`:
|
||||
|
||||
Known Installation issues
|
||||
------------------------------
|
||||
|
||||
easy_install or pip not found?
|
||||
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
.. _`install pip`: http://www.pip-installer.org/en/latest/index.html
|
||||
|
||||
`Install pip`_ for a state of the art python package installer.
|
||||
|
||||
Install `setuptools`_ to get ``easy_install`` which allows to install
|
||||
``.egg`` binary format packages in addition to source-based ones.
|
||||
|
||||
py.test not found on Windows despite installation?
|
||||
++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
.. _`Python for Windows`: http://www.imladris.com/Scripts/PythonForWindows.html
|
||||
|
||||
- **Windows**: If "easy_install" or "py.test" are not found
|
||||
you need to add the Python script path to your ``PATH``, see here:
|
||||
`Python for Windows`_. You may alternatively use an `ActivePython install`_
|
||||
which does this for you automatically.
|
||||
|
||||
.. _`ActivePython install`: http://www.activestate.com/activepython/downloads
|
||||
|
||||
.. _`Jython does not create command line launchers`: http://bugs.jython.org/issue1491
|
||||
|
||||
- **Jython2.5.1 on Windows XP**: `Jython does not create command line launchers`_
|
||||
so ``py.test`` will not work correctly. You may install py.test on
|
||||
CPython and type ``py.test --genscript=mytest`` and then use
|
||||
``jython mytest`` to run your tests with Jython using ``pytest``.
|
||||
|
||||
:ref:`examples` for more complex examples
|
||||
|
||||
.. include:: links.inc
|
||||
|
|
|
@ -72,17 +72,17 @@ Important notes relating to both schemes:
|
|||
|
||||
- With inlined tests you might put ``__init__.py`` into test
|
||||
directories and make them installable as part of your application.
|
||||
Using the ``py.test --pyargs mypkg`` invocation pytest will
|
||||
Using the ``pytest --pyargs mypkg`` invocation pytest will
|
||||
discover where mypkg is installed and collect tests from there.
|
||||
With the "external" test you can still distribute tests but they
|
||||
will not be installed or become importable.
|
||||
|
||||
Typically you can run tests by pointing to test directories or modules::
|
||||
|
||||
py.test tests/test_app.py # for external test dirs
|
||||
py.test mypkg/test/test_app.py # for inlined test dirs
|
||||
py.test mypkg # run tests in all below test directories
|
||||
py.test # run all tests below current dir
|
||||
pytest tests/test_app.py # for external test dirs
|
||||
pytest mypkg/test/test_app.py # for inlined test dirs
|
||||
pytest mypkg # run tests in all below test directories
|
||||
pytest # run all tests below current dir
|
||||
...
|
||||
|
||||
Because of the above ``editable install`` mode you can change your
|
||||
|
@ -193,7 +193,7 @@ If you now type::
|
|||
this will execute your tests using ``pytest-runner``. As this is a
|
||||
standalone version of ``pytest`` no prior installation whatsoever is
|
||||
required for calling the test command. You can also pass additional
|
||||
arguments to py.test such as your test directory or other
|
||||
arguments to pytest such as your test directory or other
|
||||
options using ``--addopts``.
|
||||
|
||||
|
||||
|
@ -211,7 +211,7 @@ your own setuptools Test command for invoking pytest.
|
|||
|
||||
|
||||
class PyTest(TestCommand):
|
||||
user_options = [('pytest-args=', 'a', "Arguments to pass to py.test")]
|
||||
user_options = [('pytest-args=', 'a', "Arguments to pass to pytest")]
|
||||
|
||||
def initialize_options(self):
|
||||
TestCommand.initialize_options(self)
|
||||
|
@ -240,41 +240,7 @@ using the ``--pytest-args`` or ``-a`` command-line option. For example::
|
|||
|
||||
python setup.py test -a "--durations=5"
|
||||
|
||||
is equivalent to running ``py.test --durations=5``.
|
||||
|
||||
|
||||
.. _standalone:
|
||||
.. _`genscript method`:
|
||||
|
||||
(deprecated) Create a pytest standalone script
|
||||
-----------------------------------------------
|
||||
|
||||
.. deprecated:: 2.8
|
||||
|
||||
.. note::
|
||||
|
||||
``genscript`` has been deprecated because:
|
||||
|
||||
* It cannot support plugins, rendering its usefulness extremely limited;
|
||||
* Tooling has become much better since ``genscript`` was introduced;
|
||||
* It is possible to build a zipped ``pytest`` application without the
|
||||
shortcomings above.
|
||||
|
||||
There's no planned version in which this command will be removed
|
||||
at the moment of this writing, but its use is discouraged for new
|
||||
applications.
|
||||
|
||||
If you are a maintainer or application developer and want people
|
||||
who don't deal with python much to easily run tests you may generate
|
||||
a standalone ``pytest`` script::
|
||||
|
||||
py.test --genscript=runtests.py
|
||||
|
||||
This generates a ``runtests.py`` script which is a fully functional basic
|
||||
``pytest`` script, running unchanged under Python2 and Python3.
|
||||
You can tell people to download the script and then e.g. run it like this::
|
||||
|
||||
python runtests.py
|
||||
is equivalent to running ``pytest --durations=5``.
|
||||
|
||||
|
||||
.. include:: links.inc
|
||||
|
|
|
@ -6,7 +6,7 @@ pytest: helps you write better programs
|
|||
|
||||
**a mature full-featured Python testing tool**
|
||||
|
||||
- runs on Posix/Windows, Python 2.6-3.5, PyPy and (possibly still) Jython-2.5.1
|
||||
- runs on Posix/Windows, Python 2.6, 2.7 and 3.3-3.5, PyPy and (possibly still) Jython-2.5.1
|
||||
- free and open source software, distributed under the terms of the :ref:`MIT license <license>`
|
||||
- **well tested** with more than a thousand tests against itself
|
||||
- **strict backward compatibility policy** for safe pytest upgrades
|
||||
|
@ -57,5 +57,3 @@ pytest: helps you write better programs
|
|||
|
||||
|
||||
.. _`easy`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
|
||||
|
||||
|
||||
|
|
|
@ -13,7 +13,7 @@ Usage
|
|||
After :ref:`installation` type::
|
||||
|
||||
python setup.py develop # make sure tests can import our package
|
||||
py.test # instead of 'nosetests'
|
||||
pytest # instead of 'nosetests'
|
||||
|
||||
and you should be able to run your nose style tests and
|
||||
make use of pytest's capabilities.
|
||||
|
|
|
@ -53,7 +53,7 @@ Here, the ``@parametrize`` decorator defines three different ``(test_input,expec
|
|||
tuples so that the ``test_eval`` function will run three times using
|
||||
them in turn::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -101,7 +101,7 @@ for example with the builtin ``mark.xfail``::
|
|||
|
||||
Let's run this::
|
||||
|
||||
$ py.test
|
||||
$ pytest
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -171,13 +171,13 @@ command line option and the parametrization of our test function::
|
|||
|
||||
If we now pass two stringinput values, our test will run twice::
|
||||
|
||||
$ py.test -q --stringinput="hello" --stringinput="world" test_strings.py
|
||||
$ pytest -q --stringinput="hello" --stringinput="world" test_strings.py
|
||||
..
|
||||
2 passed in 0.12 seconds
|
||||
|
||||
Let's also run with a stringinput that will lead to a failing test::
|
||||
|
||||
$ py.test -q --stringinput="!" test_strings.py
|
||||
$ pytest -q --stringinput="!" test_strings.py
|
||||
F
|
||||
======= FAILURES ========
|
||||
_______ test_valid_string[!] ________
|
||||
|
@ -198,7 +198,7 @@ If you don't specify a stringinput it will be skipped because
|
|||
``metafunc.parametrize()`` will be called with an empty parameter
|
||||
list::
|
||||
|
||||
$ py.test -q -rs test_strings.py
|
||||
$ pytest -q -rs test_strings.py
|
||||
s
|
||||
======= short test summary info ========
|
||||
SKIP [1] test_strings.py:1: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1
|
||||
|
|
|
@ -59,7 +59,7 @@ Here is a little annotated list for some popular plugins:
|
|||
a plugin to run javascript unittests in live browsers.
|
||||
|
||||
To see a complete list of all plugins with their latest testing
|
||||
status against different py.test and Python versions, please visit
|
||||
status against different pytest and Python versions, please visit
|
||||
`plugincompat <http://plugincompat.herokuapp.com/>`_.
|
||||
|
||||
You may also discover more plugins through a `pytest- pypi.python.org search`_.
|
||||
|
@ -90,7 +90,7 @@ Finding out which plugins are active
|
|||
If you want to find out which plugins are active in your
|
||||
environment you can type::
|
||||
|
||||
py.test --trace-config
|
||||
pytest --trace-config
|
||||
|
||||
and will get an extended test header which shows activated plugins
|
||||
and their names. It will also print local plugins aka
|
||||
|
@ -103,7 +103,7 @@ Deactivating / unregistering a plugin by name
|
|||
|
||||
You can prevent plugins from loading or unregister them::
|
||||
|
||||
py.test -p no:NAME
|
||||
pytest -p no:NAME
|
||||
|
||||
This means that any subsequent try to activate/load the named
|
||||
plugin will not work.
|
||||
|
@ -138,7 +138,6 @@ in the `pytest repository <https://github.com/pytest-dev/pytest>`_.
|
|||
_pytest.capture
|
||||
_pytest.config
|
||||
_pytest.doctest
|
||||
_pytest.genscript
|
||||
_pytest.helpconfig
|
||||
_pytest.junitxml
|
||||
_pytest.mark
|
||||
|
|
|
@ -0,0 +1,148 @@
|
|||
=========================
|
||||
Parametrize with fixtures
|
||||
=========================
|
||||
|
||||
Problem
|
||||
-------
|
||||
|
||||
As a user I have functional tests that I would like to run against various
|
||||
scenarios.
|
||||
|
||||
In this particular example we want to generate a new project based on a
|
||||
cookiecutter template. We want to test default values but also data that
|
||||
emulates user input.
|
||||
|
||||
- use default values
|
||||
|
||||
- emulate user input
|
||||
|
||||
- specify 'author'
|
||||
|
||||
- specify 'project_slug'
|
||||
|
||||
- specify 'author' and 'project_slug'
|
||||
|
||||
This is how a functional test could look:
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def default_context():
|
||||
return {'extra_context': {}}
|
||||
|
||||
|
||||
@pytest.fixture(params=[
|
||||
{'author': 'alice'},
|
||||
{'project_slug': 'helloworld'},
|
||||
{'author': 'bob', 'project_slug': 'foobar'},
|
||||
])
|
||||
def extra_context(request):
|
||||
return {'extra_context': request.param}
|
||||
|
||||
|
||||
@pytest.fixture(params=['default', 'extra'])
|
||||
def context(request):
|
||||
if request.param == 'default':
|
||||
return request.getfuncargvalue('default_context')
|
||||
else:
|
||||
return request.getfuncargvalue('extra_context')
|
||||
|
||||
|
||||
def test_generate_project(cookies, context):
|
||||
"""Call the cookiecutter API to generate a new project from a
|
||||
template.
|
||||
"""
|
||||
result = cookies.bake(extra_context=context)
|
||||
|
||||
assert result.exit_code == 0
|
||||
assert result.exception is None
|
||||
assert result.project.isdir()
|
||||
|
||||
|
||||
Issues
|
||||
------
|
||||
|
||||
* By using ``request.getfuncargvalue()`` we rely on actual fixture function
|
||||
  execution to know what fixtures are involved, due to its dynamic nature
|
||||
* More importantly, ``request.getfuncargvalue()`` cannot be combined with
|
||||
parametrized fixtures, such as ``extra_context``
|
||||
* This is very inconvenient if you wish to extend an existing test suite with
  certain parameters for fixtures that are already used by tests
|
||||
|
||||
pytest version 3.0 reports an error if you try to run above code::
|
||||
|
||||
Failed: The requested fixture has no parameter defined for the current
|
||||
test.
|
||||
|
||||
Requested fixture 'extra_context'
|
||||
|
||||
|
||||
Proposed solution
|
||||
-----------------
|
||||
|
||||
A new function that can be used in modules would dynamically define
fixtures from existing ones.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
pytest.define_combined_fixture(
|
||||
name='context',
|
||||
fixtures=['default_context', 'extra_context'],
|
||||
)
|
||||
|
||||
The new fixture ``context`` inherits the scope from the used fixtures and yields
|
||||
the following values.
|
||||
|
||||
- ``{}``
|
||||
|
||||
- ``{'author': 'alice'}``
|
||||
|
||||
- ``{'project_slug': 'helloworld'}``
|
||||
|
||||
- ``{'author': 'bob', 'project_slug': 'foobar'}``
|
||||
|
||||
Alternative approach
|
||||
--------------------
|
||||
|
||||
A new helper function named ``fixture_request`` tells pytest to yield all
|
||||
parameters of a fixture.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
@pytest.fixture(params=[
|
||||
pytest.fixture_request('default_context'),
|
||||
pytest.fixture_request('extra_context'),
|
||||
])
|
||||
def context(request):
|
||||
"""Returns all values for ``default_context``, one-by-one before it
|
||||
does the same for ``extra_context``.
|
||||
|
||||
request.param:
|
||||
- {}
|
||||
- {'author': 'alice'}
|
||||
- {'project_slug': 'helloworld'}
|
||||
- {'author': 'bob', 'project_slug': 'foobar'}
|
||||
"""
|
||||
return request.param
|
||||
|
||||
The same helper can be used in combination with ``pytest.mark.parametrize``.
|
||||
|
||||
.. code-block:: python
|
||||
|
||||
|
||||
@pytest.mark.parametrize(
|
||||
'context, exit_code',
|
||||
[
|
||||
(pytest.fixture_request('default_context'), 0),
|
||||
(pytest.fixture_request('extra_context'), 0),
|
||||
],
|
||||
)
|
||||
def test_generate_project(cookies, context, exit_code):
|
||||
"""Call the cookiecutter API to generate a new project from a
|
||||
template.
|
||||
"""
|
||||
result = cookies.bake(extra_context=context)
|
||||
|
||||
assert result.exit_code == exit_code
|
|
@ -19,7 +19,7 @@ information about skipped/xfailed tests is not shown by default to avoid
|
|||
cluttering the output. You can use the ``-r`` option to see details
|
||||
corresponding to the "short" letters shown in the test progress::
|
||||
|
||||
py.test -rxs # show extra info on skips and xfails
|
||||
pytest -rxs # show extra info on skips and xfails
|
||||
|
||||
(See :ref:`how to change command line options defaults`)
|
||||
|
||||
|
@ -222,7 +222,7 @@ Here is a simple test file with the several usages:
|
|||
|
||||
Running it with the report-on-xfail option gives this output::
|
||||
|
||||
example $ py.test -rx xfail_demo.py
|
||||
example $ pytest -rx xfail_demo.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR/example, inifile:
|
||||
|
@ -368,6 +368,6 @@ The equivalent with "boolean conditions" is::
|
|||
.. note::
|
||||
|
||||
You cannot use ``pytest.config.getvalue()`` in code
|
||||
imported before py.test's argument parsing takes place. For example,
|
||||
imported before pytest's argument parsing takes place. For example,
|
||||
``conftest.py`` files are imported before command line parsing and thus
|
||||
``config.getvalue()`` will not execute correctly.
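
A safe alternative is to read option values lazily, for instance through the
builtin ``pytestconfig`` fixture at test run time rather than at import time.
A minimal sketch (the ``verbose`` option looked up here is just an example)::

    def test_something(pytestconfig):
        # option values are available once command line parsing has happened
        verbose = pytestconfig.getoption("verbose")
        assert verbose >= 0
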
|
||||
|
|
|
@ -11,9 +11,6 @@ Talks and Tutorials
|
|||
Talks and blog postings
|
||||
---------------------------------------------
|
||||
|
||||
.. _`tutorial1 repository`: http://bitbucket.org/pytest-dev/pytest-tutorial1/
|
||||
.. _`pycon 2010 tutorial PDF`: http://bitbucket.org/pytest-dev/pytest-tutorial1/raw/tip/pytest-basic.pdf
|
||||
|
||||
- `pytest - Rapid Simple Testing, Florian Bruhin, Swiss Python Summit 2016
|
||||
<https://www.youtube.com/watch?v=rCBHkQ_LVIs>`_.
|
||||
|
||||
|
@ -52,12 +49,14 @@ Talks and blog postings
|
|||
- `pytest introduction from Brian Okken (January 2013)
|
||||
<http://pythontesting.net/framework/pytest-introduction/>`_
|
||||
|
||||
- `monkey patching done right`_ (blog post, consult `monkeypatch
|
||||
plugin`_ for up-to-date API)
|
||||
- pycon australia 2012 pytest talk from Brianna Laugher (`video <http://www.youtube.com/watch?v=DTNejE9EraI>`_, `slides <http://www.slideshare.net/pfctdayelise/funcargs-other-fun-with-pytest>`_, `code <https://gist.github.com/3386951>`_)
|
||||
- `pycon 2012 US talk video from Holger Krekel <http://www.youtube.com/watch?v=9LVqBQcFmyw>`_
|
||||
|
||||
- `monkey patching done right`_ (blog post, consult `monkeypatch plugin`_ for up-to-date API)
|
||||
|
||||
Test parametrization:
|
||||
|
||||
- `generating parametrized tests with funcargs`_ (uses deprecated ``addcall()`` API.
|
||||
- `generating parametrized tests with fixtures`_.
|
||||
- `test generators and cached setup`_
|
||||
- `parametrizing tests, generalized`_ (blog post)
|
||||
- `putting test-hooks into local or global plugins`_ (blog post)
|
||||
|
@ -78,39 +77,17 @@ Plugin specific examples:
|
|||
- `many examples in the docs for plugins`_
|
||||
|
||||
.. _`skipping slow tests by default in pytest`: http://bruynooghe.blogspot.com/2009/12/skipping-slow-test-by-default-in-pytest.html
|
||||
.. _`many examples in the docs for plugins`: plugin/index.html
|
||||
.. _`monkeypatch plugin`: plugin/monkeypatch.html
|
||||
.. _`application setup in test functions with funcargs`: funcargs.html#appsetup
|
||||
.. _`many examples in the docs for plugins`: plugins.html
|
||||
.. _`monkeypatch plugin`: monkeypatch.html
|
||||
.. _`application setup in test functions with fixtures`: fixture.html#interdependent-fixtures
|
||||
.. _`simultaneously test your code on all platforms`: http://tetamap.wordpress.com/2009/03/23/new-simultanously-test-your-code-on-all-platforms/
|
||||
.. _`monkey patching done right`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
|
||||
.. _`putting test-hooks into local or global plugins`: http://tetamap.wordpress.com/2009/05/14/putting-test-hooks-into-local-and-global-plugins/
|
||||
.. _`parametrizing tests, generalized`: http://tetamap.wordpress.com/2009/05/13/parametrizing-python-tests-generalized/
|
||||
.. _`generating parametrized tests with funcargs`: funcargs.html#test-generators
|
||||
.. _`generating parametrized tests with fixtures`: parametrize.html#test-generators
|
||||
.. _`test generators and cached setup`: http://bruynooghe.blogspot.com/2010/06/pytest-test-generators-and-cached-setup.html
|
||||
|
||||
Older conference talks and tutorials
|
||||
----------------------------------------
|
||||
|
||||
- `pycon australia 2012 pytest talk from Brianna Laugher
|
||||
<http://2012.pycon-au.org/schedule/52/view_talk?day=sunday>`_ (`video <http://www.youtube.com/watch?v=DTNejE9EraI>`_, `slides <http://www.slideshare.net/pfctdayelise/funcargs-other-fun-with-pytest>`_, `code <https://gist.github.com/3386951>`_)
|
||||
- `pycon 2012 US talk video from Holger Krekel <http://www.youtube.com/watch?v=9LVqBQcFmyw>`_
|
||||
- `pycon 2010 tutorial PDF`_ and `tutorial1 repository`_
|
||||
|
||||
- `ep2009-rapidtesting.pdf`_ tutorial slides (July 2009):
|
||||
|
||||
- testing terminology
|
||||
- basic pytest usage, file system layout
|
||||
- test function arguments (funcargs_) and test fixtures
|
||||
- existing plugins
|
||||
- distributed testing
|
||||
|
||||
- `ep2009-pytest.pdf`_ 60 minute pytest talk, highlighting unique features and a roadmap (July 2009)
|
||||
|
||||
- `pycon2009-pytest-introduction.zip`_ slides and files, extended version of pytest basic introduction, discusses more options, also introduces old-style xUnit setup, looponfailing and other features.
|
||||
|
||||
- `pycon2009-pytest-advanced.pdf`_ contain a slightly older version of funcargs and distributed testing, compared to the EuroPython 2009 slides.
|
||||
|
||||
.. _`ep2009-rapidtesting.pdf`: http://codespeak.net/download/py/ep2009-rapidtesting.pdf
|
||||
.. _`ep2009-pytest.pdf`: http://codespeak.net/download/py/ep2009-pytest.pdf
|
||||
.. _`pycon2009-pytest-introduction.zip`: http://codespeak.net/download/py/pycon2009-pytest-introduction.zip
|
||||
.. _`pycon2009-pytest-advanced.pdf`: http://codespeak.net/download/py/pycon2009-pytest-advanced.pdf
|
||||
|
|
|
@ -21,7 +21,7 @@ but note that project specific settings will be considered

first. There is a flag that helps you debug your
conftest.py configurations::

    py.test --trace-config
    pytest --trace-config

customizing the collecting and running process
@ -5,7 +5,7 @@ Mission
|
|||
``pytest`` strives to make testing a fun and no-boilerplate effort.
|
||||
|
||||
The tool is distributed as a `pytest` package. Its project independent
|
||||
``py.test`` command line tool helps you to:
|
||||
``pytest`` command line tool helps you to:
|
||||
|
||||
* rapidly collect and run tests
|
||||
* run unit- or doctests, functional or integration tests
|
||||
|
|
|
@ -53,7 +53,7 @@ subprocesses.
|
|||
|
||||
Running centralised testing::
|
||||
|
||||
py.test --cov myproj tests/
|
||||
pytest --cov myproj tests/
|
||||
|
||||
Shows a terminal report::
|
||||
|
||||
|
@ -76,7 +76,7 @@ file system. Each slave will have it's subprocesses measured.
|
|||
|
||||
Running distributed testing with dist mode set to load::
|
||||
|
||||
py.test --cov myproj -n 2 tests/
|
||||
pytest --cov myproj -n 2 tests/
|
||||
|
||||
Shows a terminal report::
|
||||
|
||||
|
@ -92,7 +92,7 @@ Shows a terminal report::
|
|||
|
||||
Again but spread over different hosts and different directories::
|
||||
|
||||
py.test --cov myproj --dist load
|
||||
pytest --cov myproj --dist load
|
||||
--tx ssh=memedough@host1//chdir=testenv1
|
||||
--tx ssh=memedough@host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python
|
||||
--rsyncdir myproj --rsyncdir tests --rsync examples
|
||||
|
@ -119,7 +119,7 @@ environments.
|
|||
|
||||
Running distributed testing with dist mode set to each::
|
||||
|
||||
py.test --cov myproj --dist each
|
||||
pytest --cov myproj --dist each
|
||||
--tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python
|
||||
--tx ssh=memedough@host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python
|
||||
--rsyncdir myproj --rsyncdir tests --rsync examples
|
||||
|
@ -149,7 +149,7 @@ annotated source code.
|
|||
|
||||
The terminal report without line numbers (default)::
|
||||
|
||||
py.test --cov-report term --cov myproj tests/
|
||||
pytest --cov-report term --cov myproj tests/
|
||||
|
||||
-------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
|
||||
Name Stmts Miss Cover
|
||||
|
@ -163,7 +163,7 @@ The terminal report without line numbers (default)::
|
|||
|
||||
The terminal report with line numbers::
|
||||
|
||||
py.test --cov-report term-missing --cov myproj tests/
|
||||
pytest --cov-report term-missing --cov myproj tests/
|
||||
|
||||
-------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
|
||||
Name Stmts Miss Cover Missing
|
||||
|
@ -178,7 +178,7 @@ The terminal report with line numbers::
|
|||
The remaining three reports output to files without showing anything on the terminal (useful for
|
||||
when the output is going to a continuous integration server)::
|
||||
|
||||
py.test --cov-report html --cov-report xml --cov-report annotate --cov myproj tests/
|
||||
pytest --cov-report html --cov-report xml --cov-report annotate --cov myproj tests/
|
||||
|
||||
|
||||
Coverage Data File
|
||||
|
|
|
@ -26,7 +26,7 @@ Usage
|
|||
|
||||
To get full test coverage reports for a particular package type::
|
||||
|
||||
py.test --cover-report=report
|
||||
pytest --cover-report=report
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
|
|
@ -24,7 +24,7 @@ Usage
|
|||
|
||||
After installation you can simply type::
|
||||
|
||||
py.test --figleaf [...]
|
||||
pytest --figleaf [...]
|
||||
|
||||
to enable figleaf coverage in your test run. A default ".figleaf" data file
|
||||
and "html" directory will be created. You can use command line options
|
||||
|
|
|
@ -1,28 +0,0 @@
|
|||
|
||||
(deprecated) generate standalone test script to be distributed along with an application.
|
||||
============================================================================
|
||||
|
||||
|
||||
.. contents::
|
||||
:local:
|
||||
|
||||
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``--genscript=path``
|
||||
create standalone ``pytest`` script at given target path.
|
||||
|
||||
Start improving this plugin in 30 seconds
|
||||
=========================================
|
||||
|
||||
|
||||
1. Download `pytest_genscript.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_genscript.py`` into your import path
|
||||
3. a subsequent ``pytest`` run will use your local version
|
||||
|
||||
Checkout customize_, other plugins_ or `get in contact`_.
|
||||
|
||||
.. include:: links.txt
|
|
@ -18,8 +18,6 @@ command line options
|
|||
early-load given plugin (multi-allowed).
|
||||
``--trace-config``
|
||||
trace considerations of conftest.py files.
|
||||
``--nomagic``
|
||||
don't reinterpret asserts, no traceback cutting.
|
||||
``--debug``
|
||||
generate and show internal debugging information.
|
||||
``--help-config``
|
||||
|
|
|
@ -2,10 +2,8 @@
|
|||
.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_recwarn.py
|
||||
.. _`unittest`: unittest.html
|
||||
.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_monkeypatch.py
|
||||
.. _`pytest_genscript.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_genscript.py
|
||||
.. _`pastebin`: pastebin.html
|
||||
.. _`skipping`: skipping.html
|
||||
.. _`genscript`: genscript.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`mark`: mark.html
|
||||
.. _`tmpdir`: tmpdir.html
|
||||
|
|
|
@ -14,7 +14,7 @@ Usage
|
|||
|
||||
type::
|
||||
|
||||
py.test # instead of 'nosetests'
|
||||
pytest # instead of 'nosetests'
|
||||
|
||||
and you should be able to run nose style tests and at the same
|
||||
time can make full use of pytest's capabilities.
|
||||
|
@ -38,7 +38,7 @@ Unsupported idioms / issues
|
|||
|
||||
If you find other issues or have suggestions please run::
|
||||
|
||||
py.test --pastebin=all
|
||||
pytest --pastebin=all
|
||||
|
||||
and send the resulting URL to a ``pytest`` contact channel,
|
||||
at best to the mailing list.
|
||||
|
|
|
@ -18,8 +18,6 @@ command line options
|
|||
show extra test summary info as specified by chars (f)ailed, (s)skipped, (x)failed, (X)passed.
|
||||
``-l, --showlocals``
|
||||
show locals in tracebacks (disabled by default).
|
||||
``--report=opts``
|
||||
(deprecated, use -r)
|
||||
``--tb=style``
|
||||
traceback print mode (long/short/line/no).
|
||||
``--full-trace``
|
||||
|
|
|
@ -36,7 +36,7 @@ Speed up test runs by sending tests to multiple CPUs
|
|||
|
||||
To send tests to multiple CPUs, type::
|
||||
|
||||
py.test -n NUM
|
||||
pytest -n NUM
|
||||
|
||||
Especially for longer running tests or tests requiring
|
||||
a lot of IO this can lead to considerable speed ups.
|
||||
|
@ -47,7 +47,7 @@ Running tests in a Python subprocess
|
|||
|
||||
To instantiate a python2.4 sub process and send tests to it, you may type::
|
||||
|
||||
py.test -d --tx popen//python=python2.4
|
||||
pytest -d --tx popen//python=python2.4
|
||||
|
||||
This will start a subprocess which is run with the "python2.4"
|
||||
Python interpreter, found in your system binary lookup path.
|
||||
|
@ -68,7 +68,7 @@ tests that you can successfully run locally. And you
|
|||
have a ssh-reachable machine ``myhost``. Then
|
||||
you can ad-hoc distribute your tests by typing::
|
||||
|
||||
py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg
|
||||
pytest -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg
|
||||
|
||||
This will synchronize your ``mypkg`` package directory
to a remote ssh account and then locally collect tests
|
||||
|
@ -97,7 +97,7 @@ It will tell you that it starts listening on the default
|
|||
port. You can now on your home machine specify this
|
||||
new socket host with something like this::
|
||||
|
||||
py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
|
||||
pytest -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
|
||||
|
||||
|
||||
.. _`atonce`:
|
||||
|
@ -107,7 +107,7 @@ Running tests on many platforms at once
|
|||
|
||||
The basic command to run tests on multiple platforms is::
|
||||
|
||||
py.test --dist=each --tx=spec1 --tx=spec2
|
||||
pytest --dist=each --tx=spec1 --tx=spec2
|
||||
|
||||
If you specify a windows host, an OSX host and a Linux
|
||||
environment this command will send each tests to all
|
||||
|
|
|
@ -27,7 +27,7 @@ and more. Here is an example test usage::
|
|||
Running this would result in a passed test except for the last
|
||||
``assert 0`` line which we use to look at values::
|
||||
|
||||
$ py.test test_tmpdir.py
|
||||
$ pytest test_tmpdir.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -100,7 +100,7 @@ than 3 temporary directories will be removed.
|
|||
|
||||
You can override the default temporary directory setting like this::
|
||||
|
||||
py.test --basetemp=mydir
|
||||
pytest --basetemp=mydir
|
||||
|
||||
When distributing tests on the local machine, ``pytest`` takes care to
|
||||
configure a basetemp directory for the sub processes such that all temporary
|
||||
|
|
|
@ -21,7 +21,7 @@ Usage
|
|||
|
||||
After :ref:`installation` type::
|
||||
|
||||
py.test
|
||||
pytest
|
||||
|
||||
and you should be able to run your unittest-style tests if they
|
||||
are contained in ``test_*`` modules. If that works for you then
|
||||
|
@ -86,7 +86,7 @@ the pytest fixture function ``db_class`` is called once per class.
|
|||
Due to the deliberately failing assert statements, we can take a look at
|
||||
the ``self.db`` values in the traceback::
|
||||
|
||||
$ py.test test_unittest_db.py
|
||||
$ pytest test_unittest_db.py
|
||||
======= test session starts ========
|
||||
platform linux -- Python 3.5.1, pytest-2.9.2, py-1.4.31, pluggy-0.3.1
|
||||
rootdir: $REGENDOC_TMPDIR, inifile:
|
||||
|
@ -161,7 +161,7 @@ on the class like in the previous example.
|
|||
|
||||
Running this test module ...::
|
||||
|
||||
$ py.test -q test_unittest_cleandir.py
|
||||
$ pytest -q test_unittest_cleandir.py
|
||||
.
|
||||
1 passed in 0.12 seconds
|
||||
|
||||
|
|
115 doc/en/usage.rst
@ -16,7 +16,7 @@ You can invoke testing through the Python interpreter from the command line::

    python -m pytest [...]

This is equivalent to invoking the command line script ``py.test [...]``
This is equivalent to invoking the command line script ``pytest [...]``
directly.

Getting help on version, option names, environment variables

@ -24,9 +24,9 @@ Getting help on version, option names, environment variables

::

    py.test --version   # shows where pytest was imported from
    py.test --fixtures  # show available builtin function arguments
    py.test -h | --help # show help on command line and config file options
    pytest --version    # shows where pytest was imported from
    pytest --fixtures   # show available builtin function arguments
    pytest -h | --help  # show help on command line and config file options
Stopping after the first (or N) failures

@ -34,52 +34,52 @@ Stopping after the first (or N) failures

To stop the testing process after the first (N) failures::

    py.test -x          # stop after first failure
    py.test --maxfail=2 # stop after two failures
    pytest -x           # stop after first failure
    pytest --maxfail=2  # stop after two failures

Specifying tests / selecting tests
---------------------------------------------------

Several test run options::

    py.test test_mod.py   # run tests in module
    py.test somepath      # run all tests below somepath
    py.test -k stringexpr # only run tests with names that match the
    pytest test_mod.py    # run tests in module
    pytest somepath       # run all tests below somepath
    pytest -k stringexpr  # only run tests with names that match the
                          # "string expression", e.g. "MyClass and not method"
                          # will select TestMyClass.test_something
                          # but not TestMyClass.test_method_simple
    py.test test_mod.py::test_func # only run tests that match the "node ID",
    pytest test_mod.py::test_func  # only run tests that match the "node ID",
                                   # e.g "test_mod.py::test_func" will select
                                   # only test_func in test_mod.py
    py.test test_mod.py::TestClass::test_method # run a single method in
    pytest test_mod.py::TestClass::test_method  # run a single method in
                                                # a single class

Import 'pkg' and use its filesystem location to find and run tests::

    py.test --pyargs pkg # run all tests found below directory of pkg
    pytest --pyargs pkg  # run all tests found below directory of pkg
Modifying Python traceback printing
----------------------------------------------

Examples for modifying traceback printing::

    py.test --showlocals # show local variables in tracebacks
    py.test -l           # show local variables (shortcut)
    pytest --showlocals  # show local variables in tracebacks
    pytest -l            # show local variables (shortcut)

    py.test --tb=auto    # (default) 'long' tracebacks for the first and last
    pytest --tb=auto     # (default) 'long' tracebacks for the first and last
                         # entry, but 'short' style for the other entries
    py.test --tb=long    # exhaustive, informative traceback formatting
    py.test --tb=short   # shorter traceback format
    py.test --tb=line    # only one line per failure
    py.test --tb=native  # Python standard library formatting
    py.test --tb=no      # no traceback at all
    pytest --tb=long     # exhaustive, informative traceback formatting
    pytest --tb=short    # shorter traceback format
    pytest --tb=line     # only one line per failure
    pytest --tb=native   # Python standard library formatting
    pytest --tb=no       # no traceback at all

The ``--full-trace`` causes very long traces to be printed on error (longer
than ``--tb=long``). It also ensures that a stack trace is printed on
**KeyboardInterrupt** (Ctrl+C).
This is very useful if the tests are taking too long and you interrupt them
with Ctrl+C to find out where the tests are *hanging*. By default no output
will be shown (because KeyboardInterrupt is catched by pytest). By using this
will be shown (because KeyboardInterrupt is caught by pytest). By using this
option you make sure a trace is shown.
Dropping to PDB_ (Python Debugger) on failures
|
||||
|
@ -90,14 +90,14 @@ Dropping to PDB_ (Python Debugger) on failures
|
|||
Python comes with a builtin Python debugger called PDB_. ``pytest``
|
||||
allows one to drop into the PDB_ prompt via a command line option::
|
||||
|
||||
py.test --pdb
|
||||
pytest --pdb
|
||||
|
||||
This will invoke the Python debugger on every failure. Often you might
|
||||
only want to do this for the first failing test to understand a certain
|
||||
failure situation::
|
||||
|
||||
py.test -x --pdb # drop to PDB on first failure, then end test session
|
||||
py.test --pdb --maxfail=3 # drop to PDB for first three failures
|
||||
pytest -x --pdb # drop to PDB on first failure, then end test session
|
||||
pytest --pdb --maxfail=3 # drop to PDB for first three failures
|
||||
|
||||
Note that on any failure the exception information is stored on
|
||||
``sys.last_value``, ``sys.last_type`` and ``sys.last_traceback``. In
|
||||
|
@ -125,7 +125,7 @@ can use a helper::
|
|||
.. versionadded: 2.0.0
|
||||
|
||||
Prior to pytest version 2.0.0 you could only enter PDB_ tracing if you disabled
|
||||
capturing on the command line via ``py.test -s``. In later versions, pytest
|
||||
capturing on the command line via ``pytest -s``. In later versions, pytest
|
||||
automatically disables its output capture when you enter PDB_ tracing:
|
||||
|
||||
* Output capture in other tests is not affected.
|
||||
|
@ -141,7 +141,7 @@ automatically disables its output capture when you enter PDB_ tracing:
|
|||
Since pytest version 2.4.0 you can also use the native Python
|
||||
``import pdb;pdb.set_trace()`` call to enter PDB_ tracing without having to use
|
||||
the ``pytest.set_trace()`` wrapper or explicitly disable pytest's output
|
||||
capturing via ``py.test -s``.
|
||||
capturing via ``pytest -s``.
|
||||
|
||||
.. _durations:
|
||||
|
||||
|
@ -152,7 +152,7 @@ Profiling test execution duration
|
|||
|
||||
To get a list of the slowest 10 test durations::
|
||||
|
||||
py.test --durations=10
|
||||
pytest --durations=10
|
||||
|
||||
|
||||
Creating JUnitXML format files

@ -161,7 +161,7 @@ Creating JUnitXML format files

To create result files which can be read by Jenkins_ or other Continuous
integration servers, use this invocation::

    py.test --junitxml=path
    pytest --junitxml=path

to create an XML file at ``path``.
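For context, the property-recording feature discussed in the next hunk is driven by the
``record_xml_property`` fixture; an illustrative usage sketch (not part of this changeset):

.. code-block:: python

    # content of test_record.py -- illustrative sketch
    def test_function(record_xml_property):
        record_xml_property("example_key", 1)
        assert True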
@ -201,12 +201,59 @@ This will add an extra property ``example_key="1"`` to the generated

Also please note that using this feature will break any schema verification.
This might be a problem when used with some CI servers.

LogXML: add_global_property
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^

.. versionadded:: 3.0

If you want to add a properties node at the testsuite level, which may contain properties
that are relevant to all testcases, you can use ``LogXML.add_global_property``

.. code-block:: python

    import pytest

    @pytest.fixture(scope="session")
    def log_global_env_facts():

        if pytest.config.pluginmanager.hasplugin('junitxml'):
            my_junit = getattr(pytest.config, '_xml', None)

            my_junit.add_global_property('ARCH', 'PPC')
            my_junit.add_global_property('STORAGE_TYPE', 'CEPH')

    @pytest.mark.usefixtures("log_global_env_facts")
    def start_and_prepare_env():
        pass

    class TestMe:
        def test_foo(self):
            assert True

This will add a property node below the testsuite node to the generated xml:

.. code-block:: xml

    <testsuite errors="0" failures="0" name="pytest" skips="0" tests="1" time="0.006">
      <properties>
        <property name="ARCH" value="PPC"/>
        <property name="STORAGE_TYPE" value="CEPH"/>
      </properties>
      <testcase classname="test_me.TestMe" file="test_me.py" line="16" name="test_foo" time="0.000243663787842"/>
    </testsuite>

.. warning::

    This is an experimental feature, and its interface might be replaced
    by something more powerful and general in future versions. The
    functionality per se will be kept.
Creating resultlog format files
|
||||
----------------------------------------------------
|
||||
|
||||
To create plain-text machine-readable result files you can issue::
|
||||
|
||||
py.test --resultlog=path
|
||||
pytest --resultlog=path
|
||||
|
||||
and look at the content at the ``path`` location. Such files are used e.g.
|
||||
by the `PyPy-test`_ web page to show test results over several revisions.
|
||||
|
@ -219,7 +266,7 @@ Sending test report to online pastebin service
|
|||
|
||||
**Creating a URL for each test failure**::
|
||||
|
||||
py.test --pastebin=failed
|
||||
pytest --pastebin=failed
|
||||
|
||||
This will submit test run information to a remote Paste service and
|
||||
provide a URL for each failure. You may select tests as usual or add
|
||||
|
@ -227,7 +274,7 @@ for example ``-x`` if you only want to send one particular failure.
|
|||
|
||||
**Creating a URL for a whole test session log**::
|
||||
|
||||
py.test --pastebin=all
|
||||
pytest --pastebin=all
|
||||
|
||||
Currently only pasting to the http://bpaste.net service is implemented.
|
||||
|
||||
|
@ -238,9 +285,9 @@ To disable loading specific plugins at invocation time, use the ``-p`` option
|
|||
together with the prefix ``no:``.
|
||||
|
||||
Example: to disable loading the plugin ``doctest``, which is responsible for
|
||||
executing doctest tests from text files, invoke py.test like this::
|
||||
executing doctest tests from text files, invoke pytest like this::
|
||||
|
||||
py.test -p no:doctest
|
||||
pytest -p no:doctest
|
||||
|
||||
.. _`pytest.main-usage`:
|
||||
|
||||
|
@ -253,7 +300,7 @@ You can invoke ``pytest`` from Python code directly::
|
|||
|
||||
pytest.main()
|
||||
|
||||
this acts as if you would call "py.test" from the command line.
|
||||
this acts as if you would call "pytest" from the command line.
|
||||
It will not raise ``SystemExit`` but return the exitcode instead.
|
||||
You can pass in options and arguments::
|
||||
|
||||
|
|
|
@ -87,8 +87,8 @@ sub directory but not for other directories::
|
|||
|
||||
Here is how you might run it::
|
||||
|
||||
py.test test_flat.py # will not show "setting up"
|
||||
py.test a/test_sub.py # will show "setting up"
|
||||
pytest test_flat.py # will not show "setting up"
|
||||
pytest a/test_sub.py # will show "setting up"
|
||||
|
||||
.. Note::
|
||||
If you have ``conftest.py`` files which do not reside in a
|
||||
|
@ -479,6 +479,7 @@ you can use the following hook:

.. autofunction:: pytest_pycollect_makeitem
.. autofunction:: pytest_generate_tests
.. autofunction:: pytest_make_parametrize_id
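``pytest_make_parametrize_id`` is newly documented here; a minimal ``conftest.py``
sketch (purely illustrative, assuming the 3.0-era signature
``pytest_make_parametrize_id(config, val)``) could be:

.. code-block:: python

    # conftest.py -- illustrative sketch
    def pytest_make_parametrize_id(config, val):
        # return a string to use as the test ID for this parametrized value,
        # or None to fall back to pytest's default ID generation
        if isinstance(val, complex):
            return "complex(%r)" % (val,)
        return None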
After collection is complete, you can modify the order of
items, delete or otherwise amend the test items:

@ -497,6 +498,8 @@ Session related reporting hooks:

.. autofunction:: pytest_report_header
.. autofunction:: pytest_report_teststatus
.. autofunction:: pytest_terminal_summary
.. autofunction:: pytest_fixture_setup
.. autofunction:: pytest_fixture_post_finalizer
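``pytest_fixture_setup`` and ``pytest_fixture_post_finalizer`` are new additions in this
branch; a hedged, illustrative sketch of observing fixture setup from a ``conftest.py``
(assuming the ``(fixturedef, request)`` signature) might look like:

.. code-block:: python

    # conftest.py -- illustrative sketch
    def pytest_fixture_setup(fixturedef, request):
        # log every fixture that gets set up; returning None keeps the default
        # setup behaviour, since this hook is firstresult
        print("setting up fixture %r" % fixturedef.argname)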
And here is the central hook for reporting about
test execution:
@ -553,6 +556,10 @@ Reference of objects involved in hooks
|
|||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. autoclass:: _pytest.python.FixtureDef()
|
||||
:members:
|
||||
:show-inheritance:
|
||||
|
||||
.. autoclass:: _pytest.runner.CallInfo()
|
||||
:members:
|
||||
|
||||
|
|
|
@ -52,7 +52,7 @@ Speed up test runs by sending tests to multiple CPUs
|
|||
|
||||
To send tests to multiple CPUs, type::
|
||||
|
||||
py.test -n NUM
|
||||
pytest -n NUM
|
||||
|
||||
Especially for longer running tests or tests requiring
|
||||
a lot of I/O this can lead to considerable speed ups.
|
||||
|
@ -63,14 +63,14 @@ Running tests in a Python subprocess
|
|||
|
||||
To instantiate a Python-2.7 subprocess and send tests to it, you may type::
|
||||
|
||||
py.test -d --tx popen//python=python2.7
|
||||
pytest -d --tx popen//python=python2.7
|
||||
|
||||
This will start a subprocess which is run with the "python2.7"
|
||||
Python interpreter, found in your system binary lookup path.
|
||||
|
||||
If you prefix the --tx option value like this::
|
||||
|
||||
py.test -d --tx 3*popen//python=python2.7
|
||||
pytest -d --tx 3*popen//python=python2.7
|
||||
|
||||
then three subprocesses would be created and the tests
will be distributed to three subprocesses and run simultaneously.
|
||||
|
@ -84,7 +84,7 @@ Running tests in looponfailing mode
|
|||
For refactoring a project with a medium or large test suite
|
||||
you can use the looponfailing mode. Simply add the ``--f`` option::
|
||||
|
||||
py.test -f
|
||||
pytest -f
|
||||
|
||||
and ``pytest`` will run your tests. Assuming you have failures it will then
|
||||
wait for file changes and re-run the failing test set. File changes are detected by looking at ``looponfailingroots`` root directories and all of their contents (recursively). If the default for this value does not work for you you
|
||||
|
@ -104,7 +104,7 @@ tests that you can successfully run locally. And you also
|
|||
have a ssh-reachable machine ``myhost``. Then
|
||||
you can ad-hoc distribute your tests by typing::
|
||||
|
||||
py.test -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg
|
||||
pytest -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg
|
||||
|
||||
This will synchronize your ``mypkg`` package directory
|
||||
with a remote ssh account and then collect and run your
|
||||
|
@ -135,7 +135,7 @@ It will tell you that it starts listening on the default
|
|||
port. You can now on your home machine specify this
|
||||
new socket host with something like this::
|
||||
|
||||
py.test -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
|
||||
pytest -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
|
||||
|
||||
|
||||
.. _`atonce`:
|
||||
|
@ -145,7 +145,7 @@ Running tests on many platforms at once
|
|||
|
||||
The basic command to run tests on multiple platforms is::
|
||||
|
||||
py.test --dist=each --tx=spec1 --tx=spec2
|
||||
pytest --dist=each --tx=spec1 --tx=spec2
|
||||
|
||||
If you specify a windows host, an OSX host and a Linux
|
||||
environment this command will send each tests to all
|
||||
|
@ -174,7 +174,7 @@ You can also add default environments like this::
|
|||
|
||||
and then just type::
|
||||
|
||||
py.test --dist=each
|
||||
pytest --dist=each
|
||||
|
||||
to run tests in each of the environments.
|
||||
|
||||
|
|
|
@ -1,100 +1,16 @@
|
|||
.. _yieldfixture:
|
||||
|
||||
Fixture functions using "yield" / context manager integration
|
||||
"yield_fixture" functions
|
||||
---------------------------------------------------------------
|
||||
|
||||
.. deprecated:: 3.0
|
||||
|
||||
.. versionadded:: 2.4
|
||||
|
||||
.. regendoc:wipe
|
||||
.. important::
|
||||
Since pytest-3.0, fixtures using the normal ``fixture`` decorator can use a ``yield``
|
||||
statement to provide fixture values and execute teardown code, exactly like ``yield_fixture``
|
||||
in previous versions.
|
||||
|
||||
pytest-2.4 allows fixture functions to seamlessly use a ``yield`` instead
|
||||
of a ``return`` statement to provide a fixture value while otherwise
|
||||
fully supporting all other fixture features.
|
||||
|
||||
Let's look at a simple standalone-example using the ``yield`` syntax::
|
||||
|
||||
# content of test_yield.py
|
||||
|
||||
import pytest
|
||||
|
||||
@pytest.yield_fixture
|
||||
def passwd():
|
||||
print ("\nsetup before yield")
|
||||
f = open("/etc/passwd")
|
||||
yield f.readlines()
|
||||
print ("teardown after yield")
|
||||
f.close()
|
||||
|
||||
def test_has_lines(passwd):
|
||||
print ("test called")
|
||||
assert passwd
|
||||
|
||||
In contrast to :ref:`finalization through registering callbacks
|
||||
<finalization>`, our fixture function used a ``yield``
|
||||
statement to provide the lines of the ``/etc/passwd`` file.
|
||||
The code after the ``yield`` statement serves as the teardown code,
|
||||
avoiding the indirection of registering a teardown callback function.
|
||||
|
||||
Let's run it with output capturing disabled::
|
||||
|
||||
$ py.test -q -s test_yield.py
|
||||
|
||||
setup before yield
|
||||
test called
|
||||
.teardown after yield
|
||||
|
||||
1 passed in 0.12 seconds
|
||||
|
||||
We can also seamlessly use the new syntax with ``with`` statements.
|
||||
Let's simplify the above ``passwd`` fixture::
|
||||
|
||||
# content of test_yield2.py
|
||||
|
||||
import pytest
|
||||
|
||||
@pytest.yield_fixture
|
||||
def passwd():
|
||||
with open("/etc/passwd") as f:
|
||||
yield f.readlines()
|
||||
|
||||
def test_has_lines(passwd):
|
||||
assert len(passwd) >= 1
|
||||
|
||||
The file ``f`` will be closed after the test finished execution
|
||||
because the Python ``file`` object supports finalization when
|
||||
the ``with`` statement ends.
|
||||
|
||||
Note that the yield fixture form supports all other fixture
|
||||
features such as ``scope``, ``params``, etc., thus changing existing
|
||||
fixture functions to use ``yield`` is straightforward.
|
||||
|
||||
.. note::
|
||||
|
||||
While the ``yield`` syntax is similar to what
|
||||
:py:func:`contextlib.contextmanager` decorated functions
|
||||
provide, with pytest fixture functions the part after the
|
||||
"yield" will always be invoked, independently from the
|
||||
exception status of the test function which uses the fixture.
|
||||
This behaviour makes sense if you consider that many different
|
||||
test functions might use a module or session scoped fixture.
|
||||
|
||||
|
||||
Discussion and future considerations / feedback
|
||||
++++++++++++++++++++++++++++++++++++++++++++++++++++
|
||||
|
||||
There are some topics that are worth mentioning:
|
||||
|
||||
- usually ``yield`` is used for producing multiple values.
|
||||
But fixture functions can only yield exactly one value.
|
||||
Yielding a second fixture value will get you an error.
|
||||
It's possible we can evolve pytest to allow for producing
|
||||
multiple values as an alternative to current parametrization.
|
||||
For now, you can just use the normal
|
||||
:ref:`fixture parametrization <fixture-parametrize>`
|
||||
mechanisms together with ``yield``-style fixtures.
|
||||
|
||||
- lastly ``yield`` introduces more than one way to write
  fixture functions, so what's the obvious way to a newcomer?

If you want to feedback or participate in discussion of the above
topics, please join our :ref:`contact channels`, you are most welcome.
Marking functions as ``yield_fixture`` is still supported, but deprecated and should not
be used in new code.
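Since the plain ``fixture`` decorator now accepts ``yield``, a minimal sketch of the
recommended modern style (mirroring the ``passwd`` example above; illustrative only):

.. code-block:: python

    # content of test_passwd.py -- illustrative sketch
    import pytest

    @pytest.fixture
    def passwd():
        with open("/etc/passwd") as f:
            yield f.readlines()   # teardown (closing the file) runs after the test

    def test_has_lines(passwd):
        assert len(passwd) >= 1

The ``with`` statement keeps the teardown implicit, exactly as it did with
``yield_fixture``.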
@ -4,17 +4,17 @@
|
|||
|
||||
set -e
|
||||
cd ../pytest-pep8
|
||||
py.test
|
||||
pytest
|
||||
cd ../pytest-instafail
|
||||
py.test
|
||||
pytest
|
||||
cd ../pytest-cache
|
||||
py.test
|
||||
pytest
|
||||
cd ../pytest-xprocess
|
||||
py.test
|
||||
pytest
|
||||
#cd ../pytest-cov
|
||||
#py.test
|
||||
#pytest
|
||||
cd ../pytest-capturelog
|
||||
py.test
|
||||
pytest
|
||||
cd ../pytest-xdist
|
||||
py.test
|
||||
pytest
|
||||
|
||||
|
|
31 setup.py
|
@ -13,7 +13,7 @@ classifiers = ['Development Status :: 6 - Mature',
|
|||
'Topic :: Software Development :: Libraries',
|
||||
'Topic :: Utilities'] + [
|
||||
('Programming Language :: Python :: %s' % x) for x in
|
||||
'2 2.6 2.7 3 3.2 3.3 3.4 3.5'.split()]
|
||||
'2 2.6 2.7 3 3.3 3.4 3.5'.split()]
|
||||
|
||||
with open('README.rst') as fd:
|
||||
long_description = fd.read()
|
||||
|
@ -51,10 +51,10 @@ def main():
|
|||
install_requires = ['py>=1.4.29'] # pluggy is vendored in _pytest.vendored_packages
|
||||
extras_require = {}
|
||||
if has_environment_marker_support():
|
||||
extras_require[':python_version=="2.6" or python_version=="3.0" or python_version=="3.1"'] = ['argparse']
|
||||
extras_require[':python_version=="2.6"'] = ['argparse']
|
||||
extras_require[':sys_platform=="win32"'] = ['colorama']
|
||||
else:
|
||||
if sys.version_info < (2, 7) or (3,) <= sys.version_info < (3, 2):
|
||||
if sys.version_info < (2, 7):
|
||||
install_requires.append('argparse')
|
||||
if sys.platform == 'win32':
|
||||
install_requires.append('colorama')
|
||||
|
@ -69,7 +69,8 @@ def main():
|
|||
platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
|
||||
author='Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others',
|
||||
author_email='holger at merlinux.eu',
|
||||
entry_points=make_entry_points(),
|
||||
entry_points={'console_scripts':
|
||||
['pytest=pytest:main', 'py.test=pytest:main']},
|
||||
classifiers=classifiers,
|
||||
cmdclass={'test': PyTest},
|
||||
# the following should be enabled for release
|
||||
|
@ -81,28 +82,6 @@ def main():
|
|||
)
|
||||
|
||||
|
||||
def cmdline_entrypoints(versioninfo, platform, basename):
|
||||
target = 'pytest:main'
|
||||
if platform.startswith('java'):
|
||||
points = {'py.test-jython': target}
|
||||
else:
|
||||
if basename.startswith('pypy'):
|
||||
points = {'py.test-%s' % basename: target}
|
||||
else: # cpython
|
||||
points = {'py.test-%s.%s' % versioninfo[:2] : target}
|
||||
points['py.test'] = target
|
||||
return points
|
||||
|
||||
|
||||
def make_entry_points():
|
||||
basename = os.path.basename(sys.executable)
|
||||
points = cmdline_entrypoints(sys.version_info, sys.platform, basename)
|
||||
keys = list(points.keys())
|
||||
keys.sort()
|
||||
l = ['%s = %s' % (x, points[x]) for x in keys]
|
||||
return {'console_scripts': l}
|
||||
|
||||
|
||||
class PyTest(Command):
|
||||
user_options = []
|
||||
def initialize_options(self):
|
||||
|
|
|
@ -119,9 +119,10 @@ class TestGeneralUsage:
|
|||
result = testdir.runpytest(p)
|
||||
result.stdout.fnmatch_lines([
|
||||
#XXX on jython this fails: "> import import_fails",
|
||||
"E ImportError: No module named *does_not_work*",
|
||||
"ImportError while importing test module*",
|
||||
"'No module named *does_not_work*",
|
||||
])
|
||||
assert result.ret == 1
|
||||
assert result.ret == 2
|
||||
|
||||
def test_not_collectable_arguments(self, testdir):
|
||||
p1 = testdir.makepyfile("")
|
||||
|
@ -726,11 +727,13 @@ class TestDurations:
|
|||
testdir.makepyfile(self.source)
|
||||
testdir.makepyfile(test_collecterror="""xyz""")
|
||||
result = testdir.runpytest("--durations=2", "-k test_1")
|
||||
assert result.ret != 0
|
||||
assert result.ret == 2
|
||||
result.stdout.fnmatch_lines([
|
||||
"*durations*",
|
||||
"*call*test_1*",
|
||||
"*Interrupted: 1 errors during collection*",
|
||||
])
|
||||
# Collection errors abort test execution, therefore no duration is
|
||||
# output
|
||||
assert "duration" not in result.stdout.str()
|
||||
|
||||
def test_with_not(self, testdir):
|
||||
testdir.makepyfile(self.source)
|
||||
|
@ -759,3 +762,36 @@ class TestDurationWithFixture:
|
|||
* setup *test_1*
|
||||
* call *test_1*
|
||||
""")
|
||||
|
||||
|
||||
def test_yield_tests_deprecation(testdir):
|
||||
testdir.makepyfile("""
|
||||
def func1(arg, arg2):
|
||||
assert arg == arg2
|
||||
def test_gen():
|
||||
yield "m1", func1, 15, 3*5
|
||||
yield "m2", func1, 42, 6*7
|
||||
""")
|
||||
result = testdir.runpytest('-ra')
|
||||
result.stdout.fnmatch_lines([
|
||||
'*yield tests are deprecated, and scheduled to be removed in pytest 4.0*',
|
||||
'*2 passed*',
|
||||
])
|
||||
|
||||
|
||||
def test_funcarg_prefix_deprecation(testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__value():
|
||||
return 10
|
||||
|
||||
def test_funcarg_prefix(value):
|
||||
assert value == 10
|
||||
""")
|
||||
result = testdir.runpytest('-ra')
|
||||
result.stdout.fnmatch_lines([
|
||||
('WC1 None pytest_funcarg__value: '
|
||||
'declaring fixtures using "pytest_funcarg__" prefix is deprecated '
|
||||
'and scheduled to be removed in pytest 4.0. '
|
||||
'Please remove the prefix and use the @pytest.fixture decorator instead.'),
|
||||
'*1 passed*',
|
||||
])
|
||||
|
|
|
@ -1,9 +1,14 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
import operator
|
||||
import _pytest
|
||||
import py
|
||||
import pytest
|
||||
from _pytest._code.code import ExceptionInfo, FormattedExcinfo, ReprExceptionInfo
|
||||
from _pytest._code.code import (
|
||||
ExceptionInfo,
|
||||
FormattedExcinfo,
|
||||
ReprExceptionInfo,
|
||||
ExceptionChainRepr)
|
||||
|
||||
queue = py.builtin._tryimport('queue', 'Queue')
|
||||
|
||||
|
@ -143,6 +148,39 @@ class TestTraceback_f_g_h:
|
|||
ntraceback = traceback.filter()
|
||||
assert len(ntraceback) == len(traceback) - 1
|
||||
|
||||
@pytest.mark.parametrize('tracebackhide, matching', [
|
||||
(lambda info: True, True),
|
||||
(lambda info: False, False),
|
||||
(operator.methodcaller('errisinstance', ValueError), True),
|
||||
(operator.methodcaller('errisinstance', IndexError), False),
|
||||
])
|
||||
def test_traceback_filter_selective(self, tracebackhide, matching):
|
||||
def f():
|
||||
#
|
||||
raise ValueError
|
||||
#
|
||||
def g():
|
||||
#
|
||||
__tracebackhide__ = tracebackhide
|
||||
f()
|
||||
#
|
||||
def h():
|
||||
#
|
||||
g()
|
||||
#
|
||||
|
||||
excinfo = pytest.raises(ValueError, h)
|
||||
traceback = excinfo.traceback
|
||||
ntraceback = traceback.filter()
|
||||
print('old: {0!r}'.format(traceback))
|
||||
print('new: {0!r}'.format(ntraceback))
|
||||
|
||||
if matching:
|
||||
assert len(ntraceback) == len(traceback) - 2
|
||||
else:
|
||||
# -1 because of the __tracebackhide__ in pytest.raises
|
||||
assert len(ntraceback) == len(traceback) - 1
|
||||
|
||||
def test_traceback_recursion_index(self):
|
||||
def f(n):
|
||||
if n < 10:
|
||||
|
@ -323,8 +361,29 @@ def test_codepath_Queue_example():
|
|||
assert path.basename.lower() == "queue.py"
|
||||
assert path.check()
|
||||
|
||||
def test_match_succeeds():
|
||||
with pytest.raises(ZeroDivisionError) as excinfo:
|
||||
0 / 0
|
||||
excinfo.match(r'.*zero.*')
|
||||
|
||||
def test_match_raises_error(testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
def test_division_zero():
|
||||
with pytest.raises(ZeroDivisionError) as excinfo:
|
||||
0 / 0
|
||||
excinfo.match(r'[123]+')
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
assert result.ret != 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*AssertionError*Pattern*[123]*not found*",
|
||||
])
|
||||
|
||||
class TestFormattedExcinfo:
|
||||
def pytest_funcarg__importasmod(self, request):
|
||||
|
||||
@pytest.fixture
|
||||
def importasmod(self, request):
|
||||
def importasmod(source):
|
||||
source = _pytest._code.Source(source)
|
||||
tmpdir = request.getfixturevalue("tmpdir")
|
||||
|
@ -385,6 +444,8 @@ class TestFormattedExcinfo:
|
|||
excinfo = _pytest._code.ExceptionInfo()
|
||||
repr = pr.repr_excinfo(excinfo)
|
||||
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
|
||||
if py.std.sys.version_info[0] >= 3:
|
||||
assert repr.chain[0][0].reprentries[1].lines[0] == "> ???"
|
||||
|
||||
def test_repr_many_line_source_not_existing(self):
|
||||
pr = FormattedExcinfo()
|
||||
|
@ -398,6 +459,8 @@ raise ValueError()
|
|||
excinfo = _pytest._code.ExceptionInfo()
|
||||
repr = pr.repr_excinfo(excinfo)
|
||||
assert repr.reprtraceback.reprentries[1].lines[0] == "> ???"
|
||||
if py.std.sys.version_info[0] >= 3:
|
||||
assert repr.chain[0][0].reprentries[1].lines[0] == "> ???"
|
||||
|
||||
def test_repr_source_failing_fullsource(self):
|
||||
pr = FormattedExcinfo()
|
||||
|
@ -418,7 +481,7 @@ raise ValueError()
|
|||
f_globals = {}
|
||||
|
||||
class FakeTracebackEntry(_pytest._code.Traceback.Entry):
|
||||
def __init__(self, tb):
|
||||
def __init__(self, tb, excinfo=None):
|
||||
self.lineno = 5+3
|
||||
|
||||
@property
|
||||
|
@ -430,6 +493,7 @@ raise ValueError()
|
|||
|
||||
class FakeExcinfo(_pytest._code.ExceptionInfo):
|
||||
typename = "Foo"
|
||||
value = Exception()
|
||||
def __init__(self):
|
||||
pass
|
||||
|
||||
|
@ -447,10 +511,15 @@ raise ValueError()
|
|||
fail = IOError() # noqa
|
||||
repr = pr.repr_excinfo(excinfo)
|
||||
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
|
||||
if py.std.sys.version_info[0] >= 3:
|
||||
assert repr.chain[0][0].reprentries[0].lines[0] == "> ???"
|
||||
|
||||
|
||||
fail = py.error.ENOENT # noqa
|
||||
repr = pr.repr_excinfo(excinfo)
|
||||
assert repr.reprtraceback.reprentries[0].lines[0] == "> ???"
|
||||
if py.std.sys.version_info[0] >= 3:
|
||||
assert repr.chain[0][0].reprentries[0].lines[0] == "> ???"
|
||||
|
||||
|
||||
def test_repr_local(self):
|
||||
|
@ -637,6 +706,9 @@ raise ValueError()
|
|||
repr = p.repr_excinfo(excinfo)
|
||||
assert repr.reprtraceback
|
||||
assert len(repr.reprtraceback.reprentries) == len(reprtb.reprentries)
|
||||
if py.std.sys.version_info[0] >= 3:
|
||||
assert repr.chain[0][0]
|
||||
assert len(repr.chain[0][0].reprentries) == len(reprtb.reprentries)
|
||||
assert repr.reprcrash.path.endswith("mod.py")
|
||||
assert repr.reprcrash.message == "ValueError: 0"
|
||||
|
||||
|
@ -727,8 +799,13 @@ raise ValueError()
|
|||
for style in ("short", "long", "no"):
|
||||
for showlocals in (True, False):
|
||||
repr = excinfo.getrepr(style=style, showlocals=showlocals)
|
||||
assert isinstance(repr, ReprExceptionInfo)
|
||||
if py.std.sys.version_info[0] < 3:
|
||||
assert isinstance(repr, ReprExceptionInfo)
|
||||
assert repr.reprtraceback.style == style
|
||||
if py.std.sys.version_info[0] >= 3:
|
||||
assert isinstance(repr, ExceptionChainRepr)
|
||||
for repr in repr.chain:
|
||||
assert repr[0].style == style
|
||||
|
||||
def test_reprexcinfo_unicode(self):
|
||||
from _pytest._code.code import TerminalRepr
|
||||
|
@ -910,6 +987,73 @@ raise ValueError()
|
|||
assert tw.lines[15] == ""
|
||||
assert tw.lines[16].endswith("mod.py:9: ValueError")
|
||||
|
||||
@pytest.mark.skipif("sys.version_info[0] < 3")
|
||||
def test_exc_chain_repr(self, importasmod):
|
||||
mod = importasmod("""
|
||||
class Err(Exception):
|
||||
pass
|
||||
def f():
|
||||
try:
|
||||
g()
|
||||
except Exception as e:
|
||||
raise Err() from e
|
||||
finally:
|
||||
h()
|
||||
def g():
|
||||
raise ValueError()
|
||||
|
||||
def h():
|
||||
raise AttributeError()
|
||||
""")
|
||||
excinfo = pytest.raises(AttributeError, mod.f)
|
||||
r = excinfo.getrepr(style="long")
|
||||
tw = TWMock()
|
||||
r.toterminal(tw)
|
||||
for line in tw.lines: print (line)
|
||||
assert tw.lines[0] == ""
|
||||
assert tw.lines[1] == " def f():"
|
||||
assert tw.lines[2] == " try:"
|
||||
assert tw.lines[3] == "> g()"
|
||||
assert tw.lines[4] == ""
|
||||
assert tw.lines[5].endswith("mod.py:6: ")
|
||||
assert tw.lines[6] == ("_ ", None)
|
||||
assert tw.lines[7] == ""
|
||||
assert tw.lines[8] == " def g():"
|
||||
assert tw.lines[9] == "> raise ValueError()"
|
||||
assert tw.lines[10] == "E ValueError"
|
||||
assert tw.lines[11] == ""
|
||||
assert tw.lines[12].endswith("mod.py:12: ValueError")
|
||||
assert tw.lines[13] == ""
|
||||
assert tw.lines[14] == "The above exception was the direct cause of the following exception:"
|
||||
assert tw.lines[15] == ""
|
||||
assert tw.lines[16] == " def f():"
|
||||
assert tw.lines[17] == " try:"
|
||||
assert tw.lines[18] == " g()"
|
||||
assert tw.lines[19] == " except Exception as e:"
|
||||
assert tw.lines[20] == "> raise Err() from e"
|
||||
assert tw.lines[21] == "E test_exc_chain_repr0.mod.Err"
|
||||
assert tw.lines[22] == ""
|
||||
assert tw.lines[23].endswith("mod.py:8: Err")
|
||||
assert tw.lines[24] == ""
|
||||
assert tw.lines[25] == "During handling of the above exception, another exception occurred:"
|
||||
assert tw.lines[26] == ""
|
||||
assert tw.lines[27] == " def f():"
|
||||
assert tw.lines[28] == " try:"
|
||||
assert tw.lines[29] == " g()"
|
||||
assert tw.lines[30] == " except Exception as e:"
|
||||
assert tw.lines[31] == " raise Err() from e"
|
||||
assert tw.lines[32] == " finally:"
|
||||
assert tw.lines[33] == "> h()"
|
||||
assert tw.lines[34] == ""
|
||||
assert tw.lines[35].endswith("mod.py:10: ")
|
||||
assert tw.lines[36] == ('_ ', None)
|
||||
assert tw.lines[37] == ""
|
||||
assert tw.lines[38] == " def h():"
|
||||
assert tw.lines[39] == "> raise AttributeError()"
|
||||
assert tw.lines[40] == "E AttributeError"
|
||||
assert tw.lines[41] == ""
|
||||
assert tw.lines[42].endswith("mod.py:15: AttributeError")
|
||||
|
||||
|
||||
@pytest.mark.parametrize("style", ["short", "long"])
|
||||
@pytest.mark.parametrize("encoding", [None, "utf8", "utf16"])
|
||||
|
@ -935,4 +1079,4 @@ def test_cwd_deleted(testdir):
|
|||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines(['* 1 failed in *'])
|
||||
assert 'INTERNALERROR' not in result.stdout.str() + result.stderr.str()
|
||||
assert 'INTERNALERROR' not in result.stdout.str() + result.stderr.str()
|
|
@ -285,13 +285,14 @@ class TestSourceParsingAndCompiling:
|
|||
#print "block", str(block)
|
||||
assert str(stmt).strip().startswith('assert')
|
||||
|
||||
def test_compilefuncs_and_path_sanity(self):
|
||||
@pytest.mark.parametrize('name', ['', None, 'my'])
|
||||
def test_compilefuncs_and_path_sanity(self, name):
|
||||
def check(comp, name):
|
||||
co = comp(self.source, name)
|
||||
if not name:
|
||||
expected = "codegen %s:%d>" %(mypath, mylineno+2+1)
|
||||
expected = "codegen %s:%d>" %(mypath, mylineno+2+2)
|
||||
else:
|
||||
expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+1)
|
||||
expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+2)
|
||||
fn = co.co_filename
|
||||
assert fn.endswith(expected)
|
||||
|
||||
|
@ -300,8 +301,7 @@ class TestSourceParsingAndCompiling:
|
|||
mypath = mycode.path
|
||||
|
||||
for comp in _pytest._code.compile, _pytest._code.Source.compile:
|
||||
for name in '', None, 'my':
|
||||
yield check, comp, name
|
||||
check(comp, name)
|
||||
|
||||
def test_offsetless_synerr(self):
|
||||
pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval')
|
||||
|
@ -385,8 +385,7 @@ def test_deindent():
|
|||
lines = deindent(source.splitlines())
|
||||
assert lines == ['', 'def f():', ' def g():', ' pass', ' ']
|
||||
|
||||
@pytest.mark.xfail("sys.version_info[:3] < (2,7,0) or "
|
||||
"((3,0) <= sys.version_info[:2] < (3,2))")
|
||||
@pytest.mark.xfail("sys.version_info[:3] < (2,7,0)")
|
||||
def test_source_of_class_at_eof_without_newline(tmpdir):
|
||||
# this test fails because the implicit inspect.getsource(A) below
|
||||
# does not return the "x = 1" last line.
|
||||
|
@ -656,4 +655,3 @@ something
|
|||
'''"""
|
||||
result = getstatement(1, source)
|
||||
assert str(result) == "'''\n'''"
|
||||
|
||||
|
|
|
@ -1,6 +1,6 @@
|
|||
"""
|
||||
This is the script that is actually frozen into an executable: simply executes
|
||||
py.test main().
|
||||
pytest main().
|
||||
"""
|
||||
|
||||
if __name__ == '__main__':
|
||||
|
|
|
@ -8,7 +8,7 @@ if __name__ == '__main__':
|
|||
setup(
|
||||
name="runtests",
|
||||
version="0.1",
|
||||
description="example of how embedding py.test into an executable using cx_freeze",
|
||||
description="example of how embedding pytest into an executable using cx_freeze",
|
||||
executables=[Executable("runtests_script.py")],
|
||||
options={"build_exe": {'includes': pytest.freeze_includes()}},
|
||||
)
|
||||
|
|
|
@ -0,0 +1,286 @@
|
|||
# encoding: utf-8
|
||||
|
||||
import pytest
|
||||
import doctest
|
||||
|
||||
from pytest import approx
|
||||
from operator import eq, ne
|
||||
from decimal import Decimal
|
||||
from fractions import Fraction
|
||||
inf, nan = float('inf'), float('nan')
|
||||
|
||||
class MyDocTestRunner(doctest.DocTestRunner):
|
||||
|
||||
def __init__(self):
|
||||
doctest.DocTestRunner.__init__(self)
|
||||
|
||||
def report_failure(self, out, test, example, got):
|
||||
raise AssertionError("'{}' evaluates to '{}', not '{}'".format(
|
||||
example.source.strip(), got.strip(), example.want.strip()))
|
||||
|
||||
|
||||
class TestApprox:
|
||||
|
||||
def test_repr_string(self):
|
||||
# Just make sure the Unicode handling doesn't raise any exceptions.
|
||||
print(approx(1.0))
|
||||
print(approx([1.0, 2.0, 3.0]))
|
||||
print(approx(inf))
|
||||
print(approx(1.0, rel=nan))
|
||||
print(approx(1.0, rel=inf))
|
||||
|
||||
def test_operator_overloading(self):
|
||||
assert 1 == approx(1, rel=1e-6, abs=1e-12)
|
||||
assert not (1 != approx(1, rel=1e-6, abs=1e-12))
|
||||
assert 10 != approx(1, rel=1e-6, abs=1e-12)
|
||||
assert not (10 == approx(1, rel=1e-6, abs=1e-12))
|
||||
|
||||
def test_exactly_equal(self):
|
||||
examples = [
|
||||
(2.0, 2.0),
|
||||
(0.1e200, 0.1e200),
|
||||
(1.123e-300, 1.123e-300),
|
||||
(12345, 12345.0),
|
||||
(0.0, -0.0),
|
||||
(345678, 345678),
|
||||
(Decimal('1.0001'), Decimal('1.0001')),
|
||||
(Fraction(1, 3), Fraction(-1, -3)),
|
||||
]
|
||||
for a, x in examples:
|
||||
assert a == approx(x)
|
||||
|
||||
def test_opposite_sign(self):
|
||||
examples = [
|
||||
(eq, 1e-100, -1e-100),
|
||||
(ne, 1e100, -1e100),
|
||||
]
|
||||
for op, a, x in examples:
|
||||
assert op(a, approx(x))
|
||||
|
||||
def test_zero_tolerance(self):
|
||||
within_1e10 = [
|
||||
(1.1e-100, 1e-100),
|
||||
(-1.1e-100, -1e-100),
|
||||
]
|
||||
for a, x in within_1e10:
|
||||
assert x == approx(x, rel=0.0, abs=0.0)
|
||||
assert a != approx(x, rel=0.0, abs=0.0)
|
||||
assert a == approx(x, rel=0.0, abs=5e-101)
|
||||
assert a != approx(x, rel=0.0, abs=5e-102)
|
||||
assert a == approx(x, rel=5e-1, abs=0.0)
|
||||
assert a != approx(x, rel=5e-2, abs=0.0)
|
||||
|
||||
def test_negative_tolerance(self):
|
||||
# Negative tolerances are not allowed.
|
||||
illegal_kwargs = [
|
||||
dict(rel=-1e100),
|
||||
dict(abs=-1e100),
|
||||
dict(rel=1e100, abs=-1e100),
|
||||
dict(rel=-1e100, abs=1e100),
|
||||
dict(rel=-1e100, abs=-1e100),
|
||||
]
|
||||
for kwargs in illegal_kwargs:
|
||||
with pytest.raises(ValueError):
|
||||
1.1 == approx(1, **kwargs)
|
||||
|
||||
def test_inf_tolerance(self):
|
||||
# Everything should be equal if the tolerance is infinite.
|
||||
large_diffs = [
|
||||
(1, 1000),
|
||||
(1e-50, 1e50),
|
||||
(-1.0, -1e300),
|
||||
(0.0, 10),
|
||||
]
|
||||
for a, x in large_diffs:
|
||||
assert a != approx(x, rel=0.0, abs=0.0)
|
||||
assert a == approx(x, rel=inf, abs=0.0)
|
||||
assert a == approx(x, rel=0.0, abs=inf)
|
||||
assert a == approx(x, rel=inf, abs=inf)
|
||||
|
||||
def test_inf_tolerance_expecting_zero(self):
|
||||
# If the relative tolerance is zero but the expected value is infinite,
|
||||
# the actual tolerance is a NaN, which should be an error.
|
||||
illegal_kwargs = [
|
||||
dict(rel=inf, abs=0.0),
|
||||
dict(rel=inf, abs=inf),
|
||||
]
|
||||
for kwargs in illegal_kwargs:
|
||||
with pytest.raises(ValueError):
|
||||
1 == approx(0, **kwargs)
|
||||
|
||||
def test_nan_tolerance(self):
|
||||
illegal_kwargs = [
|
||||
dict(rel=nan),
|
||||
dict(abs=nan),
|
||||
dict(rel=nan, abs=nan),
|
||||
]
|
||||
for kwargs in illegal_kwargs:
|
||||
with pytest.raises(ValueError):
|
||||
1.1 == approx(1, **kwargs)
|
||||
|
||||
def test_reasonable_defaults(self):
    # Whatever the defaults are, they should work for numbers close to 1
    # that have a small amount of floating-point error.
    assert 0.1 + 0.2 == approx(0.3)
|
||||
|
||||
def test_default_tolerances(self):
|
||||
# This tests the defaults as they are currently set. If you change the
|
||||
# defaults, this test will fail but you should feel free to change it.
|
||||
# None of the other tests (except the doctests) should be affected by
|
||||
# the choice of defaults.
|
||||
examples = [
|
||||
# Relative tolerance used.
|
||||
(eq, 1e100 + 1e94, 1e100),
|
||||
(ne, 1e100 + 2e94, 1e100),
|
||||
(eq, 1e0 + 1e-6, 1e0),
|
||||
(ne, 1e0 + 2e-6, 1e0),
|
||||
# Absolute tolerance used.
|
||||
(eq, 1e-100, + 1e-106),
|
||||
(eq, 1e-100, + 2e-106),
|
||||
(eq, 1e-100, 0),
|
||||
]
|
||||
for op, a, x in examples:
|
||||
assert op(a, approx(x))
|
||||
|
||||
def test_custom_tolerances(self):
|
||||
assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e0)
|
||||
assert 1e8 + 1e0 == approx(1e8, rel=5e-9, abs=5e0)
|
||||
assert 1e8 + 1e0 == approx(1e8, rel=5e-8, abs=5e-1)
|
||||
assert 1e8 + 1e0 != approx(1e8, rel=5e-9, abs=5e-1)
|
||||
|
||||
assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-8)
|
||||
assert 1e0 + 1e-8 == approx(1e0, rel=5e-9, abs=5e-8)
|
||||
assert 1e0 + 1e-8 == approx(1e0, rel=5e-8, abs=5e-9)
|
||||
assert 1e0 + 1e-8 != approx(1e0, rel=5e-9, abs=5e-9)
|
||||
|
||||
assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-16)
|
||||
assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-9, abs=5e-16)
|
||||
assert 1e-8 + 1e-16 == approx(1e-8, rel=5e-8, abs=5e-17)
|
||||
assert 1e-8 + 1e-16 != approx(1e-8, rel=5e-9, abs=5e-17)
|
||||
|
||||
def test_relative_tolerance(self):
|
||||
within_1e8_rel = [
|
||||
(1e8 + 1e0, 1e8),
|
||||
(1e0 + 1e-8, 1e0),
|
||||
(1e-8 + 1e-16, 1e-8),
|
||||
]
|
||||
for a, x in within_1e8_rel:
|
||||
assert a == approx(x, rel=5e-8, abs=0.0)
|
||||
assert a != approx(x, rel=5e-9, abs=0.0)
|
||||
|
||||
def test_absolute_tolerance(self):
|
||||
within_1e8_abs = [
|
||||
(1e8 + 9e-9, 1e8),
|
||||
(1e0 + 9e-9, 1e0),
|
||||
(1e-8 + 9e-9, 1e-8),
|
||||
]
|
||||
for a, x in within_1e8_abs:
|
||||
assert a == approx(x, rel=0, abs=5e-8)
|
||||
assert a != approx(x, rel=0, abs=5e-9)
|
||||
|
||||
def test_expecting_zero(self):
|
||||
examples = [
|
||||
(ne, 1e-6, 0.0),
|
||||
(ne, -1e-6, 0.0),
|
||||
(eq, 1e-12, 0.0),
|
||||
(eq, -1e-12, 0.0),
|
||||
(ne, 2e-12, 0.0),
|
||||
(ne, -2e-12, 0.0),
|
||||
(ne, inf, 0.0),
|
||||
(ne, nan, 0.0),
|
||||
]
|
||||
for op, a, x in examples:
|
||||
assert op(a, approx(x, rel=0.0, abs=1e-12))
|
||||
assert op(a, approx(x, rel=1e-6, abs=1e-12))
|
||||
|
||||
def test_expecting_inf(self):
|
||||
examples = [
|
||||
(eq, inf, inf),
|
||||
(eq, -inf, -inf),
|
||||
(ne, inf, -inf),
|
||||
(ne, 0.0, inf),
|
||||
(ne, nan, inf),
|
||||
]
|
||||
for op, a, x in examples:
|
||||
assert op(a, approx(x))
|
||||
|
||||
def test_expecting_nan(self):
|
||||
examples = [
|
||||
(nan, nan),
|
||||
(-nan, -nan),
|
||||
(nan, -nan),
|
||||
(0.0, nan),
|
||||
(inf, nan),
|
||||
]
|
||||
for a, x in examples:
|
||||
# If there is a relative tolerance and the expected value is NaN,
|
||||
# the actual tolerance is a NaN, which should be an error.
|
||||
with pytest.raises(ValueError):
|
||||
a != approx(x, rel=inf)
|
||||
|
||||
# You can make comparisons against NaN by not specifying a relative
|
||||
# tolerance, so only an absolute tolerance is calculated.
|
||||
assert a != approx(x, abs=inf)
|
||||
|
||||
def test_expecting_sequence(self):
|
||||
within_1e8 = [
|
||||
(1e8 + 1e0, 1e8),
|
||||
(1e0 + 1e-8, 1e0),
|
||||
(1e-8 + 1e-16, 1e-8),
|
||||
]
|
||||
actual, expected = zip(*within_1e8)
|
||||
assert actual == approx(expected, rel=5e-8, abs=0.0)
|
||||
|
||||
def test_expecting_sequence_wrong_len(self):
|
||||
assert [1, 2] != approx([1])
|
||||
assert [1, 2] != approx([1,2,3])
|
||||
|
||||
def test_complex(self):
|
||||
within_1e6 = [
|
||||
( 1.000001 + 1.0j, 1.0 + 1.0j),
|
||||
(1.0 + 1.000001j, 1.0 + 1.0j),
|
||||
(-1.000001 + 1.0j, -1.0 + 1.0j),
|
||||
(1.0 - 1.000001j, 1.0 - 1.0j),
|
||||
]
|
||||
for a, x in within_1e6:
|
||||
assert a == approx(x, rel=5e-6, abs=0)
|
||||
assert a != approx(x, rel=5e-7, abs=0)
|
||||
|
||||
def test_int(self):
|
||||
within_1e6 = [
|
||||
(1000001, 1000000),
|
||||
(-1000001, -1000000),
|
||||
]
|
||||
for a, x in within_1e6:
|
||||
assert a == approx(x, rel=5e-6, abs=0)
|
||||
assert a != approx(x, rel=5e-7, abs=0)
|
||||
|
||||
def test_decimal(self):
|
||||
within_1e6 = [
|
||||
(Decimal('1.000001'), Decimal('1.0')),
|
||||
(Decimal('-1.000001'), Decimal('-1.0')),
|
||||
]
|
||||
for a, x in within_1e6:
|
||||
assert a == approx(x, rel=Decimal('5e-6'), abs=0)
|
||||
assert a != approx(x, rel=Decimal('5e-7'), abs=0)
|
||||
|
||||
def test_fraction(self):
|
||||
within_1e6 = [
|
||||
(1 + Fraction(1, 1000000), Fraction(1)),
|
||||
(-1 - Fraction(-1, 1000000), Fraction(-1)),
|
||||
]
|
||||
for a, x in within_1e6:
|
||||
assert a == approx(x, rel=5e-6, abs=0)
|
||||
assert a != approx(x, rel=5e-7, abs=0)
|
||||
|
||||
def test_doctests(self):
parser = doctest.DocTestParser()
test = parser.get_doctest(
approx.__doc__,
{'approx': approx},
approx.__name__,
None, None,
)
runner = MyDocTestRunner()
runner.run(test)

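As a reading aid for the tolerance tests above, here is a minimal standalone sketch of how the rel= and abs= keywords of approx are used; it assumes only the behaviour exercised by those tests (a value is accepted when it falls within the given tolerance, and sequences are compared element-wise):

from pytest import approx

# Within the relative tolerance alone, or the absolute tolerance alone.
assert 1.0 + 1e-8 == approx(1.0, rel=5e-8)
assert 1.0 + 1e-8 == approx(1.0, abs=5e-8)

# Sequences are compared element-wise, as in test_expecting_sequence.
assert (0.1 + 0.2, 1.0 + 1e-8) == approx((0.3, 1.0), rel=5e-8)
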
@ -5,14 +5,16 @@ from textwrap import dedent
import _pytest._code
import py
import pytest
from _pytest.main import EXIT_NOTESTSCOLLECTED
from _pytest.main import (
Collector,
EXIT_NOTESTSCOLLECTED
)


class TestModule:
def test_failing_import(self, testdir):
modcol = testdir.getmodulecol("import alksdjalskdjalkjals")
pytest.raises(ImportError, modcol.collect)
pytest.raises(ImportError, modcol.collect)
pytest.raises(Collector.CollectError, modcol.collect)

def test_import_duplicate(self, testdir):
a = testdir.mkdir("a")
@ -60,6 +62,16 @@ class TestModule:
|
|||
modcol = testdir.getmodulecol("pytest_plugins='xasdlkj',")
|
||||
pytest.raises(ImportError, lambda: modcol.obj)
|
||||
|
||||
def test_invalid_test_module_name(self, testdir):
|
||||
a = testdir.mkdir('a')
|
||||
a.ensure('test_one.part1.py')
|
||||
result = testdir.runpytest("-rw")
|
||||
result.stdout.fnmatch_lines([
|
||||
"ImportError while importing test module*test_one.part1*",
|
||||
"Make sure your test modules/packages have valid Python names.",
|
||||
])
|
||||
|
||||
|
||||
class TestClass:
|
||||
def test_class_with_init_warning(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
|
@ -322,7 +334,7 @@ class TestFunction:
|
|||
reprec.assertoutcome()
|
||||
|
||||
def test_function_equality(self, testdir, tmpdir):
|
||||
from _pytest.python import FixtureManager
|
||||
from _pytest.fixtures import FixtureManager
|
||||
config = testdir.parseconfigure()
|
||||
session = testdir.Session(config)
|
||||
session._fixturemanager = FixtureManager(session)
|
||||
|
@ -783,21 +795,24 @@ class TestTracebackCutting:
|
|||
|
||||
def test_traceback_argsetup(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_funcarg__hello(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def hello(request):
|
||||
raise ValueError("xyz")
|
||||
""")
|
||||
p = testdir.makepyfile("def test(hello): pass")
|
||||
result = testdir.runpytest(p)
|
||||
assert result.ret != 0
|
||||
out = result.stdout.str()
|
||||
assert out.find("xyz") != -1
|
||||
assert out.find("conftest.py:2: ValueError") != -1
|
||||
assert "xyz" in out
|
||||
assert "conftest.py:5: ValueError" in out
|
||||
numentries = out.count("_ _ _") # separator for traceback entries
|
||||
assert numentries == 0
|
||||
|
||||
result = testdir.runpytest("--fulltrace", p)
|
||||
out = result.stdout.str()
|
||||
assert out.find("conftest.py:2: ValueError") != -1
|
||||
assert "conftest.py:5: ValueError" in out
|
||||
numentries = out.count("_ _ _ _") # separator for traceback entries
|
||||
assert numentries > 3
|
||||
|
||||
|
|
|
@ -3,35 +3,37 @@ from textwrap import dedent
|
|||
import _pytest._code
|
||||
import pytest
|
||||
import sys
|
||||
from _pytest import python as funcargs
|
||||
from _pytest.pytester import get_public_names
|
||||
from _pytest.python import FixtureLookupError
|
||||
|
||||
from _pytest.fixtures import FixtureLookupError
|
||||
from _pytest import fixtures
|
||||
|
||||
def test_getfuncargnames():
|
||||
def f(): pass
|
||||
assert not funcargs.getfuncargnames(f)
|
||||
assert not fixtures.getfuncargnames(f)
|
||||
def g(arg): pass
|
||||
assert funcargs.getfuncargnames(g) == ('arg',)
|
||||
assert fixtures.getfuncargnames(g) == ('arg',)
|
||||
def h(arg1, arg2="hello"): pass
|
||||
assert funcargs.getfuncargnames(h) == ('arg1',)
|
||||
assert fixtures.getfuncargnames(h) == ('arg1',)
|
||||
def h(arg1, arg2, arg3="hello"): pass
|
||||
assert funcargs.getfuncargnames(h) == ('arg1', 'arg2')
|
||||
assert fixtures.getfuncargnames(h) == ('arg1', 'arg2')
|
||||
class A:
|
||||
def f(self, arg1, arg2="hello"):
|
||||
pass
|
||||
assert funcargs.getfuncargnames(A().f) == ('arg1',)
|
||||
assert fixtures.getfuncargnames(A().f) == ('arg1',)
|
||||
if sys.version_info < (3,0):
|
||||
assert funcargs.getfuncargnames(A.f) == ('arg1',)
|
||||
assert fixtures.getfuncargnames(A.f) == ('arg1',)
|
||||
|
||||
class TestFillFixtures:
|
||||
def test_fillfuncargs_exposed(self):
|
||||
# used by oejskit, kept for compatibility
|
||||
assert pytest._fillfuncargs == funcargs.fillfixtures
|
||||
assert pytest._fillfuncargs == fixtures.fillfixtures
|
||||
|
||||
def test_funcarg_lookupfails(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__xyzsomething(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def xyzsomething(request):
|
||||
return 42
|
||||
|
||||
def test_func(some):
|
||||
|
@ -47,14 +49,18 @@ class TestFillFixtures:
|
|||
|
||||
def test_funcarg_basic(self, testdir):
|
||||
item = testdir.getitem("""
|
||||
def pytest_funcarg__some(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def some(request):
|
||||
return request.function.__name__
|
||||
def pytest_funcarg__other(request):
|
||||
@pytest.fixture
|
||||
def other(request):
|
||||
return 42
|
||||
def test_func(some, other):
|
||||
pass
|
||||
""")
|
||||
funcargs.fillfixtures(item)
|
||||
fixtures.fillfixtures(item)
|
||||
del item.funcargs["request"]
|
||||
assert len(get_public_names(item.funcargs)) == 2
|
||||
assert item.funcargs['some'] == "test_func"
|
||||
|
@ -62,7 +68,10 @@ class TestFillFixtures:
|
|||
|
||||
def test_funcarg_lookup_modulelevel(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__something(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
return request.function.__name__
|
||||
|
||||
class TestClass:
|
||||
|
@ -76,9 +85,13 @@ class TestFillFixtures:
|
|||
|
||||
def test_funcarg_lookup_classlevel(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
import pytest
|
||||
class TestClass:
|
||||
def pytest_funcarg__something(self, request):
|
||||
|
||||
@pytest.fixture
|
||||
def something(self, request):
|
||||
return request.instance
|
||||
|
||||
def test_method(self, something):
|
||||
assert something is self
|
||||
""")
|
||||
|
@ -92,12 +105,14 @@ class TestFillFixtures:
|
|||
sub2 = testdir.mkpydir("sub2")
|
||||
sub1.join("conftest.py").write(_pytest._code.Source("""
|
||||
import pytest
|
||||
def pytest_funcarg__arg1(request):
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
pytest.raises(Exception, "request.getfixturevalue('arg2')")
|
||||
"""))
|
||||
sub2.join("conftest.py").write(_pytest._code.Source("""
|
||||
import pytest
|
||||
def pytest_funcarg__arg2(request):
|
||||
@pytest.fixture
|
||||
def arg2(request):
|
||||
pytest.raises(Exception, "request.getfixturevalue('arg1')")
|
||||
"""))
|
||||
|
||||
|
@ -397,10 +412,13 @@ class TestFillFixtures:
|
|||
class TestRequestBasic:
|
||||
def test_request_attributes(self, testdir):
|
||||
item = testdir.getitem("""
|
||||
def pytest_funcarg__something(request): pass
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def something(request): pass
|
||||
def test_func(something): pass
|
||||
""")
|
||||
req = funcargs.FixtureRequest(item)
|
||||
req = fixtures.FixtureRequest(item)
|
||||
assert req.function == item.obj
|
||||
assert req.keywords == item.keywords
|
||||
assert hasattr(req.module, 'test_func')
|
||||
|
@ -411,8 +429,11 @@ class TestRequestBasic:
|
|||
|
||||
def test_request_attributes_method(self, testdir):
|
||||
item, = testdir.getitems("""
|
||||
import pytest
|
||||
class TestB:
|
||||
def pytest_funcarg__something(self, request):
|
||||
|
||||
@pytest.fixture
|
||||
def something(self, request):
|
||||
return 1
|
||||
def test_func(self, something):
|
||||
pass
|
||||
|
@ -421,9 +442,11 @@ class TestRequestBasic:
|
|||
assert req.cls.__name__ == "TestB"
|
||||
assert req.instance.__class__ == req.cls
|
||||
|
||||
def XXXtest_request_contains_funcarg_arg2fixturedefs(self, testdir):
|
||||
def test_request_contains_funcarg_arg2fixturedefs(self, testdir):
|
||||
modcol = testdir.getmodulecol("""
|
||||
def pytest_funcarg__something(request):
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
pass
|
||||
class TestClass:
|
||||
def test_method(self, something):
|
||||
|
@ -431,17 +454,23 @@ class TestRequestBasic:
|
|||
""")
|
||||
item1, = testdir.genitems([modcol])
|
||||
assert item1.name == "test_method"
|
||||
arg2fixturedefs = funcargs.FixtureRequest(item1)._arg2fixturedefs
|
||||
arg2fixturedefs = fixtures.FixtureRequest(item1)._arg2fixturedefs
|
||||
assert len(arg2fixturedefs) == 1
|
||||
assert arg2fixturedefs[0].__name__ == "pytest_funcarg__something"
|
||||
assert arg2fixturedefs['something'][0].argname == "something"
|
||||
|
||||
def test_getfixturevalue_recursive(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_funcarg__something(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
return 1
|
||||
""")
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__something(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
return request.getfixturevalue("something") + 1
|
||||
def test_func(something):
|
||||
assert something == 2
|
||||
|
@ -453,9 +482,12 @@ class TestRequestBasic:
|
|||
'getfixmethod', ('getfixturevalue', 'getfuncargvalue'))
|
||||
def test_getfixturevalue(self, testdir, getfixmethod):
|
||||
item = testdir.getitem("""
|
||||
import pytest
|
||||
l = [2]
|
||||
def pytest_funcarg__something(request): return 1
|
||||
def pytest_funcarg__other(request):
|
||||
@pytest.fixture
|
||||
def something(request): return 1
|
||||
@pytest.fixture
|
||||
def other(request):
|
||||
return l.pop()
|
||||
def test_func(something): pass
|
||||
""")
|
||||
|
@ -478,8 +510,10 @@ class TestRequestBasic:
|
|||
|
||||
def test_request_addfinalizer(self, testdir):
|
||||
item = testdir.getitem("""
|
||||
import pytest
|
||||
teardownlist = []
|
||||
def pytest_funcarg__something(request):
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
request.addfinalizer(lambda: teardownlist.append(1))
|
||||
def test_func(something): pass
|
||||
""")
|
||||
|
@ -504,7 +538,8 @@ class TestRequestBasic:
|
|||
result = testdir.runpytest_subprocess()
|
||||
assert result.ret != 0
|
||||
result.stdout.fnmatch_lines([
|
||||
"*AssertionError:*pytest_funcarg__marked_with_prefix_and_decorator*"
|
||||
"*AssertionError: fixtures cannot have*@pytest.fixture*",
|
||||
"*pytest_funcarg__marked_with_prefix_and_decorator*"
|
||||
])
|
||||
|
||||
def test_request_addfinalizer_failing_setup(self, testdir):
|
||||
|
@ -542,8 +577,10 @@ class TestRequestBasic:
|
|||
|
||||
def test_request_addfinalizer_partial_setup_failure(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
import pytest
|
||||
l = []
|
||||
def pytest_funcarg__something(request):
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
request.addfinalizer(lambda: l.append(None))
|
||||
def test_func(something, missingarg):
|
||||
pass
|
||||
|
@ -558,7 +595,7 @@ class TestRequestBasic:
|
|||
def test_request_getmodulepath(self, testdir):
|
||||
modcol = testdir.getmodulecol("def test_somefunc(): pass")
|
||||
item, = testdir.genitems([modcol])
|
||||
req = funcargs.FixtureRequest(item)
|
||||
req = fixtures.FixtureRequest(item)
|
||||
assert req.fspath == modcol.fspath
|
||||
|
||||
def test_request_fixturenames(self, testdir):
|
||||
|
@ -584,9 +621,11 @@ class TestRequestBasic:
|
|||
|
||||
def test_funcargnames_compatattr(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
def pytest_generate_tests(metafunc):
|
||||
assert metafunc.funcargnames == metafunc.fixturenames
|
||||
def pytest_funcarg__fn(request):
|
||||
@pytest.fixture
|
||||
def fn(request):
|
||||
assert request._pyfuncitem.funcargnames == \
|
||||
request._pyfuncitem.fixturenames
|
||||
return request.funcargnames, request.fixturenames
|
||||
|
@ -631,7 +670,9 @@ class TestRequestBasic:
|
|||
# this tests that normalization of nodeids takes place
|
||||
b = testdir.mkdir("tests").mkdir("unit")
|
||||
b.join("conftest.py").write(_pytest._code.Source("""
|
||||
def pytest_funcarg__arg1():
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1():
|
||||
pass
|
||||
"""))
|
||||
p = b.join("test_module.py")
|
||||
|
@ -679,7 +720,10 @@ class TestRequestBasic:
|
|||
class TestRequestMarking:
|
||||
def test_applymarker(self, testdir):
|
||||
item1,item2 = testdir.getitems("""
|
||||
def pytest_funcarg__something(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
pass
|
||||
class TestClass:
|
||||
def test_func1(self, something):
|
||||
|
@ -687,7 +731,7 @@ class TestRequestMarking:
|
|||
def test_func2(self, something):
|
||||
pass
|
||||
""")
|
||||
req1 = funcargs.FixtureRequest(item1)
|
||||
req1 = fixtures.FixtureRequest(item1)
|
||||
assert 'xfail' not in item1.keywords
|
||||
req1.applymarker(pytest.mark.xfail)
|
||||
assert 'xfail' in item1.keywords
|
||||
|
@ -738,7 +782,10 @@ class TestRequestCachedSetup:
|
|||
reprec = testdir.inline_runsource("""
|
||||
mysetup = ["hello",].pop
|
||||
|
||||
def pytest_funcarg__something(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
return request.cached_setup(mysetup, scope="module")
|
||||
|
||||
def test_func1(something):
|
||||
|
@ -753,7 +800,9 @@ class TestRequestCachedSetup:
|
|||
reprec = testdir.inline_runsource("""
|
||||
mysetup = ["hello", "hello2", "hello3"].pop
|
||||
|
||||
def pytest_funcarg__something(request):
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
return request.cached_setup(mysetup, scope="class")
|
||||
def test_func1(something):
|
||||
assert something == "hello3"
|
||||
|
@ -769,7 +818,7 @@ class TestRequestCachedSetup:
|
|||
|
||||
def test_request_cachedsetup_extrakey(self, testdir):
|
||||
item1 = testdir.getitem("def test_func(): pass")
|
||||
req1 = funcargs.FixtureRequest(item1)
|
||||
req1 = fixtures.FixtureRequest(item1)
|
||||
l = ["hello", "world"]
|
||||
def setup():
|
||||
return l.pop()
|
||||
|
@ -784,7 +833,7 @@ class TestRequestCachedSetup:
|
|||
|
||||
def test_request_cachedsetup_cache_deletion(self, testdir):
|
||||
item1 = testdir.getitem("def test_func(): pass")
|
||||
req1 = funcargs.FixtureRequest(item1)
|
||||
req1 = fixtures.FixtureRequest(item1)
|
||||
l = []
|
||||
def setup():
|
||||
l.append("setup")
|
||||
|
@ -803,9 +852,13 @@ class TestRequestCachedSetup:
|
|||
|
||||
def test_request_cached_setup_two_args(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__arg1(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
return request.cached_setup(lambda: 42)
|
||||
def pytest_funcarg__arg2(request):
|
||||
@pytest.fixture
|
||||
def arg2(request):
|
||||
return request.cached_setup(lambda: 17)
|
||||
def test_two_different_setups(arg1, arg2):
|
||||
assert arg1 != arg2
|
||||
|
@ -817,10 +870,14 @@ class TestRequestCachedSetup:
|
|||
|
||||
def test_request_cached_setup_getfixturevalue(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__arg1(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
arg1 = request.getfixturevalue("arg2")
|
||||
return request.cached_setup(lambda: arg1 + 1)
|
||||
def pytest_funcarg__arg2(request):
|
||||
@pytest.fixture
|
||||
def arg2(request):
|
||||
return request.cached_setup(lambda: 10)
|
||||
def test_two_funcarg(arg1):
|
||||
assert arg1 == 11
|
||||
|
@ -832,8 +889,10 @@ class TestRequestCachedSetup:
|
|||
|
||||
def test_request_cached_setup_functional(self, testdir):
|
||||
testdir.makepyfile(test_0="""
|
||||
import pytest
|
||||
l = []
|
||||
def pytest_funcarg__something(request):
|
||||
@pytest.fixture
|
||||
def something(request):
|
||||
val = request.cached_setup(fsetup, fteardown)
|
||||
return val
|
||||
def fsetup(mycache=[1]):
|
||||
|
@ -859,7 +918,10 @@ class TestRequestCachedSetup:
|
|||
|
||||
def test_issue117_sessionscopeteardown(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__app(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def app(request):
|
||||
app = request.cached_setup(
|
||||
scope='session',
|
||||
setup=lambda: 0,
|
||||
|
@ -1120,16 +1182,23 @@ class TestFixtureUsages:
|
|||
|
||||
|
||||
class TestFixtureManagerParseFactories:
|
||||
def pytest_funcarg__testdir(self, request):
|
||||
|
||||
@pytest.fixture
|
||||
def testdir(self, request):
|
||||
testdir = request.getfixturevalue("testdir")
|
||||
testdir.makeconftest("""
|
||||
def pytest_funcarg__hello(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def hello(request):
|
||||
return "conftest"
|
||||
|
||||
def pytest_funcarg__fm(request):
|
||||
@pytest.fixture
|
||||
def fm(request):
|
||||
return request._fixturemanager
|
||||
|
||||
def pytest_funcarg__item(request):
|
||||
@pytest.fixture
|
||||
def item(request):
|
||||
return request._pyfuncitem
|
||||
""")
|
||||
return testdir
|
||||
|
@ -1155,17 +1224,21 @@ class TestFixtureManagerParseFactories:
|
|||
faclist = fm.getfixturedefs(name, item.nodeid)
|
||||
assert len(faclist) == 1
|
||||
fac = faclist[0]
|
||||
assert fac.func.__name__ == "pytest_funcarg__" + name
|
||||
assert fac.func.__name__ == name
|
||||
""")
|
||||
reprec = testdir.inline_run("-s")
|
||||
reprec.assertoutcome(passed=1)
|
||||
|
||||
def test_parsefactories_conftest_and_module_and_class(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def pytest_funcarg__hello(request):
|
||||
import pytest
|
||||
|
||||
@pytest.fixture
|
||||
def hello(request):
|
||||
return "module"
|
||||
class TestClass:
|
||||
def pytest_funcarg__hello(self, request):
|
||||
@pytest.fixture
|
||||
def hello(self, request):
|
||||
return "class"
|
||||
def test_hello(self, item, fm):
|
||||
faclist = fm.getfixturedefs("hello", item.nodeid)
|
||||
|
@ -1213,7 +1286,9 @@ class TestFixtureManagerParseFactories:
|
|||
|
||||
|
||||
class TestAutouseDiscovery:
|
||||
def pytest_funcarg__testdir(self, testdir):
|
||||
|
||||
@pytest.fixture
|
||||
def testdir(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
import pytest
|
||||
@pytest.fixture(autouse=True)
|
||||
|
@ -1227,10 +1302,12 @@ class TestAutouseDiscovery:
|
|||
def perfunction2(arg1):
|
||||
pass
|
||||
|
||||
def pytest_funcarg__fm(request):
|
||||
@pytest.fixture
|
||||
def fm(request):
|
||||
return request._fixturemanager
|
||||
|
||||
def pytest_funcarg__item(request):
|
||||
@pytest.fixture
|
||||
def item(request):
|
||||
return request._pyfuncitem
|
||||
""")
|
||||
return testdir
|
||||
|
@ -1774,17 +1851,19 @@ class TestFixtureMarker:
|
|||
def test_scope_module_and_finalizer(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
import pytest
|
||||
finalized = []
|
||||
created = []
|
||||
finalized_list = []
|
||||
created_list = []
|
||||
@pytest.fixture(scope="module")
|
||||
def arg(request):
|
||||
created.append(1)
|
||||
created_list.append(1)
|
||||
assert request.scope == "module"
|
||||
request.addfinalizer(lambda: finalized.append(1))
|
||||
def pytest_funcarg__created(request):
|
||||
return len(created)
|
||||
def pytest_funcarg__finalized(request):
|
||||
return len(finalized)
|
||||
request.addfinalizer(lambda: finalized_list.append(1))
|
||||
@pytest.fixture
|
||||
def created(request):
|
||||
return len(created_list)
|
||||
@pytest.fixture
|
||||
def finalized(request):
|
||||
return len(finalized_list)
|
||||
""")
|
||||
testdir.makepyfile(
|
||||
test_mod1="""
|
||||
|
@ -2614,11 +2693,13 @@ class TestShowFixtures:
|
|||
''')
|
||||
|
||||
|
||||
@pytest.mark.parametrize('flavor', ['fixture', 'yield_fixture'])
|
||||
class TestContextManagerFixtureFuncs:
|
||||
def test_simple(self, testdir):
|
||||
|
||||
def test_simple(self, testdir, flavor):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.yield_fixture
|
||||
@pytest.{flavor}
|
||||
def arg1():
|
||||
print ("setup")
|
||||
yield 1
|
||||
|
@ -2628,7 +2709,7 @@ class TestContextManagerFixtureFuncs:
|
|||
def test_2(arg1):
|
||||
print ("test2 %s" % arg1)
|
||||
assert 0
|
||||
""")
|
||||
""".format(flavor=flavor))
|
||||
result = testdir.runpytest("-s")
|
||||
result.stdout.fnmatch_lines("""
|
||||
*setup*
|
||||
|
@ -2639,10 +2720,10 @@ class TestContextManagerFixtureFuncs:
|
|||
*teardown*
|
||||
""")
|
||||
|
||||
def test_scoped(self, testdir):
|
||||
def test_scoped(self, testdir, flavor):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.yield_fixture(scope="module")
|
||||
@pytest.{flavor}(scope="module")
|
||||
def arg1():
|
||||
print ("setup")
|
||||
yield 1
|
||||
|
@ -2651,7 +2732,7 @@ class TestContextManagerFixtureFuncs:
|
|||
print ("test1 %s" % arg1)
|
||||
def test_2(arg1):
|
||||
print ("test2 %s" % arg1)
|
||||
""")
|
||||
""".format(flavor=flavor))
|
||||
result = testdir.runpytest("-s")
|
||||
result.stdout.fnmatch_lines("""
|
||||
*setup*
|
||||
|
@ -2660,86 +2741,65 @@ class TestContextManagerFixtureFuncs:
|
|||
*teardown*
|
||||
""")
|
||||
|
||||
def test_setup_exception(self, testdir):
|
||||
def test_setup_exception(self, testdir, flavor):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.yield_fixture(scope="module")
|
||||
@pytest.{flavor}(scope="module")
|
||||
def arg1():
|
||||
pytest.fail("setup")
|
||||
yield 1
|
||||
def test_1(arg1):
|
||||
pass
|
||||
""")
|
||||
""".format(flavor=flavor))
|
||||
result = testdir.runpytest("-s")
|
||||
result.stdout.fnmatch_lines("""
|
||||
*pytest.fail*setup*
|
||||
*1 error*
|
||||
""")
|
||||
|
||||
def test_teardown_exception(self, testdir):
|
||||
def test_teardown_exception(self, testdir, flavor):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.yield_fixture(scope="module")
|
||||
@pytest.{flavor}(scope="module")
|
||||
def arg1():
|
||||
yield 1
|
||||
pytest.fail("teardown")
|
||||
def test_1(arg1):
|
||||
pass
|
||||
""")
|
||||
""".format(flavor=flavor))
|
||||
result = testdir.runpytest("-s")
|
||||
result.stdout.fnmatch_lines("""
|
||||
*pytest.fail*teardown*
|
||||
*1 passed*1 error*
|
||||
""")
|
||||
|
||||
def test_yields_more_than_one(self, testdir):
|
||||
def test_yields_more_than_one(self, testdir, flavor):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.yield_fixture(scope="module")
|
||||
@pytest.{flavor}(scope="module")
|
||||
def arg1():
|
||||
yield 1
|
||||
yield 2
|
||||
def test_1(arg1):
|
||||
pass
|
||||
""")
|
||||
""".format(flavor=flavor))
|
||||
result = testdir.runpytest("-s")
|
||||
result.stdout.fnmatch_lines("""
|
||||
*fixture function*
|
||||
*test_yields*:2*
|
||||
""")
|
||||
|
||||
|
||||
def test_no_yield(self, testdir):
|
||||
def test_custom_name(self, testdir, flavor):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.yield_fixture(scope="module")
|
||||
@pytest.{flavor}(name='meow')
|
||||
def arg1():
|
||||
return 1
|
||||
def test_1(arg1):
|
||||
pass
|
||||
""")
|
||||
return 'mew'
|
||||
def test_1(meow):
|
||||
print(meow)
|
||||
""".format(flavor=flavor))
|
||||
result = testdir.runpytest("-s")
|
||||
result.stdout.fnmatch_lines("""
|
||||
*yield_fixture*requires*yield*
|
||||
*yield_fixture*
|
||||
*def arg1*
|
||||
""")
|
||||
|
||||
def test_yield_not_allowed_in_non_yield(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
@pytest.fixture(scope="module")
|
||||
def arg1():
|
||||
yield 1
|
||||
def test_1(arg1):
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest("-s")
|
||||
result.stdout.fnmatch_lines("""
|
||||
*fixture*cannot use*yield*
|
||||
*def arg1*
|
||||
""")
|
||||
|
||||
result.stdout.fnmatch_lines("*mew*")
|
||||
|
||||
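For context on test_custom_name above, a standalone sketch of the name= keyword it exercises (the same fixture body the test writes into its temporary module):

import pytest

@pytest.fixture(name='meow')  # requested by the name 'meow', not 'arg1'
def arg1():
    return 'mew'

def test_1(meow):
    print(meow)
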
class TestParameterizedSubRequest:
|
||||
def test_call_from_fixture(self, testdir):
|
||||
|
|
|
@ -15,7 +15,9 @@ class TestOEJSKITSpecials:
|
|||
return self.fspath, 3, "xyz"
|
||||
""")
|
||||
modcol = testdir.getmodulecol("""
|
||||
def pytest_funcarg__arg1(request):
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
return 42
|
||||
class MyClass:
|
||||
pass
|
||||
|
@ -43,7 +45,8 @@ class TestOEJSKITSpecials:
|
|||
@pytest.fixture(autouse=True)
|
||||
def hello():
|
||||
pass
|
||||
def pytest_funcarg__arg1(request):
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
return 42
|
||||
class MyClass:
|
||||
pass
|
||||
|
@ -73,7 +76,7 @@ def test_wrapped_getfslineno():
|
|||
|
||||
class TestMockDecoration:
|
||||
def test_wrapped_getfuncargnames(self):
|
||||
from _pytest.python import getfuncargnames
|
||||
from _pytest.compat import getfuncargnames
|
||||
def wrap(f):
|
||||
def func():
|
||||
pass
|
||||
|
@ -86,7 +89,7 @@ class TestMockDecoration:
|
|||
assert l == ("x",)
|
||||
|
||||
def test_wrapped_getfuncargnames_patching(self):
|
||||
from _pytest.python import getfuncargnames
|
||||
from _pytest.compat import getfuncargnames
|
||||
def wrap(f):
|
||||
def func():
|
||||
pass
|
||||
|
@ -234,7 +237,7 @@ class TestReRunTests:
|
|||
""")
|
||||
|
||||
def test_pytestconfig_is_session_scoped():
|
||||
from _pytest.python import pytestconfig
|
||||
from _pytest.fixtures import pytestconfig
|
||||
assert pytestconfig._pytestfixturefunction.scope == "session"
|
||||
|
||||
|
||||
|
|
|
@ -1,10 +1,17 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
import re
|
||||
import sys
|
||||
|
||||
import _pytest._code
|
||||
import py
|
||||
import pytest
|
||||
from _pytest import python as funcargs
|
||||
from _pytest import python, fixtures
|
||||
|
||||
import hypothesis
|
||||
from hypothesis import strategies
|
||||
|
||||
PY3 = sys.version_info >= (3, 0)
|
||||
|
||||
|
||||
class TestMetafunc:
|
||||
def Metafunc(self, func):
|
||||
|
@ -15,9 +22,9 @@ class TestMetafunc:
|
|||
name2fixturedefs = None
|
||||
def __init__(self, names):
|
||||
self.names_closure = names
|
||||
names = funcargs.getfuncargnames(func)
|
||||
names = fixtures.getfuncargnames(func)
|
||||
fixtureinfo = FixtureInfo(names)
|
||||
return funcargs.Metafunc(func, fixtureinfo, None)
|
||||
return python.Metafunc(func, fixtureinfo, None)
|
||||
|
||||
def test_no_funcargs(self, testdir):
|
||||
def function(): pass
|
||||
|
@ -128,20 +135,29 @@ class TestMetafunc:
|
|||
assert metafunc._calls[2].id == "x1-a"
|
||||
assert metafunc._calls[3].id == "x1-b"
|
||||
|
||||
@pytest.mark.skipif('sys.version_info[0] >= 3')
|
||||
def test_unicode_idval_python2(self):
|
||||
"""unittest for the expected behavior to obtain ids for parametrized
|
||||
unicode values in Python 2: if convertible to ascii, they should appear
|
||||
as ascii values, otherwise fallback to hide the value behind the name
|
||||
of the parametrized variable name. #1086
|
||||
@hypothesis.given(strategies.text() | strategies.binary())
|
||||
def test_idval_hypothesis(self, value):
|
||||
from _pytest.python import _idval
|
||||
escaped = _idval(value, 'a', 6, None)
|
||||
assert isinstance(escaped, str)
|
||||
if PY3:
|
||||
escaped.encode('ascii')
|
||||
else:
|
||||
escaped.decode('ascii')
|
||||
|
||||
def test_unicode_idval(self):
|
||||
"""This tests that Unicode strings outside the ASCII character set get
|
||||
escaped, using byte escapes if they're in that range or unicode
|
||||
escapes if they're not.
|
||||
|
||||
"""
|
||||
from _pytest.python import _idval
|
||||
values = [
|
||||
(u'', ''),
|
||||
(u'ascii', 'ascii'),
|
||||
(u'ação', 'a6'),
|
||||
(u'josé@blah.com', 'a6'),
|
||||
(u'δοκ.ιμή@παράδειγμα.δοκιμή', 'a6'),
|
||||
(u'ação', 'a\\xe7\\xe3o'),
|
||||
(u'josé@blah.com', 'jos\\xe9@blah.com'),
|
||||
(u'δοκ.ιμή@παράδειγμα.δοκιμή', '\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae'),
|
||||
]
|
||||
for val, expected in values:
|
||||
assert _idval(val, 'a', 6, None) == expected
|
||||
|
@ -245,9 +261,9 @@ class TestMetafunc:
|
|||
(20, KeyError()),
|
||||
("three", [1, 2, 3]),
|
||||
], idfn=ids)
|
||||
assert result == ["0a-a",
|
||||
"1a-a",
|
||||
"2a-a",
|
||||
assert result == ["a-a0",
|
||||
"a-a1",
|
||||
"a-a2",
|
||||
]
|
||||
|
||||
@pytest.mark.issue351
|
||||
|
@ -265,6 +281,19 @@ class TestMetafunc:
|
|||
"three-b2",
|
||||
]
|
||||
|
||||
def test_idmaker_with_ids(self):
|
||||
from _pytest.python import idmaker
|
||||
result = idmaker(("a", "b"), [(1, 2),
|
||||
(3, 4)],
|
||||
ids=["a", None])
|
||||
assert result == ["a", "3-4"]
|
||||
|
||||
def test_idmaker_with_ids_unique_names(self):
|
||||
from _pytest.python import idmaker
|
||||
result = idmaker(("a"), [1,2,3,4,5],
|
||||
ids=["a", "a", "b", "c", "b"])
|
||||
assert result == ["a0", "a1", "b0", "c", "b1"]
|
||||
|
||||
def test_addcall_and_parametrize(self):
|
||||
def func(x, y): pass
|
||||
metafunc = self.Metafunc(func)
|
||||
|
@ -419,13 +448,13 @@ class TestMetafunc:
|
|||
|
||||
def test_parametrize_functional(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
def pytest_generate_tests(metafunc):
|
||||
metafunc.parametrize('x', [1,2], indirect=True)
|
||||
metafunc.parametrize('y', [2])
|
||||
def pytest_funcarg__x(request):
|
||||
@pytest.fixture
|
||||
def x(request):
|
||||
return request.param * 10
|
||||
#def pytest_funcarg__y(request):
|
||||
# return request.param
|
||||
|
||||
def test_simple(x,y):
|
||||
assert x in (10,20)
|
||||
|
@ -529,16 +558,16 @@ class TestMetafunc:
|
|||
|
||||
def test_format_args(self):
|
||||
def function1(): pass
|
||||
assert funcargs._format_args(function1) == '()'
|
||||
assert fixtures._format_args(function1) == '()'
|
||||
|
||||
def function2(arg1): pass
|
||||
assert funcargs._format_args(function2) == "(arg1)"
|
||||
assert fixtures._format_args(function2) == "(arg1)"
|
||||
|
||||
def function3(arg1, arg2="qwe"): pass
|
||||
assert funcargs._format_args(function3) == "(arg1, arg2='qwe')"
|
||||
assert fixtures._format_args(function3) == "(arg1, arg2='qwe')"
|
||||
|
||||
def function4(arg1, *args, **kwargs): pass
|
||||
assert funcargs._format_args(function4) == "(arg1, *args, **kwargs)"
|
||||
assert fixtures._format_args(function4) == "(arg1, *args, **kwargs)"
|
||||
|
||||
|
||||
class TestMetafuncFunctional:
|
||||
|
@ -549,7 +578,8 @@ class TestMetafuncFunctional:
|
|||
def pytest_generate_tests(metafunc):
|
||||
metafunc.addcall(param=metafunc)
|
||||
|
||||
def pytest_funcarg__metafunc(request):
|
||||
@pytest.fixture
|
||||
def metafunc(request):
|
||||
assert request._pyfuncitem._genid == "0"
|
||||
return request.param
|
||||
|
||||
|
@ -601,7 +631,9 @@ class TestMetafuncFunctional:
|
|||
metafunc.addcall(param=10)
|
||||
metafunc.addcall(param=20)
|
||||
|
||||
def pytest_funcarg__arg1(request):
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
return request.param
|
||||
|
||||
def test_func1(arg1):
|
||||
|
@ -640,9 +672,12 @@ class TestMetafuncFunctional:
|
|||
def pytest_generate_tests(metafunc):
|
||||
metafunc.addcall(param=(1,1), id="hello")
|
||||
|
||||
def pytest_funcarg__arg1(request):
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
return request.param[0]
|
||||
def pytest_funcarg__arg2(request):
|
||||
@pytest.fixture
|
||||
def arg2(request):
|
||||
return request.param[1]
|
||||
|
||||
class TestClass:
|
||||
|
@ -726,11 +761,14 @@ class TestMetafuncFunctional:
|
|||
metafunc.parametrize("arg1", [1], indirect=True)
|
||||
metafunc.parametrize("arg2", [10], indirect=True)
|
||||
|
||||
def pytest_funcarg__arg1(request):
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
x = request.getfixturevalue("arg2")
|
||||
return x + request.param
|
||||
|
||||
def pytest_funcarg__arg2(request):
|
||||
@pytest.fixture
|
||||
def arg2(request):
|
||||
return request.param
|
||||
|
||||
def test_func1(arg1, arg2):
|
||||
|
@ -748,10 +786,13 @@ class TestMetafuncFunctional:
|
|||
assert "arg1" in metafunc.fixturenames
|
||||
metafunc.parametrize("arg1", [1], indirect=True)
|
||||
|
||||
def pytest_funcarg__arg1(request):
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1(request):
|
||||
return request.param
|
||||
|
||||
def pytest_funcarg__arg2(request, arg1):
|
||||
@pytest.fixture
|
||||
def arg2(request, arg1):
|
||||
return 10 * arg1
|
||||
|
||||
def test_func(arg2):
|
||||
|
@ -796,6 +837,41 @@ class TestMetafuncFunctional:
|
|||
*test_function*1.3-b1*
|
||||
""")
|
||||
|
||||
def test_parametrize_with_None_in_ids(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
def pytest_generate_tests(metafunc):
|
||||
metafunc.parametrize(("a", "b"), [(1,1), (1,1), (1,2)],
|
||||
ids=["basic", None, "advanced"])
|
||||
|
||||
def test_function(a, b):
|
||||
assert a == b
|
||||
""")
|
||||
result = testdir.runpytest("-v")
|
||||
assert result.ret == 1
|
||||
result.stdout.fnmatch_lines_random([
|
||||
"*test_function*basic*PASSED",
|
||||
"*test_function*1-1*PASSED",
|
||||
"*test_function*advanced*FAILED",
|
||||
])
|
||||
|
||||
def test_parametrize_with_identical_ids_get_unique_names(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
def pytest_generate_tests(metafunc):
|
||||
metafunc.parametrize(("a", "b"), [(1,1), (1,2)],
|
||||
ids=["a", "a"])
|
||||
|
||||
def test_function(a, b):
|
||||
assert a == b
|
||||
""")
|
||||
result = testdir.runpytest("-v")
|
||||
assert result.ret == 1
|
||||
result.stdout.fnmatch_lines_random([
|
||||
"*test_function*a0*PASSED",
|
||||
"*test_function*a1*FAILED"
|
||||
])
|
||||
|
||||
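A minimal sketch of the id handling the two functional tests above verify: a None entry falls back to the auto-generated id, and duplicate ids receive numeric suffixes so every node id stays unique.

import pytest

@pytest.mark.parametrize(("a", "b"), [(1, 1), (1, 1), (1, 2)],
                         ids=["basic", None, "advanced"])
def test_function(a, b):
    # collected as test_function[basic], test_function[1-1], test_function[advanced]
    assert a == b
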
@pytest.mark.parametrize(("scope", "length"),
|
||||
[("module", 2), ("function", 4)])
|
||||
def test_parametrize_scope_overrides(self, testdir, scope, length):
|
||||
|
@ -806,7 +882,8 @@ class TestMetafuncFunctional:
|
|||
if "arg" in metafunc.funcargnames:
|
||||
metafunc.parametrize("arg", [1,2], indirect=True,
|
||||
scope=%r)
|
||||
def pytest_funcarg__arg(request):
|
||||
@pytest.fixture
|
||||
def arg(request):
|
||||
l.append(request.param)
|
||||
return request.param
|
||||
def test_hello(arg):
|
||||
|
@ -1099,3 +1176,21 @@ class TestMarkersWithParametrization:
|
|||
""")
|
||||
reprec = testdir.inline_run()
|
||||
reprec.assertoutcome(passed=2)
|
||||
|
||||
def test_pytest_make_parametrize_id(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_make_parametrize_id(config, val):
|
||||
return str(val * 2)
|
||||
""")
|
||||
testdir.makepyfile("""
|
||||
import pytest
|
||||
|
||||
@pytest.mark.parametrize("x", range(2))
|
||||
def test_func(x):
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest("-v")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*test_func*0*PASS*",
|
||||
"*test_func*2*PASS*",
|
||||
])
|
||||
|
|
|
@ -76,3 +76,23 @@ class TestRaises:
|
|||
pytest.raises(ValueError, int, '0')
|
||||
except pytest.raises.Exception as e:
|
||||
assert e.msg == "DID NOT RAISE {0}".format(repr(ValueError))
|
||||
else:
|
||||
assert False, "Expected pytest.raises.Exception"
|
||||
|
||||
try:
|
||||
with pytest.raises(ValueError):
|
||||
pass
|
||||
except pytest.raises.Exception as e:
|
||||
assert e.msg == "DID NOT RAISE {0}".format(repr(ValueError))
|
||||
else:
|
||||
assert False, "Expected pytest.raises.Exception"
|
||||
|
||||
def test_custom_raise_message(self):
|
||||
message = "TEST_MESSAGE"
|
||||
try:
|
||||
with pytest.raises(ValueError, message=message):
|
||||
pass
|
||||
except pytest.raises.Exception as e:
|
||||
assert e.msg == message
|
||||
else:
|
||||
assert False, "Expected pytest.raises.Exception"
|
||||
|
|
|
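A short usage sketch of the message= keyword covered by test_custom_raise_message above; the custom text replaces the default "DID NOT RAISE ..." failure text when the block does not raise:

import pytest

def test_custom_raise_message():
    # Passes: int('abc') raises ValueError. Had it not raised, the test would
    # fail with the message given below instead of the default text.
    with pytest.raises(ValueError, message="int() should have rejected 'abc'"):
        int('abc')
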
@ -0,0 +1,243 @@
import pytest


@pytest.fixture(params=['--setup-only', '--setup-plan', '--setup-show'],
scope='module')
def mode(request):
return request.param


def test_show_only_active_fixtures(testdir, mode):
p = testdir.makepyfile('''
import pytest
@pytest.fixture
def _arg0():
"""hidden arg0 fixture"""
@pytest.fixture
def arg1():
"""arg1 docstring"""
def test_arg1(arg1):
pass
''')

result = testdir.runpytest(mode, p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*SETUP F arg1*',
'*test_arg1 (fixtures used: arg1)*',
'*TEARDOWN F arg1*',
])
assert "_arg0" not in result.stdout.str()

def test_show_different_scopes(testdir, mode):
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg_function():
|
||||
"""function scoped fixture"""
|
||||
@pytest.fixture(scope='session')
|
||||
def arg_session():
|
||||
"""session scoped fixture"""
|
||||
def test_arg1(arg_session, arg_function):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest(mode, p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'SETUP S arg_session*',
|
||||
'*SETUP F arg_function*',
|
||||
'*test_arg1 (fixtures used: arg_function, arg_session)*',
|
||||
'*TEARDOWN F arg_function*',
|
||||
'TEARDOWN S arg_session*',
|
||||
])
|
||||
|
||||
|
||||
def test_show_nested_fixtures(testdir, mode):
|
||||
testdir.makeconftest('''
|
||||
import pytest
|
||||
@pytest.fixture(scope='session')
|
||||
def arg_same():
|
||||
"""session scoped fixture"""
|
||||
''')
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture(scope='function')
|
||||
def arg_same(arg_same):
|
||||
"""function scoped fixture"""
|
||||
def test_arg1(arg_same):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest(mode, p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'SETUP S arg_same*',
|
||||
'*SETUP F arg_same (fixtures used: arg_same)*',
|
||||
'*test_arg1 (fixtures used: arg_same)*',
|
||||
'*TEARDOWN F arg_same*',
|
||||
'TEARDOWN S arg_same*',
|
||||
])
|
||||
|
||||
|
||||
def test_show_fixtures_with_autouse(testdir, mode):
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg_function():
|
||||
"""function scoped fixture"""
|
||||
@pytest.fixture(scope='session', autouse=True)
|
||||
def arg_session():
|
||||
"""session scoped fixture"""
|
||||
def test_arg1(arg_function):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest(mode, p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'SETUP S arg_session*',
|
||||
'*SETUP F arg_function*',
|
||||
'*test_arg1 (fixtures used: arg_function, arg_session)*',
|
||||
])
|
||||
|
||||
|
||||
def test_show_fixtures_with_parameters(testdir, mode):
|
||||
testdir.makeconftest('''
|
||||
import pytest
|
||||
@pytest.fixture(scope='session', params=['foo', 'bar'])
|
||||
def arg_same():
|
||||
"""session scoped fixture"""
|
||||
''')
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture(scope='function')
|
||||
def arg_other(arg_same):
|
||||
"""function scoped fixture"""
|
||||
def test_arg1(arg_other):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest(mode, p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'SETUP S arg_same?foo?',
|
||||
'TEARDOWN S arg_same?foo?',
|
||||
'SETUP S arg_same?bar?',
|
||||
'TEARDOWN S arg_same?bar?',
|
||||
])
|
||||
|
||||
|
||||
def test_show_fixtures_with_parameter_ids(testdir, mode):
|
||||
testdir.makeconftest('''
|
||||
import pytest
|
||||
@pytest.fixture(
|
||||
scope='session', params=['foo', 'bar'], ids=['spam', 'ham'])
|
||||
def arg_same():
|
||||
"""session scoped fixture"""
|
||||
''')
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture(scope='function')
|
||||
def arg_other(arg_same):
|
||||
"""function scoped fixture"""
|
||||
def test_arg1(arg_other):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest(mode, p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'SETUP S arg_same?spam?',
|
||||
'SETUP S arg_same?ham?',
|
||||
])
|
||||
|
||||
|
||||
def test_show_fixtures_with_parameter_ids_function(testdir, mode):
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture(params=['foo', 'bar'], ids=lambda p: p.upper())
|
||||
def foobar():
|
||||
pass
|
||||
def test_foobar(foobar):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest(mode, p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'*SETUP F foobar?FOO?',
|
||||
'*SETUP F foobar?BAR?',
|
||||
])
|
||||
|
||||
|
||||
def test_dynamic_fixture_request(testdir):
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture()
|
||||
def dynamically_requested_fixture():
|
||||
pass
|
||||
@pytest.fixture()
|
||||
def dependent_fixture(request):
|
||||
request.getfuncargvalue('dynamically_requested_fixture')
|
||||
def test_dyn(dependent_fixture):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest('--setup-only', p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'*SETUP F dynamically_requested_fixture',
|
||||
'*TEARDOWN F dynamically_requested_fixture'
|
||||
])
|
||||
|
||||
|
||||
def test_capturing(testdir):
|
||||
p = testdir.makepyfile('''
|
||||
import pytest, sys
|
||||
@pytest.fixture()
|
||||
def one():
|
||||
sys.stdout.write('this should be captured')
|
||||
sys.stderr.write('this should also be captured')
|
||||
@pytest.fixture()
|
||||
def two(one):
|
||||
assert 0
|
||||
def test_capturing(two):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest('--setup-only', p)
|
||||
result.stdout.fnmatch_lines([
|
||||
'this should be captured',
|
||||
'this should also be captured'
|
||||
])
|
||||
|
||||
|
||||
def test_show_fixtures_and_execute_test(testdir):
|
||||
""" Verifies that setups are shown and tests are executed. """
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg():
|
||||
assert True
|
||||
def test_arg(arg):
|
||||
assert False
|
||||
''')
|
||||
|
||||
result = testdir.runpytest("--setup-show", p)
|
||||
assert result.ret == 1
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'*SETUP F arg*',
|
||||
'*test_arg (fixtures used: arg)F',
|
||||
'*TEARDOWN F arg*',
|
||||
])
|
|
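To summarise what the parametrized mode fixture in the new test file above drives, a hedged sketch of the three options as these tests exercise them: --setup-show runs fixtures and tests while printing SETUP/TEARDOWN lines, --setup-only sets up and tears down fixtures without running tests, and --setup-plan only prints the plan, which is why the deliberately failing fixture in the file added below still yields a zero exit status. The file name in the comment is illustrative.

# Run as:  pytest --setup-show test_demo.py   (or --setup-only / --setup-plan)
import pytest

@pytest.fixture
def arg():
    return 1

def test_arg(arg):
    assert arg == 1
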
@ -0,0 +1,19 @@
def test_show_fixtures_and_test(testdir):
""" Verifies that fixtures are not executed. """
p = testdir.makepyfile('''
import pytest
@pytest.fixture
def arg():
assert False
def test_arg(arg):
assert False
''')

result = testdir.runpytest("--setup-plan", p)
assert result.ret == 0

result.stdout.fnmatch_lines([
'*SETUP F arg*',
'*test_arg (fixtures used: arg)',
'*TEARDOWN F arg*',
])
@ -0,0 +1,137 @@
|
|||
# -*- coding: utf-8 -*-
|
||||
|
||||
|
||||
def test_no_items_should_not_show_output(testdir):
|
||||
result = testdir.runpytest('--fixtures-per-test')
|
||||
assert 'fixtures used by' not in result.stdout.str()
|
||||
assert result.ret == 0
|
||||
|
||||
|
||||
def test_fixtures_in_module(testdir):
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def _arg0():
|
||||
"""hidden arg0 fixture"""
|
||||
@pytest.fixture
|
||||
def arg1():
|
||||
"""arg1 docstring"""
|
||||
def test_arg1(arg1):
|
||||
pass
|
||||
''')
|
||||
|
||||
result = testdir.runpytest("--fixtures-per-test", p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'*fixtures used by test_arg1*',
|
||||
'*(test_fixtures_in_module.py:9)*',
|
||||
'arg1',
|
||||
' arg1 docstring',
|
||||
])
|
||||
assert "_arg0" not in result.stdout.str()
|
||||
|
||||
|
||||
def test_fixtures_in_conftest(testdir):
|
||||
testdir.makeconftest('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1():
|
||||
"""arg1 docstring"""
|
||||
@pytest.fixture
|
||||
def arg2():
|
||||
"""arg2 docstring"""
|
||||
@pytest.fixture
|
||||
def arg3(arg1, arg2):
|
||||
"""arg3
|
||||
docstring
|
||||
"""
|
||||
''')
|
||||
p = testdir.makepyfile('''
|
||||
def test_arg2(arg2):
|
||||
pass
|
||||
def test_arg3(arg3):
|
||||
pass
|
||||
''')
|
||||
result = testdir.runpytest("--fixtures-per-test", p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'*fixtures used by test_arg2*',
|
||||
'*(test_fixtures_in_conftest.py:2)*',
|
||||
'arg2',
|
||||
' arg2 docstring',
|
||||
'*fixtures used by test_arg3*',
|
||||
'*(test_fixtures_in_conftest.py:4)*',
|
||||
'arg1',
|
||||
' arg1 docstring',
|
||||
'arg2',
|
||||
' arg2 docstring',
|
||||
'arg3',
|
||||
' arg3',
|
||||
' docstring',
|
||||
])
|
||||
|
||||
|
||||
def test_should_show_fixtures_used_by_test(testdir):
|
||||
testdir.makeconftest('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1():
|
||||
"""arg1 from conftest"""
|
||||
@pytest.fixture
|
||||
def arg2():
|
||||
"""arg2 from conftest"""
|
||||
''')
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg1():
|
||||
"""arg1 from testmodule"""
|
||||
def test_args(arg1, arg2):
|
||||
pass
|
||||
''')
|
||||
result = testdir.runpytest("--fixtures-per-test", p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'*fixtures used by test_args*',
|
||||
'*(test_should_show_fixtures_used_by_test.py:6)*',
|
||||
'arg1',
|
||||
' arg1 from testmodule',
|
||||
'arg2',
|
||||
' arg2 from conftest',
|
||||
])
|
||||
|
||||
|
||||
def test_verbose_include_private_fixtures_and_loc(testdir):
|
||||
testdir.makeconftest('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def _arg1():
|
||||
"""_arg1 from conftest"""
|
||||
@pytest.fixture
|
||||
def arg2(_arg1):
|
||||
"""arg2 from conftest"""
|
||||
''')
|
||||
p = testdir.makepyfile('''
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def arg3():
|
||||
"""arg3 from testmodule"""
|
||||
def test_args(arg2, arg3):
|
||||
pass
|
||||
''')
|
||||
result = testdir.runpytest("--fixtures-per-test", "-v", p)
|
||||
assert result.ret == 0
|
||||
|
||||
result.stdout.fnmatch_lines([
|
||||
'*fixtures used by test_args*',
|
||||
'*(test_verbose_include_private_fixtures_and_loc.py:6)*',
|
||||
'_arg1 -- conftest.py:3',
|
||||
' _arg1 from conftest',
|
||||
'arg2 -- conftest.py:6',
|
||||
' arg2 from conftest',
|
||||
'arg3 -- test_verbose_include_private_fixtures_and_loc.py:3',
|
||||
' arg3 from testmodule',
|
||||
])
|
|
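A brief sketch of the --fixtures-per-test report the file above tests: for each collected test it lists the fixtures used together with their docstrings, and -v additionally shows _-prefixed private fixtures and the file:line where each fixture is defined.

# pytest --fixtures-per-test      -> "fixtures used by test_args" plus docstrings
# pytest --fixtures-per-test -v   -> also private fixtures and their locations
import pytest

@pytest.fixture
def arg1():
    """arg1 docstring"""

def test_args(arg1):
    pass
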
@ -30,17 +30,20 @@ class TestBinReprIntegration:
|
|||
|
||||
def test_pytest_assertrepr_compare_called(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
import pytest
|
||||
l = []
|
||||
def pytest_assertrepr_compare(op, left, right):
|
||||
l.append((op, left, right))
|
||||
def pytest_funcarg__l(request):
|
||||
|
||||
@pytest.fixture
|
||||
def list(request):
|
||||
return l
|
||||
""")
|
||||
testdir.makepyfile("""
|
||||
def test_hello():
|
||||
assert 0 == 1
|
||||
def test_check(l):
|
||||
assert l == [("==", 0, 1)]
|
||||
def test_check(list):
|
||||
assert list == [("==", 0, 1)]
|
||||
""")
|
||||
result = testdir.runpytest("-v")
|
||||
result.stdout.fnmatch_lines([
|
||||
|
@ -474,16 +477,8 @@ def test_assertion_options(testdir):
|
|||
""")
|
||||
result = testdir.runpytest()
|
||||
assert "3 == 4" in result.stdout.str()
|
||||
off_options = (("--no-assert",),
|
||||
("--nomagic",),
|
||||
("--no-assert", "--nomagic"),
|
||||
("--assert=plain",),
|
||||
("--assert=plain", "--no-assert"),
|
||||
("--assert=plain", "--nomagic"),
|
||||
("--assert=plain", "--no-assert", "--nomagic"))
|
||||
for opt in off_options:
|
||||
result = testdir.runpytest_subprocess(*opt)
|
||||
assert "3 == 4" not in result.stdout.str()
|
||||
result = testdir.runpytest_subprocess("--assert=plain")
|
||||
assert "3 == 4" not in result.stdout.str()
|
||||
|
||||
def test_old_assert_mode(testdir):
|
||||
testdir.makepyfile("""
|
||||
|
@ -559,7 +554,7 @@ def test_warn_missing(testdir):
|
|||
result.stderr.fnmatch_lines([
|
||||
"*WARNING*assert statements are not executed*",
|
||||
])
|
||||
result = testdir.run(sys.executable, "-OO", "-m", "pytest", "--no-assert")
|
||||
result = testdir.run(sys.executable, "-OO", "-m", "pytest")
|
||||
result.stderr.fnmatch_lines([
|
||||
"*WARNING*assert statements are not executed*",
|
||||
])
|
||||
|
@ -640,3 +635,21 @@ def test_diff_newline_at_end(monkeypatch, testdir):
|
|||
* + asdf
|
||||
* ? +
|
||||
""")
|
||||
|
||||
def test_assert_tuple_warning(testdir):
|
||||
testdir.makepyfile("""
|
||||
def test_tuple():
|
||||
assert(False, 'you shall not pass')
|
||||
""")
|
||||
result = testdir.runpytest('-rw')
|
||||
result.stdout.fnmatch_lines('WR1*:2 assertion is always true*')
|
||||
|
||||
def test_assert_indirect_tuple_no_warning(testdir):
|
||||
testdir.makepyfile("""
|
||||
def test_tuple():
|
||||
tpl = ('foo', 'bar')
|
||||
assert tpl
|
||||
""")
|
||||
result = testdir.runpytest('-rw')
|
||||
output = '\n'.join(result.stdout.lines)
|
||||
assert 'WR1' not in output
|
||||
|
|
|
@ -704,6 +704,40 @@ class TestAssertionRewriteHookDetails(object):
|
|||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines('*1 passed*')
|
||||
|
||||
@pytest.mark.parametrize('initial_conftest', [True, False])
|
||||
@pytest.mark.parametrize('mode', ['plain', 'rewrite', 'reinterp'])
|
||||
def test_conftest_assertion_rewrite(self, testdir, initial_conftest, mode):
|
||||
"""Test that conftest files are using assertion rewrite on import.
|
||||
(#1619)
|
||||
"""
|
||||
testdir.tmpdir.join('foo/tests').ensure(dir=1)
|
||||
conftest_path = 'conftest.py' if initial_conftest else 'foo/conftest.py'
|
||||
contents = {
|
||||
conftest_path: """
|
||||
import pytest
|
||||
@pytest.fixture
|
||||
def check_first():
|
||||
def check(values, value):
|
||||
assert values.pop(0) == value
|
||||
return check
|
||||
""",
|
||||
'foo/tests/test_foo.py': """
|
||||
def test(check_first):
|
||||
check_first([10, 30], 30)
|
||||
"""
|
||||
}
|
||||
testdir.makepyfile(**contents)
|
||||
result = testdir.runpytest_subprocess('--assert=%s' % mode)
|
||||
if mode == 'plain':
|
||||
expected = 'E AssertionError'
|
||||
elif mode == 'rewrite':
|
||||
expected = '*assert 10 == 30*'
|
||||
elif mode == 'reinterp':
|
||||
expected = '*AssertionError:*was re-run*'
|
||||
else:
|
||||
assert 0
|
||||
result.stdout.fnmatch_lines([expected])
|
||||
|
||||
|
||||
def test_issue731(testdir):
|
||||
testdir.makepyfile("""
|
||||
|
@ -720,3 +754,30 @@ def test_issue731(testdir):
|
|||
""")
|
||||
result = testdir.runpytest()
|
||||
assert 'unbalanced braces' not in result.stdout.str()
|
||||
|
||||
|
||||
class TestIssue925():
|
||||
def test_simple_case(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def test_ternary_display():
|
||||
assert (False == False) == False
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines('*E*assert (False == False) == False')
|
||||
|
||||
def test_long_case(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def test_ternary_display():
|
||||
assert False == (False == True) == True
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines('*E*assert (False == True) == True')
|
||||
|
||||
def test_many_brackets(self, testdir):
|
||||
testdir.makepyfile("""
|
||||
def test_ternary_display():
|
||||
assert True == ((False == True) == True)
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines('*E*assert True == ((False == True) == True)')
|
||||
|
||||
|
|
|
@ -480,6 +480,22 @@ class TestCaptureFixture:
result = testdir.runpytest_subprocess(p)
assert 'closed' not in result.stderr.str()

@pytest.mark.parametrize('fixture', ['capsys', 'capfd'])
def test_disabled_capture_fixture(self, testdir, fixture):
testdir.makepyfile("""
def test_disabled({fixture}):
print('captured before')
with {fixture}.disabled():
print('while capture is disabled')
print('captured after')
""".format(fixture=fixture))
result = testdir.runpytest_subprocess()
result.stdout.fnmatch_lines("""
*while capture is disabled*
""")
assert 'captured before' not in result.stdout.str()
assert 'captured after' not in result.stdout.str()

def test_setup_failure_does_not_kill_capturing(testdir):
sub1 = testdir.mkpydir("sub1")