diff --git a/.github/PULL_REQUEST_TEMPLATE.md b/.github/PULL_REQUEST_TEMPLATE.md
index 1191fad27..bf9fc199f 100644
--- a/.github/PULL_REQUEST_TEMPLATE.md
+++ b/.github/PULL_REQUEST_TEMPLATE.md
@@ -2,13 +2,14 @@ Thanks for submitting a PR, your contribution is really appreciated!
 Here's a quick checklist that should be present in PRs:
 
-- [ ] Target: for bug or doc fixes, target `master`; for new features, target `features`;
+- [ ] Add a new news fragment into the changelog folder
+  * name it `$issue_id.$type`, for example `588.bugfix`
+  * if you don't have an issue id, rename the fragment to the PR id after creating the PR
+  * ensure the type is one of `removal`, `feature`, `bugfix`, `vendor`, `doc` or `trivial`
+  * make sure to use full sentences with correct case and punctuation, for example: "Fix issue with non-ascii contents in doctest text files."
+- [ ] Target: for `bugfix`, `vendor`, `doc` or `trivial` fixes, target `master`; for removals or features, target `features`;
+- [ ] Make sure to include reasonable tests for your change if necessary
 
-Unless your change is trivial documentation fix (e.g., a typo or reword of a small section) please:
+Unless your change is trivial or a documentation fix (e.g., a typo or reword of a small section) please:
 
-- [ ] Make sure to include one or more tests for your change;
-- [ ] Add yourself to `AUTHORS`;
-- [ ] Add a new entry to `CHANGELOG.rst`
-  * Choose any open position to avoid merge conflicts with other PRs.
-  * Add a link to the issue you are fixing (if any) using RST syntax.
-  * The pytest team likes to have people acknowledged in the `CHANGELOG`, so please add a thank-you note to yourself ("Thanks @user for the PR") and a link to your GitHub profile. It may sound weird thanking yourself, but otherwise a maintainer would have to do it manually before or after merging instead of just using GitHub's merge button. This makes it easier on the maintainers to merge PRs.
+- [ ] Add yourself to `AUTHORS`, in alphabetical order; diff --git a/.gitignore b/.gitignore index 0e42b11ff..3b7ec9fac 100644 --- a/.gitignore +++ b/.gitignore @@ -18,6 +18,9 @@ include/ *~ .hypothesis/ +# autogenerated +_pytest/_version.py +# setuptools .eggs/ doc/*/_build diff --git a/.travis.yml b/.travis.yml index bbc03d856..b9ced8646 100644 --- a/.travis.yml +++ b/.travis.yml @@ -1,39 +1,51 @@ sudo: false language: python python: - - '3.5' + - '3.6' # command to install dependencies -install: "pip install -U tox" +install: + - pip install --upgrade --pre tox # # command to run tests env: matrix: # coveralls is not listed in tox's envlist, but should run in travis - - TESTENV=coveralls + - TOXENV=coveralls # note: please use "tox --listenvs" to populate the build matrix below - - TESTENV=linting - - TESTENV=py26 - - TESTENV=py27 - - TESTENV=py33 - - TESTENV=py34 - - TESTENV=py35 - - TESTENV=pypy - - TESTENV=py27-pexpect - - TESTENV=py27-xdist - - TESTENV=py27-trial - - TESTENV=py35-pexpect - - TESTENV=py35-xdist - - TESTENV=py35-trial - - TESTENV=py27-nobyte - - TESTENV=doctesting - - TESTENV=freeze - - TESTENV=docs + - TOXENV=linting + - TOXENV=py27 + - TOXENV=py34 + - TOXENV=py36 + - TOXENV=py27-pexpect + - TOXENV=py27-xdist + - TOXENV=py27-trial + - TOXENV=py27-numpy + - TOXENV=py36-pexpect + - TOXENV=py36-xdist + - TOXENV=py36-trial + - TOXENV=py36-numpy + - TOXENV=py27-nobyte + - TOXENV=doctesting + - TOXENV=docs matrix: + include: + - env: TOXENV=py26 + python: '2.6' + - env: TOXENV=py33 + python: '3.3' + - env: TOXENV=pypy + python: 'pypy-5.4' + - env: TOXENV=py35 + python: '3.5' + - env: TOXENV=py35-freeze + python: '3.5' + - env: TOXENV=py37 + python: 'nightly' allow_failures: - # py35-trial failing on Linux: #1989 - - env: TESTENV=py35-trial + - env: TOXENV=py37 + python: 'nightly' -script: tox --recreate -e $TESTENV +script: tox --recreate notifications: irc: diff --git a/AUTHORS b/AUTHORS index 8c7cb19ce..cc789b3b6 100644 --- a/AUTHORS +++ b/AUTHORS @@ -6,16 +6,20 @@ Contributors include:: Abdeali JK Abhijeet Kasurde Ahn Ki-Wook +Alexander Johnson Alexei Kozlenok Anatoly Bubenkoff +Andras Tim Andreas Zeidler Andrzej Ostrowski Andy Freeland Anthon van der Neut +Anthony Sottile Antony Lee Armin Rigo Aron Curzon Aviv Palivoda +Barney Gale Ben Webb Benjamin Peterson Bernard Pratz @@ -42,12 +46,16 @@ Dave Hunt David Díaz-Barquero David Mohr David Vierra +Daw-Ran Liou +Denis Kirisov Diego Russo Dmitry Dygalo +Dmitry Pribysh Duncan Betts Edison Gustavo Muenz Edoardo Batini Eduardo Schettino +Eli Boyarski Elizaveta Shashkova Endre Galaczi Eric Hunsberger @@ -64,6 +72,7 @@ Grig Gheorghiu Grigorii Eremeev (budulianin) Guido Wesdorp Harald Armin Massa +Hui Wang (coldnight) Ian Bicking Jaap Broekhuizen Jan Balster @@ -71,30 +80,42 @@ Janne Vanhala Jason R. Coombs Javier Domingo Cansino Javier Romero +Jeff Widman John Towler Jon Sonesen +Jonas Obrist Jordan Guymon +Jordan Moldow Joshua Bronson Jurko Gospodnetić Justyna Janczyszyn Kale Kundert Katarzyna Jachim Kevin Cox +Kodi B. Arfer +Lawrence Mitchell Lee Kamentsky Lev Maximov +Llandy Riveron Del Risco +Loic Esteve Lukas Bednar Luke Murphy Maciek Fijalkowski Maho +Maik Figura +Mandeep Bhutani +Manuel Krebber Marc Schlaich Marcin Bachry Mark Abramowitz Markus Unterwaditzer Martijn Faassen +Martin Altmayer Martin K. 
Scherer Martin Prusse Mathieu Clabaut Matt Bachmann +Matt Duck Matt Williams Matthias Hafner mbyt @@ -102,20 +123,28 @@ Michael Aquilina Michael Birtwell Michael Droettboom Michael Seifert +Michal Wajszczuk +Mihai Capotă Mike Lundy +Nathaniel Waisbrot Ned Batchelder Neven Mundar Nicolas Delaby Oleg Pidsadnyi Oliver Bestwalter Omar Kohl +Omer Hadari +Patrick Hayes +Paweł Adamczak Pieter Mulder Piotr Banaszkiewicz Punyashloka Biswal Quentin Pradet Ralf Schmitt +Ran Benita Raphael Pierzina Raquel Alegre +Ravi Chandra Roberto Polli Romain Dorgueil Roman Bolshakov @@ -124,7 +153,10 @@ Ross Lawley Russel Winder Ryan Wooden Samuele Pedroni +Segev Finer Simon Gomizelj +Skylar Downes +Srinivas Reddy Thatiparthy Stefan Farmbauer Stefan Zimmermann Stefano Taschini @@ -133,9 +165,16 @@ Stephan Obermann Tareq Alayan Ted Xiao Thomas Grainger +Tom Dalton Tom Viner Trevor Bekolay Tyler Goodlet Vasily Kuznetsov +Victor Uriarte +Vidar T. Fauske +Vitaly Lashmanov +Vlad Dragos Wouter van Ackooy +Xuan Luong Xuecong Liao +Zoltán Máté diff --git a/CHANGELOG.rst b/CHANGELOG.rst index 707253958..28269a311 100644 --- a/CHANGELOG.rst +++ b/CHANGELOG.rst @@ -1,16 +1,672 @@ -3.0.6.dev0 (unreleased) -======================= +.. + You should *NOT* be adding new change log entries to this file, this + file is managed by towncrier. You *may* edit previous change logs to + fix problems like typo corrections or such. + To add a new change log entry, please see + https://pip.pypa.io/en/latest/development/#adding-a-news-entry + we named the news folder changelog -* +.. towncrier release notes start -* +Pytest 3.2.3 (2017-10-03) +========================= -* +Bug Fixes +--------- -* +- Fix crash in tab completion when no prefix is given. (`#2748 + `_) -* +- The equality checking function (``__eq__``) of ``MarkDecorator`` returns + ``False`` if one object is not an instance of ``MarkDecorator``. (`#2758 + `_) +- When running ``pytest --fixtures-per-test``: don't crash if an item has no + _fixtureinfo attribute (e.g. doctests) (`#2788 + `_) + + +Improved Documentation +---------------------- + +- In help text of ``-k`` option, add example of using ``not`` to not select + certain tests whose names match the provided expression. (`#1442 + `_) + +- Add note in ``parametrize.rst`` about calling ``metafunc.parametrize`` + multiple times. (`#1548 `_) + + +Trivial/Internal Changes +------------------------ + +- Set ``xfail_strict=True`` in pytest's own test suite to catch expected + failures as soon as they start to pass. (`#2722 + `_) + +- Fix typo in example of passing a callable to markers (in example/markers.rst) + (`#2765 `_) + + +Pytest 3.2.2 (2017-09-06) +========================= + +Bug Fixes +--------- + +- Calling the deprecated `request.getfuncargvalue()` now shows the source of + the call. (`#2681 `_) + +- Allow tests declared as ``@staticmethod`` to use fixtures. (`#2699 + `_) + +- Fixed edge-case during collection: attributes which raised ``pytest.fail`` + when accessed would abort the entire collection. (`#2707 + `_) + +- Fix ``ReprFuncArgs`` with mixed unicode and UTF-8 args. 
(`#2731
+  `_)
+
+
+Improved Documentation
+----------------------
+
+- In examples on working with custom markers, add examples demonstrating the
+  usage of ``pytest.mark.MARKER_NAME.with_args`` in comparison with
+  ``pytest.mark.MARKER_NAME.__call__`` (`#2604
+  `_)
+
+- In one of the simple examples, use `pytest_collection_modifyitems()` to skip
+  tests based on a command-line option, allowing its sharing while preventing a
+  user error when accessing `pytest.config` before the argument parsing. (`#2653
+  `_)
+
+
+Trivial/Internal Changes
+------------------------
+
+- Fixed minor error in 'Good Practices/Manual Integration' code snippet.
+  (`#2691 `_)
+
+- Fixed typo in goodpractices.rst. (`#2721
+  `_)
+
+- Improve user guidance regarding ``--resultlog`` deprecation. (`#2739
+  `_)
+
+
+Pytest 3.2.1 (2017-08-08)
+=========================
+
+Bug Fixes
+---------
+
+- Fixed small terminal glitch when collecting a single test item. (`#2579
+  `_)
+
+- Correctly consider ``/`` as the file separator to automatically mark plugin
+  files for rewrite on Windows. (`#2591 `_)
+
+- Properly escape test names when setting ``PYTEST_CURRENT_TEST`` environment
+  variable. (`#2644 `_)
+
+- Fix error on Windows and Python 3.6+ when ``sys.stdout`` has been replaced
+  with a stream-like object which does not implement the full ``io`` module
+  buffer protocol. In particular this affects ``pytest-xdist`` users on the
+  aforementioned platform. (`#2666 `_)
+
+
+Improved Documentation
+----------------------
+
+- Explicitly document which pytest features work with ``unittest``. (`#2626
+  `_)
+
+
+Pytest 3.2.0 (2017-07-30)
+=========================
+
+Deprecations and Removals
+-------------------------
+
+- ``pytest.approx`` no longer supports ``>``, ``>=``, ``<`` and ``<=``
+  operators to avoid surprising/inconsistent behavior. See `the docs
+  `_ for more
+  information. (`#2003 `_)
+
+- All old-style specific behavior in current classes in pytest's API is
+  considered deprecated at this point and will be removed in a future release.
+  This affects Python 2 users only, and only in rare situations. (`#2147
+  `_)
+
+- A deprecation warning is now raised when using marks for parameters
+  in ``pytest.mark.parametrize``. Use ``pytest.param`` to apply marks to
+  parameters instead. (`#2427 `_)
+
+
+Features
+--------
+
+- Add support for numpy arrays (and dicts) to approx. (`#1994
+  `_)
+
+- Now test function objects have a ``pytestmark`` attribute containing a list
+  of marks applied directly to the test function, as opposed to marks inherited
+  from parent classes or modules. (`#2516 `_)
+
+- Collection ignores local virtualenvs by default; `--collect-in-virtualenv`
+  overrides this behavior. (`#2518 `_)
+
+- Allow class methods decorated as ``@staticmethod`` to be candidates for
+  collection as a test function. (Only for Python 2.7 and above. Python 2.6
+  will still ignore static methods.) (`#2528 `_)
+
+- Introduce ``mark.with_args`` in order to allow passing functions/classes as
+  sole argument to marks. (`#2540 `_)
+
+- New ``cache_dir`` ini option: sets the directory where the contents of the
+  cache plugin are stored. The directory may be a relative or absolute path: a relative path is
+  created relative to ``rootdir``, otherwise it is used as is.
+  Additionally the path may contain environment variables, which are expanded during
+  runtime.
(`#2543 `_)
+
+- Introduce the ``PYTEST_CURRENT_TEST`` environment variable that is set with
+  the ``nodeid`` and stage (``setup``, ``call`` and ``teardown``) of the test
+  being currently executed. See the `documentation
+  `_ for more info. (`#2583 `_)
+
+- Introduced ``@pytest.mark.filterwarnings`` mark which allows overwriting the
+  warnings filter on a per-test, per-class or per-module level. See the `docs
+  `_ for more information. (`#2598 `_)
+
+- ``--last-failed`` now remembers forever when a test has failed and only
+  forgets it if it passes again. This makes it easy to fix a test suite by
+  selectively running files and fixing tests incrementally. (`#2621
+  `_)
+
+- New ``pytest_report_collectionfinish`` hook which allows plugins to add
+  messages to the terminal reporting after collection has been finished
+  successfully. (`#2622 `_)
+
+- Added support for `PEP-415's `_
+  ``Exception.__suppress_context__``. Now if a ``raise exception from None`` is
+  caught by pytest, pytest will no longer chain the context in the test report.
+  The behavior now matches Python's traceback behavior. (`#2631
+  `_)
+
+- Exceptions raised by ``pytest.fail``, ``pytest.skip`` and ``pytest.xfail``
+  now subclass BaseException, making them harder for normal code to catch
+  unintentionally. (`#580 `_)
+
+
+Bug Fixes
+---------
+
+- Set ``stdin`` to a closed ``PIPE`` in ``pytester.py.Testdir.popen()`` to
+  avoid unwanted interactive ``pdb`` (`#2023 `_)
+
+- Add missing ``encoding`` attribute to ``sys.std*`` streams when using
+  ``capsys`` capture mode. (`#2375 `_)
+
+- Fix terminal color changing to black on Windows if ``colorama`` is imported
+  in a ``conftest.py`` file. (`#2510 `_)
+
+- Fix line number when reporting summary of skipped tests. (`#2548
+  `_)
+
+- capture: ensure that EncodedFile.name is a string. (`#2555
+  `_)
+
+- The options ``--fixtures`` and ``--fixtures-per-test`` will now keep
+  indentation within docstrings. (`#2574 `_)
+
+- doctests line numbers are now reported correctly, fixing `pytest-sugar#122
+  `_. (`#2610
+  `_)
+
+- Fix non-determinism in order of fixture collection. Adds new dependency
+  (ordereddict) for Python 2.6. (`#920 `_)
+
+
+Improved Documentation
+----------------------
+
+- Clarify ``pytest_configure`` hook call order. (`#2539
+  `_)
+
+- Extend documentation for testing plugin code with the ``pytester`` plugin.
+  (`#971 `_)
+
+
+Trivial/Internal Changes
+------------------------
+
+- Update help message for ``--strict`` to make it clear it only deals with
+  unregistered markers, not warnings. (`#2444 `_)
+
+- Internal code move: move code for pytest.approx/pytest.raises to own files in
+  order to cut down the size of python.py (`#2489 `_)
+
+- Renamed the utility function ``_pytest.compat._escape_strings`` to
+  ``_ascii_escaped`` to better communicate the function's purpose. (`#2533
+  `_)
+
+- Improve error message for CollectError with skip/skipif. (`#2546
+  `_)
+
+- Emit warning about ``yield`` tests being deprecated only once per generator.
+  (`#2562 `_)
+
+- Ensure final collected line doesn't include artifacts of previous write.
+  (`#2571 `_)
+
+- Fixed all flake8 errors and warnings. (`#2581 `_)
+
+- Added ``fix-lint`` tox environment to run automatic pep8 fixes on the code.
+  (`#2582 `_)
+
+- Turn warnings into errors in pytest's own test suite in order to catch
+  regressions due to deprecations more promptly. (`#2588
+  `_)
+
+- Show multiple issue links in CHANGELOG entries. (`#2620
+  `_)
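The two 3.2.0 features from the list above, the ``@pytest.mark.filterwarnings`` mark and the ``PYTEST_CURRENT_TEST`` environment variable, can be seen together in a minimal sketch, added here purely for illustration (it assumes pytest >= 3.2 and the test functions are invented):

.. code-block:: python

    import os
    import warnings

    import pytest

    @pytest.mark.filterwarnings("ignore::DeprecationWarning")
    def test_calls_noisy_api():
        # Silenced for this test only by the mark above;
        # other tests keep the default warnings filter.
        warnings.warn("old API", DeprecationWarning)

    def test_current_test_env():
        # PYTEST_CURRENT_TEST holds the node id plus the current stage,
        # e.g. "test_module.py::test_current_test_env (call)".
        assert "(call)" in os.environ["PYTEST_CURRENT_TEST"]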
+
+
+Pytest 3.1.3 (2017-07-03)
+=========================
+
+Bug Fixes
+---------
+
+- Fix decode error in Python 2 for doctests in docstrings. (`#2434
+  `_)
+
+- Exceptions raised during teardown by finalizers are now suppressed until all
+  finalizers are called, with the initial exception reraised. (`#2440
+  `_)
+
+- Fix incorrect "collected items" report when specifying tests on the
+  command-line. (`#2464 `_)
+
+- ``deprecated_call`` in context-manager form now captures deprecation warnings
+  even if the same warning has already been raised. Also, ``deprecated_call``
+  will always produce the same error message (previously it would produce
+  different messages in context-manager vs. function-call mode). (`#2469
+  `_)
+
+- Fix issue where paths collected by pytest could have triple leading ``/``
+  characters. (`#2475 `_)
+
+- Fix internal error when trying to detect the start of a recursive traceback.
+  (`#2486 `_)
+
+
+Improved Documentation
+----------------------
+
+- Explicitly state for which hooks the calls stop after the first non-None
+  result. (`#2493 `_)
+
+
+Trivial/Internal Changes
+------------------------
+
+- Create invoke tasks for updating the vendored packages. (`#2474
+  `_)
+
+- Update copyright dates in LICENSE, README.rst and in the documentation.
+  (`#2499 `_)
+
+
+Pytest 3.1.2 (2017-06-08)
+=========================
+
+Bug Fixes
+---------
+
+- Required options added via ``pytest_addoption`` will no longer prevent using
+  --help without passing them. (#1999)
+
+- Respect ``python_files`` in assertion rewriting. (#2121)
+
+- Fix recursion error detection when frames in the traceback contain objects
+  that can't be compared (like ``numpy`` arrays). (#2459)
+
+- ``UnicodeWarning`` is issued from the internal pytest warnings plugin only
+  when the message contains non-ascii unicode (Python 2 only). (#2463)
+
+- Added a workaround for Python 3.6 ``WindowsConsoleIO`` breaking due to pytest's
+  ``FDCapture``. Other code using console handles might still be affected by the
+  very same issue and might require further workarounds/fixes, e.g. ``colorama``.
+  (#2467)
+
+
+Improved Documentation
+----------------------
+
+- Fix internal API links to ``pluggy`` objects. (#2331)
+
+- Make it clear that ``pytest.xfail`` stops test execution at the calling point
+  and improve overall flow of the ``skipping`` docs. (#810)
+
+
+Pytest 3.1.1 (2017-05-30)
+=========================
+
+Bug Fixes
+---------
+
+- pytest warning capture no longer overrides existing warning filters. The
+  previous behaviour would override all filters and caused regressions in test
+  suites which configure warning filters to match their needs. Note that a
+  side-effect of this is that ``DeprecationWarning`` and
+  ``PendingDeprecationWarning`` are no longer shown by default. (#2430)
+
+- Fix issue with non-ascii contents in doctest text files. (#2434)
+
+- Fix encoding errors for unicode warnings in Python 2. (#2436)
+
+- ``pytest.deprecated_call`` now captures ``PendingDeprecationWarning`` in
+  context manager form. (#2441)
+
+
+Improved Documentation
+----------------------
+
+- Addition of towncrier for changelog management. (#2390)
+
+
+3.1.0 (2017-05-22)
+==================
+
+
+New Features
+------------
+
+* The ``pytest-warnings`` plugin has been integrated into the core and now ``pytest`` automatically
+  captures and displays warnings at the end of the test session.
+
+  .. warning::
+
+     This feature may disrupt test suites which apply and treat warnings themselves, and can be
+     disabled in your ``pytest.ini``:
+
+     .. code-block:: ini
+
+         [pytest]
+         addopts = -p no:warnings
+
+  See the `warnings documentation page `_ for more
+  information.
+
+  Thanks `@nicoddemus`_ for the PR.
+
+* Added ``junit_suite_name`` ini option to specify root ```` name for JUnit XML reports (`#533`_).
+
+* Added an ini option ``doctest_encoding`` to specify which encoding to use for doctest files.
+  Thanks `@wheerd`_ for the PR (`#2101`_).
+
+* ``pytest.warns`` now checks for subclass relationship rather than
+  class equality. Thanks `@lesteve`_ for the PR (`#2166`_)
+
+* ``pytest.raises`` now asserts that the error message matches a text or regex
+  with the ``match`` keyword argument. Thanks `@Kriechi`_ for the PR.
+
+* ``pytest.param`` can be used to declare test parameter sets with marks and test ids.
+  Thanks `@RonnyPfannschmidt`_ for the PR.
+
+
+Changes
+-------
+
+* Remove all internal uses of pytest_namespace hooks;
+  this is to prepare the removal of preloadconfig in pytest 4.0.
+  Thanks to `@RonnyPfannschmidt`_ for the PR.
+
+* pytest now warns when a callable passed to ``ids`` raises in a parametrized test. Thanks `@fogo`_ for the PR.
+
+* It is now possible to skip test classes from being collected by setting a
+  ``__test__`` attribute to ``False`` in the class body (`#2007`_). Thanks
+  to `@syre`_ for the report and `@lwm`_ for the PR.
+
+* Change junitxml.py to produce reports that comply with the JUnitXML schema.
+  If the same test fails with a failure in call and then errors in teardown
+  we split the testcase element into two, one containing the error and the other
+  the failure. (`#2228`_) Thanks to `@kkoukiou`_ for the PR.
+
+* Testcase reports with a ``url`` attribute will now properly write this to junitxml.
+  Thanks `@fushi`_ for the PR (`#1874`_).
+
+* Remove common items from dict comparison output when verbosity=1. Also update
+  the truncation message to make it clearer that pytest truncates all
+  assertion messages if verbosity < 2 (`#1512`_).
+  Thanks `@mattduck`_ for the PR.
+
+* ``--pdbcls`` no longer implies ``--pdb``. This makes it possible to use
+  ``addopts=--pdbcls=module.SomeClass`` on ``pytest.ini``. Thanks `@davidszotten`_ for
+  the PR (`#1952`_).
+
+* fix `#2013`_: turn RecordedWarning into ``namedtuple``,
+  to give it a comprehensible repr while preventing unwarranted modification.
+
+* fix `#2208`_: ensure an iteration limit for _pytest.compat.get_real_func.
+  Thanks `@RonnyPfannschmidt`_ for the report and PR.
+
+* Hooks are now verified after collection is complete, rather than right after loading installed plugins. This
+  makes it easy to write hooks for plugins which will be loaded during collection, for example using the
+  ``pytest_plugins`` special variable (`#1821`_).
+  Thanks `@nicoddemus`_ for the PR.
+
+* Modify ``pytest_make_parametrize_id()`` hook to accept ``argname`` as an
+  additional parameter.
+  Thanks `@unsignedint`_ for the PR.
+
+* Add ``venv`` to the default ``norecursedirs`` setting.
+  Thanks `@The-Compiler`_ for the PR.
+
+* ``PluginManager.import_plugin`` now accepts unicode plugin names in Python 2.
+  Thanks `@reutsharabani`_ for the PR.
+
+* fix `#2308`_: When using both ``--lf`` and ``--ff``, only the last failed tests are run.
+  Thanks `@ojii`_ for the PR.
+
+* Replace minor/patch level version numbers in the documentation with placeholders.
+  This significantly reduces change-noise as different contributors regenerate
+  the documentation on different platforms.
+  Thanks `@RonnyPfannschmidt`_ for the PR.
+
+* fix `#2391`_: consider pytest_plugins on all plugin modules.
+  Thanks `@RonnyPfannschmidt`_ for the PR.
+
+
+Bug Fixes
+---------
+
+* Fix ``AttributeError`` on ``sys.stdout.buffer`` / ``sys.stderr.buffer``
+  while using ``capsys`` fixture in python 3. (`#1407`_).
+  Thanks to `@asottile`_.
+
+* Change capture.py's ``DontReadFromInput`` class to throw ``io.UnsupportedOperation`` errors rather
+  than ValueErrors in the ``fileno`` method (`#2276`_).
+  Thanks `@metasyn`_ and `@vlad-dragos`_ for the PR.
+
+* Fix exception formatting while importing modules when the exception message
+  contains non-ascii characters (`#2336`_).
+  Thanks `@fabioz`_ for the report and `@nicoddemus`_ for the PR.
+
+* Added documentation related to issue (`#1937`_).
+  Thanks `@skylarjhdownes`_ for the PR.
+
+* Allow collecting files with any file extension as Python modules (`#2369`_).
+  Thanks `@Kodiologist`_ for the PR.
+
+* Show the correct error message when collecting a "parametrize" func with wrong args (`#2383`_).
+  Thanks `@The-Compiler`_ for the report and `@robin0371`_ for the PR.
+
+
+.. _@davidszotten: https://github.com/davidszotten
+.. _@fabioz: https://github.com/fabioz
+.. _@fogo: https://github.com/fogo
+.. _@fushi: https://github.com/fushi
+.. _@Kodiologist: https://github.com/Kodiologist
+.. _@Kriechi: https://github.com/Kriechi
+.. _@mandeep: https://github.com/mandeep
+.. _@mattduck: https://github.com/mattduck
+.. _@metasyn: https://github.com/metasyn
+.. _@MichalTHEDUDE: https://github.com/MichalTHEDUDE
+.. _@ojii: https://github.com/ojii
+.. _@reutsharabani: https://github.com/reutsharabani
+.. _@robin0371: https://github.com/robin0371
+.. _@skylarjhdownes: https://github.com/skylarjhdownes
+.. _@unsignedint: https://github.com/unsignedint
+.. _@wheerd: https://github.com/wheerd
+
+
+.. _#1407: https://github.com/pytest-dev/pytest/issues/1407
+.. _#1512: https://github.com/pytest-dev/pytest/issues/1512
+.. _#1821: https://github.com/pytest-dev/pytest/issues/1821
+.. _#1874: https://github.com/pytest-dev/pytest/pull/1874
+.. _#1937: https://github.com/pytest-dev/pytest/issues/1937
+.. _#1952: https://github.com/pytest-dev/pytest/pull/1952
+.. _#2007: https://github.com/pytest-dev/pytest/issues/2007
+.. _#2013: https://github.com/pytest-dev/pytest/issues/2013
+.. _#2101: https://github.com/pytest-dev/pytest/pull/2101
+.. _#2166: https://github.com/pytest-dev/pytest/pull/2166
+.. _#2208: https://github.com/pytest-dev/pytest/issues/2208
+.. _#2228: https://github.com/pytest-dev/pytest/issues/2228
+.. _#2276: https://github.com/pytest-dev/pytest/issues/2276
+.. _#2308: https://github.com/pytest-dev/pytest/issues/2308
+.. _#2336: https://github.com/pytest-dev/pytest/issues/2336
+.. _#2369: https://github.com/pytest-dev/pytest/issues/2369
+.. _#2383: https://github.com/pytest-dev/pytest/issues/2383
+.. _#2391: https://github.com/pytest-dev/pytest/issues/2391
+.. _#533: https://github.com/pytest-dev/pytest/issues/533
+
+
+
+3.0.7 (2017-03-14)
+==================
+
+
+* Fix issue in assertion rewriting breaking due to modules silently discarding
+  other modules when importing fails.
+  Notably, importing the ``anydbm`` module is fixed. (`#2248`_).
+  Thanks `@pfhayes`_ for the PR.
+
+* junitxml: Fix problematic case where system-out tag occurred twice per testcase
+  element in the XML report. Thanks `@kkoukiou`_ for the PR.
+
+* Fix regression: pytest now skips unittest correctly if run with ``--pdb``
+  (`#2137`_). Thanks to `@gst`_ for the report and `@mbyt`_ for the PR.
+
+* Ignore exceptions raised from descriptors (e.g. properties) during Python test collection (`#2234`_).
+  Thanks to `@bluetech`_.
+
+* ``--override-ini`` now correctly overrides some fundamental options like ``python_files`` (`#2238`_).
+  Thanks `@sirex`_ for the report and `@nicoddemus`_ for the PR.
+
+* Replace ``raise StopIteration`` usages in the code with simple ``returns`` to finish generators, in accordance with `PEP-479`_ (`#2160`_).
+  Thanks `@tgoodlet`_ for the report and `@nicoddemus`_ for the PR.
+
+* Fix internal errors when an unprintable ``AssertionError`` is raised inside a test.
+  Thanks `@omerhadari`_ for the PR.
+
+* Skipping plugin now also works with test items generated by custom collectors (`#2231`_).
+  Thanks to `@vidartf`_.
+
+* Fix trailing whitespace in console output if no .ini file is present (`#2281`_). Thanks `@fbjorn`_ for the PR.
+
+* Conditionless ``xfail`` markers no longer rely on the underlying test item
+  being an instance of ``PyobjMixin``, and can therefore apply to tests not
+  collected by the built-in python test collector. Thanks `@barneygale`_ for the
+  PR.
+
+
+.. _@pfhayes: https://github.com/pfhayes
+.. _@bluetech: https://github.com/bluetech
+.. _@gst: https://github.com/gst
+.. _@sirex: https://github.com/sirex
+.. _@vidartf: https://github.com/vidartf
+.. _@kkoukiou: https://github.com/KKoukiou
+.. _@omerhadari: https://github.com/omerhadari
+.. _@fbjorn: https://github.com/fbjorn
+
+.. _#2248: https://github.com/pytest-dev/pytest/issues/2248
+.. _#2137: https://github.com/pytest-dev/pytest/issues/2137
+.. _#2160: https://github.com/pytest-dev/pytest/issues/2160
+.. _#2231: https://github.com/pytest-dev/pytest/issues/2231
+.. _#2234: https://github.com/pytest-dev/pytest/issues/2234
+.. _#2238: https://github.com/pytest-dev/pytest/issues/2238
+.. _#2281: https://github.com/pytest-dev/pytest/issues/2281
+
+.. _PEP-479: https://www.python.org/dev/peps/pep-0479/
+
+
+3.0.6 (2017-01-22)
+==================
+
+* pytest no longer generates ``PendingDeprecationWarning`` from its own operations, which was introduced by mistake in version ``3.0.5`` (`#2118`_).
+  Thanks to `@nicoddemus`_ for the report and `@RonnyPfannschmidt`_ for the PR.
+
+
+* pytest no longer recognizes coroutine functions as yield tests (`#2129`_).
+  Thanks to `@malinoff`_ for the PR.
+
+* Plugins loaded by the ``PYTEST_PLUGINS`` environment variable are now automatically
+  considered for assertion rewriting (`#2185`_).
+  Thanks `@nicoddemus`_ for the PR.
+
+* Improve error message when pytest.warns fails (`#2150`_). The type(s) of the
+  expected warnings and the list of caught warnings are added to the
+  error message. Thanks `@lesteve`_ for the PR.
+
+* Fix ``pytester`` internal plugin to work correctly with latest versions of
+  ``zope.interface`` (`#1989`_). Thanks `@nicoddemus`_ for the PR.
+
+* Assert statements of the ``pytester`` plugin again benefit from assertion rewriting (`#1920`_).
+  Thanks `@RonnyPfannschmidt`_ for the report and `@nicoddemus`_ for the PR.
+
+* Specifying tests with colons like ``test_foo.py::test_bar`` for tests in
+  subdirectories with ini configuration files now uses the correct ini file
+  (`#2148`_). Thanks `@pelme`_.
+
+* Fail ``testdir.runpytest().assert_outcomes()`` explicitly if the pytest
+  terminal output it relies on is missing. Thanks to `@eli-b`_ for the PR.
+
+
..
_@barneygale: https://github.com/barneygale +.. _@lesteve: https://github.com/lesteve +.. _@malinoff: https://github.com/malinoff +.. _@pelme: https://github.com/pelme +.. _@eli-b: https://github.com/eli-b + +.. _#2118: https://github.com/pytest-dev/pytest/issues/2118 + +.. _#1989: https://github.com/pytest-dev/pytest/issues/1989 +.. _#1920: https://github.com/pytest-dev/pytest/issues/1920 +.. _#2129: https://github.com/pytest-dev/pytest/issues/2129 +.. _#2148: https://github.com/pytest-dev/pytest/issues/2148 +.. _#2150: https://github.com/pytest-dev/pytest/issues/2150 +.. _#2185: https://github.com/pytest-dev/pytest/issues/2185 3.0.5 (2016-12-05) @@ -58,6 +714,7 @@ * Cope gracefully with a .pyc file with no matching .py file (`#2038`_). Thanks `@nedbat`_. +.. _@syre: https://github.com/syre .. _@adler-j: https://github.com/adler-j .. _@d-b-w: https://bitbucket.org/d-b-w/ .. _@DuncanBetts: https://github.com/DuncanBetts @@ -165,6 +822,7 @@ .. _@raquel-ucl: https://github.com/raquel-ucl .. _@axil: https://github.com/axil .. _@tgoodlet: https://github.com/tgoodlet +.. _@vlad-dragos: https://github.com/vlad-dragos .. _#1853: https://github.com/pytest-dev/pytest/issues/1853 .. _#1905: https://github.com/pytest-dev/pytest/issues/1905 @@ -174,6 +832,7 @@ + 3.0.2 (2016-09-01) ================== @@ -678,7 +1337,7 @@ time or change existing behaviors in order to make them less surprising/more use Thanks `@astraw38`_ for reporting the issue (`#1496`_) and `@tomviner`_ for PR the (`#1524`_). -* Fix win32 path issue when puttinging custom config file with absolute path +* Fix win32 path issue when putting custom config file with absolute path in ``pytest.main("-c your_absolute_path")``. * Fix maximum recursion depth detection when raised error class is not aware @@ -1010,7 +1669,7 @@ time or change existing behaviors in order to make them less surprising/more use - (experimental) adapt more SEMVER style versioning and change meaning of master branch in git repo: "master" branch now keeps the bugfixes, changes - aimed for micro releases. "features" branch will only be be released + aimed for micro releases. "features" branch will only be released with minor or major pytest releases. - Fix issue #766 by removing documentation references to distutils. @@ -1144,7 +1803,7 @@ time or change existing behaviors in order to make them less surprising/more use - new option ``--import-mode`` to allow to change test module importing behaviour to append to sys.path instead of prepending. This better allows - to run test modules against installated versions of a package even if the + to run test modules against installed versions of a package even if the package under test has the same import root. In this example:: testing/__init__.py @@ -1302,7 +1961,7 @@ time or change existing behaviors in order to make them less surprising/more use explanations. Thanks Carl Meyer for the report and test case. - fix issue553: properly handling inspect.getsourcelines failures in - FixtureLookupError which would lead to to an internal error, + FixtureLookupError which would lead to an internal error, obfuscating the original problem. Thanks talljosh for initial diagnose/patch and Bruno Oliveira for final patch. @@ -1445,7 +2104,7 @@ time or change existing behaviors in order to make them less surprising/more use - fix conftest related fixture visibility issue: when running with a CWD outside of a test package pytest would get fixture discovery wrong. - Thanks to Wolfgang Schnerring for figuring out a reproducable example. 
+ Thanks to Wolfgang Schnerring for figuring out a reproducible example. - Introduce pytest_enter_pdb hook (needed e.g. by pytest_timeout to cancel the timeout when interactively entering pdb). Thanks Wolfgang Schnerring. @@ -1644,7 +2303,7 @@ time or change existing behaviors in order to make them less surprising/more use - fix issue429: comparing byte strings with non-ascii chars in assert expressions now work better. Thanks Floris Bruynooghe. -- make capfd/capsys.capture private, its unused and shouldnt be exposed +- make capfd/capsys.capture private, its unused and shouldn't be exposed 2.5.1 (2013-12-17) @@ -1701,7 +2360,7 @@ time or change existing behaviors in order to make them less surprising/more use to problems for more than >966 non-function scoped parameters). - fix issue290 - there is preliminary support now for parametrizing - with repeated same values (sometimes useful to to test if calling + with repeated same values (sometimes useful to test if calling a second time works as with the first time). - close issue240 - document precisely how pytest module importing @@ -1915,7 +2574,7 @@ new features: - fix issue322: tearDownClass is not run if setUpClass failed. Thanks Mathieu Agopian for the initial fix. Also make all of pytest/nose - finalizer mimick the same generic behaviour: if a setupX exists and + finalizer mimic the same generic behaviour: if a setupX exists and fails, don't run teardownX. This internally introduces a new method "node.addfinalizer()" helper which can only be called during the setup phase of a node. @@ -2034,11 +2693,11 @@ Bug fixes: (thanks Adam Goucher) - Issue 265 - integrate nose setup/teardown with setupstate - so it doesnt try to teardown if it did not setup + so it doesn't try to teardown if it did not setup -- issue 271 - dont write junitxml on slave nodes +- issue 271 - don't write junitxml on slave nodes -- Issue 274 - dont try to show full doctest example +- Issue 274 - don't try to show full doctest example when doctest does not know the example location - issue 280 - disable assertion rewriting on buggy CPython 2.6.0 @@ -2074,7 +2733,7 @@ Bug fixes: - allow to specify prefixes starting with "_" when customizing python_functions test discovery. (thanks Graham Horler) -- improve PYTEST_DEBUG tracing output by puting +- improve PYTEST_DEBUG tracing output by putting extra data on a new lines with additional indent - ensure OutcomeExceptions like skip/fail have initialized exception attributes @@ -2123,7 +2782,7 @@ Bug fixes: - fix issue209 - reintroduce python2.4 support by depending on newer pylib which re-introduced statement-finding for pre-AST interpreters -- nose support: only call setup if its a callable, thanks Andrew +- nose support: only call setup if it's a callable, thanks Andrew Taumoefolau - fix issue219 - add py2.4-3.3 classifiers to TROVE list @@ -2219,7 +2878,7 @@ Bug fixes: - fix issue128: show captured output when capsys/capfd are used -- fix issue179: propperly show the dependency chain of factories +- fix issue179: properly show the dependency chain of factories - pluginmanager.register(...) 
now raises ValueError if the plugin has been already registered or the name is taken @@ -2260,7 +2919,7 @@ Bug fixes: - don't show deselected reason line if there is none - - py.test -vv will show all of assert comparisations instead of truncating + - py.test -vv will show all of assert comparisons instead of truncating 2.2.4 (2012-05-22) ================== @@ -2271,7 +2930,7 @@ Bug fixes: - fix issue with unittest: now @unittest.expectedFailure markers should be processed correctly (you can also use @pytest.mark markers) - document integration with the extended distribute/setuptools test commands -- fix issue 140: propperly get the real functions +- fix issue 140: properly get the real functions of bound classmethods for setup/teardown_class - fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net - fix issue #143: call unconfigure/sessionfinish always when @@ -2282,7 +2941,7 @@ Bug fixes: 2.2.3 (2012-02-05) ================== -- fix uploaded package to only include neccesary files +- fix uploaded package to only include necessary files 2.2.2 (2012-02-05) ================== @@ -2328,7 +2987,7 @@ Bug fixes: teardown function are called earlier. - add an all-powerful metafunc.parametrize function which allows to parametrize test function arguments in multiple steps and therefore - from indepdenent plugins and palces. + from independent plugins and places. - add a @pytest.mark.parametrize helper which allows to easily call a test function with different argument values - Add examples to the "parametrize" example page, including a quick port @@ -2423,7 +3082,7 @@ Bug fixes: - don't require zlib (and other libs) for genscript plugin without --genscript actually being used. -- speed up skips (by not doing a full traceback represenation +- speed up skips (by not doing a full traceback representation internally) - fix issue37: avoid invalid characters in junitxml's output @@ -2471,9 +3130,9 @@ Bug fixes: this. - fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular - thanks to Laura Creighton who also revieved parts of the documentation. + thanks to Laura Creighton who also reviewed parts of the documentation. -- fix slighly wrong output of verbose progress reporting for classes +- fix slightly wrong output of verbose progress reporting for classes (thanks Amaury) - more precise (avoiding of) deprecation warnings for node.Class|Function accesses @@ -2534,7 +3193,7 @@ Bug fixes: - pytest-2.0 is now its own package and depends on pylib-2.0 - new ability: python -m pytest / python -m pytest.main ability -- new python invcation: pytest.main(args, plugins) to load +- new python invocation: pytest.main(args, plugins) to load some custom plugins early. - try harder to run unittest test suites in a more compatible manner by deferring setup/teardown semantics to the unittest package. 
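The ``pytest.main(args, plugins)`` invocation mentioned in the 2.0 notes above can be used roughly as in the following minimal sketch; ``MyReportPlugin`` is a hypothetical example plugin, not part of pytest itself.

.. code-block:: python

    import pytest

    class MyReportPlugin(object):
        # Standard pytest hook, called once the test session finishes.
        def pytest_sessionfinish(self, session, exitstatus):
            print("session finished with exit status %d" % exitstatus)

    if __name__ == "__main__":
        # Run an in-process session with the ad-hoc plugin pre-loaded,
        # equivalent to ``pytest -q`` plus the extra hook above.
        raise SystemExit(pytest.main(["-q"], plugins=[MyReportPlugin()]))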
@@ -2773,7 +3432,7 @@ Bug fixes: - extend and refine xfail mechanism: ``@py.test.mark.xfail(run=False)`` do not run the decorated test ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries - specifiying ``--runxfail`` on command line virtually ignores xfail markers + specifying ``--runxfail`` on command line virtually ignores xfail markers - expose (previously internal) commonly useful methods: py.io.get_terminal_with() -> return terminal width @@ -2998,7 +3657,7 @@ Bug fixes: * add the ability to specify a path for py.lookup to search in -* fix a funcarg cached_setup bug probably only occuring +* fix a funcarg cached_setup bug probably only occurring in distributed testing and "module" scope with teardown. * many fixes and changes for making the code base python3 compatible, @@ -3264,10 +3923,10 @@ serve as a reference for developers. * fixed issue with 2.5 type representations in py.test [45483, 45484] * made that internal reporting issues displaying is done atomically in py.test [45518] -* made that non-existing files are igored by the py.lookup script [45519] +* made that non-existing files are ignored by the py.lookup script [45519] * improved exception name creation in py.test [45535] * made that less threads are used in execnet [merge in 45539] -* removed lock required for atomical reporting issue displaying in py.test +* removed lock required for atomic reporting issue displaying in py.test [45545] * removed globals from execnet [45541, 45547] * refactored cleanup mechanics, made that setDaemon is set to 1 to make atexit diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 71dc04d91..68db81398 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -34,13 +34,13 @@ If you are reporting a bug, please include: * Your operating system name and version. * Any details about your local setup that might be helpful in troubleshooting, - specifically Python interpreter version, - installed libraries and pytest version. + specifically the Python interpreter version, installed libraries, and pytest + version. * Detailed steps to reproduce the bug. -If you can write a demonstration test that currently fails but should pass (xfail), -that is a very useful commit to make as well, even if you can't find how -to fix the bug yet. +If you can write a demonstration test that currently fails but should pass +(xfail), that is a very useful commit to make as well, even if you cannot +fix the bug itself. .. _fixbugs: @@ -120,7 +120,7 @@ the following: - PyPI presence with a ``setup.py`` that contains a license, ``pytest-`` prefixed name, version number, authors, short and long description. -- a ``tox.ini`` for running tests using `tox `_. +- a ``tox.ini`` for running tests using `tox `_. - a ``README.txt`` describing how to use the plugin and on which platforms it runs. @@ -158,19 +158,41 @@ As stated, the objective is to share maintenance and avoid "plugin-abandon". .. _`pull requests`: .. _pull-requests: -Preparing Pull Requests on GitHub ---------------------------------- +Preparing Pull Requests +----------------------- -.. note:: - What is a "pull request"? It informs project's core developers about the - changes you want to review and merge. Pull requests are stored on - `GitHub servers `_. - Once you send a pull request, we can discuss its potential modifications and - even add more commits to it later on. +Short version +~~~~~~~~~~~~~ -There's an excellent tutorial on how Pull Requests work in the -`GitHub Help Center `_, -but here is a simple overview: +#. 
Fork the repository;
+#. Target ``master`` for bugfixes and doc changes;
+#. Target ``features`` for new features or functionality changes;
+#. Follow **PEP-8**. There's a ``tox`` command to help fix violations: ``tox -e fix-lint``.
+#. Tests are run using ``tox``::
+
+     tox -e linting,py27,py36
+
+   The test environments above are usually enough to cover most cases locally.
+
+#. Write a ``changelog`` entry: ``changelog/2574.bugfix``, use the issue id number
+   and one of ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or
+   ``trivial`` for the issue type.
+#. Unless your change is trivial or a documentation fix (e.g., a typo or reword of a small section), please
+   add yourself to the ``AUTHORS`` file, in alphabetical order;
+
+
+Long version
+~~~~~~~~~~~~
+
+What is a "pull request"? It informs the project's core developers about the
+changes you want reviewed and merged. Pull requests are stored on
+`GitHub servers `_.
+Once you send a pull request, we can discuss its potential modifications and
+even add more commits to it later on. There's an excellent tutorial on how Pull
+Requests work in the
+`GitHub Help Center `_.
+
+Here is a simple overview, with pytest-specific bits:
 
 #. Fork the `pytest GitHub repository `__. It's
@@ -206,35 +228,43 @@ but here is a simple overview:
 
 #. Run all the tests
 
-   You need to have Python 2.7 and 3.5 available in your system. Now
+   You need to have Python 2.7 and 3.6 available in your system. Now
    running tests is as simple as issuing this command::
 
-    $ tox -e linting,py27,py35
+    $ tox -e linting,py27,py36
 
-   This command will run tests via the "tox" tool against Python 2.7 and 3.5
+   This command will run tests via the "tox" tool against Python 2.7 and 3.6
    and also perform "lint" coding-style checks.
 
-#. You can now edit your local working copy.
+#. You can now edit your local working copy. Please follow PEP-8.
 
    You can now make the changes you want and run the tests again as necessary.
 
-   To run tests on Python 2.7 and pass options to pytest (e.g. enter pdb on
-   failure) to pytest you can do::
+   If you have too many linting errors, try running::
+
+    $ tox -e fix-lint
+
+   to fix PEP-8 related errors automatically.
+
+   You can pass different options to ``tox``. For example, to run tests on Python 2.7 and pass options to pytest
+   (e.g. enter pdb on failure) you can do::
 
     $ tox -e py27 -- --pdb
 
-   Or to only run tests in a particular test module on Python 3.5::
+   Or to only run tests in a particular test module on Python 3.6::
 
-    $ tox -e py35 -- testing/test_config.py
+    $ tox -e py36 -- testing/test_config.py
 
 #. Commit and push once your tests pass and you are happy with your change(s)::
 
    $ git commit -a -m "<commit message>"
   $ git push -u
 
-   Make sure you add a message to ``CHANGELOG.rst`` and add yourself to
-   ``AUTHORS``. If you are unsure about either of these steps, submit your
-   pull request and we'll help you fix it up.
+#. Create a new changelog entry in ``changelog``. The file should be named ``<issueid>.<type>``,
+   where *issueid* is the number of the issue related to the change and *type* is one of
+   ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or ``trivial``.
+
+#. Add yourself to the ``AUTHORS`` file if you are not there yet, in alphabetical order.
 
 #. Finally, submit a pull request through the GitHub website using this data::
diff --git a/HOWTORELEASE.rst b/HOWTORELEASE.rst
index 372ecf7f1..48a3461d4 100644
--- a/HOWTORELEASE.rst
+++ b/HOWTORELEASE.rst
@@ -1,85 +1,65 @@
-How to release pytest
---------------------------------------------
+Release Procedure
+-----------------
 
-Note: this assumes you have already registered on pypi.
+Our current policy for releasing is to aim for a bugfix every few weeks and a minor release every 2-3 months. The idea
+is to get fixes and new features out instead of trying to cram a ton of features into a release and, as a consequence,
+taking a lot of time to make a new one.
 
-1. Bump version numbers in ``_pytest/__init__.py`` (``setup.py`` reads it).
+.. important::
 
-2. Check and finalize ``CHANGELOG.rst``.
+    pytest releases must be prepared on **Linux** because the docs and examples expect
+    to be executed on that platform.
 
-3. Write ``doc/en/announce/release-VERSION.txt`` and include
-   it in ``doc/en/announce/index.txt``. Run this command to list names of authors involved::
+#. Install development dependencies in a virtual environment with::
 
-    git log $(git describe --abbrev=0 --tags)..HEAD --format='%aN' | sort -u
+    pip3 install -r tasks/requirements.txt
 
-4. Regenerate the docs examples using tox::
+#. Create a branch ``release-X.Y.Z`` with the version for the release.
 
-    tox -e regen
+   * **patch releases**: from the latest ``master``;
 
-5. At this point, open a PR named ``release-X`` so others can help find regressions or provide suggestions.
+   * **minor releases**: from the latest ``features``; then merge with the latest ``master``;
 
-6. Use devpi for uploading a release tarball to a staging area::
+   Ensure you are in a clean work tree.
 
-    devpi use https://devpi.net/USER/dev
-    devpi upload --formats sdist,bdist_wheel
+#. Generate docs, changelog, announcements and upload a package to
+   your ``devpi`` staging server::
 
-7. Run from multiple machines::
+    invoke generate.pre-release --password
 
-    devpi use https://devpi.net/USER/dev
-    devpi test pytest==VERSION
+   If ``--password`` is not given, it is assumed the user is already logged in to ``devpi``.
+   If you don't have an account, please ask for one.
 
-   Alternatively, you can use `devpi-cloud-tester `_ to test
-   the package on AppVeyor and Travis (follow instructions on the ``README``).
+#. Open a PR for this branch targeting ``master``.
 
-8. Check that tests pass for relevant combinations with::
+#. Test the package
+
+   * **Manual method**
+
+     Run from multiple machines::
+
+      devpi use https://devpi.net/USER/dev
+      devpi test pytest==VERSION
+
+     Check that tests pass for relevant combinations with::
 
       devpi list pytest
 
-   or look at failures with "devpi list -f pytest".
+   * **CI servers**
 
-9. Feeling confident? Publish to pypi::
+     Configure a repository as per the instructions on
+     devpi-cloud-test_ to test the package on Travis_ and AppVeyor_.
+     All test environments should pass.
 
-    devpi push pytest==VERSION pypi:NAME
+#. Publish to PyPI::
 
-   where NAME is the name of pypi.python.org as configured in your ``~/.pypirc``
+    invoke generate.publish-release
+
+   where PYPI_NAME is the name of pypi.python.org as configured in your ``~/.pypirc``
   file `for devpi `_.
 
-10. Tag the release::
-
-    git tag VERSION
-    git push origin VERSION
-
-    Make sure ```` is **exactly** the git hash at the time the package was created.
-
-11.
Send release announcement to mailing lists: - - - pytest-dev@python.org - - python-announce-list@python.org - - testing-in-python@lists.idyll.org (only for minor/major releases) - - And announce the release on Twitter, making sure to add the hashtag ``#pytest``. - -12. **After the release** - - a. **patch release (2.8.3)**: - - 1. Checkout ``master``. - 2. Update version number in ``_pytest/__init__.py`` to ``"2.8.4.dev0"``. - 3. Create a new section in ``CHANGELOG.rst`` titled ``2.8.4.dev0`` and add a few bullet points as placeholders for new entries. - 4. Commit and push. - - b. **minor release (2.9.0)**: - - 1. Merge ``features`` into ``master``. - 2. Checkout ``master``. - 3. Follow the same steps for a **patch release** above, using the next patch release: ``2.9.1.dev0``. - 4. Commit ``master``. - 5. Checkout ``features`` and merge with ``master`` (should be a fast-forward at this point). - 6. Update version number in ``_pytest/__init__.py`` to the next minor release: ``"2.10.0.dev0"``. - 7. Create a new section in ``CHANGELOG.rst`` titled ``2.10.0.dev0``, above ``2.9.1.dev0``, and add a few bullet points as placeholders for new entries. - 8. Commit ``features``. - 9. Push ``master`` and ``features``. - - c. **major release (3.0.0)**: same steps as that of a **minor release** - +#. After a minor/major release, merge ``release-X.Y.Z`` into ``master`` and push (or open a PR). +.. _devpi-cloud-test: https://github.com/obestwalter/devpi-cloud-test +.. _AppVeyor: https://www.appveyor.com/ +.. _Travis: https://travis-ci.org diff --git a/LICENSE b/LICENSE index 9e27bd784..629df45ac 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ The MIT License (MIT) -Copyright (c) 2004-2016 Holger Krekel and others +Copyright (c) 2004-2017 Holger Krekel and others Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index c57cbd911..000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,36 +0,0 @@ -include CHANGELOG.rst -include LICENSE -include AUTHORS - -include README.rst -include CONTRIBUTING.rst -include HOWTORELEASE.rst - -include tox.ini -include setup.py - -recursive-include scripts *.py -recursive-include scripts *.bat - -include .coveragerc - -recursive-include bench *.py -recursive-include extra *.py - -graft testing -graft doc -prune doc/en/_build - -exclude _pytest/impl - -graft _pytest/vendored_packages - -recursive-exclude * *.pyc *.pyo -recursive-exclude testing/.hypothesis * -recursive-exclude testing/freeze/~ * -recursive-exclude testing/freeze/build * -recursive-exclude testing/freeze/dist * - -exclude appveyor.yml -exclude .travis.yml -prune .github diff --git a/README.rst b/README.rst index d5650af65..15ad6ea18 100644 --- a/README.rst +++ b/README.rst @@ -6,13 +6,20 @@ ------ .. image:: https://img.shields.io/pypi/v/pytest.svg - :target: https://pypi.python.org/pypi/pytest + :target: https://pypi.python.org/pypi/pytest + +.. image:: https://anaconda.org/conda-forge/pytest/badges/version.svg + :target: https://anaconda.org/conda-forge/pytest + .. image:: https://img.shields.io/pypi/pyversions/pytest.svg - :target: https://pypi.python.org/pypi/pytest + :target: https://pypi.python.org/pypi/pytest + .. image:: https://img.shields.io/coveralls/pytest-dev/pytest/master.svg - :target: https://coveralls.io/r/pytest-dev/pytest + :target: https://coveralls.io/r/pytest-dev/pytest + .. 
image:: https://travis-ci.org/pytest-dev/pytest.svg?branch=master
    :target: https://travis-ci.org/pytest-dev/pytest
+
 .. image:: https://ci.appveyor.com/api/projects/status/mrgbjaua7t33pg6b?svg=true
    :target: https://ci.appveyor.com/project/pytestbot/pytest
@@ -34,7 +41,7 @@ An example of a simple test:
 To execute it::
 
     $ pytest
-    ============================= test session starts =============================
+    ============================= test session starts =============================
     collected 1 items
 
     test_sample.py F
@@ -71,7 +78,7 @@ Features
 
 - Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested);
 
-- Rich plugin architecture, with over 150+ `external plugins `_ and thriving community;
+- Rich plugin architecture, with 315+ `external plugins `_ and a thriving community;
 
 Documentation
@@ -95,7 +102,7 @@ Consult the `Changelog `__ page
 License
 -------
 
-Copyright Holger Krekel and others, 2004-2016.
+Copyright Holger Krekel and others, 2004-2017.
 
 Distributed under the terms of the `MIT`_ license, pytest is free and open source software.
diff --git a/_pytest/__init__.py b/_pytest/__init__.py
index 8546ff02b..6e41f0504 100644
--- a/_pytest/__init__.py
+++ b/_pytest/__init__.py
@@ -1,2 +1,8 @@
-#
-__version__ = '3.0.6.dev0'
+__all__ = ['__version__']
+
+try:
+    from ._version import version as __version__
+except ImportError:
+    # broken installation, we don't even try
+    # unknown only works because we do a poor man's version compare
+    __version__ = 'unknown'
diff --git a/_pytest/_argcomplete.py b/_pytest/_argcomplete.py
index 3ab679d8b..965ec7951 100644
--- a/_pytest/_argcomplete.py
+++ b/_pytest/_argcomplete.py
@@ -57,26 +57,29 @@ If things do not work right away:
 which should throw a KeyError: 'COMPLINE' (which is properly set by the
 global argcomplete script).
 """
-
+from __future__ import absolute_import, division, print_function
 import sys
 import os
 from glob import glob
 
+
 class FastFilesCompleter:
     'Fast file completer class'
+
     def __init__(self, directories=True):
         self.directories = directories
 
     def __call__(self, prefix, **kwargs):
         """only called on non option completions"""
-        if os.path.sep in prefix[1:]: #
+        if os.path.sep in prefix[1:]:
             prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
         else:
             prefix_dir = 0
         completion = []
         globbed = []
         if '*' not in prefix and '?'
not in prefix: - if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash + # we are on unix, otherwise no bash + if not prefix or prefix[-1] == os.path.sep: globbed.extend(glob(prefix + '.*')) prefix += '*' globbed.extend(glob(prefix)) @@ -96,7 +99,8 @@ if os.environ.get('_ARGCOMPLETE'): filescompleter = FastFilesCompleter() def try_argcomplete(parser): - argcomplete.autocomplete(parser) + argcomplete.autocomplete(parser, always_complete_options=False) else: - def try_argcomplete(parser): pass + def try_argcomplete(parser): + pass filescompleter = None diff --git a/_pytest/_code/__init__.py b/_pytest/_code/__init__.py index 3463c11ea..815c13b42 100644 --- a/_pytest/_code/__init__.py +++ b/_pytest/_code/__init__.py @@ -1,4 +1,5 @@ """ python inspection/code generation API """ +from __future__ import absolute_import, division, print_function from .code import Code # noqa from .code import ExceptionInfo # noqa from .code import Frame # noqa diff --git a/_pytest/_code/_py2traceback.py b/_pytest/_code/_py2traceback.py index a830d9899..5aacf0a42 100644 --- a/_pytest/_code/_py2traceback.py +++ b/_pytest/_code/_py2traceback.py @@ -2,8 +2,10 @@ # CHANGES: # - some_str is replaced, trying to create unicode strings # +from __future__ import absolute_import, division, print_function import types + def format_exception_only(etype, value): """Format the exception part of a traceback. @@ -29,7 +31,7 @@ def format_exception_only(etype, value): # would throw another exception and mask the original problem. if (isinstance(etype, BaseException) or isinstance(etype, types.InstanceType) or - etype is None or type(etype) is str): + etype is None or type(etype) is str): return [_format_final_exc_line(etype, value)] stype = etype.__name__ @@ -61,6 +63,7 @@ def format_exception_only(etype, value): lines.append(_format_final_exc_line(stype, value)) return lines + def _format_final_exc_line(etype, value): """Return a list of a single line -- normal case for format_exception_only""" valuestr = _some_str(value) @@ -70,6 +73,7 @@ def _format_final_exc_line(etype, value): line = "%s: %s\n" % (etype, valuestr) return line + def _some_str(value): try: return unicode(value) diff --git a/_pytest/_code/code.py b/_pytest/_code/code.py index 616d5c431..f3b7eedfc 100644 --- a/_pytest/_code/code.py +++ b/_pytest/_code/code.py @@ -1,14 +1,16 @@ +from __future__ import absolute_import, division, print_function import sys from inspect import CO_VARARGS, CO_VARKEYWORDS import re from weakref import ref +from _pytest.compat import _PY2, _PY3, PY35, safe_str import py builtin_repr = repr reprlib = py.builtin._tryimport('repr', 'reprlib') -if sys.version_info[0] >= 3: +if _PY3: from traceback import format_exception_only else: from ._py2traceback import format_exception_only @@ -16,6 +18,7 @@ else: class Code(object): """ wrapper around Python code objects """ + def __init__(self, rawcode): if not hasattr(rawcode, "co_filename"): rawcode = getrawcode(rawcode) @@ -24,7 +27,7 @@ class Code(object): self.firstlineno = rawcode.co_firstlineno - 1 self.name = rawcode.co_name except AttributeError: - raise TypeError("not a code object: %r" %(rawcode,)) + raise TypeError("not a code object: %r" % (rawcode,)) self.raw = rawcode def __eq__(self, other): @@ -80,6 +83,7 @@ class Code(object): argcount += raw.co_flags & CO_VARKEYWORDS return raw.co_varnames[:argcount] + class Frame(object): """Wrapper around a Python frame holding f_locals and f_globals in which expressions can be evaluated.""" @@ -117,7 +121,7 @@ class Frame(object): """ 
f_locals = self.f_locals.copy() f_locals.update(vars) - py.builtin.exec_(code, self.f_globals, f_locals ) + py.builtin.exec_(code, self.f_globals, f_locals) def repr(self, object): """ return a 'safe' (non-recursive, one-line) string repr for 'object' @@ -141,6 +145,7 @@ class Frame(object): pass # this can occur when using Psyco return retval + class TracebackEntry(object): """ a single entry in a traceback """ @@ -166,7 +171,7 @@ class TracebackEntry(object): return self.lineno - self.frame.code.firstlineno def __repr__(self): - return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1) + return "<TracebackEntry %s:%d>" % (self.frame.code.path, self.lineno + 1) @property def statement(self): @@ -245,19 +250,21 @@ class TracebackEntry(object): line = str(self.statement).lstrip() except KeyboardInterrupt: raise - except: + except: # noqa line = "???" - return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line) + return " File %r:%d in %s\n %s\n" % (fn, self.lineno + 1, name, line) def name(self): return self.frame.code.raw.co_name name = property(name, None, None, "co_name of underlaying code") + class Traceback(list): """ Traceback objects encapsulate and offer higher level access to Traceback entries. """ Entry = TracebackEntry + def __init__(self, tb, excinfo=None): """ initialize from given python traceback object and ExceptionInfo """ self._excinfo = excinfo @@ -287,7 +294,7 @@ class Traceback(list): (excludepath is None or not hasattr(codepath, 'relto') or not codepath.relto(excludepath)) and (lineno is None or x.lineno == lineno) and - (firstlineno is None or x.frame.code.firstlineno == firstlineno)): + (firstlineno is None or x.frame.code.firstlineno == firstlineno)): return Traceback(x._rawentry, self._excinfo) return self @@ -313,7 +320,7 @@ class Traceback(list): """ return last non-hidden traceback entry that lead to the exception of a traceback. """ - for i in range(-1, -len(self)-1, -1): + for i in range(-1, -len(self) - 1, -1): entry = self[i] if not entry.ishidden(): return entry @@ -328,30 +335,33 @@ class Traceback(list): # id for the code.raw is needed to work around # the strange metaprogramming in the decorator lib from pypi # which generates code objects that have hash/value equality - #XXX needs a test + # XXX needs a test key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno - #print "checking for recursion at", key - l = cache.setdefault(key, []) - if l: + # print "checking for recursion at", key + values = cache.setdefault(key, []) + if values: f = entry.frame loc = f.f_locals - for otherloc in l: + for otherloc in values: if f.is_true(f.eval(co_equal, - __recursioncache_locals_1=loc, - __recursioncache_locals_2=otherloc)): + __recursioncache_locals_1=loc, + __recursioncache_locals_2=otherloc)): return i - l.append(entry.frame.f_locals) + values.append(entry.frame.f_locals) return None co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2', '?', 'eval') + class ExceptionInfo(object): """ wraps sys.exc_info() objects and offers help for navigating the traceback.
""" _striptext = '' + _assert_start_repr = "AssertionError(u\'assert " if _PY2 else "AssertionError(\'assert " + def __init__(self, tup=None, exprinfo=None): import _pytest._code if tup is None: @@ -359,8 +369,8 @@ class ExceptionInfo(object): if exprinfo is None and isinstance(tup[1], AssertionError): exprinfo = getattr(tup[1], 'msg', None) if exprinfo is None: - exprinfo = py._builtin._totext(tup[1]) - if exprinfo and exprinfo.startswith('assert '): + exprinfo = py.io.saferepr(tup[1]) + if exprinfo and exprinfo.startswith(self._assert_start_repr): self._striptext = 'AssertionError: ' self._excinfo = tup #: the exception class @@ -401,10 +411,10 @@ class ExceptionInfo(object): exconly = self.exconly(tryshort=True) entry = self.traceback.getcrashentry() path, lineno = entry.frame.code.raw.co_filename, entry.lineno - return ReprFileLocation(path, lineno+1, exconly) + return ReprFileLocation(path, lineno + 1, exconly) def getrepr(self, showlocals=False, style="long", - abspath=False, tbfilter=True, funcargs=False): + abspath=False, tbfilter=True, funcargs=False): """ return str()able representation of this exception info. showlocals: show locals per traceback entry style: long|short|no|native traceback style @@ -421,7 +431,7 @@ class ExceptionInfo(object): )), self._getreprcrash()) fmt = FormattedExcinfo(showlocals=showlocals, style=style, - abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) + abspath=abspath, tbfilter=tbfilter, funcargs=funcargs) return fmt.repr_excinfo(self) def __str__(self): @@ -465,15 +475,15 @@ class FormattedExcinfo(object): def _getindent(self, source): # figure out indent for given source try: - s = str(source.getstatement(len(source)-1)) + s = str(source.getstatement(len(source) - 1)) except KeyboardInterrupt: raise - except: + except: # noqa try: s = str(source[-1]) except KeyboardInterrupt: raise - except: + except: # noqa return 0 return 4 + (len(s) - len(s.lstrip())) @@ -509,7 +519,7 @@ class FormattedExcinfo(object): for line in source.lines[:line_index]: lines.append(space_prefix + line) lines.append(self.flow_marker + " " + source.lines[line_index]) - for line in source.lines[line_index+1:]: + for line in source.lines[line_index + 1:]: lines.append(space_prefix + line) if excinfo is not None: indent = 4 if short else self._getindent(source) @@ -542,10 +552,10 @@ class FormattedExcinfo(object): # _repr() function, which is only reprlib.Repr in # disguise, so is very configurable. 
str_repr = self._saferepr(value) - #if len(str_repr) < 70 or not isinstance(value, + # if len(str_repr) < 70 or not isinstance(value, # (list, tuple, dict)): - lines.append("%-10s = %s" %(name, str_repr)) - #else: + lines.append("%-10s = %s" % (name, str_repr)) + # else: # self._line("%-10s =\\" % (name,)) # # XXX # py.std.pprint.pprint(value, stream=self.excinfowriter) @@ -571,14 +581,14 @@ class FormattedExcinfo(object): s = self.get_source(source, line_index, excinfo, short=short) lines.extend(s) if short: - message = "in %s" %(entry.name) + message = "in %s" % (entry.name) else: message = excinfo and excinfo.typename or "" path = self._makepath(entry.path) - filelocrepr = ReprFileLocation(path, entry.lineno+1, message) + filelocrepr = ReprFileLocation(path, entry.lineno + 1, message) localsrepr = None if not short: - localsrepr = self.repr_locals(entry.locals) + localsrepr = self.repr_locals(entry.locals) return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style) if excinfo: lines.extend(self.get_exconly(excinfo, indent=4)) @@ -598,24 +608,54 @@ class FormattedExcinfo(object): traceback = excinfo.traceback if self.tbfilter: traceback = traceback.filter() - recursionindex = None + if is_recursion_error(excinfo): - recursionindex = traceback.recursionindex() + traceback, extraline = self._truncate_recursive_traceback(traceback) + else: + extraline = None + last = traceback[-1] entries = [] - extraline = None for index, entry in enumerate(traceback): einfo = (last == entry) and excinfo or None reprentry = self.repr_traceback_entry(entry, einfo) entries.append(reprentry) - if index == recursionindex: - extraline = "!!! Recursion detected (same locals & position)" - break return ReprTraceback(entries, extraline, style=self.style) + def _truncate_recursive_traceback(self, traceback): + """ + Truncate the given recursive traceback trying to find the starting point + of the recursion. + + The detection is done by going through each traceback entry and finding the + point in which the locals of the frame are equal to the locals of a previous frame (see ``recursionindex()``. + + Handle the situation where the recursion process might raise an exception (for example + comparing numpy arrays using equality raises a TypeError), in which case we do our best to + warn the user of the error and show a limited traceback. + """ + try: + recursionindex = traceback.recursionindex() + except Exception as e: + max_frames = 10 + extraline = ( + '!!! Recursion error detected, but an error occurred locating the origin of recursion.\n' + ' The following exception happened when comparing locals in the stack frame:\n' + ' {exc_type}: {exc_msg}\n' + ' Displaying first and last {max_frames} stack frames out of {total}.' + ).format(exc_type=type(e).__name__, exc_msg=safe_str(e), max_frames=max_frames, total=len(traceback)) + traceback = traceback[:max_frames] + traceback[-max_frames:] + else: + if recursionindex is not None: + extraline = "!!! 
Recursion detected (same locals & position)" + traceback = traceback[:recursionindex + 1] + else: + extraline = None + + return traceback, extraline def repr_excinfo(self, excinfo): - if sys.version_info[0] < 3: + if _PY2: reprtraceback = self.repr_traceback(excinfo) reprcrash = excinfo._getreprcrash() @@ -639,7 +679,7 @@ class FormattedExcinfo(object): e = e.__cause__ excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None descr = 'The above exception was the direct cause of the following exception:' - elif e.__context__ is not None: + elif (e.__context__ is not None and not e.__suppress_context__): e = e.__context__ excinfo = ExceptionInfo((type(e), e, e.__traceback__)) if e.__traceback__ else None descr = 'During handling of the above exception, another exception occurred:' @@ -652,7 +692,7 @@ class FormattedExcinfo(object): class TerminalRepr(object): def __str__(self): s = self.__unicode__() - if sys.version_info[0] < 3: + if _PY2: s = s.encode('utf-8') return s @@ -665,7 +705,7 @@ class TerminalRepr(object): return io.getvalue().strip() def __repr__(self): - return "<%s instance at %0x>" %(self.__class__, id(self)) + return "<%s instance at %0x>" % (self.__class__, id(self)) class ExceptionRepr(TerminalRepr): @@ -709,6 +749,7 @@ class ReprExceptionInfo(ExceptionRepr): self.reprtraceback.toterminal(tw) super(ReprExceptionInfo, self).toterminal(tw) + class ReprTraceback(TerminalRepr): entrysep = "_ " @@ -724,7 +765,7 @@ class ReprTraceback(TerminalRepr): tw.line("") entry.toterminal(tw) if i < len(self.reprentries) - 1: - next_entry = self.reprentries[i+1] + next_entry = self.reprentries[i + 1] if entry.style == "long" or \ entry.style == "short" and next_entry.style == "long": tw.sep(self.entrysep) @@ -732,12 +773,14 @@ class ReprTraceback(TerminalRepr): if self.extraline: tw.line(self.extraline) + class ReprTracebackNative(ReprTraceback): def __init__(self, tblines): self.style = "native" self.reprentries = [ReprEntryNative(tblines)] self.extraline = None + class ReprEntryNative(TerminalRepr): style = "native" @@ -747,6 +790,7 @@ class ReprEntryNative(TerminalRepr): def toterminal(self, tw): tw.write("".join(self.lines)) + class ReprEntry(TerminalRepr): localssep = "_ " @@ -763,7 +807,7 @@ class ReprEntry(TerminalRepr): for line in self.lines: red = line.startswith("E ") tw.line(line, bold=True, red=red) - #tw.line("") + # tw.line("") return if self.reprfuncargs: self.reprfuncargs.toterminal(tw) @@ -771,7 +815,7 @@ class ReprEntry(TerminalRepr): red = line.startswith("E ") tw.line(line, bold=True, red=red) if self.reprlocals: - #tw.sep(self.localssep, "Locals") + # tw.sep(self.localssep, "Locals") tw.line("") self.reprlocals.toterminal(tw) if self.reprfileloc: @@ -784,6 +828,7 @@ class ReprEntry(TerminalRepr): self.reprlocals, self.reprfileloc) + class ReprFileLocation(TerminalRepr): def __init__(self, path, lineno, message): self.path = str(path) @@ -800,6 +845,7 @@ class ReprFileLocation(TerminalRepr): tw.write(self.path, bold=True, red=True) tw.line(":%s: %s" % (self.lineno, msg)) + class ReprLocals(TerminalRepr): def __init__(self, lines): self.lines = lines @@ -808,6 +854,7 @@ class ReprLocals(TerminalRepr): for line in self.lines: tw.line(line) + class ReprFuncArgs(TerminalRepr): def __init__(self, args): self.args = args @@ -816,11 +863,11 @@ class ReprFuncArgs(TerminalRepr): if self.args: linesofar = "" for name, value in self.args: - ns = "%s = %s" %(name, value) + ns = "%s = %s" % (safe_str(name), safe_str(value)) if len(ns) + len(linesofar) + 2 
> tw.fullwidth: if linesofar: tw.line(linesofar) - linesofar = ns + linesofar = ns else: if linesofar: linesofar += ", " + ns @@ -848,7 +895,7 @@ def getrawcode(obj, trycall=True): return obj -if sys.version_info[:2] >= (3, 5): # RecursionError introduced in 3.5 +if PY35: # RecursionError introduced in 3.5 def is_recursion_error(excinfo): return excinfo.errisinstance(RecursionError) # noqa else: diff --git a/_pytest/_code/source.py b/_pytest/_code/source.py index fcec0f5ca..fc4171264 100644 --- a/_pytest/_code/source.py +++ b/_pytest/_code/source.py @@ -1,8 +1,9 @@ -from __future__ import generators +from __future__ import absolute_import, division, generators, print_function from bisect import bisect_right import sys -import inspect, tokenize +import inspect +import tokenize import py cpy_compile = compile @@ -19,6 +20,7 @@ class Source(object): possibly deindenting it. """ _compilecounter = 0 + def __init__(self, *parts, **kwargs): self.lines = lines = [] de = kwargs.get('deindent', True) @@ -73,7 +75,7 @@ class Source(object): start, end = 0, len(self) while start < end and not self.lines[start].strip(): start += 1 - while end > start and not self.lines[end-1].strip(): + while end > start and not self.lines[end - 1].strip(): end -= 1 source = Source() source.lines[:] = self.lines[start:end] @@ -86,8 +88,8 @@ class Source(object): before = Source(before) after = Source(after) newsource = Source() - lines = [ (indent + line) for line in self.lines] - newsource.lines = before.lines + lines + after.lines + lines = [(indent + line) for line in self.lines] + newsource.lines = before.lines + lines + after.lines return newsource def indent(self, indent=' ' * 4): @@ -95,7 +97,7 @@ class Source(object): all lines indented by the given indent-string. """ newsource = Source() - newsource.lines = [(indent+line) for line in self.lines] + newsource.lines = [(indent + line) for line in self.lines] return newsource def getstatement(self, lineno, assertion=False): @@ -134,7 +136,8 @@ class Source(object): try: import parser except ImportError: - syntax_checker = lambda x: compile(x, 'asd', 'exec') + def syntax_checker(x): + return compile(x, 'asd', 'exec') else: syntax_checker = parser.suite @@ -143,8 +146,8 @@ class Source(object): else: source = str(self) try: - #compile(source+'\n', "x", "exec") - syntax_checker(source+'\n') + # compile(source+'\n', "x", "exec") + syntax_checker(source + '\n') except KeyboardInterrupt: raise except Exception: @@ -164,8 +167,8 @@ class Source(object): """ if not filename or py.path.local(filename).check(file=0): if _genframe is None: - _genframe = sys._getframe(1) # the caller - fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno + _genframe = sys._getframe(1) # the caller + fn, lineno = _genframe.f_code.co_filename, _genframe.f_lineno base = "<%d-codegen " % self._compilecounter self.__class__._compilecounter += 1 if not filename: @@ -180,7 +183,7 @@ class Source(object): # re-represent syntax errors from parsing python strings msglines = self.lines[:ex.lineno] if ex.offset: - msglines.append(" "*ex.offset + '^') + msglines.append(" " * ex.offset + '^') msglines.append("(code was compiled probably from here: %s)" % filename) newex = SyntaxError('\n'.join(msglines)) newex.offset = ex.offset @@ -198,8 +201,8 @@ class Source(object): # public API shortcut functions # -def compile_(source, filename=None, mode='exec', flags= - generators.compiler_flag, dont_inherit=0): + +def compile_(source, filename=None, mode='exec', flags=generators.compiler_flag, 
dont_inherit=0): """ compile the given source to a raw code object, and maintain an internal cache which allows later retrieval of the source code for the code object @@ -208,7 +211,7 @@ def compile_(source, filename=None, mode='exec', flags= if _ast is not None and isinstance(source, _ast.AST): # XXX should Source support having AST? return cpy_compile(source, filename, mode, flags, dont_inherit) - _genframe = sys._getframe(1) # the caller + _genframe = sys._getframe(1) # the caller s = Source(source) co = s.compile(filename, mode, flags, _genframe=_genframe) return co @@ -245,12 +248,13 @@ def getfslineno(obj): # helper functions # + def findsource(obj): try: sourcelines, lineno = py.std.inspect.findsource(obj) except py.builtin._sysex: raise - except: + except: # noqa return None, -1 source = Source() source.lines = [line.rstrip() for line in sourcelines] @@ -274,7 +278,7 @@ def deindent(lines, offset=None): line = line.expandtabs() s = line.lstrip() if s: - offset = len(line)-len(s) + offset = len(line) - len(s) break else: offset = 0 @@ -293,11 +297,11 @@ def deindent(lines, offset=None): try: for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)): if sline > len(lines): - break # End of input reached + break # End of input reached if sline > len(newlines): line = lines[sline - 1].expandtabs() if line.lstrip() and line[:offset].isspace(): - line = line[offset:] # Deindent + line = line[offset:] # Deindent newlines.append(line) for i in range(sline, eline): @@ -315,29 +319,29 @@ def get_statement_startend2(lineno, node): import ast # flatten all statements and except handlers into one lineno-list # AST's line numbers start indexing at 1 - l = [] + values = [] for x in ast.walk(node): if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler): - l.append(x.lineno - 1) + values.append(x.lineno - 1) for name in "finalbody", "orelse": val = getattr(x, name, None) if val: # treat the finally/orelse part as its own statement - l.append(val[0].lineno - 1 - 1) - l.sort() - insert_index = bisect_right(l, lineno) - start = l[insert_index - 1] - if insert_index >= len(l): + values.append(val[0].lineno - 1 - 1) + values.sort() + insert_index = bisect_right(values, lineno) + start = values[insert_index - 1] + if insert_index >= len(values): end = None else: - end = l[insert_index] + end = values[insert_index] return start, end def getstatementrange_ast(lineno, source, assertion=False, astnode=None): if astnode is None: content = str(source) - if sys.version_info < (2,7): + if sys.version_info < (2, 7): content += "\n" try: astnode = compile(content, "source", "exec", 1024) # 1024 for AST @@ -393,7 +397,7 @@ def getstatementrange_old(lineno, source, assertion=False): raise IndexError("likely a subclass") if "assert" not in line and "raise" not in line: continue - trylines = source.lines[start:lineno+1] + trylines = source.lines[start:lineno + 1] # quick hack to prepare parsing an indented line with # compile_command() (which errors on "return" outside defs) trylines.insert(0, 'def xxx():') @@ -405,10 +409,8 @@ def getstatementrange_old(lineno, source, assertion=False): continue # 2. 
find the end of the statement - for end in range(lineno+1, len(source)+1): + for end in range(lineno + 1, len(source) + 1): trysource = source[start:end] if trysource.isparseable(): return start, end raise SyntaxError("no valid source range around line %d " % (lineno,)) - - diff --git a/_pytest/_pluggy.py b/_pytest/_pluggy.py index 87d32cf8d..6cc1d3d54 100644 --- a/_pytest/_pluggy.py +++ b/_pytest/_pluggy.py @@ -2,7 +2,7 @@ imports symbols from vendored "pluggy" if available, otherwise falls back to importing "pluggy" from the default namespace. """ - +from __future__ import absolute_import, division, print_function try: from _pytest.vendored_packages.pluggy import * # noqa from _pytest.vendored_packages.pluggy import __version__ # noqa diff --git a/_pytest/assertion/__init__.py b/_pytest/assertion/__init__.py index 3f14a7ae7..b0ef667d5 100644 --- a/_pytest/assertion/__init__.py +++ b/_pytest/assertion/__init__.py @@ -1,12 +1,13 @@ """ support for presenting detailed information in failing assertions. """ +from __future__ import absolute_import, division, print_function import py -import os import sys from _pytest.assertion import util from _pytest.assertion import rewrite +from _pytest.assertion import truncate def pytest_addoption(parser): @@ -24,10 +25,6 @@ def pytest_addoption(parser): expression information.""") -def pytest_namespace(): - return {'register_assert_rewrite': register_assert_rewrite} - - def register_assert_rewrite(*names): """Register one or more module names to be rewritten on import. @@ -100,12 +97,6 @@ def pytest_collection(session): assertstate.hook.set_session(session) -def _running_on_ci(): - """Check if we're currently running on a CI system.""" - env_vars = ['CI', 'BUILD_NUMBER'] - return any(var in os.environ for var in env_vars) - - def pytest_runtest_setup(item): """Setup the pytest_assertrepr_compare hook @@ -119,8 +110,8 @@ def pytest_runtest_setup(item): This uses the first result from the hook and then ensures the following: - * Overly verbose explanations are dropped unless -vv was used or - running on a CI. + * Overly verbose explanations are truncated unless configured otherwise + (eg. if running in verbose mode). * Embedded newlines are escaped to help util.format_explanation() later. * If the rewrite mode is used embedded %-characters are replaced @@ -133,14 +124,7 @@ def pytest_runtest_setup(item): config=item.config, op=op, left=left, right=right) for new_expl in hook_result: if new_expl: - if (sum(len(p) for p in new_expl[1:]) > 80*8 and - item.config.option.verbose < 2 and - not _running_on_ci()): - show_max = 10 - truncated_lines = len(new_expl) - show_max - new_expl[show_max:] = [py.builtin._totext( - 'Detailed information truncated (%d more lines)' - ', use "-vv" to show' % truncated_lines)] + new_expl = truncate.truncate_if_required(new_expl, item) new_expl = [line.replace("\n", "\\n") for line in new_expl] res = py.builtin._totext("\n~").join(new_expl) if item.config.getvalue("assertmode") == "rewrite": diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py index abf5b491f..d48b6648f 100644 --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -1,5 +1,5 @@ """Rewrite assertion AST to produce nice error messages""" - +from __future__ import absolute_import, division, print_function import ast import _ast import errno @@ -11,7 +11,6 @@ import re import struct import sys import types -from fnmatch import fnmatch import py from _pytest.assertion import util @@ -37,10 +36,11 @@ PYC_TAIL = "." 
+ PYTEST_TAG + PYC_EXT REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2) ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3 -if sys.version_info >= (3,5): +if sys.version_info >= (3, 5): ast_Call = ast.Call else: - ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None) + def ast_Call(a, b, c): + return ast.Call(a, b, c, None, None) class AssertionRewritingHook(object): @@ -163,11 +163,7 @@ class AssertionRewritingHook(object): # modules not passed explicitly on the command line are only # rewritten if they match the naming convention for test files for pat in self.fnpats: - # use fnmatch instead of fn_pypath.fnmatch because the - # latter might trigger an import to fnmatch.fnmatch - # internally, which would cause this method to be - # called recursively - if fnmatch(fn_pypath.basename, pat): + if fn_pypath.fnmatch(pat): state.trace("matched test file %r" % (fn,)) return True @@ -214,13 +210,12 @@ class AssertionRewritingHook(object): mod.__cached__ = pyc mod.__loader__ = self py.builtin.exec_(co, mod.__dict__) - except: - del sys.modules[name] + except: # noqa + if name in sys.modules: + del sys.modules[name] raise return sys.modules[name] - - def is_package(self, name): try: fd, fn, desc = imp.find_module(name) @@ -265,7 +260,7 @@ def _write_pyc(state, co, source_stat, pyc): fp = open(pyc, "wb") except IOError: err = sys.exc_info()[1].errno - state.trace("error writing pyc file at %s: errno=%s" %(pyc, err)) + state.trace("error writing pyc file at %s: errno=%s" % (pyc, err)) # we ignore any failure to write the cache file # there are many reasons, permission-denied, __pycache__ being a # file etc. @@ -287,6 +282,7 @@ N = "\n".encode("utf-8") cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+") BOM_UTF8 = '\xef\xbb\xbf' + def _rewrite_test(config, fn): """Try to read and rewrite *fn* and return the code object.""" state = config._assertstate @@ -311,7 +307,7 @@ def _rewrite_test(config, fn): end2 = source.find("\n", end1 + 1) if (not source.startswith(BOM_UTF8) and cookie_re.match(source[0:end1]) is None and - cookie_re.match(source[end1 + 1:end2]) is None): + cookie_re.match(source[end1 + 1:end2]) is None): if hasattr(state, "_indecode"): # encodings imported us again, so don't rewrite. return None, None @@ -336,7 +332,7 @@ def _rewrite_test(config, fn): return None, None rewrite_asserts(tree, fn, config) try: - co = compile(tree, fn.strpath, "exec") + co = compile(tree, fn.strpath, "exec", dont_inherit=True) except SyntaxError: # It's possible that this error is from some bug in the # assertion rewriting, but I don't know of a fast way to tell. @@ -344,6 +340,7 @@ def _rewrite_test(config, fn): return None, None return stat, co + def _make_rewritten_pyc(state, source_stat, pyc, co): """Try to dump rewritten code to *pyc*.""" if sys.platform.startswith("win"): @@ -357,6 +354,7 @@ def _make_rewritten_pyc(state, source_stat, pyc, co): if _write_pyc(state, co, source_stat, proc_pyc): os.rename(proc_pyc, pyc) + def _read_pyc(source, pyc, trace=lambda x: None): """Possibly read a pytest pyc containing rewritten code. @@ -414,7 +412,8 @@ def _saferepr(obj): return repr.replace(t("\n"), t("\\n")) -from _pytest.assertion.util import format_explanation as _format_explanation # noqa +from _pytest.assertion.util import format_explanation as _format_explanation # noqa + def _format_assertmsg(obj): """Format the custom assertion message given. 
@@ -443,9 +442,11 @@ def _format_assertmsg(obj): s = s.replace(t("\\n"), t("\n~")) return s + def _should_repr_global_name(obj): return not hasattr(obj, "__name__") and not py.builtin.callable(obj) + def _format_boolop(explanations, is_or): explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")" if py.builtin._istext(explanation): @@ -454,6 +455,7 @@ def _format_boolop(explanations, is_or): t = py.builtin.bytes return explanation.replace(t('%'), t('%%')) + def _call_reprcompare(ops, results, expls, each_obj): for i, res, expl in zip(range(len(ops)), results, expls): try: @@ -487,7 +489,7 @@ binop_map = { ast.Mult: "*", ast.Div: "/", ast.FloorDiv: "//", - ast.Mod: "%%", # escaped for string formatting + ast.Mod: "%%", # escaped for string formatting ast.Eq: "==", ast.NotEq: "!=", ast.Lt: "<", @@ -593,23 +595,26 @@ class AssertionRewriter(ast.NodeVisitor): # docstrings and __future__ imports. aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"), ast.alias("_pytest.assertion.rewrite", "@pytest_ar")] - expect_docstring = True + doc = getattr(mod, "docstring", None) + expect_docstring = doc is None + if doc is not None and self.is_rewrite_disabled(doc): + return pos = 0 - lineno = 0 + lineno = 1 for item in mod.body: if (expect_docstring and isinstance(item, ast.Expr) and isinstance(item.value, ast.Str)): doc = item.value.s - if "PYTEST_DONT_REWRITE" in doc: - # The module has disabled assertion rewriting. + if self.is_rewrite_disabled(doc): return - lineno += len(doc) - 1 expect_docstring = False elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or item.module != "__future__"): lineno = item.lineno break pos += 1 + else: + lineno = item.lineno imports = [ast.Import([alias], lineno=lineno, col_offset=0) for alias in aliases] mod.body[pos:pos] = imports @@ -635,6 +640,9 @@ class AssertionRewriter(ast.NodeVisitor): not isinstance(field, ast.expr)): nodes.append(field) + def is_rewrite_disabled(self, docstring): + return "PYTEST_DONT_REWRITE" in docstring + def variable(self): """Get a new variable.""" # Use a character invalid in python identifiers to avoid clashing. 
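# A minimal runnable sketch of the opt-out behaviour implemented by the
# is_rewrite_disabled() helper introduced above (the helper body is copied
# from the hunk; the sample docstrings are invented for illustration):
def is_rewrite_disabled(docstring):
    return "PYTEST_DONT_REWRITE" in docstring

# any module whose docstring mentions PYTEST_DONT_REWRITE keeps its plain
# asserts instead of getting them rewritten on import
assert is_rewrite_disabled("Helper module. PYTEST_DONT_REWRITE")
assert not is_rewrite_disabled("An ordinary test module.")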
@@ -727,7 +735,7 @@ class AssertionRewriter(ast.NodeVisitor): if isinstance(assert_.test, ast.Tuple) and self.config is not None: fslocation = (self.module_path, assert_.lineno) self.config.warn('R1', 'assertion is always true, perhaps ' - 'remove parentheses?', fslocation=fslocation) + 'remove parentheses?', fslocation=fslocation) self.statements = [] self.variables = [] self.variable_counter = itertools.count() @@ -791,7 +799,7 @@ class AssertionRewriter(ast.NodeVisitor): if i: fail_inner = [] # cond is set in a prior loop iteration below - self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa + self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa self.on_failure = fail_inner self.push_format_context() res, expl = self.visit(v) @@ -843,7 +851,7 @@ class AssertionRewriter(ast.NodeVisitor): new_kwargs.append(ast.keyword(keyword.arg, res)) if keyword.arg: arg_expls.append(keyword.arg + "=" + expl) - else: ## **args have `arg` keywords with an .arg of None + else: # **args have `arg` keywords with an .arg of None arg_expls.append("**" + expl) expl = "%s(%s)" % (func_expl, ', '.join(arg_expls)) @@ -897,7 +905,6 @@ class AssertionRewriter(ast.NodeVisitor): else: visit_Call = visit_Call_legacy - def visit_Attribute(self, attr): if not isinstance(attr.ctx, ast.Load): return self.generic_visit(attr) diff --git a/_pytest/assertion/truncate.py b/_pytest/assertion/truncate.py new file mode 100644 index 000000000..1e1306356 --- /dev/null +++ b/_pytest/assertion/truncate.py @@ -0,0 +1,102 @@ +""" +Utilities for truncating assertion output. + +Current default behaviour is to truncate assertion explanations at +~8 terminal lines, unless running in "-vv" mode or running on CI. +""" +from __future__ import absolute_import, division, print_function +import os + +import py + + +DEFAULT_MAX_LINES = 8 +DEFAULT_MAX_CHARS = 8 * 80 +USAGE_MSG = "use '-vv' to show" + + +def truncate_if_required(explanation, item, max_length=None): + """ + Truncate this assertion explanation if the given test item is eligible. + """ + if _should_truncate_item(item): + return _truncate_explanation(explanation) + return explanation + + +def _should_truncate_item(item): + """ + Whether or not this test item is eligible for truncation. + """ + verbose = item.config.option.verbose + return verbose < 2 and not _running_on_ci() + + +def _running_on_ci(): + """Check if we're currently running on a CI system.""" + env_vars = ['CI', 'BUILD_NUMBER'] + return any(var in os.environ for var in env_vars) + + +def _truncate_explanation(input_lines, max_lines=None, max_chars=None): + """ + Truncate given list of strings that makes up the assertion explanation. + + Truncates to either 8 lines, or 640 characters - whichever the input reaches + first. The remaining lines will be replaced by a usage message. + """ + + if max_lines is None: + max_lines = DEFAULT_MAX_LINES + if max_chars is None: + max_chars = DEFAULT_MAX_CHARS + + # Check if truncation required + input_char_count = len("".join(input_lines)) + if len(input_lines) <= max_lines and input_char_count <= max_chars: + return input_lines + + # Truncate first to max_lines, and then truncate to max_chars if max_chars + # is exceeded. + truncated_explanation = input_lines[:max_lines] + truncated_explanation = _truncate_by_char_count(truncated_explanation, max_chars) + + # Add ellipsis to final line + truncated_explanation[-1] = truncated_explanation[-1] + "..." 
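# Worked example of the truncation logic above (illustrative numbers, not
# from the patch): an explanation of 20 lines x 30 chars trips the 8-line
# limit but not the 640-char limit, so it is cut to its first 8 lines and
# "..." is appended to line 8; the message appended below then reports
# "...Full output truncated (13 lines hidden), use '-vv' to show"
# (12 fully hidden lines plus 1 for the part-truncated final line).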
+ + # Append useful message to explanation + truncated_line_count = len(input_lines) - len(truncated_explanation) + truncated_line_count += 1 # Account for the part-truncated final line + msg = '...Full output truncated' + if truncated_line_count == 1: + msg += ' ({0} line hidden)'.format(truncated_line_count) + else: + msg += ' ({0} lines hidden)'.format(truncated_line_count) + msg += ", {0}" .format(USAGE_MSG) + truncated_explanation.extend([ + py.builtin._totext(""), + py.builtin._totext(msg), + ]) + return truncated_explanation + + +def _truncate_by_char_count(input_lines, max_chars): + # Check if truncation required + if len("".join(input_lines)) <= max_chars: + return input_lines + + # Find point at which input length exceeds total allowed length + iterated_char_count = 0 + for iterated_index, input_line in enumerate(input_lines): + if iterated_char_count + len(input_line) > max_chars: + break + iterated_char_count += len(input_line) + + # Create truncated explanation with modified final line + truncated_result = input_lines[:iterated_index] + final_line = input_lines[iterated_index] + if final_line: + final_line_truncate_point = max_chars - iterated_char_count + final_line = final_line[:final_line_truncate_point] + truncated_result.append(final_line) + return truncated_result diff --git a/_pytest/assertion/util.py b/_pytest/assertion/util.py index 4a0a4e431..9f0092907 100644 --- a/_pytest/assertion/util.py +++ b/_pytest/assertion/util.py @@ -1,4 +1,5 @@ """Utilities for assertion debugging""" +from __future__ import absolute_import, division, print_function import pprint import _pytest._code @@ -8,7 +9,7 @@ try: except ImportError: Sequence = list -BuiltinAssertionError = py.builtin.builtins.AssertionError + u = py.builtin._totext # The _reprcompare attribute on the util module is used by the new assertion @@ -52,11 +53,11 @@ def _split_explanation(explanation): """ raw_lines = (explanation or u('')).split('\n') lines = [raw_lines[0]] - for l in raw_lines[1:]: - if l and l[0] in ['{', '}', '~', '>']: - lines.append(l) + for values in raw_lines[1:]: + if values and values[0] in ['{', '}', '~', '>']: + lines.append(values) else: - lines[-1] += '\\n' + l + lines[-1] += '\\n' + values return lines @@ -81,7 +82,7 @@ def _format_lines(lines): stack.append(len(result)) stackcnt[-1] += 1 stackcnt.append(0) - result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:]) + result.append(u(' +') + u(' ') * (len(stack) - 1) + s + line[1:]) elif line.startswith('}'): stack.pop() stackcnt.pop() @@ -90,7 +91,7 @@ def _format_lines(lines): assert line[0] in ['~', '>'] stack[-1] += 1 indent = len(stack) if line.startswith('~') else len(stack) - 1 - result.append(u(' ')*indent + line[1:]) + result.append(u(' ') * indent + line[1:]) assert len(stack) == 1 return result @@ -105,16 +106,22 @@ except NameError: def assertrepr_compare(config, op, left, right): """Return specialised explanations for some operators/operands""" width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op - left_repr = py.io.saferepr(left, maxsize=int(width//2)) - right_repr = py.io.saferepr(right, maxsize=width-len(left_repr)) + left_repr = py.io.saferepr(left, maxsize=int(width // 2)) + right_repr = py.io.saferepr(right, maxsize=width - len(left_repr)) summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr)) - issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and - not isinstance(x, basestring)) - istext = lambda x: isinstance(x, basestring) - isdict = lambda x: isinstance(x, dict) - isset = 
lambda x: isinstance(x, (set, frozenset)) + def issequence(x): + return (isinstance(x, (list, tuple, Sequence)) and not isinstance(x, basestring)) + + def istext(x): + return isinstance(x, basestring) + + def isdict(x): + return isinstance(x, dict) + + def isset(x): + return isinstance(x, (set, frozenset)) def isiterable(obj): try: @@ -256,8 +263,8 @@ def _compare_eq_dict(left, right, verbose=False): explanation = [] common = set(left).intersection(set(right)) same = dict((k, left[k]) for k in common if left[k] == right[k]) - if same and not verbose: - explanation += [u('Omitting %s identical items, use -v to show') % + if same and verbose < 2: + explanation += [u('Omitting %s identical items, use -vv to show') % len(same)] elif same: explanation += [u('Common items:')] @@ -284,7 +291,7 @@ def _compare_eq_dict(left, right, verbose=False): def _notin_text(term, text, verbose=False): index = text.find(term) head = text[:index] - tail = text[index+len(term):] + tail = text[index + len(term):] correct_text = head + tail diff = _diff_text(correct_text, text, verbose) newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)] diff --git a/_pytest/cacheprovider.py b/_pytest/cacheprovider.py index 0657001f2..c537c1447 100755 --- a/_pytest/cacheprovider.py +++ b/_pytest/cacheprovider.py @@ -1,20 +1,21 @@ """ merged implementation of the cache provider -the name cache was not choosen to ensure pluggy automatically +the name cache was not chosen to ensure pluggy automatically ignores the external pytest-cache """ - +from __future__ import absolute_import, division, print_function import py import pytest import json +import os from os.path import sep as _sep, altsep as _altsep class Cache(object): def __init__(self, config): self.config = config - self._cachedir = config.rootdir.join(".cache") + self._cachedir = Cache.cache_dir_from_config(config) self.trace = config.trace.root.get("cache") if config.getvalue("cacheclear"): self.trace("clearing cachedir") @@ -22,6 +23,16 @@ class Cache(object): self._cachedir.remove() self._cachedir.mkdir() + @staticmethod + def cache_dir_from_config(config): + cache_dir = config.getini("cache_dir") + cache_dir = os.path.expanduser(cache_dir) + cache_dir = os.path.expandvars(cache_dir) + if os.path.isabs(cache_dir): + return py.path.local(cache_dir) + else: + return config.rootdir.join(cache_dir) + def makedir(self, name): """ return a directory path object with the given name. If the directory does not yet exist, it will be created. 
You can use it @@ -89,31 +100,31 @@ class Cache(object): class LFPlugin: """ Plugin which implements the --lf (run last-failing) option """ + def __init__(self, config): self.config = config active_keys = 'lf', 'failedfirst' self.active = any(config.getvalue(key) for key in active_keys) - if self.active: - self.lastfailed = config.cache.get("cache/lastfailed", {}) - else: - self.lastfailed = {} + self.lastfailed = config.cache.get("cache/lastfailed", {}) + self._previously_failed_count = None - def pytest_report_header(self): + def pytest_report_collectionfinish(self): if self.active: - if not self.lastfailed: + if not self._previously_failed_count: mode = "run all (no recorded failures)" else: - mode = "rerun last %d failures%s" % ( - len(self.lastfailed), - " first" if self.config.getvalue("failedfirst") else "") + noun = 'failure' if self._previously_failed_count == 1 else 'failures' + suffix = " first" if self.config.getvalue("failedfirst") else "" + mode = "rerun previous {count} {noun}{suffix}".format( + count=self._previously_failed_count, suffix=suffix, noun=noun + ) return "run-last-failure: %s" % mode def pytest_runtest_logreport(self, report): - if report.failed and "xfail" not in report.keywords: + if (report.when == 'call' and report.passed) or report.skipped: + self.lastfailed.pop(report.nodeid, None) + elif report.failed: self.lastfailed[report.nodeid] = True - elif not report.failed: - if report.when == "call": - self.lastfailed.pop(report.nodeid, None) def pytest_collectreport(self, report): passed = report.outcome in ('passed', 'skipped') @@ -135,22 +146,24 @@ class LFPlugin: previously_failed.append(item) else: previously_passed.append(item) - if not previously_failed and previously_passed: + self._previously_failed_count = len(previously_failed) + if not previously_failed: # running a subset of all tests with recorded failures outside # of the set of tests currently executing - pass - elif self.config.getvalue("failedfirst"): - items[:] = previously_failed + previously_passed - else: + return + if self.config.getvalue("lf"): items[:] = previously_failed config.hook.pytest_deselected(items=previously_passed) + else: + items[:] = previously_failed + previously_passed def pytest_sessionfinish(self, session): config = self.config if config.getvalue("cacheshow") or hasattr(config, "slaveinput"): return - prev_failed = config.cache.get("cache/lastfailed", None) is not None - if (session.testscollected and prev_failed) or self.lastfailed: + + saved_lastfailed = config.cache.get("cache/lastfailed", {}) + if saved_lastfailed != self.lastfailed: config.cache.set("cache/lastfailed", self.lastfailed) @@ -171,6 +184,9 @@ def pytest_addoption(parser): group.addoption( '--cache-clear', action='store_true', dest="cacheclear", help="remove all cache contents at start of test run.") + parser.addini( + "cache_dir", default='.cache', + help="cache directory path.") def pytest_cmdline_main(config): @@ -179,7 +195,6 @@ def pytest_cmdline_main(config): return wrap_session(config, cacheshow) - @pytest.hookimpl(tryfirst=True) def pytest_configure(config): config.cache = Cache(config) @@ -219,12 +234,12 @@ def cacheshow(config, session): basedir = config.cache._cachedir vdir = basedir.join("v") tw.sep("-", "cache values") - for valpath in vdir.visit(lambda x: x.isfile()): + for valpath in sorted(vdir.visit(lambda x: x.isfile())): key = valpath.relto(vdir).replace(valpath.sep, "/") val = config.cache.get(key, dummy) if val is dummy: tw.line("%s contains unreadable content, " - "will be ignored" % 
key) + "will be ignored" % key) else: tw.line("%s contains:" % key) stream = py.io.TextIO() @@ -235,8 +250,8 @@ def cacheshow(config, session): ddir = basedir.join("d") if ddir.isdir() and ddir.listdir(): tw.sep("-", "cache directories") - for p in basedir.join("d").visit(): - #if p.check(dir=1): + for p in sorted(basedir.join("d").visit()): + # if p.check(dir=1): # print("%s/" % p.relto(basedir)) if p.isfile(): key = p.relto(basedir) diff --git a/_pytest/capture.py b/_pytest/capture.py index eea81ca18..cb5af6fcb 100644 --- a/_pytest/capture.py +++ b/_pytest/capture.py @@ -2,17 +2,19 @@ per-test stdout/stderr capturing mechanism. """ -from __future__ import with_statement +from __future__ import absolute_import, division, print_function import contextlib import sys import os +import io +from io import UnsupportedOperation from tempfile import TemporaryFile import py import pytest +from _pytest.compat import CaptureIO -from py.io import TextIO unicode = py.builtin.text patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'} @@ -32,8 +34,11 @@ def pytest_addoption(parser): @pytest.hookimpl(hookwrapper=True) def pytest_load_initial_conftests(early_config, parser, args): - _readline_workaround() ns = early_config.known_args_namespace + if ns.capture == "fd": + _py36_windowsconsoleio_workaround(sys.stdout) + _colorama_workaround() + _readline_workaround() pluginmanager = early_config.pluginmanager capman = CaptureManager(ns.capture) pluginmanager.register(capman, "capturemanager") @@ -130,7 +135,7 @@ class CaptureManager: self.resumecapture() self.activate_funcargs(item) yield - #self.deactivate_funcargs() called from suspendcapture() + # self.deactivate_funcargs() called from suspendcapture() self.suspendcapture_item(item, "call") @pytest.hookimpl(hookwrapper=True) @@ -167,6 +172,7 @@ def capsys(request): request.node._capfuncarg = c = CaptureFixture(SysCapture, request) return c + @pytest.fixture def capfd(request): """Enable capturing of writes to file descriptors 1 and 2 and make @@ -234,6 +240,7 @@ def safe_text_dupfile(f, mode, default_encoding="UTF8"): class EncodedFile(object): errors = "strict" # possibly needed by py3 code (issue555) + def __init__(self, buffer, encoding): self.buffer = buffer self.encoding = encoding @@ -247,6 +254,11 @@ class EncodedFile(object): data = ''.join(linelist) self.write(data) + @property + def name(self): + """Ensure that file.name is a string.""" + return repr(self.buffer) + def __getattr__(self, name): return getattr(object.__getattribute__(self, "buffer"), name) @@ -314,9 +326,11 @@ class MultiCapture(object): return (self.out.snap() if self.out is not None else "", self.err.snap() if self.err is not None else "") + class NoCapture: __init__ = start = done = suspend = resume = lambda *args: None + class FDCapture: """ Capture IO to/from a given os-level filedescriptor. """ @@ -389,7 +403,7 @@ class FDCapture: def writeorg(self, data): """ write to original file descriptor. 
""" if py.builtin._istext(data): - data = data.encode("utf8") # XXX use encoding of original stream + data = data.encode("utf8") # XXX use encoding of original stream os.write(self.targetfd_save, data) @@ -402,7 +416,7 @@ class SysCapture: if name == "stdin": tmpfile = DontReadFromInput() else: - tmpfile = TextIO() + tmpfile = CaptureIO() self.tmpfile = tmpfile def start(self): @@ -448,7 +462,8 @@ class DontReadFromInput: __iter__ = read def fileno(self): - raise ValueError("redirected Stdin is pseudofile, has no fileno()") + raise UnsupportedOperation("redirected stdin is pseudofile, " + "has no fileno()") def isatty(self): return False @@ -458,12 +473,30 @@ class DontReadFromInput: @property def buffer(self): - if sys.version_info >= (3,0): + if sys.version_info >= (3, 0): return self else: raise AttributeError('redirected stdin has no attribute buffer') +def _colorama_workaround(): + """ + Ensure colorama is imported so that it attaches to the correct stdio + handles on Windows. + + colorama uses the terminal on import time. So if something does the + first import of colorama while I/O capture is active, colorama will + fail in various ways. + """ + + if not sys.platform.startswith('win32'): + return + try: + import colorama # noqa + except ImportError: + pass + + def _readline_workaround(): """ Ensure readline is imported so that it attaches to the correct stdio @@ -489,3 +522,56 @@ def _readline_workaround(): import readline # noqa except ImportError: pass + + +def _py36_windowsconsoleio_workaround(stream): + """ + Python 3.6 implemented unicode console handling for Windows. This works + by reading/writing to the raw console handle using + ``{Read,Write}ConsoleW``. + + The problem is that we are going to ``dup2`` over the stdio file + descriptors when doing ``FDCapture`` and this will ``CloseHandle`` the + handles used by Python to write to the console. Though there is still some + weirdness and the console handle seems to only be closed randomly and not + on the first call to ``CloseHandle``, or maybe it gets reopened with the + same handle value when we suspend capturing. + + The workaround in this case will reopen stdio with a different fd which + also means a different handle by replicating the logic in + "Py_lifecycle.c:initstdio/create_stdio". + + :param stream: in practice ``sys.stdout`` or ``sys.stderr``, but given + here as parameter for unittesting purposes. 
+ + See https://github.com/pytest-dev/py/issues/103 + """ + if not sys.platform.startswith('win32') or sys.version_info[:2] < (3, 6): + return + + # bail out if ``stream`` doesn't seem like a proper ``io`` stream (#2666) + if not hasattr(stream, 'buffer'): + return + + buffered = hasattr(stream.buffer, 'raw') + raw_stdout = stream.buffer.raw if buffered else stream.buffer + + if not isinstance(raw_stdout, io._WindowsConsoleIO): + return + + def _reopen_stdio(f, mode): + if not buffered and mode[0] == 'w': + buffering = 0 + else: + buffering = -1 + + return io.TextIOWrapper( + open(os.dup(f.fileno()), mode, buffering), + f.encoding, + f.errors, + f.newlines, + f.line_buffering) + + sys.__stdin__ = sys.stdin = _reopen_stdio(sys.stdin, 'rb') + sys.__stdout__ = sys.stdout = _reopen_stdio(sys.stdout, 'wb') + sys.__stderr__ = sys.stderr = _reopen_stdio(sys.stderr, 'wb') diff --git a/_pytest/compat.py b/_pytest/compat.py index 51fc3bc5c..255f69ce0 100644 --- a/_pytest/compat.py +++ b/_pytest/compat.py @@ -1,6 +1,7 @@ """ python version compatibility code """ +from __future__ import absolute_import, division, print_function import sys import inspect import types @@ -9,8 +10,8 @@ import functools import py -import _pytest - +import _pytest +from _pytest.outcomes import TEST_OUTCOME try: @@ -19,6 +20,7 @@ except ImportError: # pragma: no cover # Only available in Python 3.4+ or as a backport enum = None + _PY3 = sys.version_info > (3, 0) _PY2 = not _PY3 @@ -26,6 +28,10 @@ _PY2 = not _PY3 NoneType = type(None) NOTSET = object() +PY35 = sys.version_info[:2] >= (3, 5) +PY36 = sys.version_info[:2] >= (3, 6) +MODULE_NOT_FOUND_ERROR = 'ModuleNotFoundError' if PY36 else 'ImportError' + if hasattr(inspect, 'signature'): def _format_args(func): return str(inspect.signature(func)) @@ -42,11 +48,18 @@ REGEX_TYPE = type(re.compile('')) def is_generator(func): - try: - return _pytest._code.getrawcode(func).co_flags & 32 # generator function - except AttributeError: # builtin functions have no bytecode - # assume them to not be generators - return False + genfunc = inspect.isgeneratorfunction(func) + return genfunc and not iscoroutinefunction(func) + + +def iscoroutinefunction(func): + """Return True if func is a decorated coroutine function. + + Note: copied and modified from Python 3.5's builtin coroutines.py to avoid importing asyncio directly, + which in turn also initializes the "logging" module as a side effect (see issue #8). + """ + return (getattr(func, '_is_coroutine', False) or + (hasattr(inspect, 'iscoroutinefunction') and inspect.iscoroutinefunction(func))) def getlocation(function, curdir): @@ -55,7 +68,7 @@ lineno = py.builtin._getcode(function).co_firstlineno if fn.relto(curdir): fn = fn.relto(curdir) - return "%s:%d" %(fn, lineno+1) + return "%s:%d" % (fn, lineno + 1) def num_mock_patch_args(function): @@ -66,13 +79,21 @@ mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None)) if mock is not None: return len([p for p in patchings - if not p.attribute_name and p.new is mock.DEFAULT]) + if not p.attribute_name and p.new is mock.DEFAULT]) return len(patchings) -def getfuncargnames(function, startindex=None): +def getfuncargnames(function, startindex=None, cls=None): + """ + @RonnyPfannschmidt: This function should be refactored when we revisit fixtures. The + fixture mechanism should ask the node for the fixture names, and not try to obtain them + directly from the function object well after collection has occurred. + """
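# For illustration (names invented): with the new cls parameter above, a
# plain test method skips its implicit "self" while a staticmethod keeps
# every argument, so both of these yield the argument tuple ('x', 'y'):
#
#     class TestThing(object):
#         def test_method(self, x, y): ...      # startindex -> 1
#         @staticmethod
#         def test_static(x, y): ...            # startindex -> 0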
+ if startindex is None and cls is not None: + is_staticmethod = isinstance(cls.__dict__.get(function.__name__, None), staticmethod) + startindex = 0 if is_staticmethod else 1 # XXX merge with main.py's varnames - #assert not isclass(function) + # assert not isclass(function) realfunction = function while hasattr(realfunction, "__wrapped__"): realfunction = realfunction.__wrapped__ @@ -98,8 +119,7 @@ def getfuncargnames(function, startindex=None): return tuple(argnames[startindex:]) - -if sys.version_info[:2] == (2, 6): +if sys.version_info[:2] == (2, 6): def isclass(object): """ Return true if the object is a class. Overrides inspect.isclass for python 2.6 because it will return True for objects which always return @@ -111,10 +131,12 @@ if sys.version_info[:2] == (2, 6): if _PY3: import codecs - + imap = map + izip = zip STRING_TYPES = bytes, str + UNICODE_TYPES = str, - def _escape_strings(val): + def _ascii_escaped(val): """If val is pure ascii, returns it as a str(). Otherwise, escapes bytes objects into a sequence of escaped bytes: @@ -144,8 +166,11 @@ if _PY3: return val.encode('unicode_escape').decode('ascii') else: STRING_TYPES = bytes, str, unicode + UNICODE_TYPES = unicode, - def _escape_strings(val): + from itertools import imap, izip # NOQA + + def _ascii_escaped(val): """In py2 bytes and str are the same type, so if it's a bytes object, return it unchanged if it is a full ascii string, otherwise escape it into its binary form. @@ -167,8 +192,18 @@ def get_real_func(obj): """ gets the real function object of the (possibly) wrapped object by functools.wraps or functools.partial. """ - while hasattr(obj, "__wrapped__"): - obj = obj.__wrapped__ + start_obj = obj + for i in range(100): + new_obj = getattr(obj, '__wrapped__', None) + if new_obj is None: + break + obj = new_obj + else: + raise ValueError( + ("could not find real function of {start}" + "\nstopped at {current}").format( + start=py.io.saferepr(start_obj), + current=py.io.saferepr(obj))) if isinstance(obj, functools.partial): obj = obj.func return obj @@ -195,14 +230,16 @@ def getimfunc(func): def safe_getattr(object, name, default): - """ Like getattr but return default upon any Exception. + """ Like getattr but return default upon any Exception or any OutcomeException. Attribute access can potentially fail for 'evil' Python objects. - See issue214 + See issue #214. + It catches OutcomeException because of #2490 (issue #580): new outcomes are derived from BaseException + instead of Exception (for more details see #2707) """ try: return getattr(object, name, default) - except Exception: + except TEST_OUTCOME: return default @@ -226,5 +263,64 @@ else: try: return str(v) except UnicodeError: + if not isinstance(v, unicode): + v = unicode(v) errors = 'replace' - return v.encode('ascii', errors) + return v.encode('utf-8', errors) + + +COLLECT_FAKEMODULE_ATTRIBUTES = ( + 'Collector', + 'Module', + 'Generator', + 'Function', + 'Instance', + 'Session', + 'Item', + 'Class', + 'File', + '_fillfuncargs', +) + + +def _setup_collect_fakemodule(): + from types import ModuleType + import pytest + pytest.collect = ModuleType('pytest.collect') + pytest.collect.__all__ = [] # used for setns + for attr in COLLECT_FAKEMODULE_ATTRIBUTES: + setattr(pytest.collect, attr, getattr(pytest, attr)) + + +if _PY2: + # Without this the test_dupfile_on_textio will fail; otherwise CaptureIO could directly inherit from StringIO.
+ from py.io import TextIO + + class CaptureIO(TextIO): + + @property + def encoding(self): + return getattr(self, '_encoding', 'UTF-8') + +else: + import io + + class CaptureIO(io.TextIOWrapper): + def __init__(self): + super(CaptureIO, self).__init__( + io.BytesIO(), + encoding='UTF-8', newline='', write_through=True, + ) + + def getvalue(self): + return self.buffer.getvalue().decode('UTF-8') + + +class FuncargnamesCompatAttr(object): + """ helper class so that Metafunc, Function and FixtureRequest + don't need to each define the "funcargnames" compatibility attribute. + """ + @property + def funcargnames(self): + """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" + return self.fixturenames diff --git a/_pytest/config.py b/_pytest/config.py index fe386ed0b..19835d2c3 100644 --- a/_pytest/config.py +++ b/_pytest/config.py @@ -1,4 +1,5 @@ """ command line options, ini-file and conftest.py processing. """ +from __future__ import absolute_import, division, print_function import argparse import shlex import traceback @@ -7,7 +8,8 @@ import warnings import py # DON't import pytest here because it causes import cycle troubles -import sys, os +import sys +import os import _pytest._code import _pytest.hookspec # the extension point definitions import _pytest.assertion @@ -53,15 +55,15 @@ def main(args=None, plugins=None): return 4 else: try: - config.pluginmanager.check_pending() return config.hook.pytest_cmdline_main(config=config) finally: config._ensure_unconfigure() except UsageError as e: for msg in e.args: - sys.stderr.write("ERROR: %s\n" %(msg,)) + sys.stderr.write("ERROR: %s\n" % (msg,)) return 4 + class cmdline: # compatibility namespace main = staticmethod(main) @@ -70,6 +72,12 @@ class UsageError(Exception): """ error in pytest usage or invocation""" +class PrintHelp(Exception): + """Raised when pytest should print its help to skip the rest of the + argument parsing and validation.""" + pass + + def filename_arg(path, optname): """ Argparse type validator for filename arguments.
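# A small self-contained sketch (mirrors the Python 3 CaptureIO added to
# _pytest/compat.py above; restated here so it runs standalone): text
# written through the wrapper is buffered as UTF-8 bytes underneath and
# handed back as unicode, which is what SysCapture needs on both pythons.
import io

class CaptureIO(io.TextIOWrapper):
    def __init__(self):
        super(CaptureIO, self).__init__(
            io.BytesIO(), encoding='UTF-8', newline='', write_through=True)

    def getvalue(self):
        # decode the underlying BytesIO content back to text
        return self.buffer.getvalue().decode('UTF-8')

f = CaptureIO()
f.write(u'captured \u20ac output\n')
assert f.getvalue() == u'captured \u20ac output\n'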
@@ -95,10 +103,11 @@ def directory_arg(path, optname): _preinit = [] default_plugins = ( - "mark main terminal runner python fixtures debugging unittest capture skipping " - "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion " - "junitxml resultlog doctest cacheprovider freeze_support " - "setuponly setupplan").split() + "mark main terminal runner python fixtures debugging unittest capture skipping " + "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion " + "junitxml resultlog doctest cacheprovider freeze_support " + "setuponly setupplan warnings").split() + builtin_plugins = set(default_plugins) builtin_plugins.add("pytester") @@ -108,6 +117,7 @@ def _preloadplugins(): assert not _preinit _preinit.append(get_config()) + def get_config(): if _preinit: return _preinit.pop(0) @@ -118,6 +128,7 @@ def get_config(): pluginmanager.import_plugin(spec) return config + def get_plugin_manager(): """ Obtain a new instance of the @@ -129,6 +140,7 @@ def get_plugin_manager(): """ return get_config().pluginmanager + def _prepareconfig(args=None, plugins=None): warning = None if args is None: @@ -153,7 +165,7 @@ def _prepareconfig(args=None, plugins=None): if warning: config.warn('C1', warning) return pluginmanager.hook.pytest_cmdline_parse( - pluginmanager=pluginmanager, args=args) + pluginmanager=pluginmanager, args=args) except BaseException: config._ensure_unconfigure() raise @@ -161,13 +173,14 @@ class PytestPluginManager(PluginManager): """ - Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific + Overwrites :py:class:`pluggy.PluginManager <_pytest.vendored_packages.pluggy.PluginManager>` to add pytest-specific functionality: * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and ``pytest_plugins`` global variables found in plugins being loaded; * ``conftest.py`` loading during start-up; """ + def __init__(self): super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_") self._conftest_plugins = set() @@ -198,7 +211,8 @@ class PytestPluginManager(PluginManager): """ .. deprecated:: 2.8 - Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead. + Use :py:meth:`pluggy.PluginManager.add_hookspecs <_pytest.vendored_packages.pluggy.PluginManager.add_hookspecs>` + instead. """ warning = dict(code="I2", fslocation=_pytest._code.getfslineno(sys._getframe(1)), @@ -227,7 +241,7 @@ class PytestPluginManager(PluginManager): def parse_hookspec_opts(self, module_or_class, name): opts = super(PytestPluginManager, self).parse_hookspec_opts( - module_or_class, name) + module_or_class, name) if opts is None: method = getattr(module_or_class, name) if name.startswith("pytest_"): @@ -250,7 +264,10 @@ class PytestPluginManager(PluginManager): ret = super(PytestPluginManager, self).register(plugin, name) if ret: self.hook.pytest_plugin_registered.call_historic( - kwargs=dict(plugin=plugin, manager=self)) + kwargs=dict(plugin=plugin, manager=self)) + + if isinstance(plugin, types.ModuleType): + self.consider_module(plugin) return ret def getplugin(self, name): @@ -265,11 +282,11 @@ # XXX now that the pluginmanager exposes hookimpl(tryfirst...)
# we should remove tryfirst/trylast as markers config.addinivalue_line("markers", - "tryfirst: mark a hook implementation function such that the " - "plugin machinery will try to call it first/as early as possible.") + "tryfirst: mark a hook implementation function such that the " + "plugin machinery will try to call it first/as early as possible.") config.addinivalue_line("markers", - "trylast: mark a hook implementation function such that the " - "plugin machinery will try to call it last/as late as possible.") + "trylast: mark a hook implementation function such that the " + "plugin machinery will try to call it last/as late as possible.") def _warn(self, message): kwargs = message if isinstance(message, dict) else { @@ -293,7 +310,7 @@ class PytestPluginManager(PluginManager): """ current = py.path.local() self._confcutdir = current.join(namespace.confcutdir, abs=True) \ - if namespace.confcutdir else None + if namespace.confcutdir else None self._noconftest = namespace.noconftest testpaths = namespace.file_or_dir foundanchor = False @@ -304,7 +321,7 @@ class PytestPluginManager(PluginManager): if i != -1: path = path[:i] anchor = current.join(path, abs=1) - if exists(anchor): # we found some file object + if exists(anchor): # we found some file object self._try_load_conftest(anchor) foundanchor = True if not foundanchor: @@ -371,7 +388,7 @@ class PytestPluginManager(PluginManager): if path and path.relto(dirpath) or path == dirpath: assert mod not in mods mods.append(mod) - self.trace("loaded conftestmodule %r" %(mod)) + self.trace("loaded conftestmodule %r" % (mod)) self.consider_conftest(mod) return mod @@ -381,7 +398,7 @@ class PytestPluginManager(PluginManager): # def consider_preparse(self, args): - for opt1,opt2 in zip(args, args[1:]): + for opt1, opt2 in zip(args, args[1:]): if opt1 == "-p": self.consider_pluginarg(opt2) @@ -395,38 +412,33 @@ class PytestPluginManager(PluginManager): self.import_plugin(arg) def consider_conftest(self, conftestmodule): - if self.register(conftestmodule, name=conftestmodule.__file__): - self.consider_module(conftestmodule) + self.register(conftestmodule, name=conftestmodule.__file__) def consider_env(self): self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS")) def consider_module(self, mod): - plugins = getattr(mod, 'pytest_plugins', []) - if isinstance(plugins, str): - plugins = [plugins] - self.rewrite_hook.mark_rewrite(*plugins) - self._import_plugin_specs(plugins) + self._import_plugin_specs(getattr(mod, 'pytest_plugins', [])) def _import_plugin_specs(self, spec): - if spec: - if isinstance(spec, str): - spec = spec.split(",") - for import_spec in spec: - self.import_plugin(import_spec) + plugins = _get_plugin_specs_as_list(spec) + for import_spec in plugins: + self.import_plugin(import_spec) def import_plugin(self, modname): # most often modname refers to builtin modules, e.g. "pytester", # "terminal" or "capture". Those plugins are registered under their # basename for historic purposes but must be imported with the # _pytest prefix. - assert isinstance(modname, str) + assert isinstance(modname, (py.builtin.text, str)), "module name as text required, got %r" % modname + modname = str(modname) if self.get_plugin(modname) is not None: return if modname in builtin_plugins: importspec = "_pytest." 
+ modname else: importspec = modname + self.rewrite_hook.mark_rewrite(importspec) try: __import__(importspec) except ImportError as e: @@ -440,11 +452,28 @@ class PytestPluginManager(PluginManager): import pytest if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception): raise - self._warn("skipped plugin %r: %s" %((modname, e.msg))) + self._warn("skipped plugin %r: %s" % ((modname, e.msg))) else: mod = sys.modules[importspec] self.register(mod, modname) - self.consider_module(mod) + + +def _get_plugin_specs_as_list(specs): + """ + Parses a list of "plugin specs" and returns a list of plugin names. + + Plugin specs can be given as a list of strings separated by "," or already as a list/tuple in + which case it is returned as a list. Specs can also be `None` in which case an + empty list is returned. + """ + if specs is not None: + if isinstance(specs, str): + specs = specs.split(',') if specs else [] + if not isinstance(specs, (list, tuple)): + raise UsageError("Plugin specs must be a ','-separated string or a " + "list/tuple of strings for plugin names. Given: %r" % specs) + return list(specs) + return [] class Parser: @@ -488,7 +517,7 @@ class Parser: for i, grp in enumerate(self._groups): if grp.name == after: break - self._groups.insert(i+1, group) + self._groups.insert(i + 1, group) return group def addoption(self, *opts, **attrs): @@ -526,7 +555,7 @@ class Parser: a = option.attrs() arggroup.add_argument(*n, **a) # bash like autocompletion for dirs (appending '/') - optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter + optparser.add_argument(FILE_OR_DIR, nargs='*').completer = filescompleter return optparser def parse_setoption(self, args, option, namespace=None): @@ -670,7 +699,7 @@ class Argument: if self._attrs.get('help'): a = self._attrs['help'] a = a.replace('%default', '%(default)s') - #a = a.replace('%prog', '%(prog)s') + # a = a.replace('%prog', '%(prog)s') self._attrs['help'] = a return self._attrs @@ -754,7 +783,7 @@ class MyOptionParser(argparse.ArgumentParser): extra_info = {} self._parser = parser argparse.ArgumentParser.__init__(self, usage=parser._usage, - add_help=False, formatter_class=DropShorterLongHelpFormatter) + add_help=False, formatter_class=DropShorterLongHelpFormatter) # extra_info is a dict of (param -> value) to display if there's # an usage error to provide more contextual information to the user self.extra_info = extra_info @@ -782,9 +811,10 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): - shortcut if there are only two options and one of them is a short one - cache result on action object as this is called at least 2 times """ + def _format_action_invocation(self, action): orgstr = argparse.HelpFormatter._format_action_invocation(self, action) - if orgstr and orgstr[0] != '-': # only optional arguments + if orgstr and orgstr[0] != '-': # only optional arguments return orgstr res = getattr(action, '_formatted_action_invocation', None) if res: @@ -795,7 +825,7 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): action._formatted_action_invocation = orgstr return orgstr return_list = [] - option_map = getattr(action, 'map_long_option', {}) + option_map = getattr(action, 'map_long_option', {}) if option_map is None: option_map = {} short_long = {} @@ -813,7 +843,7 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): short_long[shortened] = xxoption # now short_long has been filled out to the longest with dashes # **and** we keep the right option ordering from add_argument - for 
option in options: # + for option in options: if len(option) == 2 or option[2] == ' ': return_list.append(option) if option[2:] == short_long.get(option.replace('-', '')): @@ -822,22 +852,26 @@ class DropShorterLongHelpFormatter(argparse.HelpFormatter): return action._formatted_action_invocation - def _ensure_removed_sysmodule(modname): try: del sys.modules[modname] except KeyError: pass + class CmdOptions(object): """ holds cmdline options as attributes.""" + def __init__(self, values=()): self.__dict__.update(values) + def __repr__(self): - return "" %(self.__dict__,) + return "" % (self.__dict__,) + def copy(self): return CmdOptions(self.__dict__) + class Notset: def __repr__(self): return "" @@ -847,6 +881,18 @@ notset = Notset() FILE_OR_DIR = 'file_or_dir' +def _iter_rewritable_modules(package_files): + for fn in package_files: + is_simple_module = '/' not in fn and fn.endswith('.py') + is_package = fn.count('/') == 1 and fn.endswith('__init__.py') + if is_simple_module: + module_name, _ = os.path.splitext(fn) + yield module_name + elif is_package: + package_name = os.path.dirname(fn) + yield package_name + + class Config(object): """ access to configuration values, pluginmanager and plugin hooks. """ @@ -864,6 +910,7 @@ class Config(object): self.trace = self.pluginmanager.trace.root.get("config") self.hook = self.pluginmanager.hook self._inicache = {} + self._override_ini = () self._opt2dest = {} self._cleanup = [] self._warn = self.pluginmanager._warn @@ -896,11 +943,11 @@ class Config(object): fin = self._cleanup.pop() fin() - def warn(self, code, message, fslocation=None): + def warn(self, code, message, fslocation=None, nodeid=None): """ generate a warning for this test session. """ self.hook.pytest_logwarning.call_historic(kwargs=dict( code=code, message=message, - fslocation=fslocation, nodeid=None)) + fslocation=fslocation, nodeid=nodeid)) def get_terminal_writer(self): return self.pluginmanager.get_plugin("terminalreporter")._tw @@ -916,14 +963,14 @@ class Config(object): else: style = "native" excrepr = excinfo.getrepr(funcargs=True, - showlocals=getattr(option, 'showlocals', False), - style=style, - ) + showlocals=getattr(option, 'showlocals', False), + style=style, + ) res = self.hook.pytest_internalerror(excrepr=excrepr, excinfo=excinfo) if not py.builtin.any(res): for line in str(excrepr).split("\n"): - sys.stderr.write("INTERNALERROR> %s\n" %line) + sys.stderr.write("INTERNALERROR> %s\n" % line) sys.stderr.flush() def cwd_relative_nodeid(self, nodeid): @@ -964,8 +1011,9 @@ class Config(object): self.invocation_dir = py.path.local() self._parser.addini('addopts', 'extra command line options', 'args') self._parser.addini('minversion', 'minimally required pytest version') + self._override_ini = ns.override_ini or () - def _consider_importhook(self, args, entrypoint_name): + def _consider_importhook(self, args): """Install the PEP 302 import hook if using assertion re-writing. 
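The `_iter_rewritable_modules` helper factored out above has a small, testable contract; the sketch below restates it outside the diff, with an assumed sample file list.

```python
# Sketch of the _iter_rewritable_modules contract: entries from
# RECORD/SOURCES.txt style file lists are mapped to top-level module
# names or package names that can be marked for assertion rewriting.
import os

def _iter_rewritable_modules(package_files):
    for fn in package_files:
        is_simple_module = '/' not in fn and fn.endswith('.py')
        is_package = fn.count('/') == 1 and fn.endswith('__init__.py')
        if is_simple_module:
            module_name, _ = os.path.splitext(fn)
            yield module_name
        elif is_package:
            yield os.path.dirname(fn)

# top-level modules yield their stem, packages yield their directory
# name, and deeper files are ignored (assumed example inputs)
files = ['pytest_foo.py', 'pytest_bar/__init__.py', 'pytest_bar/impl.py']
assert list(_iter_rewritable_modules(files)) == ['pytest_foo', 'pytest_bar']
```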
Needs to parse the --assert= option from the commandline @@ -980,26 +1028,34 @@ class Config(object): except SystemError: mode = 'plain' else: - import pkg_resources - self.pluginmanager.rewrite_hook = hook - for entrypoint in pkg_resources.iter_entry_points('pytest11'): - # 'RECORD' available for plugins installed normally (pip install) - # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) - # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa - # so it shouldn't be an issue - for metadata in ('RECORD', 'SOURCES.txt'): - for entry in entrypoint.dist._get_metadata(metadata): - fn = entry.split(',')[0] - is_simple_module = os.sep not in fn and fn.endswith('.py') - is_package = fn.count(os.sep) == 1 and fn.endswith('__init__.py') - if is_simple_module: - module_name, ext = os.path.splitext(fn) - hook.mark_rewrite(module_name) - elif is_package: - package_name = os.path.dirname(fn) - hook.mark_rewrite(package_name) + self._mark_plugins_for_rewrite(hook) self._warn_about_missing_assertion(mode) + def _mark_plugins_for_rewrite(self, hook): + """ + Given an importhook, mark for rewrite any top-level + modules or packages in the distribution package for + all pytest plugins. + """ + import pkg_resources + self.pluginmanager.rewrite_hook = hook + + # 'RECORD' available for plugins installed normally (pip install) + # 'SOURCES.txt' available for plugins installed in dev mode (pip install -e) + # for installed plugins 'SOURCES.txt' returns an empty list, and vice-versa + # so it shouldn't be an issue + metadata_files = 'RECORD', 'SOURCES.txt' + + package_files = ( + entry.split(',')[0] + for entrypoint in pkg_resources.iter_entry_points('pytest11') + for metadata in metadata_files + for entry in entrypoint.dist._get_metadata(metadata) + ) + + for name in _iter_rewritable_modules(package_files): + hook.mark_rewrite(name) + def _warn_about_missing_assertion(self, mode): try: assert False @@ -1023,19 +1079,17 @@ class Config(object): args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args args[:] = self.getini("addopts") + args self._checkversion() - entrypoint_name = 'pytest11' - self._consider_importhook(args, entrypoint_name) + self._consider_importhook(args) self.pluginmanager.consider_preparse(args) - self.pluginmanager.load_setuptools_entrypoints(entrypoint_name) + self.pluginmanager.load_setuptools_entrypoints('pytest11') self.pluginmanager.consider_env() self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy()) - confcutdir = self.known_args_namespace.confcutdir if self.known_args_namespace.confcutdir is None and self.inifile: confcutdir = py.path.local(self.inifile).dirname self.known_args_namespace.confcutdir = confcutdir try: self.hook.pytest_load_initial_conftests(early_config=self, - args=args, parser=self._parser) + args=args, parser=self._parser) except ConftestImportFailure: e = sys.exc_info()[1] if ns.help or ns.version: @@ -1053,28 +1107,32 @@ class Config(object): myver = pytest.__version__.split(".") if myver < ver: raise pytest.UsageError( - "%s:%d: requires pytest-%s, actual pytest-%s'" %( - self.inicfg.config.path, self.inicfg.lineof('minversion'), - minver, pytest.__version__)) + "%s:%d: requires pytest-%s, actual pytest-%s'" % ( + self.inicfg.config.path, self.inicfg.lineof('minversion'), + minver, pytest.__version__)) def parse(self, args, addopts=True): # parse given cmdline arguments into this config object. 
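A short sketch of the `PYTEST_ADDOPTS` merging performed in `_preparse()` above; the environment value shown is an assumed example.

```python
# PYTEST_ADDOPTS is split with shell-like quoting rules and prepended
# to the command line arguments, so quoted expressions stay intact.
import shlex

env_value = '-x --maxfail=2 -k "not slow"'
assert shlex.split(env_value) == ['-x', '--maxfail=2', '-k', 'not slow']
```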
assert not hasattr(self, 'args'), ( - "can only parse cmdline args at most once per Config object") + "can only parse cmdline args at most once per Config object") self._origargs = args self.hook.pytest_addhooks.call_historic( - kwargs=dict(pluginmanager=self.pluginmanager)) + kwargs=dict(pluginmanager=self.pluginmanager)) self._preparse(args, addopts=addopts) # XXX deprecated hook: self.hook.pytest_cmdline_preparse(config=self, args=args) - args = self._parser.parse_setoption(args, self.option, namespace=self.option) - if not args: - cwd = os.getcwd() - if cwd == self.rootdir: - args = self.getini('testpaths') + self._parser.after_preparse = True + try: + args = self._parser.parse_setoption(args, self.option, namespace=self.option) if not args: - args = [cwd] - self.args = args + cwd = os.getcwd() + if cwd == self.rootdir: + args = self.getini('testpaths') + if not args: + args = [cwd] + self.args = args + except PrintHelp: + pass def addinivalue_line(self, name, line): """ add a line to an ini-file option. The option must have been @@ -1082,12 +1140,12 @@ class Config(object): the first line in its value. """ x = self.getini(name) assert isinstance(x, list) - x.append(line) # modifies the cached list inline + x.append(line) # modifies the cached list inline def getini(self, name): """ return configuration value from an :ref:`ini file `. If the specified name hasn't been registered through a prior - :py:func:`parser.addini ` + :py:func:`parser.addini <_pytest.config.Parser.addini>` call (usually from a plugin), a ValueError is raised. """ try: return self._inicache[name] @@ -1099,7 +1157,7 @@ class Config(object): try: description, type, default = self._parser._inidict[name] except KeyError: - raise ValueError("unknown configuration value: %r" %(name,)) + raise ValueError("unknown configuration value: %r" % (name,)) value = self._get_override_ini_value(name) if value is None: try: @@ -1112,10 +1170,10 @@ class Config(object): return [] if type == "pathlist": dp = py.path.local(self.inicfg.config.path).dirpath() - l = [] + values = [] for relpath in shlex.split(value): - l.append(dp.join(relpath, abs=True)) - return l + values.append(dp.join(relpath, abs=True)) + return values elif type == "args": return shlex.split(value) elif type == "linelist": @@ -1132,13 +1190,13 @@ class Config(object): except KeyError: return None modpath = py.path.local(mod.__file__).dirpath() - l = [] + values = [] for relroot in relroots: if not isinstance(relroot, py.path.local): relroot = relroot.replace("/", py.path.local.sep) relroot = modpath.join(relroot, abs=True) - l.append(relroot) - return l + values.append(relroot) + return values def _get_override_ini_value(self, name): value = None @@ -1146,15 +1204,14 @@ class Config(object): # and -o foo1=bar1 -o foo2=bar2 options # always use the last item if multiple value set for same ini-name, # e.g. 
-o foo=bar1 -o foo=bar2 will set foo to bar2 - if self.getoption("override_ini", None): - for ini_config_list in self.option.override_ini: - for ini_config in ini_config_list: - try: - (key, user_ini_value) = ini_config.split("=", 1) - except ValueError: - raise UsageError("-o/--override-ini expects option=value style.") - if key == name: - value = user_ini_value + for ini_config_list in self._override_ini: + for ini_config in ini_config_list: + try: + (key, user_ini_value) = ini_config.split("=", 1) + except ValueError: + raise UsageError("-o/--override-ini expects option=value style.") + if key == name: + value = user_ini_value return value def getoption(self, name, default=notset, skip=False): @@ -1177,7 +1234,7 @@ class Config(object): return default if skip: import pytest - pytest.skip("no %r option found" %(name,)) + pytest.skip("no %r option found" % (name,)) raise ValueError("no option named %r" % (name,)) def getvalue(self, name, path=None): @@ -1188,12 +1245,14 @@ class Config(object): """ (deprecated, use getoption(skip=True)) """ return self.getoption(name, skip=True) + def exists(path, ignore=EnvironmentError): try: return path.check() except ignore: return False + def getcfg(args, warnfunc=None): """ Search the list of arguments for a valid ini-file for pytest, @@ -1228,25 +1287,20 @@ def getcfg(args, warnfunc=None): return None, None, None -def get_common_ancestor(args): - # args are what we get after early command line parsing (usually - # strings, but can be py.path.local objects as well) +def get_common_ancestor(paths): common_ancestor = None - for arg in args: - if str(arg)[0] == "-": - continue - p = py.path.local(arg) - if not p.exists(): + for path in paths: + if not path.exists(): continue if common_ancestor is None: - common_ancestor = p + common_ancestor = path else: - if p.relto(common_ancestor) or p == common_ancestor: + if path.relto(common_ancestor) or path == common_ancestor: continue - elif common_ancestor.relto(p): - common_ancestor = p + elif common_ancestor.relto(path): + common_ancestor = path else: - shared = p.common(common_ancestor) + shared = path.common(common_ancestor) if shared is not None: common_ancestor = shared if common_ancestor is None: @@ -1257,9 +1311,29 @@ def get_common_ancestor(args): def get_dirs_from_args(args): - return [d for d in (py.path.local(x) for x in args - if not str(x).startswith("-")) - if d.exists()] + def is_option(x): + return str(x).startswith('-') + + def get_file_part_from_node_id(x): + return str(x).split('::')[0] + + def get_dir_from_path(path): + if path.isdir(): + return path + return py.path.local(path.dirname) + + # These look like paths but may not exist + possible_paths = ( + py.path.local(get_file_part_from_node_id(arg)) + for arg in args + if not is_option(arg) + ) + + return [ + get_dir_from_path(path) + for path in possible_paths + if path.exists() + ] def determine_setup(inifile, args, warnfunc=None): @@ -1282,7 +1356,7 @@ def determine_setup(inifile, args, warnfunc=None): rootdir, inifile, inicfg = getcfg(dirs, warnfunc=warnfunc) if rootdir is None: rootdir = get_common_ancestor([py.path.local(), ancestor]) - is_fs_root = os.path.splitdrive(str(rootdir))[1] == os.sep + is_fs_root = os.path.splitdrive(str(rootdir))[1] == '/' if is_fs_root: rootdir = ancestor return rootdir, inifile, inicfg or {} @@ -1304,7 +1378,7 @@ def setns(obj, dic): else: setattr(obj, name, value) obj.__all__.append(name) - #if obj != pytest: + # if obj != pytest: # pytest.__all__.append(name) setattr(pytest, name, value) diff --git 
a/_pytest/debugging.py b/_pytest/debugging.py index d96170bd8..aa9c9a386 100644 --- a/_pytest/debugging.py +++ b/_pytest/debugging.py @@ -1,10 +1,8 @@ """ interactive debugging with PDB, the Python Debugger. """ -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import pdb import sys -import pytest - def pytest_addoption(parser): group = parser.getgroup("general") @@ -16,19 +14,17 @@ def pytest_addoption(parser): help="start a custom interactive Python debugger on errors. " "For example: --pdbcls=IPython.terminal.debugger:TerminalPdb") -def pytest_namespace(): - return {'set_trace': pytestPDB().set_trace} def pytest_configure(config): - if config.getvalue("usepdb") or config.getvalue("usepdb_cls"): + if config.getvalue("usepdb_cls"): + modname, classname = config.getvalue("usepdb_cls").split(":") + __import__(modname) + pdb_cls = getattr(sys.modules[modname], classname) + else: + pdb_cls = pdb.Pdb + + if config.getvalue("usepdb"): config.pluginmanager.register(PdbInvoke(), 'pdbinvoke') - if config.getvalue("usepdb_cls"): - modname, classname = config.getvalue("usepdb_cls").split(":") - __import__(modname) - pdb_cls = getattr(sys.modules[modname], classname) - else: - pdb_cls = pdb.Pdb - pytestPDB._pdb_cls = pdb_cls old = (pdb.set_trace, pytestPDB._pluginmanager) @@ -37,30 +33,33 @@ def pytest_configure(config): pytestPDB._config = None pytestPDB._pdb_cls = pdb.Pdb - pdb.set_trace = pytest.set_trace + pdb.set_trace = pytestPDB.set_trace pytestPDB._pluginmanager = config.pluginmanager pytestPDB._config = config + pytestPDB._pdb_cls = pdb_cls config._cleanup.append(fin) + class pytestPDB: """ Pseudo PDB that defers to the real pdb. """ _pluginmanager = None _config = None _pdb_cls = pdb.Pdb - def set_trace(self): + @classmethod + def set_trace(cls): """ invoke PDB set_trace debugging, dropping any IO capturing. """ import _pytest.config frame = sys._getframe().f_back - if self._pluginmanager is not None: - capman = self._pluginmanager.getplugin("capturemanager") + if cls._pluginmanager is not None: + capman = cls._pluginmanager.getplugin("capturemanager") if capman: capman.suspendcapture(in_=True) - tw = _pytest.config.create_terminal_writer(self._config) + tw = _pytest.config.create_terminal_writer(cls._config) tw.line() tw.sep(">", "PDB set_trace (IO-capturing turned off)") - self._pluginmanager.hook.pytest_enter_pdb(config=self._config) - self._pdb_cls().set_trace(frame) + cls._pluginmanager.hook.pytest_enter_pdb(config=cls._config) + cls._pdb_cls().set_trace(frame) class PdbInvoke: @@ -74,7 +73,7 @@ class PdbInvoke: def pytest_internalerror(self, excrepr, excinfo): for line in str(excrepr).split("\n"): - sys.stderr.write("INTERNALERROR> %s\n" %line) + sys.stderr.write("INTERNALERROR> %s\n" % line) sys.stderr.flush() tb = _postmortem_traceback(excinfo) post_mortem(tb) diff --git a/_pytest/deprecated.py b/_pytest/deprecated.py index 6edc475f6..38e949677 100644 --- a/_pytest/deprecated.py +++ b/_pytest/deprecated.py @@ -5,10 +5,15 @@ that is planned to be removed in the next pytest release. Keeping it in a central location makes it easy to track what is deprecated and should be removed when the time comes. """ +from __future__ import absolute_import, division, print_function + + +class RemovedInPytest4Warning(DeprecationWarning): + """warning class for features removed in pytest 4.0""" MAIN_STR_ARGS = 'passing a string to pytest.main() is deprecated, ' \ - 'pass a list of arguments instead.' + 'pass a list of arguments instead.' 
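To make the reworked `--pdbcls` handling in `_pytest/debugging.py` above concrete, here is a hypothetical custom debugger module; `mypdb` and `MyPdb` are invented names for illustration.

```python
# mypdb.py -- the --pdbcls value is split on ':' into module and class,
# the module is imported, and the class replaces pdb.Pdb for set_trace
# and post-mortem debugging.
import pdb

class MyPdb(pdb.Pdb):
    def do_greet(self, arg):
        """custom debugger command added for demonstration"""
        print('hello from MyPdb')

# assumed invocation: pytest --pdb --pdbcls=mypdb:MyPdb
```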
YIELD_TESTS = 'yield tests are deprecated, and scheduled to be removed in pytest 4.0' @@ -21,4 +26,17 @@ SETUP_CFG_PYTEST = '[pytest] section in setup.cfg files is deprecated, use [tool GETFUNCARGVALUE = "use of getfuncargvalue is deprecated, use getfixturevalue" -RESULT_LOG = '--result-log is deprecated and scheduled for removal in pytest 4.0' +RESULT_LOG = ( + '--result-log is deprecated and scheduled for removal in pytest 4.0.\n' + 'See https://docs.pytest.org/en/latest/usage.html#creating-resultlog-format-files for more information.' +) + +MARK_INFO_ATTRIBUTE = RemovedInPytest4Warning( + "MarkInfo objects are deprecated as they contain the merged marks" +) + +MARK_PARAMETERSET_UNPACKING = RemovedInPytest4Warning( + "Applying marks directly to parameters is deprecated," + " please use pytest.param(..., marks=...) instead.\n" + "For more details, see: https://docs.pytest.org/en/latest/parametrize.html" +) diff --git a/_pytest/doctest.py b/_pytest/doctest.py index f4782dded..4c05acddf 100644 --- a/_pytest/doctest.py +++ b/_pytest/doctest.py @@ -1,5 +1,5 @@ """ discover and run doctests in modules and test files.""" -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import traceback @@ -22,27 +22,29 @@ DOCTEST_REPORT_CHOICES = ( DOCTEST_REPORT_CHOICE_ONLY_FIRST_FAILURE, ) + def pytest_addoption(parser): parser.addini('doctest_optionflags', 'option flags for doctests', - type="args", default=["ELLIPSIS"]) + type="args", default=["ELLIPSIS"]) + parser.addini("doctest_encoding", 'encoding used for doctest files', default="utf-8") group = parser.getgroup("collect") group.addoption("--doctest-modules", - action="store_true", default=False, - help="run doctests in all .py modules", - dest="doctestmodules") + action="store_true", default=False, + help="run doctests in all .py modules", + dest="doctestmodules") group.addoption("--doctest-report", - type=str.lower, default="udiff", - help="choose another output format for diffs on doctest failure", - choices=DOCTEST_REPORT_CHOICES, - dest="doctestreport") + type=str.lower, default="udiff", + help="choose another output format for diffs on doctest failure", + choices=DOCTEST_REPORT_CHOICES, + dest="doctestreport") group.addoption("--doctest-glob", - action="append", default=[], metavar="pat", - help="doctests file matching pattern, default: test*.txt", - dest="doctestglob") + action="append", default=[], metavar="pat", + help="doctests file matching pattern, default: test*.txt", + dest="doctestglob") group.addoption("--doctest-ignore-import-errors", - action="store_true", default=False, - help="ignore doctest ImportErrors", - dest="doctest_ignore_import_errors") + action="store_true", default=False, + help="ignore doctest ImportErrors", + dest="doctest_ignore_import_errors") def pytest_collect_file(path, parent): @@ -118,7 +120,7 @@ class DoctestItem(pytest.Item): lines = ["%03d %s" % (i + test.lineno + 1, x) for (i, x) in enumerate(lines)] # trim docstring error lines to 10 - lines = lines[example.lineno - 9:example.lineno + 1] + lines = lines[max(example.lineno - 9, 0):example.lineno + 1] else: lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example'] indent = '>>>' @@ -127,18 +129,18 @@ class DoctestItem(pytest.Item): indent = '...' 
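An assumed illustration of the replacement API named in the `MARK_PARAMETERSET_UNPACKING` warning above: marks are attached through `pytest.param(...)` rather than applied directly to a parameter value.

```python
import pytest

@pytest.mark.parametrize('n', [
    1,
    # the mark travels with the parameter instead of decorating the value
    pytest.param(0, marks=pytest.mark.xfail(reason='zero is rejected')),
])
def test_positive(n):
    assert n > 0
```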
if excinfo.errisinstance(doctest.DocTestFailure): lines += checker.output_difference(example, - doctestfailure.got, report_choice).split("\n") + doctestfailure.got, report_choice).split("\n") else: inner_excinfo = ExceptionInfo(excinfo.value.exc_info) lines += ["UNEXPECTED EXCEPTION: %s" % - repr(inner_excinfo.value)] + repr(inner_excinfo.value)] lines += traceback.format_exception(*excinfo.value.exc_info) return ReprFailDoctest(reprlocation, lines) else: return super(DoctestItem, self).repr_failure(excinfo) def reportinfo(self): - return self.fspath, None, "[doctest] %s" % self.name + return self.fspath, self.dtest.lineno, "[doctest] %s" % self.name def _get_flag_lookup(): @@ -171,15 +173,16 @@ class DoctestTextfile(pytest.Module): # inspired by doctest.testfile; ideally we would use it directly, # but it doesn't support passing a custom checker - text = self.fspath.read() + encoding = self.config.getini("doctest_encoding") + text = self.fspath.read_text(encoding) filename = str(self.fspath) name = self.fspath.basename globs = {'__name__': '__main__'} - optionflags = get_optionflags(self) runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, checker=_get_checker()) + _fix_spoof_python2(runner, encoding) parser = doctest.DocTestParser() test = parser.get_doctest(text, globs, name, filename, 0) @@ -215,6 +218,7 @@ class DoctestModule(pytest.Module): optionflags = get_optionflags(self) runner = doctest.DebugRunner(verbose=0, optionflags=optionflags, checker=_get_checker()) + for test in finder.find(module, module.__name__): if test.examples: # skip empty doctests yield DoctestItem(test.name, self, runner, test) @@ -323,6 +327,33 @@ def _get_report_choice(key): DOCTEST_REPORT_CHOICE_NONE: 0, }[key] + +def _fix_spoof_python2(runner, encoding): + """ + Installs a "SpoofOut" into the given DebugRunner so it properly deals with unicode output. This + should patch only doctests for text files because they don't have a way to declare their + encoding. Doctests in docstrings from Python modules don't have the same problem given that + Python already decoded the strings. + + This fixes the problem reported in issue #2434.
+ """ + from _pytest.compat import _PY2 + if not _PY2: + return + + from doctest import _SpoofOut + + class UnicodeSpoof(_SpoofOut): + + def getvalue(self): + result = _SpoofOut.getvalue(self) + if encoding: + result = result.decode(encoding) + return result + + runner._fakeout = UnicodeSpoof() + + @pytest.fixture(scope='session') def doctest_namespace(): """ diff --git a/_pytest/fixtures.py b/_pytest/fixtures.py index b951ae818..7b50b8574 100644 --- a/_pytest/fixtures.py +++ b/_pytest/fixtures.py @@ -1,22 +1,39 @@ -import sys - -from py._code.code import FormattedExcinfo - -import py -import pytest -import warnings +from __future__ import absolute_import, division, print_function import inspect +import sys +import warnings + +import py +from py._code.code import FormattedExcinfo + import _pytest +from _pytest import nodes from _pytest._code.code import TerminalRepr from _pytest.compat import ( NOTSET, exc_clear, _format_args, getfslineno, get_real_func, is_generator, isclass, getimfunc, getlocation, getfuncargnames, + safe_getattr, + FuncargnamesCompatAttr, ) +from _pytest.outcomes import fail, TEST_OUTCOME + + +if sys.version_info[:2] == (2, 6): + from ordereddict import OrderedDict +else: + from collections import OrderedDict + def pytest_sessionstart(session): + import _pytest.python + scopename2class.update({ + 'class': _pytest.python.Class, + 'module': _pytest.python.Module, + 'function': _pytest.main.Item, + }) session._fixturemanager = FixtureManager(session) @@ -29,6 +46,7 @@ scope2props["class"] = scope2props["module"] + ("cls",) scope2props["instance"] = scope2props["class"] + ("instance", ) scope2props["function"] = scope2props["instance"] + ("function", "keywords") + def scopeproperty(name=None, doc=None): def decoratescope(func): scopename = name or func.__name__ @@ -43,19 +61,6 @@ def scopeproperty(name=None, doc=None): return decoratescope -def pytest_namespace(): - scopename2class.update({ - 'class': pytest.Class, - 'module': pytest.Module, - 'function': pytest.Item, - }) - return { - 'fixture': fixture, - 'yield_fixture': yield_fixture, - 'collect': {'_fillfuncargs': fillfixtures} - } - - def get_scope_node(node, scope): cls = scopename2class.get(scope) if cls is None: @@ -73,7 +78,7 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): # XXX we can probably avoid this algorithm if we modify CallSpec2 # to directly care for creating the fixturedefs within its methods. 
if not metafunc._calls[0].funcargs: - return # this function call does not have direct parametrization + return # this function call does not have direct parametrization # collect funcargs of all callspecs into a list of values arg2params = {} arg2scope = {} @@ -103,36 +108,32 @@ def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager): if scope != "function": node = get_scope_node(collector, scope) if node is None: - assert scope == "class" and isinstance(collector, pytest.Module) + assert scope == "class" and isinstance(collector, _pytest.python.Module) # use module-level collector for class-scope (for now) node = collector if node and argname in node._name2pseudofixturedef: arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]] else: - fixturedef = FixtureDef(fixturemanager, '', argname, - get_direct_param_fixture_func, - arg2scope[argname], - valuelist, False, False) + fixturedef = FixtureDef(fixturemanager, '', argname, + get_direct_param_fixture_func, + arg2scope[argname], + valuelist, False, False) arg2fixturedefs[argname] = [fixturedef] if node is not None: node._name2pseudofixturedef[argname] = fixturedef - def getfixturemarker(obj): """ return fixturemarker or None if it doesn't exist or raised exceptions.""" try: return getattr(obj, "_pytestfixturefunction", None) - except KeyboardInterrupt: - raise - except Exception: + except TEST_OUTCOME: # some objects raise errors like request (from flask import request) # we don't expect them to be fixture functions return None - def get_parametrized_fixture_keys(item, scopenum): """ return list of keys for all parametrized arguments which match the specified scope. """ @@ -142,10 +143,10 @@ def get_parametrized_fixture_keys(item, scopenum): except AttributeError: pass else: - # cs.indictes.items() is random order of argnames but - # then again different functions (items) can change order of - # arguments so it doesn't matter much probably - for argname, param_index in cs.indices.items(): + # cs.indices.items() is random order of argnames. Need to + # sort this so that different calls to + # get_parametrized_fixture_keys will be deterministic. 
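The determinism argument in the comment above is easiest to see in isolation; the sketch below is illustrative only.

```python
from collections import OrderedDict

# sorted() yields a stable key sequence regardless of dict order
# (dict order is arbitrary before Python 3.7)
indices = {'b': 1, 'a': 0, 'c': 2}
assert sorted(indices.items()) == [('a', 0), ('b', 1), ('c', 2)]

# OrderedDict.fromkeys keeps first-seen order, and popitem() removes
# the most recently added key -- the behaviour the slicing code relies on
keys = OrderedDict.fromkeys(['x', 'y', 'z'])
last, _ = keys.popitem()
assert last == 'z'
```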
+ for argname, param_index in sorted(cs.indices.items()): if cs._arg2scopenum[argname] != scopenum: continue if scopenum == 0: # session @@ -167,20 +168,21 @@ def reorder_items(items): for scopenum in range(0, scopenum_function): argkeys_cache[scopenum] = d = {} for item in items: - keys = set(get_parametrized_fixture_keys(item, scopenum)) + keys = OrderedDict.fromkeys(get_parametrized_fixture_keys(item, scopenum)) if keys: d[item] = keys return reorder_items_atscope(items, set(), argkeys_cache, 0) + def reorder_items_atscope(items, ignore, argkeys_cache, scopenum): if scopenum >= scopenum_function or len(items) < 3: return items items_done = [] while 1: items_before, items_same, items_other, newignore = \ - slice_items(items, ignore, argkeys_cache[scopenum]) + slice_items(items, ignore, argkeys_cache[scopenum]) items_before = reorder_items_atscope( - items_before, ignore, argkeys_cache,scopenum+1) + items_before, ignore, argkeys_cache, scopenum + 1) if items_same is None: # nothing to reorder in this scope assert items_other is None @@ -201,9 +203,9 @@ def slice_items(items, ignore, scoped_argkeys_cache): for i, item in enumerate(it): argkeys = scoped_argkeys_cache.get(item) if argkeys is not None: - argkeys = argkeys.difference(ignore) - if argkeys: # found a slicing key - slicing_argkey = argkeys.pop() + newargkeys = OrderedDict.fromkeys(k for k in argkeys if k not in ignore) + if newargkeys: # found a slicing key + slicing_argkey, _ = newargkeys.popitem() items_before = items[:i] items_same = [item] items_other = [] @@ -211,7 +213,7 @@ def slice_items(items, ignore, scoped_argkeys_cache): for item in it: argkeys = scoped_argkeys_cache.get(item) if argkeys and slicing_argkey in argkeys and \ - slicing_argkey not in ignore: + slicing_argkey not in ignore: items_same.append(item) else: items_other.append(item) @@ -221,17 +223,6 @@ def slice_items(items, ignore, scoped_argkeys_cache): return items, None, None, None - -class FuncargnamesCompatAttr: - """ helper class so that Metafunc, Function and FixtureRequest - don't need to each define the "funcargnames" compatibility attribute. - """ - @property - def funcargnames(self): - """ alias attribute for ``fixturenames`` for pre-2.3 compatibility""" - return self.fixturenames - - def fillfixtures(function): """ fill missing funcargs for a test function. """ try: @@ -254,10 +245,10 @@ def fillfixtures(function): request._fillfixtures() - def get_direct_param_fixture_func(request): return request.param + class FuncFixtureInfo: def __init__(self, argnames, names_closure, name2fixturedefs): self.argnames = argnames @@ -296,7 +287,6 @@ class FixtureRequest(FuncargnamesCompatAttr): """ underlying collection node (depends on current request scope)""" return self._getscopeitem(self.scope) - def _getnextfixturedef(self, argname): fixturedefs = self._arg2fixturedefs.get(argname, None) if fixturedefs is None: @@ -318,7 +308,6 @@ class FixtureRequest(FuncargnamesCompatAttr): """ the pytest config object associated with this request. """ return self._pyfuncitem.config - @scopeproperty() def function(self): """ test function object if the request has a per-function scope. """ @@ -327,7 +316,7 @@ class FixtureRequest(FuncargnamesCompatAttr): @scopeproperty("class") def cls(self): """ class (can be None) where the test function was collected. 
""" - clscol = self._pyfuncitem.getparent(pytest.Class) + clscol = self._pyfuncitem.getparent(_pytest.python.Class) if clscol: return clscol.obj @@ -345,7 +334,7 @@ class FixtureRequest(FuncargnamesCompatAttr): @scopeproperty() def module(self): """ python module object where the test function was collected. """ - return self._pyfuncitem.getparent(pytest.Module).obj + return self._pyfuncitem.getparent(_pytest.python.Module).obj @scopeproperty() def fspath(self): @@ -414,7 +403,7 @@ class FixtureRequest(FuncargnamesCompatAttr): :arg extrakey: added to internal caching key of (funcargname, scope). """ if not hasattr(self.config, '_setupcache'): - self.config._setupcache = {} # XXX weakref? + self.config._setupcache = {} # XXX weakref? cachekey = (self.fixturename, self._getscopeitem(scope), extrakey) cache = self.config._setupcache try: @@ -445,7 +434,8 @@ class FixtureRequest(FuncargnamesCompatAttr): from _pytest import deprecated warnings.warn( deprecated.GETFUNCARGVALUE, - DeprecationWarning) + DeprecationWarning, + stacklevel=2) return self.getfixturevalue(argname) def _get_active_fixturedef(self, argname): @@ -470,13 +460,13 @@ class FixtureRequest(FuncargnamesCompatAttr): def _get_fixturestack(self): current = self - l = [] + values = [] while 1: fixturedef = getattr(current, "_fixturedef", None) if fixturedef is None: - l.reverse() - return l - l.append(fixturedef) + values.reverse() + return values + values.append(fixturedef) current = current._parent_request def _getfixturevalue(self, fixturedef): @@ -508,7 +498,7 @@ class FixtureRequest(FuncargnamesCompatAttr): source_lineno, ) ) - pytest.fail(msg) + fail(msg) else: # indices might not be set if old-style metafunc.addcall() was used param_index = funcitem.callspec.indices.get(argname, 0) @@ -541,11 +531,11 @@ class FixtureRequest(FuncargnamesCompatAttr): if scopemismatch(invoking_scope, requested_scope): # try to report something helpful lines = self._factorytraceback() - pytest.fail("ScopeMismatch: You tried to access the %r scoped " - "fixture %r with a %r scoped request object, " - "involved factories\n%s" %( - (requested_scope, argname, invoking_scope, "\n".join(lines))), - pytrace=False) + fail("ScopeMismatch: You tried to access the %r scoped " + "fixture %r with a %r scoped request object, " + "involved factories\n%s" % ( + (requested_scope, argname, invoking_scope, "\n".join(lines))), + pytrace=False) def _factorytraceback(self): lines = [] @@ -554,7 +544,7 @@ class FixtureRequest(FuncargnamesCompatAttr): fs, lineno = getfslineno(factory) p = self._pyfuncitem.session.fspath.bestrelpath(fs) args = _format_args(factory) - lines.append("%s:%d: def %s%s" %( + lines.append("%s:%d: def %s%s" % ( p, lineno, factory.__name__, args)) return lines @@ -570,12 +560,13 @@ class FixtureRequest(FuncargnamesCompatAttr): return node def __repr__(self): - return "" %(self.node) + return "" % (self.node) class SubRequest(FixtureRequest): """ a sub request for handling getting a fixture from a test function/fixture. 
""" + def __init__(self, request, scope, param, param_index, fixturedef): self._parent_request = request self.fixturename = fixturedef.argname @@ -584,9 +575,8 @@ class SubRequest(FixtureRequest): self.param_index = param_index self.scope = scope self._fixturedef = fixturedef - self.addfinalizer = fixturedef.addfinalizer self._pyfuncitem = request._pyfuncitem - self._fixture_values = request._fixture_values + self._fixture_values = request._fixture_values self._fixture_defs = request._fixture_defs self._arg2fixturedefs = request._arg2fixturedefs self._arg2index = request._arg2index @@ -595,6 +585,9 @@ class SubRequest(FixtureRequest): def __repr__(self): return "" % (self.fixturename, self._pyfuncitem) + def addfinalizer(self, finalizer): + self._fixturedef.addfinalizer(finalizer) + class ScopeMismatchError(Exception): """ A fixture function tries to use a different fixture function which @@ -626,6 +619,7 @@ def scope2index(scope, descr, where=None): class FixtureLookupError(LookupError): """ could not return a requested Fixture (missing or invalid). """ + def __init__(self, argname, request, msg=None): self.argname = argname self.request = request @@ -648,9 +642,9 @@ class FixtureLookupError(LookupError): lines, _ = inspect.getsourcelines(get_real_func(function)) except (IOError, IndexError, TypeError): error_msg = "file %s, line %s: source code not available" - addline(error_msg % (fspath, lineno+1)) + addline(error_msg % (fspath, lineno + 1)) else: - addline("file %s, line %s" % (fspath, lineno+1)) + addline("file %s, line %s" % (fspath, lineno + 1)) for i, line in enumerate(lines): line = line.rstrip() addline(" " + line) @@ -666,7 +660,7 @@ class FixtureLookupError(LookupError): if faclist and name not in available: available.append(name) msg = "fixture %r not found" % (self.argname,) - msg += "\n available fixtures: %s" %(", ".join(sorted(available)),) + msg += "\n available fixtures: %s" % (", ".join(sorted(available)),) msg += "\n use 'pytest --fixtures [testpath]' for help on them." return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname) @@ -692,15 +686,16 @@ class FixtureLookupErrorRepr(TerminalRepr): tw.line('{0} {1}'.format(FormattedExcinfo.flow_marker, line.strip()), red=True) tw.line() - tw.line("%s:%d" % (self.filename, self.firstlineno+1)) + tw.line("%s:%d" % (self.filename, self.firstlineno + 1)) def fail_fixturefunc(fixturefunc, msg): fs, lineno = getfslineno(fixturefunc) - location = "%s:%s" % (fs, lineno+1) + location = "%s:%s" % (fs, lineno + 1) source = _pytest._code.Source(fixturefunc) - pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, - pytrace=False) + fail(msg + ":\n\n" + str(source.indent()) + "\n" + location, + pytrace=False) + def call_fixture_func(fixturefunc, request, kwargs): yieldctx = is_generator(fixturefunc) @@ -715,7 +710,7 @@ def call_fixture_func(fixturefunc, request, kwargs): pass else: fail_fixturefunc(fixturefunc, - "yield_fixture function has more than one 'yield'") + "yield_fixture function has more than one 'yield'") request.addfinalizer(teardown) else: @@ -725,6 +720,7 @@ def call_fixture_func(fixturefunc, request, kwargs): class FixtureDef: """ A container for a factory definition. 
""" + def __init__(self, fixturemanager, baseid, argname, func, scope, params, unittest=False, ids=None): self._fixturemanager = fixturemanager @@ -749,10 +745,19 @@ class FixtureDef: self._finalizer.append(finalizer) def finish(self): + exceptions = [] try: while self._finalizer: - func = self._finalizer.pop() - func() + try: + func = self._finalizer.pop() + func() + except: # noqa + exceptions.append(sys.exc_info()) + if exceptions: + e = exceptions[0] + del exceptions # ensure we don't keep all frames alive because of the traceback + py.builtin._reraise(*e) + finally: hook = self._fixturemanager.session.config.hook hook.pytest_fixture_post_finalizer(fixturedef=self) @@ -792,6 +797,7 @@ class FixtureDef: return ("" % (self.argname, self.scope, self.baseid)) + def pytest_fixture_setup(fixturedef, request): """ Execution of fixture setup. """ kwargs = {} @@ -817,7 +823,7 @@ def pytest_fixture_setup(fixturedef, request): my_cache_key = request.param_index try: result = call_fixture_func(fixturefunc, request, kwargs) - except Exception: + except TEST_OUTCOME: fixturedef.cached_result = (None, my_cache_key, sys.exc_info()) raise fixturedef.cached_result = (result, my_cache_key, None) @@ -835,17 +841,16 @@ class FixtureFunctionMarker: def __call__(self, function): if isclass(function): raise ValueError( - "class fixtures not supported (may be in the future)") + "class fixtures not supported (may be in the future)") function._pytestfixturefunction = self return function - def fixture(scope="function", params=None, autouse=False, ids=None, name=None): """ (return a) decorator to mark a fixture factory function. - This decorator can be used (with or or without parameters) to define - a fixture function. The name of the fixture function can later be + This decorator can be used (with or without parameters) to define a + fixture function. The name of the fixture function can later be referenced to cause its invocation ahead of running tests: test modules or classes can use the pytest.mark.usefixtures(fixturename) marker. Test functions can directly use fixture names as input @@ -864,25 +869,25 @@ def fixture(scope="function", params=None, autouse=False, ids=None, name=None): reference is needed to activate the fixture. :arg ids: list of string ids each corresponding to the params - so that they are part of the test id. If no ids are provided - they will be generated automatically from the params. + so that they are part of the test id. If no ids are provided + they will be generated automatically from the params. :arg name: the name of the fixture. This defaults to the name of the - decorated function. If a fixture is used in the same module in - which it is defined, the function name of the fixture will be - shadowed by the function arg that requests the fixture; one way - to resolve this is to name the decorated function - ``fixture_`` and then use - ``@pytest.fixture(name='')``. + decorated function. If a fixture is used in the same module in + which it is defined, the function name of the fixture will be + shadowed by the function arg that requests the fixture; one way + to resolve this is to name the decorated function + ``fixture_`` and then use + ``@pytest.fixture(name='')``. Fixtures can optionally provide their values to test functions using a ``yield`` statement, instead of ``return``. In this case, the code block after the ``yield`` statement is executed as teardown code regardless of the test outcome. A fixture function must yield exactly once. 
""" - if callable(scope) and params is None and autouse == False: + if callable(scope) and params is None and autouse is False: # direct decoration return FixtureFunctionMarker( - "function", params, autouse, name=name)(scope) + "function", params, autouse, name=name)(scope) if params is not None and not isinstance(params, (list, tuple)): params = list(params) return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) @@ -897,7 +902,7 @@ def yield_fixture(scope="function", params=None, autouse=False, ids=None, name=N if callable(scope) and params is None and not autouse: # direct decoration return FixtureFunctionMarker( - "function", params, autouse, ids=ids, name=name)(scope) + "function", params, autouse, ids=ids, name=name)(scope) else: return FixtureFunctionMarker(scope, params, autouse, ids=ids, name=name) @@ -956,14 +961,9 @@ class FixtureManager: self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))] session.config.pluginmanager.register(self, "funcmanage") - def getfixtureinfo(self, node, func, cls, funcargs=True): if funcargs and not hasattr(node, "nofuncargs"): - if cls is not None: - startindex = 1 - else: - startindex = None - argnames = getfuncargnames(func, startindex) + argnames = getfuncargnames(func, cls=cls) else: argnames = () usefixtures = getattr(func, "usefixtures", None) @@ -987,8 +987,8 @@ class FixtureManager: # by their test id) if p.basename.startswith("conftest.py"): nodeid = p.dirpath().relto(self.config.rootdir) - if p.sep != "/": - nodeid = nodeid.replace(p.sep, "/") + if p.sep != nodes.SEP: + nodeid = nodeid.replace(p.sep, nodes.SEP) self.parsefactories(plugin, nodeid) def _getautousenames(self, nodeid): @@ -998,7 +998,7 @@ class FixtureManager: if nodeid.startswith(baseid): if baseid: i = len(baseid) - nextchar = nodeid[i:i+1] + nextchar = nodeid[i:i + 1] if nextchar and nextchar not in ":/": continue autousenames.extend(basenames) @@ -1043,9 +1043,14 @@ class FixtureManager: if faclist: fixturedef = faclist[-1] if fixturedef.params is not None: - func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]]) + parametrize_func = getattr(metafunc.function, 'parametrize', None) + func_params = getattr(parametrize_func, 'args', [[None]]) + func_kwargs = getattr(parametrize_func, 'kwargs', {}) # skip directly parametrized arguments - argnames = func_params[0] + if "argnames" in func_kwargs: + argnames = parametrize_func.kwargs["argnames"] + else: + argnames = func_params[0] if not isinstance(argnames, (tuple, list)): argnames = [x.strip() for x in argnames.split(",") if x.strip()] if argname not in func_params and argname not in argnames: @@ -1070,7 +1075,9 @@ class FixtureManager: self._holderobjseen.add(holderobj) autousenames = [] for name in dir(holderobj): - obj = getattr(holderobj, name, None) + # The attribute can be an arbitrary descriptor, so the attribute + # access below can raise. safe_getatt() ignores such exceptions. 
+ obj = safe_getattr(holderobj, name, None) # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style) # or are "@pytest.fixture" marked marker = getfixturemarker(obj) @@ -1081,7 +1088,7 @@ class FixtureManager: continue marker = defaultfuncargprefixmarker from _pytest import deprecated - self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name)) + self.config.warn('C1', deprecated.FUNCARG_PREFIX.format(name=name), nodeid=nodeid) name = name[len(self._argprefix):] elif not isinstance(marker, FixtureFunctionMarker): # magic globals with __getattr__ might have got us a wrong @@ -1131,6 +1138,5 @@ class FixtureManager: def _matchfactories(self, fixturedefs, nodeid): for fixturedef in fixturedefs: - if nodeid.startswith(fixturedef.baseid): + if nodes.ischildnode(fixturedef.baseid, nodeid): yield fixturedef - diff --git a/_pytest/freeze_support.py b/_pytest/freeze_support.py index f78ccd298..97147a882 100644 --- a/_pytest/freeze_support.py +++ b/_pytest/freeze_support.py @@ -2,9 +2,7 @@ Provides a function to report all internal modules for using freezing tools pytest """ - -def pytest_namespace(): - return {'freeze_includes': freeze_includes} +from __future__ import absolute_import, division, print_function def freeze_includes(): @@ -42,4 +40,4 @@ def _iter_all_modules(package, prefix=''): for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'): yield prefix + m else: - yield prefix + name \ No newline at end of file + yield prefix + name diff --git a/_pytest/helpconfig.py b/_pytest/helpconfig.py index 6e66b11c4..e744637f8 100644 --- a/_pytest/helpconfig.py +++ b/_pytest/helpconfig.py @@ -1,25 +1,61 @@ """ version info, help messages, tracing configuration. """ +from __future__ import absolute_import, division, print_function + import py import pytest -import os, sys +from _pytest.config import PrintHelp +import os +import sys +from argparse import Action + + +class HelpAction(Action): + """This is an argparse Action that will raise an exception in + order to skip the rest of the argument parsing when --help is passed. + This prevents argparse from quitting due to missing required arguments + when any are defined, for example by ``pytest_addoption``. + This is similar to the way that the builtin argparse --help option is + implemented by raising SystemExit. + """ + + def __init__(self, + option_strings, + dest=None, + default=False, + help=None): + super(HelpAction, self).__init__( + option_strings=option_strings, + dest=dest, + const=True, + default=default, + nargs=0, + help=help) + + def __call__(self, parser, namespace, values, option_string=None): + setattr(namespace, self.dest, self.const) + + # We should only skip the rest of the parsing after preparse is done + if getattr(parser._parser, 'after_preparse', False): + raise PrintHelp + def pytest_addoption(parser): group = parser.getgroup('debugconfig') group.addoption('--version', action="store_true", - help="display pytest lib version and import information.") - group._addoption("-h", "--help", action="store_true", dest="help", - help="show help message and configuration info") - group._addoption('-p', action="append", dest="plugins", default = [], - metavar="name", - help="early-load given plugin (multi-allowed). " - "To avoid loading of plugins, use the `no:` prefix, e.g. 
" - "`no:doctest`.") + help="display pytest lib version and import information.") + group._addoption("-h", "--help", action=HelpAction, dest="help", + help="show help message and configuration info") + group._addoption('-p', action="append", dest="plugins", default=[], + metavar="name", + help="early-load given plugin (multi-allowed). " + "To avoid loading of plugins, use the `no:` prefix, e.g. " + "`no:doctest`.") group.addoption('--traceconfig', '--trace-config', - action="store_true", default=False, - help="trace considerations of conftest.py files."), + action="store_true", default=False, + help="trace considerations of conftest.py files."), group.addoption('--debug', - action="store_true", dest="debug", default=False, - help="store internal tracing debug information in 'pytestdebug.log'.") + action="store_true", dest="debug", default=False, + help="store internal tracing debug information in 'pytestdebug.log'.") group._addoption( '-o', '--override-ini', nargs='*', dest="override_ini", action="append", @@ -34,10 +70,10 @@ def pytest_cmdline_parse(): path = os.path.abspath("pytestdebug.log") debugfile = open(path, 'w') debugfile.write("versions pytest-%s, py-%s, " - "python-%s\ncwd=%s\nargs=%s\n\n" %( - pytest.__version__, py.__version__, - ".".join(map(str, sys.version_info)), - os.getcwd(), config._origargs)) + "python-%s\ncwd=%s\nargs=%s\n\n" % ( + pytest.__version__, py.__version__, + ".".join(map(str, sys.version_info)), + os.getcwd(), config._origargs)) config.trace.root.setwriter(debugfile.write) undo_tracing = config.pluginmanager.enable_tracing() sys.stderr.write("writing pytestdebug information to %s\n" % path) @@ -51,11 +87,12 @@ def pytest_cmdline_parse(): config.add_cleanup(unset_tracing) + def pytest_cmdline_main(config): if config.option.version: p = py.path.local(pytest.__file__) sys.stderr.write("This is pytest version %s, imported from %s\n" % - (pytest.__version__, p)) + (pytest.__version__, p)) plugininfo = getpluginversioninfo(config) if plugininfo: for line in plugininfo: @@ -67,6 +104,7 @@ def pytest_cmdline_main(config): config._ensure_unconfigure() return 0 + def showhelp(config): reporter = config.pluginmanager.get_plugin('terminalreporter') tw = reporter._tw @@ -82,7 +120,7 @@ def showhelp(config): if type is None: type = "string" spec = "%s (%s)" % (name, type) - line = " %-24s %s" %(spec, help) + line = " %-24s %s" % (spec, help) tw.line(line[:tw.fullwidth]) tw.line() @@ -111,6 +149,7 @@ conftest_options = [ ('pytest_plugins', 'list of plugin names to load'), ] + def getpluginversioninfo(config): lines = [] plugininfo = config.pluginmanager.list_plugin_distinfo() @@ -122,11 +161,12 @@ def getpluginversioninfo(config): lines.append(" " + content) return lines + def pytest_report_header(config): lines = [] if config.option.debug or config.option.traceconfig: lines.append("using: pytest-%s pylib-%s" % - (pytest.__version__,py.__version__)) + (pytest.__version__, py.__version__)) verinfo = getpluginversioninfo(config) if verinfo: @@ -140,5 +180,5 @@ def pytest_report_header(config): r = plugin.__file__ else: r = repr(plugin) - lines.append(" %-20s: %s" %(name, r)) + lines.append(" %-20s: %s" % (name, r)) return lines diff --git a/_pytest/hookspec.py b/_pytest/hookspec.py index b5f51eccf..e5c966e58 100644 --- a/_pytest/hookspec.py +++ b/_pytest/hookspec.py @@ -8,6 +8,7 @@ hookspec = HookspecMarker("pytest") # Initialization hooks called for every plugin # ------------------------------------------------------------------------- + @hookspec(historic=True) def 
pytest_addhooks(pluginmanager): """called at plugin registration time to allow adding new hooks via a call to @@ -16,11 +17,14 @@ def pytest_addhooks(pluginmanager): @hookspec(historic=True) def pytest_namespace(): - """return dict of name->object to be made globally available in + """ + DEPRECATED: this hook causes direct monkeypatching on pytest, its use is strongly discouraged + return dict of name->object to be made globally available in the pytest namespace. This hook is called at plugin registration time. """ + @hookspec(historic=True) def pytest_plugin_registered(plugin, manager): """ a new pytest plugin got registered. """ @@ -56,11 +60,20 @@ def pytest_addoption(parser): via (deprecated) ``pytest.config``. """ + @hookspec(historic=True) def pytest_configure(config): - """ called after command line options have been parsed - and all plugins and initial conftest files been loaded. - This hook is called for every plugin. + """ + Allows plugins and conftest files to perform initial configuration. + + This hook is called for every plugin and initial conftest file + after command line options have been parsed. + + After that, the hook is called for other conftest files as they are + imported. + + :arg config: pytest config object + :type config: _pytest.config.Config """ # ------------------------------------------------------------------------- @@ -69,17 +82,25 @@ def pytest_configure(config): # discoverable conftest.py local plugins. # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_cmdline_parse(pluginmanager, args): - """return initialized config object, parsing the specified args. """ + """return initialized config object, parsing the specified args. + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_cmdline_preparse(config, args): """(deprecated) modify command line arguments before option parsing. """ + @hookspec(firstresult=True) def pytest_cmdline_main(config): """ called for performing the main command line action. The default - implementation will invoke the configure hooks and runtest_mainloop. """ + implementation will invoke the configure hooks and runtest_mainloop. + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_load_initial_conftests(early_config, parser, args): """ implements the loading of initial conftest files ahead @@ -92,88 +113,124 @@ def pytest_load_initial_conftests(early_config, parser, args): @hookspec(firstresult=True) def pytest_collection(session): - """ perform the collection protocol for the given session. """ + """ perform the collection protocol for the given session. + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_collection_modifyitems(session, config, items): """ called after collection has been performed, may filter or re-order the items in-place.""" + def pytest_collection_finish(session): """ called after collection has been performed and modified. """ + @hookspec(firstresult=True) def pytest_ignore_collect(path, config): """ return True to prevent considering this path for collection. This hook is consulted for all files and directories prior to calling more specific hooks. + + Stops at first non-None result, see :ref:`firstresult` """ + @hookspec(firstresult=True) def pytest_collect_directory(path, parent): - """ called before traversing a directory for collection files. """ + """ called before traversing a directory for collection files. 
+ + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_collect_file(path, parent): """ return collection Node or None for the given path. Any new node needs to have the specified ``parent`` as a parent.""" # logging hooks for collection + + def pytest_collectstart(collector): """ collector starts collecting. """ + def pytest_itemcollected(item): """ we just collected a test item. """ + def pytest_collectreport(report): """ collector finished collecting. """ + def pytest_deselected(items): """ called for test items deselected by keyword. """ + @hookspec(firstresult=True) def pytest_make_collect_report(collector): - """ perform ``collector.collect()`` and return a CollectReport. """ + """ perform ``collector.collect()`` and return a CollectReport. + + Stops at first non-None result, see :ref:`firstresult` """ # ------------------------------------------------------------------------- # Python test function related hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_pycollect_makemodule(path, parent): """ return a Module collector or None for the given path. This hook will be called for each matching test module path. The pytest_collect_file hook needs to be used if you want to create test modules for files that do not match as a test module. - """ + + Stops at first non-None result, see :ref:`firstresult` """ + @hookspec(firstresult=True) def pytest_pycollect_makeitem(collector, name, obj): - """ return custom item/collector for a python object in a module, or None. """ + """ return custom item/collector for a python object in a module, or None. + + Stops at first non-None result, see :ref:`firstresult` """ + @hookspec(firstresult=True) def pytest_pyfunc_call(pyfuncitem): - """ call underlying test function. """ + """ call underlying test function. + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_generate_tests(metafunc): """ generate (multiple) parametrized calls to a test function.""" + @hookspec(firstresult=True) -def pytest_make_parametrize_id(config, val): +def pytest_make_parametrize_id(config, val, argname): """Return a user-friendly string representation of the given ``val`` that will be used by @pytest.mark.parametrize calls. Return None if the hook doesn't know about ``val``. - """ + The parameter name is available as ``argname``, if required. + + Stops at first non-None result, see :ref:`firstresult` """ # ------------------------------------------------------------------------- # generic runtest related hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_runtestloop(session): """ called for performing the main runtest loop - (after collection finished). """ + (after collection finished). + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_itemstart(item, node): """ (deprecated, use pytest_runtest_logstart). """ + @hookspec(firstresult=True) def pytest_runtest_protocol(item, nextitem): """ implements the runtest_setup/call/teardown protocol for @@ -187,17 +244,23 @@ def pytest_runtest_protocol(item, nextitem): :py:func:`pytest_runtest_teardown`. :return boolean: True if no further hook implementations should be invoked. - """ + + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_runtest_logstart(nodeid, location): """ signal the start of running a single test item. 
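The ``pytest_make_parametrize_id`` hookspec above now also receives the parameter name as ``argname``. A conftest.py sketch; the ``name=value`` id format is an illustrative choice, not mandated by the hook::

    # conftest.py -- sketch: build parametrize ids like "speed=10".
    def pytest_make_parametrize_id(config, val, argname):
        if isinstance(val, int):
            return "%s=%d" % (argname, val)
        # None falls back to pytest's default id generation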
""" + def pytest_runtest_setup(item): """ called before ``pytest_runtest_call(item)``. """ + def pytest_runtest_call(item): """ called to execute the test ``item``. """ + def pytest_runtest_teardown(item, nextitem): """ called after ``pytest_runtest_call``. @@ -207,12 +270,15 @@ def pytest_runtest_teardown(item, nextitem): so that nextitem only needs to call setup-functions. """ + @hookspec(firstresult=True) def pytest_runtest_makereport(item, call): """ return a :py:class:`_pytest.runner.TestReport` object - for the given :py:class:`pytest.Item` and + for the given :py:class:`pytest.Item <_pytest.main.Item>` and :py:class:`_pytest.runner.CallInfo`. - """ + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_runtest_logreport(report): """ process a test setup/call/teardown report relating to @@ -222,9 +288,13 @@ def pytest_runtest_logreport(report): # Fixture related hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_fixture_setup(fixturedef, request): - """ performs fixture setup execution. """ + """ performs fixture setup execution. + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_fixture_post_finalizer(fixturedef): """ called after fixture teardown, but before the cache is cleared so @@ -235,18 +305,21 @@ def pytest_fixture_post_finalizer(fixturedef): # test session related hooks # ------------------------------------------------------------------------- + def pytest_sessionstart(session): """ before session.main() is called. """ + def pytest_sessionfinish(session, exitstatus): """ whole test run finishes. """ + def pytest_unconfigure(config): """ called before test process is exited. """ # ------------------------------------------------------------------------- -# hooks for customising the assert methods +# hooks for customizing the assert methods # ------------------------------------------------------------------------- def pytest_assertrepr_compare(config, op, left, right): @@ -255,19 +328,48 @@ def pytest_assertrepr_compare(config, op, left, right): Return None for no custom explanation, otherwise return a list of strings. The strings will be joined by newlines but any newlines *in* a string will be escaped. Note that all but the first line will - be indented sligthly, the intention is for the first line to be a summary. + be indented slightly, the intention is for the first line to be a summary. """ # ------------------------------------------------------------------------- # hooks for influencing reporting (invoked from _pytest_terminal) # ------------------------------------------------------------------------- + def pytest_report_header(config, startdir): - """ return a string to be displayed as header info for terminal reporting.""" + """ return a string or list of strings to be displayed as header info for terminal reporting. + + :param config: the pytest config object. + :param startdir: py.path object with the starting dir + + .. note:: + + This function should be implemented only in plugins or ``conftest.py`` + files situated at the tests root directory due to how pytest + :ref:`discovers plugins during startup `. + """ + + +def pytest_report_collectionfinish(config, startdir, items): + """ + .. versionadded:: 3.2 + + return a string or list of strings to be displayed after collection has finished successfully. + + This strings will be displayed after the standard "collected X items" message. + + :param config: the pytest config object. 
+ :param startdir: py.path object with the starting dir + :param items: list of pytest items that are going to be executed; this list should not be modified. + """ + @hookspec(firstresult=True) def pytest_report_teststatus(report): - """ return result-category, shortletter and verbose word for reporting.""" + """ return result-category, shortletter and verbose word for reporting. + + Stops at first non-None result, see :ref:`firstresult` """ + def pytest_terminal_summary(terminalreporter, exitstatus): """ add additional section in terminal summary reporting. """ @@ -283,20 +385,26 @@ def pytest_logwarning(message, code, nodeid, fslocation): # doctest hooks # ------------------------------------------------------------------------- + @hookspec(firstresult=True) def pytest_doctest_prepare_content(content): - """ return processed content for a given doctest""" + """ return processed content for a given doctest + + Stops at first non-None result, see :ref:`firstresult` """ # ------------------------------------------------------------------------- # error handling and internal debugging hooks # ------------------------------------------------------------------------- + def pytest_internalerror(excrepr, excinfo): """ called for internal errors. """ + def pytest_keyboard_interrupt(excinfo): """ called for keyboard interrupt. """ + def pytest_exception_interact(node, call, report): """called when an exception was raised which can potentially be interactively handled. @@ -305,6 +413,7 @@ def pytest_exception_interact(node, call, report): that is not an internal exception like ``skip.Exception``. """ + def pytest_enter_pdb(config): """ called upon pdb.set_trace(), can be used by plugins to take special action just before the python debugger enters in interactive mode. diff --git a/_pytest/impl b/_pytest/impl deleted file mode 100644 index 889e37e5a..000000000 --- a/_pytest/impl +++ /dev/null @@ -1,254 +0,0 @@ -Sorting per-resource ------------------------------ - -for any given set of items: - -- collect items per session-scoped parametrized funcarg -- re-order until items no parametrizations are mixed - - examples: - - test() - test1(s1) - test1(s2) - test2() - test3(s1) - test3(s2) - - gets sorted to: - - test() - test2() - test1(s1) - test3(s1) - test1(s2) - test3(s2) - - -the new @setup functions --------------------------------------- - -Consider a given @setup-marked function:: - - @pytest.mark.setup(maxscope=SCOPE) - def mysetup(request, arg1, arg2, ...) - ... - request.addfinalizer(fin) - ... - -then FUNCARGSET denotes the set of (arg1, arg2, ...) funcargs and -all of its dependent funcargs. The mysetup function will execute -for any matching test item once per scope. - -The scope is determined as the minimum scope of all scopes of the args -in FUNCARGSET and the given "maxscope". - -If mysetup has been called and no finalizers have been called it is -called "active". - -Furthermore the following rules apply: - -- if an arg value in FUNCARGSET is about to be torn down, the - mysetup-registered finalizers will execute as well. - -- There will never be two active mysetup invocations. - -Example 1, session scope:: - - @pytest.mark.funcarg(scope="session", params=[1,2]) - def db(request): - request.addfinalizer(db_finalize) - - @pytest.mark.setup - def mysetup(request, db): - request.addfinalizer(mysetup_finalize) - ... - -And a given test module: - - def test_something(): - ... 
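Both terminal reporting hooks documented above may return a string or a list of strings. A minimal conftest.py sketch; the header and summary texts are assumed examples::

    # conftest.py -- sketch of the terminal reporting hooks.
    def pytest_report_header(config, startdir):
        return "example project header"  # hypothetical header line

    def pytest_report_collectionfinish(config, startdir, items):
        # Displayed after the standard "collected X items" message;
        # ``items`` must not be modified here.
        return "selected %d items for execution" % len(items)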
- def test_otherthing(): - pass - -Here is what happens:: - - db(request) executes with request.param == 1 - mysetup(request, db) executes - test_something() executes - test_otherthing() executes - mysetup_finalize() executes - db_finalize() executes - db(request) executes with request.param == 2 - mysetup(request, db) executes - test_something() executes - test_otherthing() executes - mysetup_finalize() executes - db_finalize() executes - -Example 2, session/function scope:: - - @pytest.mark.funcarg(scope="session", params=[1,2]) - def db(request): - request.addfinalizer(db_finalize) - - @pytest.mark.setup(scope="function") - def mysetup(request, db): - ... - request.addfinalizer(mysetup_finalize) - ... - -And a given test module: - - def test_something(): - ... - def test_otherthing(): - pass - -Here is what happens:: - - db(request) executes with request.param == 1 - mysetup(request, db) executes - test_something() executes - mysetup_finalize() executes - mysetup(request, db) executes - test_otherthing() executes - mysetup_finalize() executes - db_finalize() executes - db(request) executes with request.param == 2 - mysetup(request, db) executes - test_something() executes - mysetup_finalize() executes - mysetup(request, db) executes - test_otherthing() executes - mysetup_finalize() executes - db_finalize() executes - - -Example 3 - funcargs session-mix ----------------------------------------- - -Similar with funcargs, an example:: - - @pytest.mark.funcarg(scope="session", params=[1,2]) - def db(request): - request.addfinalizer(db_finalize) - - @pytest.mark.funcarg(scope="function") - def table(request, db): - ... - request.addfinalizer(table_finalize) - ... - -And a given test module: - - def test_something(table): - ... - def test_otherthing(table): - pass - def test_thirdthing(): - pass - -Here is what happens:: - - db(request) executes with param == 1 - table(request, db) - test_something(table) - table_finalize() - table(request, db) - test_otherthing(table) - table_finalize() - db_finalize - db(request) executes with param == 2 - table(request, db) - test_something(table) - table_finalize() - table(request, db) - test_otherthing(table) - table_finalize() - db_finalize - test_thirdthing() - -Data structures --------------------- - -pytest internally maintains a dict of active funcargs with cache, param, -finalizer, (scopeitem?) information: - - active_funcargs = dict() - -if a parametrized "db" is activated: - - active_funcargs["db"] = FuncargInfo(dbvalue, paramindex, - FuncargFinalize(...), scopeitem) - -if a test is torn down and the next test requires a differently -parametrized "db": - - for argname in item.callspec.params: - if argname in active_funcargs: - funcarginfo = active_funcargs[argname] - if funcarginfo.param != item.callspec.params[argname]: - funcarginfo.callfinalizer() - del node2funcarg[funcarginfo.scopeitem] - del active_funcargs[argname] - nodes_to_be_torn_down = ... - for node in nodes_to_be_torn_down: - if node in node2funcarg: - argname = node2funcarg[node] - active_funcargs[argname].callfinalizer() - del node2funcarg[node] - del active_funcargs[argname] - -if a test is setup requiring a "db" funcarg: - - if "db" in active_funcargs: - return active_funcargs["db"][0] - funcarginfo = setup_funcarg() - active_funcargs["db"] = funcarginfo - node2funcarg[funcarginfo.scopeitem] = "db" - -Implementation plan for resources ------------------------------------------- - -1. Revert FuncargRequest to the old form, unmerge item/request - (done) -2. 
make funcarg factories be discovered at collection time -3. Introduce funcarg marker -4. Introduce funcarg scope parameter -5. Introduce funcarg parametrize parameter -6. make setup functions be discovered at collection time -7. (Introduce a pytest_fixture_protocol/setup_funcargs hook) - -methods and data structures --------------------------------- - -A FuncarcManager holds all information about funcarg definitions -including parametrization and scope definitions. It implements -a pytest_generate_tests hook which performs parametrization as appropriate. - -as a simple example, let's consider a tree where a test function requires -a "abc" funcarg and its factory defines it as parametrized and scoped -for Modules. When collections hits the function item, it creates -the metafunc object, and calls funcargdb.pytest_generate_tests(metafunc) -which looks up available funcarg factories and their scope and parametrization. -This information is equivalent to what can be provided today directly -at the function site and it should thus be relatively straight forward -to implement the additional way of defining parametrization/scoping. - -conftest loading: - each funcarg-factory will populate the session.funcargmanager - -When a test item is collected, it grows a dictionary -(funcargname2factorycalllist). A factory lookup is performed -for each required funcarg. The resulting factory call is stored -with the item. If a function is parametrized multiple items are -created with respective factory calls. Else if a factory is parametrized -multiple items and calls to the factory function are created as well. - -At setup time, an item populates a funcargs mapping, mapping names -to values. If a value is funcarg factories are queried for a given item -test functions and setup functions are put in a class -which looks up required funcarg factories. - - diff --git a/_pytest/junitxml.py b/_pytest/junitxml.py index 317382e63..7fb40dc35 100644 --- a/_pytest/junitxml.py +++ b/_pytest/junitxml.py @@ -4,9 +4,11 @@ Based on initial code from Ross Lawley. 
+ +Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ +src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd """ -# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/ -# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd +from __future__ import absolute_import, division, print_function import functools import py @@ -15,6 +17,7 @@ import re import sys import time import pytest +from _pytest import nodes from _pytest.config import filename_arg # Python 2.X and 3.X compatibility @@ -105,6 +108,8 @@ class _NodeReporter(object): } if testreport.location[1] is not None: attrs["line"] = testreport.location[1] + if hasattr(testreport, "url"): + attrs["url"] = testreport.url self.attrs = attrs def to_xml(self): @@ -119,7 +124,7 @@ class _NodeReporter(object): node = kind(data, message=message) self.append(node) - def _write_captured_output(self, report): + def write_captured_output(self, report): for capname in ('out', 'err'): content = getattr(report, 'capstd' + capname) if content: @@ -128,7 +133,6 @@ class _NodeReporter(object): def append_pass(self, report): self.add_stats('passed') - self._write_captured_output(report) def append_failure(self, report): # msg = str(report.longrepr.reprtraceback.extraline) @@ -147,7 +151,6 @@ class _NodeReporter(object): fail = Junit.failure(message=message) fail.append(bin_xml_escape(report.longrepr)) self.append(fail) - self._write_captured_output(report) def append_collect_error(self, report): # msg = str(report.longrepr.reprtraceback.extraline) @@ -165,7 +168,6 @@ class _NodeReporter(object): msg = "test setup failure" self._add_simple( Junit.error, msg, report.longrepr) - self._write_captured_output(report) def append_skipped(self, report): if hasattr(report, "wasxfail"): @@ -180,7 +182,7 @@ class _NodeReporter(object): Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason), type="pytest.skip", message=skipreason)) - self._write_captured_output(report) + self.write_captured_output(report) def finalize(self): data = self.to_xml().unicode(indent=0) @@ -225,13 +227,14 @@ def pytest_addoption(parser): metavar="str", default=None, help="prepend prefix to classnames in junit-xml output") + parser.addini("junit_suite_name", "Test suite name for JUnit report", default="pytest") def pytest_configure(config): xmlpath = config.option.xmlpath # prevent opening xmllog on slave nodes (xdist) if xmlpath and not hasattr(config, 'slaveinput'): - config._xml = LogXML(xmlpath, config.option.junitprefix) + config._xml = LogXML(xmlpath, config.option.junitprefix, config.getini("junit_suite_name")) config.pluginmanager.register(config._xml) @@ -250,7 +253,7 @@ def mangle_test_address(address): except ValueError: pass # convert file path to dotted path - names[0] = names[0].replace("/", '.') + names[0] = names[0].replace(nodes.SEP, '.') names[0] = _py_ext_re.sub("", names[0]) # put any params back names[-1] += possible_open_bracket + params @@ -258,10 +261,11 @@ def mangle_test_address(address): class LogXML(object): - def __init__(self, logfile, prefix): + def __init__(self, logfile, prefix, suite_name="pytest"): logfile = os.path.expanduser(os.path.expandvars(logfile)) self.logfile = os.path.normpath(os.path.abspath(logfile)) self.prefix = prefix + self.suite_name = suite_name self.stats = dict.fromkeys([ 'error', 'passed', @@ -271,6 +275,9 @@ class LogXML(object): self.node_reporters = {} # nodeid -> _NodeReporter self.node_reporters_ordered = [] self.global_properties = [] + # 
List of reports that failed on call but teardown is pending. + self.open_reports = [] + self.cnt_double_fail_tests = 0 def finalize(self, report): nodeid = getattr(report, 'nodeid', report) @@ -330,14 +337,33 @@ class LogXML(object): -> teardown node2 -> teardown node1 """ + close_report = None if report.passed: if report.when == "call": # ignore setup/teardown reporter = self._opentestcase(report) reporter.append_pass(report) elif report.failed: + if report.when == "teardown": + # The following vars are needed when xdist plugin is used + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + (rep for rep in self.open_reports + if (rep.nodeid == report.nodeid and + getattr(rep, "item_index", None) == report_ii and + getattr(rep, "worker_id", None) == report_wid + ) + ), None) + if close_report: + # We need to open new testcase in case we have failure in + # call and error in teardown in order to follow junit + # schema + self.finalize(close_report) + self.cnt_double_fail_tests += 1 reporter = self._opentestcase(report) if report.when == "call": reporter.append_failure(report) + self.open_reports.append(report) else: reporter.append_error(report) elif report.skipped: @@ -345,7 +371,20 @@ class LogXML(object): reporter.append_skipped(report) self.update_testcase_duration(report) if report.when == "teardown": + reporter = self._opentestcase(report) + reporter.write_captured_output(report) self.finalize(report) + report_wid = getattr(report, "worker_id", None) + report_ii = getattr(report, "item_index", None) + close_report = next( + (rep for rep in self.open_reports + if (rep.nodeid == report.nodeid and + getattr(rep, "item_index", None) == report_ii and + getattr(rep, "worker_id", None) == report_wid + ) + ), None) + if close_report: + self.open_reports.remove(close_report) def update_testcase_duration(self, report): """accumulates total duration for nodeid from given report and updates @@ -378,14 +417,15 @@ class LogXML(object): suite_stop_time = time.time() suite_time_delta = suite_stop_time - self.suite_start_time - numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped'] + self.stats['error'] - + numtests = (self.stats['passed'] + self.stats['failure'] + + self.stats['skipped'] + self.stats['error'] - + self.cnt_double_fail_tests) logfile.write('') logfile.write(Junit.testsuite( self._get_global_properties_node(), [x.to_xml() for x in self.node_reporters_ordered], - name="pytest", + name=self.suite_name, errors=self.stats['error'], failures=self.stats['failure'], skips=self.stats['skipped'], @@ -405,9 +445,9 @@ class LogXML(object): """ if self.global_properties: return Junit.properties( - [ - Junit.property(name=name, value=value) - for name, value in self.global_properties - ] + [ + Junit.property(name=name, value=value) + for name, value in self.global_properties + ] ) return '' diff --git a/_pytest/main.py b/_pytest/main.py index 52876c12a..eacae8dab 100644 --- a/_pytest/main.py +++ b/_pytest/main.py @@ -1,18 +1,21 @@ """ core implementation of testing process: init, session, runtest loop. 
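The junitxml changes above add a ``junit_suite_name`` ini option (for example ``junit_suite_name = my_suite`` under ``[pytest]``) and emit a ``url`` attribute on testcases when a report carries one. A sketch of supplying that attribute from a conftest.py; the URL scheme is an assumed example::

    # conftest.py -- sketch: attach a url to each test report so the
    # junitxml plugin above writes it out as a "url" attribute.
    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_makereport(item, call):
        outcome = yield
        report = outcome.get_result()
        report.url = "http://example.com/tests/%s" % item.nodeid  # hypothetical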
""" +from __future__ import absolute_import, division, print_function + import functools import os import sys import _pytest +from _pytest import nodes import _pytest._code import py -import pytest try: from collections import MutableMapping as MappingMixin except ImportError: from UserDict import DictMixin as MappingMixin -from _pytest.config import directory_arg +from _pytest.config import directory_arg, UsageError, hookimpl +from _pytest.outcomes import exit from _pytest.runner import collect_one_node tracebackcutdir = py.path.local(_pytest.__file__).dirpath() @@ -25,63 +28,73 @@ EXIT_INTERNALERROR = 3 EXIT_USAGEERROR = 4 EXIT_NOTESTSCOLLECTED = 5 + def pytest_addoption(parser): parser.addini("norecursedirs", "directory patterns to avoid for recursion", - type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg']) - parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.", - type="args", default=[]) - #parser.addini("dirpatterns", + type="args", default=['.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv']) + parser.addini("testpaths", "directories to search for tests when no files or directories are given in the " + "command line.", + type="args", default=[]) + # parser.addini("dirpatterns", # "patterns specifying possible locations of test files", # type="linelist", default=["**/test_*.txt", # "**/test_*.py", "**/*_test.py"] - #) + # ) group = parser.getgroup("general", "running and selection options") group._addoption('-x', '--exitfirst', action="store_const", - dest="maxfail", const=1, - help="exit instantly on first error or failed test."), + dest="maxfail", const=1, + help="exit instantly on first error or failed test."), group._addoption('--maxfail', metavar="num", - action="store", type=int, dest="maxfail", default=0, - help="exit after first num failures or errors.") + action="store", type=int, dest="maxfail", default=0, + help="exit after first num failures or errors.") group._addoption('--strict', action="store_true", - help="run pytest in strict mode, warnings become errors.") + help="marks not registered in configuration file raise errors.") group._addoption("-c", metavar="file", type=str, dest="inifilename", - help="load configuration from `file` instead of trying to locate one of the implicit configuration files.") + help="load configuration from `file` instead of trying to locate one of the implicit " + "configuration files.") group._addoption("--continue-on-collection-errors", action="store_true", - default=False, dest="continue_on_collection_errors", - help="Force test execution even if collection errors occur.") + default=False, dest="continue_on_collection_errors", + help="Force test execution even if collection errors occur.") group = parser.getgroup("collect", "collection") group.addoption('--collectonly', '--collect-only', action="store_true", - help="only collect tests, don't execute them."), + help="only collect tests, don't execute them."), group.addoption('--pyargs', action="store_true", - help="try to interpret all arguments as python packages.") + help="try to interpret all arguments as python packages.") group.addoption("--ignore", action="append", metavar="path", - help="ignore path during collection (multi-allowed).") + help="ignore path during collection (multi-allowed).") # when changing this to --conf-cut-dir, config.py Conftest.setinitial # needs upgrading as well group.addoption('--confcutdir', dest="confcutdir", default=None, - metavar="dir", 
type=functools.partial(directory_arg, optname="--confcutdir"), - help="only load conftest.py's relative to specified dir.") + metavar="dir", type=functools.partial(directory_arg, optname="--confcutdir"), + help="only load conftest.py's relative to specified dir.") group.addoption('--noconftest', action="store_true", - dest="noconftest", default=False, - help="Don't load any conftest.py files.") + dest="noconftest", default=False, + help="Don't load any conftest.py files.") group.addoption('--keepduplicates', '--keep-duplicates', action="store_true", - dest="keepduplicates", default=False, - help="Keep duplicate tests.") + dest="keepduplicates", default=False, + help="Keep duplicate tests.") + group.addoption('--collect-in-virtualenv', action='store_true', + dest='collect_in_virtualenv', default=False, + help="Don't ignore tests in a local virtualenv directory") group = parser.getgroup("debugconfig", - "test session debugging and configuration") + "test session debugging and configuration") group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir", - help="base temporary directory for this test run.") + help="base temporary directory for this test run.") def pytest_namespace(): - collect = dict(Item=Item, Collector=Collector, File=File, Session=Session) - return dict(collect=collect) + """keeping this one works around a deeper startup issue in pytest + + I tried to find it for a while but the amount of time turned unsustainable, + so I put a hack in to revisit later + """ + return {} def pytest_configure(config): - pytest.config = config # compatibiltiy + __import__('pytest').config = config # compatibility def wrap_session(config, doit): @@ -96,17 +109,16 @@ def wrap_session(config, doit): config.hook.pytest_sessionstart(session=session) initstate = 2 session.exitstatus = doit(config, session) or 0 - except pytest.UsageError: + except UsageError: raise except KeyboardInterrupt: excinfo = _pytest._code.ExceptionInfo() - if initstate < 2 and isinstance( - excinfo.value, pytest.exit.Exception): + if initstate < 2 and isinstance(excinfo.value, exit.Exception): sys.stderr.write('{0}: {1}\n'.format( excinfo.typename, excinfo.value.msg)) config.hook.pytest_keyboard_interrupt(excinfo=excinfo) session.exitstatus = EXIT_INTERRUPTED - except: + except: # noqa excinfo = _pytest._code.ExceptionInfo() config.notify_exception(excinfo, config.option) session.exitstatus = EXIT_INTERNALERROR @@ -123,9 +135,11 @@ def wrap_session(config, doit): config._ensure_unconfigure() return session.exitstatus + def pytest_cmdline_main(config): return wrap_session(config, _main) + def _main(config, session): """ default command line protocol for initialization, session, running tests and reporting.
""" @@ -137,9 +151,11 @@ def _main(config, session): elif session.testscollected == 0: return EXIT_NOTESTSCOLLECTED + def pytest_collection(session): return session.perform_collect() + def pytest_runtestloop(session): if (session.testsfailed and not session.config.option.continue_on_collection_errors): @@ -150,21 +166,36 @@ def pytest_runtestloop(session): return True for i, item in enumerate(session.items): - nextitem = session.items[i+1] if i+1 < len(session.items) else None + nextitem = session.items[i + 1] if i + 1 < len(session.items) else None item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem) if session.shouldstop: raise session.Interrupted(session.shouldstop) return True + +def _in_venv(path): + """Attempts to detect if ``path`` is the root of a Virtual Environment by + checking for the existence of the appropriate activate script""" + bindir = path.join('Scripts' if sys.platform.startswith('win') else 'bin') + if not bindir.exists(): + return False + activates = ('activate', 'activate.csh', 'activate.fish', + 'Activate', 'Activate.bat', 'Activate.ps1') + return any([fname.basename in activates for fname in bindir.listdir()]) + + def pytest_ignore_collect(path, config): - p = path.dirpath() - ignore_paths = config._getconftest_pathlist("collect_ignore", path=p) + ignore_paths = config._getconftest_pathlist("collect_ignore", path=path.dirpath()) ignore_paths = ignore_paths or [] excludeopt = config.getoption("ignore") if excludeopt: ignore_paths.extend([py.path.local(x) for x in excludeopt]) - if path in ignore_paths: + if py.path.local(path) in ignore_paths: + return True + + allow_in_venv = config.getoption("collect_in_virtualenv") + if _in_venv(path) and not allow_in_venv: return True # Skip duplicate paths. @@ -190,14 +221,22 @@ class FSHookProxy: self.__dict__[name] = x return x -def compatproperty(name): - def fget(self): - import warnings - warnings.warn("This usage is deprecated, please use pytest.{0} instead".format(name), - PendingDeprecationWarning, stacklevel=2) - return getattr(pytest, name) - return property(fget) +class _CompatProperty(object): + def __init__(self, name): + self.name = name + + def __get__(self, obj, owner): + if obj is None: + return self + + # TODO: reenable in the features branch + # warnings.warn( + # "usage of {owner!r}.{name} is deprecated, please use pytest.{name} instead".format( + # name=self.name, owner=type(owner).__name__), + # PendingDeprecationWarning, stacklevel=2) + return getattr(__import__('pytest'), self.name) + class NodeKeywords(MappingMixin): def __init__(self, node): @@ -269,24 +308,28 @@ class Node(object): """ fspath sensitive hook proxy used to call pytest hooks""" return self.session.gethookproxy(self.fspath) - Module = compatproperty("Module") - Class = compatproperty("Class") - Instance = compatproperty("Instance") - Function = compatproperty("Function") - File = compatproperty("File") - Item = compatproperty("Item") + Module = _CompatProperty("Module") + Class = _CompatProperty("Class") + Instance = _CompatProperty("Instance") + Function = _CompatProperty("Function") + File = _CompatProperty("File") + Item = _CompatProperty("Item") def _getcustomclass(self, name): - cls = getattr(self, name) - if cls != getattr(pytest, name): - py.log._apiwarn("2.0", "use of node.%s is deprecated, " - "use pytest_pycollect_makeitem(...) 
to create custom " - "collection nodes" % name) + maybe_compatprop = getattr(type(self), name) + if isinstance(maybe_compatprop, _CompatProperty): + return getattr(__import__('pytest'), name) + else: + cls = getattr(self, name) + # TODO: reenable in the features branch + # warnings.warn("use of node.%s is deprecated, " + # "use pytest_pycollect_makeitem(...) to create custom " + # "collection nodes" % name, category=DeprecationWarning) return cls def __repr__(self): - return "<%s %r>" %(self.__class__.__name__, - getattr(self, 'name', None)) + return "<%s %r>" % (self.__class__.__name__, + getattr(self, 'name', None)) def warn(self, code, message): """ generate a warning with the given code and message for this @@ -295,9 +338,6 @@ class Node(object): fslocation = getattr(self, "location", None) if fslocation is None: fslocation = getattr(self, "fspath", None) - else: - fslocation = "%s:%s" % (fslocation[0], fslocation[1] + 1) - self.ihook.pytest_logwarning.call_historic(kwargs=dict( code=code, message=message, nodeid=self.nodeid, fslocation=fslocation)) @@ -335,7 +375,7 @@ class Node(object): res = function() except py.builtin._sysex: raise - except: + except: # noqa failure = sys.exc_info() setattr(self, exattrname, failure) raise @@ -358,9 +398,9 @@ class Node(object): ``marker`` can be a string or pytest.mark.* instance. """ - from _pytest.mark import MarkDecorator + from _pytest.mark import MarkDecorator, MARK_GEN if isinstance(marker, py.builtin._basestring): - marker = MarkDecorator(marker) + marker = getattr(MARK_GEN, marker) elif not isinstance(marker, MarkDecorator): raise ValueError("is not a string or pytest.mark.* Marker") self.keywords[marker.name] = marker @@ -410,7 +450,7 @@ class Node(object): return excinfo.value.formatrepr() tbfilter = True if self.config.option.fulltrace: - style="long" + style = "long" else: tb = _pytest._code.Traceback([excinfo.traceback[-1]]) self._prunetraceback(excinfo) @@ -438,6 +478,7 @@ class Node(object): repr_failure = _repr_failure_py + class Collector(Node): """ Collector instances create children through collect() and thus iteratively build a tree. @@ -459,10 +500,6 @@ class Collector(Node): return str(exc.args[0]) return self._repr_failure_py(excinfo, style="short") - def _memocollect(self): - """ internal helper method to cache results of calling collect(). """ - return self._memoizedcall('_collected', lambda: list(self.collect())) - def _prunetraceback(self, excinfo): if hasattr(self, 'fspath'): traceback = excinfo.traceback @@ -471,27 +508,38 @@ class Collector(Node): ntraceback = ntraceback.cut(excludepath=tracebackcutdir) excinfo.traceback = ntraceback.filter() + class FSCollector(Collector): def __init__(self, fspath, parent=None, config=None, session=None): - fspath = py.path.local(fspath) # xxx only for test_resultlog.py? + fspath = py.path.local(fspath) # xxx only for test_resultlog.py? 
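``Node.add_marker`` above now resolves string names through ``MARK_GEN``, so markers added by name follow the same registration checks as ``pytest.mark.*``. A conftest.py sketch; the nodeid-based selection rule is hypothetical::

    # conftest.py -- sketch: mark matching items during collection.
    def pytest_collection_modifyitems(config, items):
        for item in items:
            if "slow" in item.nodeid:  # assumed naming convention
                item.add_marker("slow")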
name = fspath.basename if parent is not None: rel = fspath.relto(parent.fspath) if rel: name = rel - name = name.replace(os.sep, "/") + name = name.replace(os.sep, nodes.SEP) super(FSCollector, self).__init__(name, parent, config, session) self.fspath = fspath + def _check_initialpaths_for_relpath(self): + for initialpath in self.session._initialpaths: + if self.fspath.common(initialpath) == initialpath: + return self.fspath.relto(initialpath.dirname) + def _makeid(self): relpath = self.fspath.relto(self.config.rootdir) - if os.sep != "/": - relpath = relpath.replace(os.sep, "/") + + if not relpath: + relpath = self._check_initialpaths_for_relpath() + if os.sep != nodes.SEP: + relpath = relpath.replace(os.sep, nodes.SEP) return relpath + class File(FSCollector): """ base class for collecting tests from a file. """ + class Item(Node): """ a basic test invocation item. Note that for a single function there might be multiple test invocation items. @@ -503,6 +551,21 @@ class Item(Node): self._report_sections = [] def add_report_section(self, when, key, content): + """ + Adds a new report section, similar to what's done internally to add stdout and + stderr captured output:: + + item.add_report_section("call", "stdout", "report section contents") + + :param str when: + One of the possible capture states, ``"setup"``, ``"call"``, ``"teardown"``. + :param str key: + Name of the section, can be customized at will. Pytest uses ``"stdout"`` and + ``"stderr"`` internally. + + :param str content: + The full contents as a string. + """ if content: self._report_sections.append((when, key, content)) @@ -526,12 +589,15 @@ class Item(Node): self._location = location return location + class NoMatch(Exception): """ raised if matching cannot locate a matching names. """ + class Interrupted(KeyboardInterrupt): """ signals an interrupted test run. 
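``Item.add_report_section`` documented above can also be driven from a hook wrapper to attach custom diagnostics; a sketch, with the section name and contents being illustrative::

    # conftest.py -- sketch: append a custom report section per test call.
    import pytest

    @pytest.hookimpl(hookwrapper=True)
    def pytest_runtest_call(item):
        yield
        item.add_report_section("call", "notes", "extra diagnostic text")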
""" - __module__ = 'builtins' # for py3 + __module__ = 'builtins' # for py3 + class Session(FSCollector): Interrupted = Interrupted @@ -550,12 +616,12 @@ class Session(FSCollector): def _makeid(self): return "" - @pytest.hookimpl(tryfirst=True) + @hookimpl(tryfirst=True) def pytest_collectstart(self): if self.shouldstop: raise self.Interrupted(self.shouldstop) - @pytest.hookimpl(tryfirst=True) + @hookimpl(tryfirst=True) def pytest_runtest_logreport(self, report): if report.failed and not hasattr(report, 'wasxfail'): self.testsfailed += 1 @@ -586,8 +652,9 @@ class Session(FSCollector): hook = self.config.hook try: items = self._perform_collect(args, genitems) + self.config.pluginmanager.check_pending() hook.pytest_collection_modifyitems(session=self, - config=self.config, items=items) + config=self.config, items=items) finally: hook.pytest_collection_finish(session=self) self.testscollected = len(items) @@ -614,8 +681,8 @@ class Session(FSCollector): for arg, exc in self._notfound: line = "(no name %r in any of %r)" % (arg, exc.args[0]) errors.append("not found: %s\n%s" % (arg, line)) - #XXX: test this - raise pytest.UsageError(*errors) + # XXX: test this + raise UsageError(*errors) if not genitems: return rep.result else: @@ -643,7 +710,7 @@ class Session(FSCollector): names = self._parsearg(arg) path = names.pop(0) if path.check(dir=1): - assert not names, "invalid arg %r" %(arg,) + assert not names, "invalid arg %r" % (arg,) for path in path.visit(fil=lambda x: x.check(file=1), rec=self._recurse, bf=True, sort=True): for x in self._collectfile(path): @@ -702,9 +769,11 @@ class Session(FSCollector): path = self.config.invocation_dir.join(relpath, abs=True) if not path.check(): if self.config.option.pyargs: - raise pytest.UsageError("file or package not found: " + arg + " (missing __init__.py?)") + raise UsageError( + "file or package not found: " + arg + + " (missing __init__.py?)") else: - raise pytest.UsageError("file not found: " + arg) + raise UsageError("file not found: " + arg) parts[0] = path return parts @@ -727,11 +796,11 @@ class Session(FSCollector): nextnames = names[1:] resultnodes = [] for node in matching: - if isinstance(node, pytest.Item): + if isinstance(node, Item): if not names: resultnodes.append(node) continue - assert isinstance(node, pytest.Collector) + assert isinstance(node, Collector) rep = collect_one_node(node) if rep.passed: has_matched = False @@ -744,16 +813,20 @@ class Session(FSCollector): if not has_matched and len(rep.result) == 1 and x.name == "()": nextnames.insert(0, name) resultnodes.extend(self.matchnodes([x], nextnames)) - node.ihook.pytest_collectreport(report=rep) + else: + # report collection failures here to avoid failing to run some test + # specified in the command line because the module could not be + # imported (#134) + node.ihook.pytest_collectreport(report=rep) return resultnodes def genitems(self, node): self.trace("genitems", node) - if isinstance(node, pytest.Item): + if isinstance(node, Item): node.ihook.pytest_itemcollected(item=node) yield node else: - assert isinstance(node, pytest.Collector) + assert isinstance(node, Collector) rep = collect_one_node(node) if rep.passed: for subnode in rep.result: diff --git a/_pytest/mark.py b/_pytest/mark.py index 357a60492..454722ca2 100644 --- a/_pytest/mark.py +++ b/_pytest/mark.py @@ -1,5 +1,75 @@ """ generic mechanism for marking and selecting python functions. 
""" +from __future__ import absolute_import, division, print_function + import inspect +import warnings +from collections import namedtuple +from operator import attrgetter +from .compat import imap +from .deprecated import MARK_PARAMETERSET_UNPACKING + + +def alias(name, warning=None): + getter = attrgetter(name) + + def warned(self): + warnings.warn(warning, stacklevel=2) + return getter(self) + + return property(getter if warning is None else warned, doc='alias for ' + name) + + +class ParameterSet(namedtuple('ParameterSet', 'values, marks, id')): + @classmethod + def param(cls, *values, **kw): + marks = kw.pop('marks', ()) + if isinstance(marks, MarkDecorator): + marks = marks, + else: + assert isinstance(marks, (tuple, list, set)) + + def param_extract_id(id=None): + return id + + id = param_extract_id(**kw) + return cls(values, marks, id) + + @classmethod + def extract_from(cls, parameterset, legacy_force_tuple=False): + """ + :param parameterset: + a legacy style parameterset that may or may not be a tuple, + and may or may not be wrapped into a mess of mark objects + + :param legacy_force_tuple: + enforce tuple wrapping so single argument tuple values + don't get decomposed and break tests + + """ + + if isinstance(parameterset, cls): + return parameterset + if not isinstance(parameterset, MarkDecorator) and legacy_force_tuple: + return cls.param(parameterset) + + newmarks = [] + argval = parameterset + while isinstance(argval, MarkDecorator): + newmarks.append(MarkDecorator(Mark( + argval.markname, argval.args[:-1], argval.kwargs))) + argval = argval.args[-1] + assert not isinstance(argval, ParameterSet) + if legacy_force_tuple: + argval = argval, + + if newmarks: + warnings.warn(MARK_PARAMETERSET_UNPACKING) + + return cls(argval, marks=newmarks, id=None) + + @property + def deprecated_arg_dict(self): + return dict((mark.name, mark) for mark in self.marks) class MarkerError(Exception): @@ -7,8 +77,8 @@ class MarkerError(Exception): """Error in use of a pytest marker/attribute.""" -def pytest_namespace(): - return {'mark': MarkGenerator()} +def param(*values, **kw): + return ParameterSet.param(*values, **kw) def pytest_addoption(parser): @@ -21,7 +91,8 @@ def pytest_addoption(parser): "where all names are substring-matched against test names " "and their parent classes. Example: -k 'test_method or test_" "other' matches all test functions and classes whose name " - "contains 'test_method' or 'test_other'. " + "contains 'test_method' or 'test_other', while -k 'not test_method' " + "matches those that don't contain 'test_method' in their names. " "Additionally keywords are matched to classes and functions " "containing extra names in their 'extra_keyword_matches' set, " "as well as functions which have names assigned directly to them." @@ -66,7 +137,7 @@ def pytest_collection_modifyitems(items, config): return # pytest used to allow "-" for negating # but today we just allow "-" at the beginning, use "not" instead - # we probably remove "-" alltogether soon + # we probably remove "-" altogether soon if keywordexpr.startswith("-"): keywordexpr = "not " + keywordexpr[1:] selectuntil = False @@ -96,6 +167,7 @@ def pytest_collection_modifyitems(items, config): class MarkMapping: """Provides a local mapping for markers where item access resolves to True if the marker is present. """ + def __init__(self, keywords): mymarks = set() for key, value in keywords.items(): @@ -111,6 +183,7 @@ class KeywordMapping: """Provides a local mapping for keywords. 
Given a list of names, map any substring of one of these names to True. """ + def __init__(self, names): self._names = names @@ -162,9 +235,13 @@ def matchkeyword(colitem, keywordexpr): def pytest_configure(config): - import pytest + config._old_mark_config = MARK_GEN._config if config.option.strict: - pytest.mark._config = config + MARK_GEN._config = config + + +def pytest_unconfigure(config): + MARK_GEN._config = getattr(config, '_old_mark_config', None) class MarkGenerator: @@ -178,13 +255,14 @@ class MarkGenerator: will set a 'slowtest' :class:`MarkInfo` object on the ``test_function`` object. """ + _config = None def __getattr__(self, name): if name[0] == "_": raise AttributeError("Marker name must NOT start with underscore") - if hasattr(self, '_config'): + if self._config is not None: self._check(name) - return MarkDecorator(name) + return MarkDecorator(Mark(name, (), {})) def _check(self, name): try: @@ -192,18 +270,21 @@ class MarkGenerator: return except AttributeError: pass - self._markers = l = set() + self._markers = values = set() for line in self._config.getini("markers"): - beginning = line.split(":", 1) - x = beginning[0].split("(", 1)[0] - l.add(x) + marker, _ = line.split(":", 1) + marker = marker.rstrip() + x = marker.split("(", 1)[0] + values.add(x) if name not in self._markers: raise AttributeError("%r not a registered marker" % (name,)) + def istestfunc(func): return hasattr(func, "__call__") and \ getattr(func, "__name__", "") != "" + class MarkDecorator: """ A decorator for test functions and test classes. When applied it will create :class:`MarkInfo` objects which may be @@ -237,19 +318,35 @@ class MarkDecorator: additional keyword or positional arguments. """ - def __init__(self, name, args=None, kwargs=None): - self.name = name - self.args = args or () - self.kwargs = kwargs or {} + + def __init__(self, mark): + assert isinstance(mark, Mark), repr(mark) + self.mark = mark + + name = alias('mark.name') + args = alias('mark.args') + kwargs = alias('mark.kwargs') @property def markname(self): - return self.name # for backward-compat (2.4.1 had this attr) + return self.name # for backward-compat (2.4.1 had this attr) + + def __eq__(self, other): + return self.mark == other.mark if isinstance(other, MarkDecorator) else False def __repr__(self): - d = self.__dict__.copy() - name = d.pop('name') - return "" % (name, d) + return "" % (self.mark,) + + def with_args(self, *args, **kwargs): + """ return a MarkDecorator with extra arguments added + + unlike call this can be used even if the sole argument is a callable/class + + :return: MarkDecorator + """ + + mark = Mark(self.name, args, kwargs) + return self.__class__(self.mark.combined_with(mark)) def __call__(self, *args, **kwargs): """ if passed a single callable argument: decorate it with mark info. 
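``MarkDecorator.with_args`` above exists because calling a mark with a single callable argument decorates that callable instead of storing it. A sketch; ``checker`` and ``check_output`` are hypothetical names::

    import pytest

    def check_output():  # helper meant to be a mark *argument*
        pass

    # pytest.mark.checker(check_output) would decorate check_output;
    # with_args records it as an argument of the mark instead.
    checker = pytest.mark.checker.with_args(check_output)

    @checker
    def test_example():
        pass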
@@ -259,70 +356,110 @@ class MarkDecorator: is_class = inspect.isclass(func) if len(args) == 1 and (istestfunc(func) or is_class): if is_class: - if hasattr(func, 'pytestmark'): - mark_list = func.pytestmark - if not isinstance(mark_list, list): - mark_list = [mark_list] - # always work on a copy to avoid updating pytestmark - # from a superclass by accident - mark_list = mark_list + [self] - func.pytestmark = mark_list - else: - func.pytestmark = [self] + store_mark(func, self.mark) else: - holder = getattr(func, self.name, None) - if holder is None: - holder = MarkInfo( - self.name, self.args, self.kwargs - ) - setattr(func, self.name, holder) - else: - holder.add(self.args, self.kwargs) + store_legacy_markinfo(func, self.mark) + store_mark(func, self.mark) return func - kw = self.kwargs.copy() - kw.update(kwargs) - args = self.args + args - return self.__class__(self.name, args=args, kwargs=kw) + return self.with_args(*args, **kwargs) -def extract_argvalue(maybe_marked_args): - # TODO: incorrect mark data, the old code wanst able to collect lists - # individual parametrized argument sets can be wrapped in a series - # of markers in which case we unwrap the values and apply the mark - # at Function init - newmarks = {} - argval = maybe_marked_args - while isinstance(argval, MarkDecorator): - newmark = MarkDecorator(argval.markname, - argval.args[:-1], argval.kwargs) - newmarks[newmark.markname] = newmark - argval = argval.args[-1] - return argval, newmarks +def get_unpacked_marks(obj): + """ + obtain the unpacked marks that are stored on a object + """ + mark_list = getattr(obj, 'pytestmark', []) + + if not isinstance(mark_list, list): + mark_list = [mark_list] + return [ + getattr(mark, 'mark', mark) # unpack MarkDecorator + for mark in mark_list + ] -class MarkInfo: +def store_mark(obj, mark): + """store a Mark on a object + this is used to implement the Mark declarations/decorators correctly + """ + assert isinstance(mark, Mark), mark + # always reassign name to avoid updating pytestmark + # in a reference that was only borrowed + obj.pytestmark = get_unpacked_marks(obj) + [mark] + + +def store_legacy_markinfo(func, mark): + """create the legacy MarkInfo objects and put them onto the function + """ + if not isinstance(mark, Mark): + raise TypeError("got {mark!r} instead of a Mark".format(mark=mark)) + holder = getattr(func, mark.name, None) + if holder is None: + holder = MarkInfo(mark) + setattr(func, mark.name, holder) + else: + holder.add_mark(mark) + + +class Mark(namedtuple('Mark', 'name, args, kwargs')): + + def combined_with(self, other): + assert self.name == other.name + return Mark( + self.name, self.args + other.args, + dict(self.kwargs, **other.kwargs)) + + +class MarkInfo(object): """ Marking object created by :class:`MarkDecorator` instances. """ - def __init__(self, name, args, kwargs): - #: name of attribute - self.name = name - #: positional argument list, empty if none specified - self.args = args - #: keyword argument dictionary, empty if nothing specified - self.kwargs = kwargs.copy() - self._arglist = [(args, kwargs.copy())] + + def __init__(self, mark): + assert isinstance(mark, Mark), repr(mark) + self.combined = mark + self._marks = [mark] + + name = alias('combined.name') + args = alias('combined.args') + kwargs = alias('combined.kwargs') def __repr__(self): - return "" % ( - self.name, self.args, self.kwargs - ) + return "".format(self.combined) - def add(self, args, kwargs): + def add_mark(self, mark): """ add a MarkInfo with the given args and kwargs. 
""" - self._arglist.append((args, kwargs)) - self.args += args - self.kwargs.update(kwargs) + self._marks.append(mark) + self.combined = self.combined.combined_with(mark) def __iter__(self): """ yield MarkInfo objects each relating to a marking-call. """ - for args, kwargs in self._arglist: - yield MarkInfo(self.name, args, kwargs) + return imap(MarkInfo, self._marks) + + +MARK_GEN = MarkGenerator() + + +def _marked(func, mark): + """ Returns True if :func: is already marked with :mark:, False otherwise. + This can happen if marker is applied to class and the test file is + invoked more than once. + """ + try: + func_mark = getattr(func, mark.name) + except AttributeError: + return False + return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs + + +def transfer_markers(funcobj, cls, mod): + """ + this function transfers class level markers and module level markers + into function level markinfo objects + + this is the main reason why marks are so broken + the resolution will involve phasing out function level MarkInfo objects + + """ + for obj in (cls, mod): + for mark in get_unpacked_marks(obj): + if not _marked(funcobj, mark): + store_legacy_markinfo(funcobj, mark) diff --git a/_pytest/monkeypatch.py b/_pytest/monkeypatch.py index 852e72bed..39ac77013 100644 --- a/_pytest/monkeypatch.py +++ b/_pytest/monkeypatch.py @@ -1,17 +1,18 @@ """ monkeypatching and mocking functionality. """ +from __future__ import absolute_import, division, print_function -import os, sys +import os +import sys import re from py.builtin import _basestring - -import pytest +from _pytest.fixtures import fixture RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$") -@pytest.fixture -def monkeypatch(request): +@fixture +def monkeypatch(): """The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: @@ -30,8 +31,8 @@ def monkeypatch(request): will be raised if the set/deletion operation has no target. """ mpatch = MonkeyPatch() - request.addfinalizer(mpatch.undo) - return mpatch + yield mpatch + mpatch.undo() def resolve(name): @@ -70,9 +71,9 @@ def annotated_getattr(obj, name, ann): obj = getattr(obj, name) except AttributeError: raise AttributeError( - '%r object at %s has no attribute %r' % ( - type(obj).__name__, ann, name - ) + '%r object at %s has no attribute %r' % ( + type(obj).__name__, ann, name + ) ) return obj diff --git a/_pytest/nodes.py b/_pytest/nodes.py new file mode 100644 index 000000000..ad3af2ce6 --- /dev/null +++ b/_pytest/nodes.py @@ -0,0 +1,37 @@ +SEP = "/" + + +def _splitnode(nodeid): + """Split a nodeid into constituent 'parts'. + + Node IDs are strings, and can be things like: + '' + 'testing/code' + 'testing/code/test_excinfo.py' + 'testing/code/test_excinfo.py::TestFormattedExcinfo::()' + + Return values are lists e.g. + [] + ['testing', 'code'] + ['testing', 'code', 'test_excinfo.py'] + ['testing', 'code', 'test_excinfo.py', 'TestFormattedExcinfo', '()'] + """ + if nodeid == '': + # If there is no root node at all, return an empty list so the caller's logic can remain sane + return [] + parts = nodeid.split(SEP) + # Replace single last element 'test_foo.py::Bar::()' with multiple elements 'test_foo.py', 'Bar', '()' + parts[-1:] = parts[-1].split("::") + return parts + + +def ischildnode(baseid, nodeid): + """Return True if the nodeid is a child node of the baseid. + + E.g. 
'foo/bar::Baz::()' is a child of 'foo', 'foo/bar' and 'foo/bar::Baz', but not of 'foo/blorp' + """ + base_parts = _splitnode(baseid) + node_parts = _splitnode(nodeid) + if len(node_parts) < len(base_parts): + return False + return node_parts[:len(base_parts)] == base_parts diff --git a/_pytest/nose.py b/_pytest/nose.py index 038746868..d246c5603 100644 --- a/_pytest/nose.py +++ b/_pytest/nose.py @@ -1,10 +1,11 @@ """ run test suites written for nose. """ +from __future__ import absolute_import, division, print_function import sys import py -import pytest -from _pytest import unittest +from _pytest import unittest, runner, python +from _pytest.config import hookimpl def get_skip_exceptions(): @@ -19,45 +20,46 @@ def get_skip_exceptions(): def pytest_runtest_makereport(item, call): if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()): # let's substitute the excinfo with a pytest.skip one - call2 = call.__class__(lambda: - pytest.skip(str(call.excinfo.value)), call.when) + call2 = call.__class__( + lambda: runner.skip(str(call.excinfo.value)), call.when) call.excinfo = call2.excinfo -@pytest.hookimpl(trylast=True) +@hookimpl(trylast=True) def pytest_runtest_setup(item): if is_potential_nosetest(item): - if isinstance(item.parent, pytest.Generator): + if isinstance(item.parent, python.Generator): gen = item.parent if not hasattr(gen, '_nosegensetup'): call_optional(gen.obj, 'setup') - if isinstance(gen.parent, pytest.Instance): + if isinstance(gen.parent, python.Instance): call_optional(gen.parent.obj, 'setup') gen._nosegensetup = True if not call_optional(item.obj, 'setup'): # call module level setup if there is no object level one call_optional(item.parent.obj, 'setup') - #XXX this implies we only call teardown when setup worked + # XXX this implies we only call teardown when setup worked item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item) + def teardown_nose(item): if is_potential_nosetest(item): if not call_optional(item.obj, 'teardown'): call_optional(item.parent.obj, 'teardown') - #if hasattr(item.parent, '_nosegensetup'): + # if hasattr(item.parent, '_nosegensetup'): # #call_optional(item._nosegensetup, 'teardown') # del item.parent._nosegensetup def pytest_make_collect_report(collector): - if isinstance(collector, pytest.Generator): + if isinstance(collector, python.Generator): call_optional(collector.obj, 'setup') def is_potential_nosetest(item): # extra check needed since we do not do nose style setup/teardown # on direct unittest style classes - return isinstance(item, pytest.Function) and \ + return isinstance(item, python.Function) and \ not isinstance(item, unittest.TestCaseFunction) diff --git a/_pytest/outcomes.py b/_pytest/outcomes.py new file mode 100644 index 000000000..ff5ef756d --- /dev/null +++ b/_pytest/outcomes.py @@ -0,0 +1,140 @@ +""" +exception classes and constants handling test outcomes +as well as functions creating them +""" +from __future__ import absolute_import, division, print_function +import py +import sys + + +class OutcomeException(BaseException): + """ OutcomeException and its subclass instances indicate and + contain info about test and collection outcomes. 
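The new ``_pytest.nodes`` helpers above split node ids on ``/`` and ``::``. A short sketch mirroring the docstring examples::

    from _pytest.nodes import ischildnode

    assert ischildnode("foo", "foo/bar::Baz::()")
    assert ischildnode("foo/bar", "foo/bar::Baz::()")
    assert not ischildnode("foo/blorp", "foo/bar::Baz::()")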
+ """ + def __init__(self, msg=None, pytrace=True): + BaseException.__init__(self, msg) + self.msg = msg + self.pytrace = pytrace + + def __repr__(self): + if self.msg: + val = self.msg + if isinstance(val, bytes): + val = py._builtin._totext(val, errors='replace') + return val + return "<%s instance>" % (self.__class__.__name__,) + __str__ = __repr__ + + +TEST_OUTCOME = (OutcomeException, Exception) + + +class Skipped(OutcomeException): + # XXX hackish: on 3k we fake to live in the builtins + # in order to have Skipped exception printing shorter/nicer + __module__ = 'builtins' + + def __init__(self, msg=None, pytrace=True, allow_module_level=False): + OutcomeException.__init__(self, msg=msg, pytrace=pytrace) + self.allow_module_level = allow_module_level + + +class Failed(OutcomeException): + """ raised from an explicit call to pytest.fail() """ + __module__ = 'builtins' + + +class Exit(KeyboardInterrupt): + """ raised for immediate program exits (no tracebacks/summaries)""" + def __init__(self, msg="unknown reason"): + self.msg = msg + KeyboardInterrupt.__init__(self, msg) + +# exposed helper methods + + +def exit(msg): + """ exit testing process as if KeyboardInterrupt was triggered. """ + __tracebackhide__ = True + raise Exit(msg) + + +exit.Exception = Exit + + +def skip(msg=""): + """ skip an executing test with the given message. Note: it's usually + better to use the pytest.mark.skipif marker to declare a test to be + skipped under certain conditions like mismatching platforms or + dependencies. See the pytest_skipping plugin for details. + """ + __tracebackhide__ = True + raise Skipped(msg=msg) + + +skip.Exception = Skipped + + +def fail(msg="", pytrace=True): + """ explicitly fail an currently-executing test with the given Message. + + :arg pytrace: if false the msg represents the full failure information + and no python traceback will be reported. + """ + __tracebackhide__ = True + raise Failed(msg=msg, pytrace=pytrace) + + +fail.Exception = Failed + + +class XFailed(fail.Exception): + """ raised from an explicit call to pytest.xfail() """ + + +def xfail(reason=""): + """ xfail an executing test or setup functions with the given reason.""" + __tracebackhide__ = True + raise XFailed(reason) + + +xfail.Exception = XFailed + + +def importorskip(modname, minversion=None): + """ return imported module if it has at least "minversion" as its + __version__ attribute. If no minversion is specified the a skip + is only triggered if the module can not be imported. + """ + import warnings + __tracebackhide__ = True + compile(modname, '', 'eval') # to catch syntaxerrors + should_skip = False + + with warnings.catch_warnings(): + # make sure to ignore ImportWarnings that might happen because + # of existing directories with the same name we're trying to + # import but without a __init__.py file + warnings.simplefilter('ignore') + try: + __import__(modname) + except ImportError: + # Do not raise chained exception here(#1485) + should_skip = True + if should_skip: + raise Skipped("could not import %r" % (modname,), allow_module_level=True) + mod = sys.modules[modname] + if minversion is None: + return mod + verattr = getattr(mod, '__version__', None) + if minversion is not None: + try: + from pkg_resources import parse_version as pv + except ImportError: + raise Skipped("we have a required version for %r but can not import " + "pkg_resources to parse version strings." 
% (modname,), + allow_module_level=True) + if verattr is None or pv(verattr) < pv(minversion): + raise Skipped("module %r has __version__ %r, required is: %r" % ( + modname, verattr, minversion), allow_module_level=True) + return mod diff --git a/_pytest/pastebin.py b/_pytest/pastebin.py index 9f1cf9063..9d689819f 100644 --- a/_pytest/pastebin.py +++ b/_pytest/pastebin.py @@ -1,4 +1,6 @@ """ submit failure or test session information to a pastebin service. """ +from __future__ import absolute_import, division, print_function + import pytest import sys import tempfile @@ -7,9 +9,9 @@ import tempfile def pytest_addoption(parser): group = parser.getgroup("terminal reporting") group._addoption('--pastebin', metavar="mode", - action='store', dest="pastebin", default=None, - choices=['failed', 'all'], - help="send failed|all info to bpaste.net pastebin service.") + action='store', dest="pastebin", default=None, + choices=['failed', 'all'], + help="send failed|all info to bpaste.net pastebin service.") @pytest.hookimpl(trylast=True) @@ -95,4 +97,4 @@ def pytest_terminal_summary(terminalreporter): s = tw.stringio.getvalue() assert len(s) pastebinurl = create_new_paste(s) - tr.write_line("%s --> %s" %(msg, pastebinurl)) + tr.write_line("%s --> %s" % (msg, pastebinurl)) diff --git a/_pytest/pytester.py b/_pytest/pytester.py index 17ff529a6..82aa00e0d 100644 --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -1,4 +1,6 @@ """ (disabled by default) support for testing pytest and pytest plugins. """ +from __future__ import absolute_import, division, print_function + import codecs import gc import os @@ -10,8 +12,9 @@ import time import traceback from fnmatch import fnmatch -from py.builtin import print_ +from weakref import WeakKeyDictionary +from _pytest.capture import MultiCapture, SysCapture from _pytest._code import Source import py import pytest @@ -22,13 +25,13 @@ from _pytest.assertion.rewrite import AssertionRewritingHook def pytest_addoption(parser): # group = parser.getgroup("pytester", "pytester (self-tests) options") parser.addoption('--lsof', - action="store_true", dest="lsof", default=False, - help=("run FD checks if lsof is available")) + action="store_true", dest="lsof", default=False, + help=("run FD checks if lsof is available")) parser.addoption('--runpytest', default="inprocess", dest="runpytest", - choices=("inprocess", "subprocess", ), - help=("run pytest sub runs in tests using an 'inprocess' " - "or 'subprocess' (python -m main) method")) + choices=("inprocess", "subprocess", ), + help=("run pytest sub runs in tests using an 'inprocess' " + "or 'subprocess' (python -m main) method")) def pytest_configure(config): @@ -59,7 +62,7 @@ class LsofFdLeakChecker(object): def _parse_lsof_output(self, out): def isopen(line): return line.startswith('f') and ("deleted" not in line and - 'mem' not in line and "txt" not in line and 'cwd' not in line) + 'mem' not in line and "txt" not in line and 'cwd' not in line) open_files = [] @@ -85,7 +88,7 @@ class LsofFdLeakChecker(object): return True @pytest.hookimpl(hookwrapper=True, tryfirst=True) - def pytest_runtest_item(self, item): + def pytest_runtest_protocol(self, item): lines1 = self.get_open_files() yield if hasattr(sys, "pypy_version_info"): @@ -104,7 +107,8 @@ class LsofFdLeakChecker(object): error.extend([str(f) for f in lines2]) error.append(error[0]) error.append("*** function %s:%s: %s " % item.location) - pytest.fail("\n".join(error), pytrace=False) + error.append("See issue #2366") + item.warn('', "\n".join(error)) # XXX copied 
from execnet's conftest.py - needs to be merged @@ -118,6 +122,7 @@ winpymap = { 'python3.5': r'C:\Python35\python.exe', } + def getexecutable(name, cache={}): try: return cache[name] @@ -126,19 +131,20 @@ def getexecutable(name, cache={}): if executable: import subprocess popen = subprocess.Popen([str(executable), "--version"], - universal_newlines=True, stderr=subprocess.PIPE) + universal_newlines=True, stderr=subprocess.PIPE) out, err = popen.communicate() if name == "jython": if not err or "2.5" not in err: executable = None if "2.5.2" in err: - executable = None # http://bugs.jython.org/issue1790 + executable = None # http://bugs.jython.org/issue1790 elif popen.returncode != 0: # Handle pyenv's 127. executable = None cache[name] = executable return executable + @pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4", 'pypy', 'pypy3']) def anypython(request): @@ -155,6 +161,8 @@ def anypython(request): return executable # used at least by pytest-xdist plugin + + @pytest.fixture def _pytest(request): """ Return a helper which offers a gethookrecorder(hook) @@ -163,6 +171,7 @@ def _pytest(request): """ return PytestArg(request) + class PytestArg: def __init__(self, request): self.request = request @@ -173,9 +182,9 @@ class PytestArg: return hookrecorder -def get_public_names(l): - """Only return names from iterator l without a leading underscore.""" - return [x for x in l if x[0] != "_"] +def get_public_names(values): + """Only return names from iterator values without a leading underscore.""" + return [x for x in values if x[0] != "_"] class ParsedCall: @@ -186,7 +195,7 @@ class ParsedCall: def __repr__(self): d = self.__dict__.copy() del d['_name'] - return "" %(self._name, d) + return "" % (self._name, d) class HookRecorder: @@ -226,15 +235,15 @@ class HookRecorder: name, check = entries.pop(0) for ind, call in enumerate(self.calls[i:]): if call._name == name: - print_("NAMEMATCH", name, call) + print("NAMEMATCH", name, call) if eval(check, backlocals, call.__dict__): - print_("CHECKERMATCH", repr(check), "->", call) + print("CHECKERMATCH", repr(check), "->", call) else: - print_("NOCHECKERMATCH", repr(check), "-", call) + print("NOCHECKERMATCH", repr(check), "-", call) continue i += ind + 1 break - print_("NONAMEMATCH", name, "with", call) + print("NONAMEMATCH", name, "with", call) else: pytest.fail("could not find %r check %r" % (name, check)) @@ -249,9 +258,9 @@ class HookRecorder: pytest.fail("\n".join(lines)) def getcall(self, name): - l = self.getcalls(name) - assert len(l) == 1, (name, l) - return l[0] + values = self.getcalls(name) + assert len(values) == 1, (name, values) + return values[0] # functionality for test reports @@ -260,9 +269,9 @@ class HookRecorder: return [x.report for x in self.getcalls(names)] def matchreport(self, inamepart="", - names="pytest_runtest_logreport pytest_collectreport", when=None): + names="pytest_runtest_logreport pytest_collectreport", when=None): """ return a testreport whose dotted import path matches """ - l = [] + values = [] for rep in self.getreports(names=names): try: if not when and rep.when != "call" and rep.passed: @@ -273,14 +282,14 @@ class HookRecorder: if when and getattr(rep, 'when', None) != when: continue if not inamepart or inamepart in rep.nodeid.split("::"): - l.append(rep) - if not l: + values.append(rep) + if not values: raise ValueError("could not find test report matching %r: " "no test reports at all!" 
% (inamepart,)) - if len(l) > 1: + if len(values) > 1: raise ValueError( - "found 2 or more testreports matching %r: %s" %(inamepart, l)) - return l[0] + "found 2 or more testreports matching %r: %s" % (inamepart, values)) + return values[0] def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'): @@ -294,7 +303,7 @@ class HookRecorder: skipped = [] failed = [] for rep in self.getreports( - "pytest_collectreport pytest_runtest_logreport"): + "pytest_collectreport pytest_runtest_logreport"): if rep.passed: if getattr(rep, "when", None) == "call": passed.append(rep) @@ -332,7 +341,9 @@ def testdir(request, tmpdir_factory): return Testdir(request, tmpdir_factory) -rex_outcome = re.compile("(\d+) ([\w-]+)") +rex_outcome = re.compile(r"(\d+) ([\w-]+)") + + class RunResult: """The result of running a command. @@ -348,6 +359,7 @@ class RunResult: :duration: Duration in seconds. """ + def __init__(self, ret, outlines, errlines, duration): self.ret = ret self.outlines = outlines @@ -367,15 +379,19 @@ class RunResult: for num, cat in outcomes: d[cat] = int(num) return d + raise ValueError("Pytest terminal report not found") - def assert_outcomes(self, passed=0, skipped=0, failed=0): + def assert_outcomes(self, passed=0, skipped=0, failed=0, error=0): """ assert that the specified outcomes appear with the respective numbers (0 means it didn't occur) in the text output from a test run.""" d = self.parseoutcomes() - assert passed == d.get("passed", 0) - assert skipped == d.get("skipped", 0) - assert failed == d.get("failed", 0) - + obtained = { + 'passed': d.get('passed', 0), + 'skipped': d.get('skipped', 0), + 'failed': d.get('failed', 0), + 'error': d.get('error', 0), + } + assert obtained == dict(passed=passed, skipped=skipped, failed=failed, error=error) class Testdir: @@ -401,6 +417,7 @@ class Testdir: def __init__(self, request, tmpdir_factory): self.request = request + self._mod_collections = WeakKeyDictionary() # XXX remove duplication with tmpdir plugin basetmp = tmpdir_factory.ensuretemp("testdir") name = request.function.__name__ @@ -414,7 +431,7 @@ class Testdir: self.plugins = [] self._savesyspath = (list(sys.path), list(sys.meta_path)) self._savemodulekeys = set(sys.modules) - self.chdir() # always chdir + self.chdir() # always chdir self.request.addfinalizer(self.finalize) method = self.request.config.getoption("--runpytest") if method == "inprocess": @@ -446,9 +463,10 @@ class Testdir: the module is re-imported. 
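To show how the reworked `assert_outcomes` above behaves, a small sketch written as a pytester self-test (assumes the `pytester` plugin is enabled, e.g. via `pytest_plugins = "pytester"` in a conftest; test names are illustrative):

def test_outcome_counts(testdir):
    testdir.makepyfile("""
        import pytest

        def test_ok():
            pass

        @pytest.mark.skip(reason='demo')
        def test_skipped():
            pass
    """)
    result = testdir.runpytest()
    # all four counters are now compared as a single dict, so a failure shows
    # the complete expected/obtained picture; omitted outcomes default to 0
    result.assert_outcomes(passed=1, skipped=1)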
""" for name in set(sys.modules).difference(self._savemodulekeys): - # it seems zope.interfaces is keeping some state - # (used by twisted related tests) - if name != "zope.interface": + # some zope modules used by twisted-related tests keeps internal + # state and can't be deleted; we had some trouble in the past + # with zope.interface for example + if not name.startswith("zope"): del sys.modules[name] def make_hook_recorder(self, pluginmanager): @@ -468,7 +486,7 @@ class Testdir: if not hasattr(self, '_olddir'): self._olddir = old - def _makefile(self, ext, args, kwargs): + def _makefile(self, ext, args, kwargs, encoding="utf-8"): items = list(kwargs.items()) if args: source = py.builtin._totext("\n").join( @@ -488,8 +506,8 @@ class Testdir: source_unicode = "\n".join([my_totext(line) for line in source.lines]) source = py.builtin._totext(source_unicode) - content = source.strip().encode("utf-8") # + "\n" - #content = content.rstrip() + "\n" + content = source.strip().encode(encoding) # + "\n" + # content = content.rstrip() + "\n" p.write(content, "wb") if ret is None: ret = p @@ -565,7 +583,7 @@ class Testdir: def mkpydir(self, name): """Create a new python package. - This creates a (sub)direcotry with an empty ``__init__.py`` + This creates a (sub)directory with an empty ``__init__.py`` file so that is recognised as a python package. """ @@ -574,6 +592,7 @@ class Testdir: return p Session = Session + def getnode(self, config, arg): """Return the collection node of a file. @@ -654,13 +673,13 @@ class Testdir: """ p = self.makepyfile(source) - l = list(cmdlineargs) + [p] - return self.inline_run(*l) + values = list(cmdlineargs) + [p] + return self.inline_run(*values) def inline_genitems(self, *args): """Run ``pytest.main(['--collectonly'])`` in-process. - Retuns a tuple of the collected items and a + Returns a tuple of the collected items and a :py:class:`HookRecorder` instance. This runs the :py:func:`pytest.main` function to run all of @@ -733,7 +752,8 @@ class Testdir: if kwargs.get("syspathinsert"): self.syspathinsert() now = time.time() - capture = py.io.StdCapture() + capture = MultiCapture(Capture=SysCapture) + capture.start_capturing() try: try: reprec = self.inline_run(*args, **kwargs) @@ -748,13 +768,14 @@ class Testdir: class reprec: ret = 3 finally: - out, err = capture.reset() + out, err = capture.readouterr() + capture.stop_capturing() sys.stdout.write(out) sys.stderr.write(err) res = RunResult(reprec.ret, out.split("\n"), err.split("\n"), - time.time()-now) + time.time() - now) res.reprec = reprec return res @@ -770,11 +791,11 @@ class Testdir: args = [str(x) for x in args] for x in args: if str(x).startswith('--basetemp'): - #print ("basedtemp exists: %s" %(args,)) + # print("basedtemp exists: %s" %(args,)) break else: args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp')) - #print ("added basetemp: %s" %(args,)) + # print("added basetemp: %s" %(args,)) return args def parseconfig(self, *args): @@ -812,7 +833,7 @@ class Testdir: self.request.addfinalizer(config._ensure_unconfigure) return config - def getitem(self, source, funcname="test_func"): + def getitem(self, source, funcname="test_func"): """Return the test item for a test function. 
This writes the source to a python file and runs pytest's @@ -829,10 +850,10 @@ class Testdir: for item in items: if item.name == funcname: return item - assert 0, "%r item not found in module:\n%s\nitems: %s" %( + assert 0, "%r item not found in module:\n%s\nitems: %s" % ( funcname, source, items) - def getitems(self, source): + def getitems(self, source): """Return all test items collected from the module. This writes the source to a python file and runs pytest's @@ -843,7 +864,7 @@ class Testdir: modcol = self.getmodulecol(source) return self.genitems([modcol]) - def getmodulecol(self, source, configargs=(), withinit=False): + def getmodulecol(self, source, configargs=(), withinit=False): """Return the module collection node for ``source``. This writes ``source`` to a file using :py:meth:`makepyfile` @@ -856,15 +877,16 @@ class Testdir: :py:meth:`parseconfigure`. :param withinit: Whether to also write a ``__init__.py`` file - to the temporarly directory to ensure it is a package. + to the temporary directory to ensure it is a package. """ kw = {self.request.function.__name__: Source(source).strip()} path = self.makepyfile(**kw) if withinit: - self.makepyfile(__init__ = "#") + self.makepyfile(__init__="#") self.config = config = self.parseconfigure(path, *configargs) node = self.getnode(config, path) + return node def collect_by_name(self, modcol, name): @@ -879,7 +901,9 @@ class Testdir: :param name: The name of the node to return. """ - for colitem in modcol._memocollect(): + if modcol not in self._mod_collections: + self._mod_collections[modcol] = list(modcol.collect()) + for colitem in self._mod_collections[modcol]: if colitem.name == name: return colitem @@ -896,8 +920,11 @@ class Testdir: env['PYTHONPATH'] = os.pathsep.join(filter(None, [ str(os.getcwd()), env.get('PYTHONPATH', '')])) kw['env'] = env - return subprocess.Popen(cmdargs, - stdout=stdout, stderr=stderr, **kw) + + popen = subprocess.Popen(cmdargs, stdin=subprocess.PIPE, stdout=stdout, stderr=stderr, **kw) + popen.stdin.close() + + return popen def run(self, *cmdargs): """Run a command with arguments. @@ -914,14 +941,14 @@ class Testdir: cmdargs = [str(x) for x in cmdargs] p1 = self.tmpdir.join("stdout") p2 = self.tmpdir.join("stderr") - print_("running:", ' '.join(cmdargs)) - print_(" in:", str(py.path.local())) + print("running:", ' '.join(cmdargs)) + print(" in:", str(py.path.local())) f1 = codecs.open(str(p1), "w", encoding="utf8") f2 = codecs.open(str(p2), "w", encoding="utf8") try: now = time.time() popen = self.popen(cmdargs, stdout=f1, stderr=f2, - close_fds=(sys.platform != "win32")) + close_fds=(sys.platform != "win32")) ret = popen.wait() finally: f1.close() @@ -936,19 +963,19 @@ class Testdir: f2.close() self._dump_lines(out, sys.stdout) self._dump_lines(err, sys.stderr) - return RunResult(ret, out, err, time.time()-now) + return RunResult(ret, out, err, time.time() - now) def _dump_lines(self, lines, fp): try: for line in lines: - py.builtin.print_(line, file=fp) + print(line, file=fp) except UnicodeEncodeError: print("couldn't print to %s because of encoding" % (fp,)) def _getpytestargs(self): # we cannot use "(sys.executable,script)" # because on windows the script is e.g. a pytest.exe - return (sys.executable, _pytest_fullpath,) # noqa + return (sys.executable, _pytest_fullpath,) # noqa def runpython(self, script): """Run a python script using sys.executable as interpreter. 
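For orientation, a minimal end-to-end sketch of the `Testdir` workflow these helpers support (again assuming the `pytester` plugin is enabled; names are illustrative):

def test_example_run(testdir):
    testdir.makepyfile("""
        def test_addition():
            assert 1 + 1 == 2
    """)
    result = testdir.runpytest('-v')
    # match glob patterns against the captured terminal output
    result.stdout.fnmatch_lines(['*test_addition*'])
    assert result.ret == 0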
@@ -975,12 +1002,12 @@ class Testdir: """ p = py.path.local.make_numbered_dir(prefix="runpytest-", - keep=None, rootdir=self.tmpdir) + keep=None, rootdir=self.tmpdir) args = ('--basetemp=%s' % p, ) + args - #for x in args: + # for x in args: # if '--confcutdir' in str(x): # break - #else: + # else: # pass # args = ('--confcutdir=.',) + args plugins = [x for x in self.plugins if isinstance(x, str)] @@ -998,7 +1025,7 @@ class Testdir: The pexpect child is returned. """ - basetemp = self.tmpdir.mkdir("pexpect") + basetemp = self.tmpdir.mkdir("temp-pexpect") invoke = " ".join(map(str, self._getpytestargs())) cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string) return self.spawn(cmd, expect_timeout=expect_timeout) @@ -1019,12 +1046,13 @@ class Testdir: child.timeout = expect_timeout return child + def getdecoded(out): - try: - return out.decode("utf-8") - except UnicodeDecodeError: - return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( - py.io.saferepr(out),) + try: + return out.decode("utf-8") + except UnicodeDecodeError: + return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % ( + py.io.saferepr(out),) class LineComp: @@ -1054,7 +1082,7 @@ class LineMatcher: """ - def __init__(self, lines): + def __init__(self, lines): self.lines = lines self._log_output = [] @@ -1093,7 +1121,7 @@ class LineMatcher: """ for i, line in enumerate(self.lines): if fnline == line or fnmatch(line, fnline): - return self.lines[i+1:] + return self.lines[i + 1:] raise ValueError("line %r not found in output" % fnline) def _log(self, *args): diff --git a/_pytest/python.py b/_pytest/python.py index e46f2f1bc..41fd2bdb7 100644 --- a/_pytest/python.py +++ b/_pytest/python.py @@ -1,26 +1,30 @@ """ Python test discovery, setup and run of test functions. """ +from __future__ import absolute_import, division, print_function import fnmatch import inspect import sys +import os import collections -import math +from textwrap import dedent from itertools import count import py -import pytest from _pytest.mark import MarkerError - +from _pytest.config import hookimpl import _pytest import _pytest._pluggy as pluggy from _pytest import fixtures +from _pytest import main from _pytest.compat import ( - isclass, isfunction, is_generator, _escape_strings, + isclass, isfunction, is_generator, _ascii_escaped, REGEX_TYPE, STRING_TYPES, NoneType, NOTSET, get_real_func, getfslineno, safe_getattr, - getlocation, enum, + safe_str, getlocation, enum, ) +from _pytest.outcomes import fail +from _pytest.mark import transfer_markers cutdir1 = py.path.local(pluggy.__file__.rstrip("oc")) cutdir2 = py.path.local(_pytest.__file__).dirpath() @@ -45,10 +49,9 @@ def filter_traceback(entry): return p != cutdir1 and not p.relto(cutdir2) and not p.relto(cutdir3) - def pyobj_property(name): def get(self): - node = self.getparent(getattr(pytest, name)) + node = self.getparent(getattr(__import__('pytest'), name)) if node is not None: return node.obj doc = "python %s object this node was collected from (can be None)." 
% ( @@ -59,8 +62,8 @@ def pyobj_property(name): def pytest_addoption(parser): group = parser.getgroup("general") group.addoption('--fixtures', '--funcargs', - action="store_true", dest="showfixtures", default=False, - help="show available fixtures, sorted by plugin appearance") + action="store_true", dest="showfixtures", default=False, + help="show available fixtures, sorted by plugin appearance") group.addoption( '--fixtures-per-test', action="store_true", @@ -69,20 +72,20 @@ def pytest_addoption(parser): help="show fixtures per test", ) parser.addini("usefixtures", type="args", default=[], - help="list of default fixtures to be used with this project") + help="list of default fixtures to be used with this project") parser.addini("python_files", type="args", - default=['test_*.py', '*_test.py'], - help="glob-style file patterns for Python test module discovery") - parser.addini("python_classes", type="args", default=["Test",], - help="prefixes or glob names for Python test class discovery") - parser.addini("python_functions", type="args", default=["test",], - help="prefixes or glob names for Python test function and " - "method discovery") + default=['test_*.py', '*_test.py'], + help="glob-style file patterns for Python test module discovery") + parser.addini("python_classes", type="args", default=["Test", ], + help="prefixes or glob names for Python test class discovery") + parser.addini("python_functions", type="args", default=["test", ], + help="prefixes or glob names for Python test function and " + "method discovery") group.addoption("--import-mode", default="prepend", - choices=["prepend", "append"], dest="importmode", - help="prepend/append to sys.path when importing test modules, " - "default is to prepend.") + choices=["prepend", "append"], dest="importmode", + help="prepend/append to sys.path when importing test modules, " + "default is to prepend.") def pytest_cmdline_main(config): @@ -109,39 +112,25 @@ def pytest_generate_tests(metafunc): for marker in markers: metafunc.parametrize(*marker.args, **marker.kwargs) + def pytest_configure(config): config.addinivalue_line("markers", - "parametrize(argnames, argvalues): call a test function multiple " - "times passing in different arguments in turn. argvalues generally " - "needs to be a list of values if argnames specifies only one name " - "or a list of tuples of values if argnames specifies multiple names. " - "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " - "decorated test function, one with arg1=1 and another with arg1=2." - "see http://pytest.org/latest/parametrize.html for more info and " - "examples." - ) + "parametrize(argnames, argvalues): call a test function multiple " + "times passing in different arguments in turn. argvalues generally " + "needs to be a list of values if argnames specifies only one name " + "or a list of tuples of values if argnames specifies multiple names. " + "Example: @parametrize('arg1', [1,2]) would lead to two calls of the " + "decorated test function, one with arg1=1 and another with arg1=2." + "see http://pytest.org/latest/parametrize.html for more info and " + "examples." + ) config.addinivalue_line("markers", - "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " - "all of the specified fixtures. 
see http://pytest.org/latest/fixture.html#usefixtures " - ) - -@pytest.hookimpl(trylast=True) -def pytest_namespace(): - raises.Exception = pytest.fail.Exception - return { - 'raises': raises, - 'approx': approx, - 'collect': { - 'Module': Module, - 'Class': Class, - 'Instance': Instance, - 'Function': Function, - 'Generator': Generator, - } - } + "usefixtures(fixturename1, fixturename2, ...): mark tests as needing " + "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures " + ) -@pytest.hookimpl(trylast=True) +@hookimpl(trylast=True) def pytest_pyfunc_call(pyfuncitem): testfunction = pyfuncitem.obj if pyfuncitem._isyieldedfunction(): @@ -154,6 +143,7 @@ def pytest_pyfunc_call(pyfuncitem): testfunction(**testargs) return True + def pytest_collect_file(path, parent): ext = path.ext if ext == ".py": @@ -162,19 +152,21 @@ def pytest_collect_file(path, parent): if path.fnmatch(pat): break else: - return + return ihook = parent.session.gethookproxy(path) return ihook.pytest_pycollect_makemodule(path=path, parent=parent) + def pytest_pycollect_makemodule(path, parent): return Module(path, parent) -@pytest.hookimpl(hookwrapper=True) + +@hookimpl(hookwrapper=True) def pytest_pycollect_makeitem(collector, name, obj): outcome = yield res = outcome.get_result() if res is not None: - raise StopIteration + return # nothing was collected elsewhere, let's do it here if isclass(obj): if collector.istestclass(obj, name): @@ -187,9 +179,8 @@ def pytest_pycollect_makeitem(collector, name, obj): # or a funtools.wrapped. # We musn't if it's been wrapped with mock.patch (python 2 only) if not (isfunction(obj) or isfunction(get_real_func(obj))): - collector.warn(code="C2", message= - "cannot collect %r because it is not a function." - % name, ) + collector.warn(code="C2", message="cannot collect %r because it is not a function." 
+ % name, ) elif getattr(obj, "__test__", True): if is_generator(obj): res = Generator(name, parent=collector) @@ -197,9 +188,9 @@ def pytest_pycollect_makeitem(collector, name, obj): res = list(collector._genfunctions(name, obj)) outcome.force_result(res) -def pytest_make_parametrize_id(config, val): - return None +def pytest_make_parametrize_id(config, val, argname=None): + return None class PyobjContext(object): @@ -207,6 +198,7 @@ class PyobjContext(object): cls = pyobj_property("Class") instance = pyobj_property("Instance") + class PyobjMixin(PyobjContext): def obj(): def fget(self): @@ -235,8 +227,7 @@ class PyobjMixin(PyobjContext): continue name = node.name if isinstance(node, Module): - assert name.endswith(".py") - name = name[:-3] + name = os.path.splitext(name)[0] if stopatmodule: if includemodule: parts.append(name) @@ -265,7 +256,8 @@ class PyobjMixin(PyobjContext): assert isinstance(lineno, int) return fspath, lineno, modpath -class PyCollector(PyobjMixin, pytest.Collector): + +class PyCollector(PyobjMixin, main.Collector): def funcnamefilter(self, name): return self._matches_prefix_or_glob_option('python_functions', name) @@ -283,10 +275,22 @@ class PyCollector(PyobjMixin, pytest.Collector): return self._matches_prefix_or_glob_option('python_classes', name) def istestfunction(self, obj, name): - return ( - (self.funcnamefilter(name) or self.isnosetest(obj)) and - safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None - ) + if self.funcnamefilter(name) or self.isnosetest(obj): + if isinstance(obj, staticmethod): + # static methods need to be unwrapped + obj = safe_getattr(obj, '__func__', False) + if obj is False: + # Python 2.6 wraps in a different way that we won't try to handle + msg = "cannot collect static method %r because " \ + "it is not a function (always the case in Python 2.6)" + self.warn( + code="C2", message=msg % name) + return False + return ( + safe_getattr(obj, "__call__", False) and fixtures.getfixturemarker(obj) is None + ) + else: + return False def istestclass(self, obj, name): return self.classnamefilter(name) or self.isnosetest(obj) @@ -317,7 +321,7 @@ class PyCollector(PyobjMixin, pytest.Collector): for basecls in inspect.getmro(self.obj.__class__): dicts.append(basecls.__dict__) seen = {} - l = [] + values = [] for dic in dicts: for name, obj in list(dic.items()): if name in seen: @@ -328,12 +332,12 @@ class PyCollector(PyobjMixin, pytest.Collector): continue if not isinstance(res, list): res = [res] - l.extend(res) - l.sort(key=lambda item: item.reportinfo()[:2]) - return l + values.extend(res) + values.sort(key=lambda item: item.reportinfo()[:2]) + return values def makeitem(self, name, obj): - #assert self.ihook.fspath == self.fspath, self + # assert self.ihook.fspath == self.fspath, self return self.ihook.pytest_pycollect_makeitem( collector=self, name=name, obj=obj) @@ -369,43 +373,16 @@ class PyCollector(PyobjMixin, pytest.Collector): yield Function(name=subname, parent=self, callspec=callspec, callobj=funcobj, fixtureinfo=fixtureinfo, - keywords={callspec.id:True}, + keywords={callspec.id: True}, originalname=name, ) -def _marked(func, mark): - """ Returns True if :func: is already marked with :mark:, False otherwise. - This can happen if marker is applied to class and the test file is - invoked more than once. 
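To illustrate why `istestfunction` above unwraps staticmethods before probing for `__call__`, a standalone sketch (class and method names are hypothetical):

class TestTools(object):
    @staticmethod
    def test_static():
        pass

raw = TestTools.__dict__['test_static']  # the staticmethod wrapper, not a plain function
func = getattr(raw, '__func__', None)    # unwrap; available on Python 2.7 and 3.x
assert callable(func)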
- """ - try: - func_mark = getattr(func, mark.name) - except AttributeError: - return False - return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs - - -def transfer_markers(funcobj, cls, mod): - # XXX this should rather be code in the mark plugin or the mark - # plugin should merge with the python plugin. - for holder in (cls, mod): - try: - pytestmark = holder.pytestmark - except AttributeError: - continue - if isinstance(pytestmark, list): - for mark in pytestmark: - if not _marked(funcobj, mark): - mark(funcobj) - else: - if not _marked(funcobj, pytestmark): - pytestmark(funcobj) - -class Module(pytest.File, PyCollector): +class Module(main.File, PyCollector): """ Collector for test classes and functions. """ + def _getobj(self): - return self._memoizedcall('_obj', self._importtestmodule) + return self._importtestmodule() def collect(self): self.session._fixturemanager.parsefactories(self) @@ -429,7 +406,7 @@ class Module(pytest.File, PyCollector): " %s\n" "HINT: remove __pycache__ / .pyc files and/or use a " "unique basename for your test file modules" - % e.args + % e.args ) except ImportError: from _pytest._code.code import ExceptionInfo @@ -437,7 +414,7 @@ class Module(pytest.File, PyCollector): if self.config.getoption('verbose') < 2: exc_info.traceback = exc_info.traceback.filter(filter_traceback) exc_repr = exc_info.getrepr(style='short') if exc_info.traceback else exc_info.exconly() - formatted_tb = py._builtin._totext(exc_repr) + formatted_tb = safe_str(exc_repr) raise self.CollectError( "ImportError while importing test module '{fspath}'.\n" "Hint: make sure your test modules/packages have valid Python names.\n" @@ -448,9 +425,10 @@ class Module(pytest.File, PyCollector): if e.allow_module_level: raise raise self.CollectError( - "Using pytest.skip outside of a test is not allowed. If you are " - "trying to decorate a test function, use the @pytest.mark.skip " - "or @pytest.mark.skipif decorators instead." + "Using pytest.skip outside of a test is not allowed. " + "To decorate a test function, use the @pytest.mark.skip " + "or @pytest.mark.skipif decorators instead, and to skip a " + "module use `pytestmark = pytest.mark.{skip,skipif}." ) self.config.pluginmanager.consider_module(mod) return mod @@ -501,10 +479,13 @@ def _get_xunit_func(obj, name): class Class(PyCollector): """ Collector for test methods. """ + def collect(self): + if not safe_getattr(self.obj, "__test__", True): + return [] if hasinit(self.obj): self.warn("C1", "cannot collect test class %r because it has a " - "__init__ constructor" % self.obj.__name__) + "__init__ constructor" % self.obj.__name__) return [] elif hasnew(self.obj): self.warn("C1", "cannot collect test class %r because it has a " @@ -525,6 +506,7 @@ class Class(PyCollector): fin_class = getattr(fin_class, '__func__', fin_class) self.addfinalizer(lambda: fin_class(self.obj)) + class Instance(PyCollector): def _getobj(self): return self.parent.obj() @@ -537,6 +519,7 @@ class Instance(PyCollector): self.obj = self._getobj() return self.obj + class FunctionMixin(PyobjMixin): """ mixin for the code common to Function and Generator. 
""" @@ -572,7 +555,7 @@ class FunctionMixin(PyobjMixin): if ntraceback == traceback: ntraceback = ntraceback.cut(path=path) if ntraceback == traceback: - #ntraceback = ntraceback.cut(excludepath=cutdir2) + # ntraceback = ntraceback.cut(excludepath=cutdir2) ntraceback = ntraceback.filter(filter_traceback) if not ntraceback: ntraceback = traceback @@ -586,11 +569,11 @@ class FunctionMixin(PyobjMixin): entry.set_repr_style('short') def _repr_failure_py(self, excinfo, style="long"): - if excinfo.errisinstance(pytest.fail.Exception): + if excinfo.errisinstance(fail.Exception): if not excinfo.value.pytrace: return py._builtin._totext(excinfo.value) return super(FunctionMixin, self)._repr_failure_py(excinfo, - style=style) + style=style) def repr_failure(self, excinfo, outerr=None): assert outerr is None, "XXX outerr usage is deprecated" @@ -609,27 +592,27 @@ class Generator(FunctionMixin, PyCollector): self.session._setupstate.prepare(self) # see FunctionMixin.setup and test_setupstate_is_preserved_134 self._preservedparent = self.parent.obj - l = [] + values = [] seen = {} for i, x in enumerate(self.obj()): name, call, args = self.getcallargs(x) if not callable(call): - raise TypeError("%r yielded non callable test %r" %(self.obj, call,)) + raise TypeError("%r yielded non callable test %r" % (self.obj, call,)) if name is None: name = "[%d]" % i else: name = "['%s']" % name if name in seen: - raise ValueError("%r generated tests with non-unique name %r" %(self, name)) + raise ValueError("%r generated tests with non-unique name %r" % (self, name)) seen[name] = True - l.append(self.Function(name, self, args=args, callobj=call)) - self.config.warn('C1', deprecated.YIELD_TESTS, fslocation=self.fspath) - return l + values.append(self.Function(name, self, args=args, callobj=call)) + self.warn('C1', deprecated.YIELD_TESTS) + return values def getcallargs(self, obj): if not isinstance(obj, (tuple, list)): obj = (obj,) - # explict naming + # explicit naming if isinstance(obj[0], py.builtin._basestring): name = obj[0] obj = obj[1:] @@ -679,7 +662,7 @@ class CallSpec2(object): def _checkargnotcontained(self, arg): if arg in self.params or arg in self.funcargs: - raise ValueError("duplicate %r" %(arg,)) + raise ValueError("duplicate %r" % (arg,)) def getparam(self, name): try: @@ -695,7 +678,7 @@ class CallSpec2(object): def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum, param_index): - for arg,val in zip(argnames, valset): + for arg, val in zip(argnames, valset): self._checkargnotcontained(arg) valtype_for_arg = valtypes[arg] getattr(self, valtype_for_arg)[arg] = val @@ -724,6 +707,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): test configuration or values specified in the class or module where a test function is defined. """ + def __init__(self, function, fixtureinfo, config, cls=None, module=None): #: access to the :class:`_pytest.config.Config` object for the test session self.config = config @@ -745,7 +729,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): self._arg2fixturedefs = fixtureinfo.name2fixturedefs def parametrize(self, argnames, argvalues, indirect=False, ids=None, - scope=None): + scope=None): """ Add new invocations to the underlying test function using the list of argvalues for the given argnames. Parametrization is performed during the collection phase. If you need to setup expensive resources @@ -784,36 +768,34 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): to set a dynamic scope using test context or configuration. 
""" from _pytest.fixtures import scope2index - from _pytest.mark import extract_argvalue + from _pytest.mark import MARK_GEN, ParameterSet from py.io import saferepr - unwrapped_argvalues = [] - newkeywords = [] - for maybe_marked_args in argvalues: - argval, newmarks = extract_argvalue(maybe_marked_args) - unwrapped_argvalues.append(argval) - newkeywords.append(newmarks) - argvalues = unwrapped_argvalues - if not isinstance(argnames, (tuple, list)): argnames = [x.strip() for x in argnames.split(",") if x.strip()] - if len(argnames) == 1: - argvalues = [(val,) for val in argvalues] - if not argvalues: - argvalues = [(NOTSET,) * len(argnames)] - # we passed a empty list to parameterize, skip that test - # + force_tuple = len(argnames) == 1 + else: + force_tuple = False + parameters = [ + ParameterSet.extract_from(x, legacy_force_tuple=force_tuple) + for x in argvalues] + del argvalues + + if not parameters: fs, lineno = getfslineno(self.function) - newmark = pytest.mark.skip( - reason="got empty parameter set %r, function %s at %s:%d" % ( - argnames, self.function.__name__, fs, lineno)) - newkeywords = [{newmark.markname: newmark}] + reason = "got empty parameter set %r, function %s at %s:%d" % ( + argnames, self.function.__name__, fs, lineno) + mark = MARK_GEN.skip(reason=reason) + parameters.append(ParameterSet( + values=(NOTSET,) * len(argnames), + marks=[mark], + id=None, + )) if scope is None: scope = _find_parametrized_scope(argnames, self._arg2fixturedefs, indirect) - scopenum = scope2index( - scope, descr='call to {0}'.format(self.parametrize)) + scopenum = scope2index(scope, descr='call to {0}'.format(self.parametrize)) valtypes = {} for arg in argnames: if arg not in self.fixturenames: @@ -823,7 +805,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): name = 'fixture' if indirect else 'argument' raise ValueError( "%r uses no %s %r" % ( - self.function, name, arg)) + self.function, name, arg)) if indirect is True: valtypes = dict.fromkeys(argnames, "params") @@ -841,22 +823,26 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): idfn = ids ids = None if ids: - if len(ids) != len(argvalues): - raise ValueError('%d tests specified with %d ids' %( - len(argvalues), len(ids))) + if len(ids) != len(parameters): + raise ValueError('%d tests specified with %d ids' % ( + len(parameters), len(ids))) for id_value in ids: if id_value is not None and not isinstance(id_value, py.builtin._basestring): msg = 'ids must be list of strings, found: %s (type: %s)' raise ValueError(msg % (saferepr(id_value), type(id_value).__name__)) - ids = idmaker(argnames, argvalues, idfn, ids, self.config) + ids = idmaker(argnames, parameters, idfn, ids, self.config) newcalls = [] for callspec in self._calls or [CallSpec2(self)]: - elements = zip(ids, argvalues, newkeywords, count()) - for a_id, valset, keywords, param_index in elements: - assert len(valset) == len(argnames) + elements = zip(ids, parameters, count()) + for a_id, param, param_index in elements: + if len(param.values) != len(argnames): + raise ValueError( + 'In "parametrize" the number of values ({0}) must be ' + 'equal to the number of names ({1})'.format( + param.values, argnames)) newcallspec = callspec.copy(self) - newcallspec.setmulti(valtypes, argnames, valset, a_id, - keywords, scopenum, param_index) + newcallspec.setmulti(valtypes, argnames, param.values, a_id, + param.deprecated_arg_dict, scopenum, param_index) newcalls.append(newcallspec) self._calls = newcalls @@ -880,7 +866,7 @@ class Metafunc(fixtures.FuncargnamesCompatAttr): if 
funcargs is not None: for name in funcargs: if name not in self.fixturenames: - pytest.fail("funcarg %r not used in this function." % name) + fail("funcarg %r not used in this function." % name) else: funcargs = {} if id is None: @@ -910,7 +896,7 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): from _pytest.fixtures import scopes indirect_as_list = isinstance(indirect, (list, tuple)) all_arguments_are_fixtures = indirect is True or \ - indirect_as_list and len(indirect) == argnames + indirect_as_list and len(indirect) == argnames if all_arguments_are_fixtures: fixturedefs = arg2fixturedefs or {} used_scopes = [fixturedef[0].scope for name, fixturedef in fixturedefs.items()] @@ -925,41 +911,51 @@ def _find_parametrized_scope(argnames, arg2fixturedefs, indirect): def _idval(val, argname, idx, idfn, config=None): if idfn: + s = None try: s = idfn(val) - if s: - return _escape_strings(s) except Exception: - pass + # See issue https://github.com/pytest-dev/pytest/issues/2169 + import warnings + msg = "Raised while trying to determine id of parameter %s at position %d." % (argname, idx) + msg += '\nUpdate your code as this will raise an error in pytest-4.0.' + warnings.warn(msg, DeprecationWarning) + if s: + return _ascii_escaped(s) if config: - hook_id = config.hook.pytest_make_parametrize_id(config=config, val=val) + hook_id = config.hook.pytest_make_parametrize_id( + config=config, val=val, argname=argname) if hook_id: return hook_id if isinstance(val, STRING_TYPES): - return _escape_strings(val) + return _ascii_escaped(val) elif isinstance(val, (float, int, bool, NoneType)): return str(val) elif isinstance(val, REGEX_TYPE): - return _escape_strings(val.pattern) + return _ascii_escaped(val.pattern) elif enum is not None and isinstance(val, enum.Enum): return str(val) elif isclass(val) and hasattr(val, '__name__'): return val.__name__ - return str(argname)+str(idx) + return str(argname) + str(idx) -def _idvalset(idx, valset, argnames, idfn, ids, config=None): + +def _idvalset(idx, parameterset, argnames, idfn, ids, config=None): + if parameterset.id is not None: + return parameterset.id if ids is None or (idx >= len(ids) or ids[idx] is None): this_id = [_idval(val, argname, idx, idfn, config) - for val, argname in zip(valset, argnames)] + for val, argname in zip(parameterset.values, argnames)] return "-".join(this_id) else: - return _escape_strings(ids[idx]) + return _ascii_escaped(ids[idx]) -def idmaker(argnames, argvalues, idfn=None, ids=None, config=None): - ids = [_idvalset(valindex, valset, argnames, idfn, ids, config) - for valindex, valset in enumerate(argvalues)] + +def idmaker(argnames, parametersets, idfn=None, ids=None, config=None): + ids = [_idvalset(valindex, parameterset, argnames, idfn, ids, config) + for valindex, parameterset in enumerate(parametersets)] if len(set(ids)) != len(ids): # The ids are not unique duplicates = [testid for testid in ids if ids.count(testid) > 1] @@ -983,58 +979,55 @@ def _show_fixtures_per_test(config, session): tw = _pytest.config.create_terminal_writer(config) verbose = config.getvalue("verbose") - def get_best_rel(func): + def get_best_relpath(func): loc = getlocation(func, curdir) return curdir.bestrelpath(loc) def write_fixture(fixture_def): argname = fixture_def.argname - if verbose <= 0 and argname.startswith("_"): return if verbose > 0: - bestrel = get_best_rel(fixture_def.func) + bestrel = get_best_relpath(fixture_def.func) funcargspec = "{0} -- {1}".format(argname, bestrel) else: funcargspec = argname 
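For reference, a sketch of the ids the machinery above generates for a plain parametrization; per `_idval`/`_idvalset`, numbers render via `str()` and the values of one set are joined with `-`:

import pytest

@pytest.mark.parametrize('value, expected', [(1, 2), (5, 6)])
def test_increment(value, expected):
    assert value + 1 == expected

# collected as test_increment[1-2] and test_increment[5-6];
# an explicit ids= list or an idfn callable overrides these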
tw.line(funcargspec, green=True) - - INDENT = ' {0}' fixture_doc = fixture_def.func.__doc__ - if fixture_doc: - for line in fixture_doc.strip().split('\n'): - tw.line(INDENT.format(line.strip())) + write_docstring(tw, fixture_doc) else: - tw.line(INDENT.format('no docstring available'), red=True) + tw.line(' no docstring available', red=True) def write_item(item): - name2fixturedefs = item._fixtureinfo.name2fixturedefs - - if not name2fixturedefs: - # The given test item does not use any fixtures + try: + info = item._fixtureinfo + except AttributeError: + # doctests items have no _fixtureinfo attribute + return + if not info.name2fixturedefs: + # this test item does not use any fixtures return - bestrel = get_best_rel(item.function) - tw.line() tw.sep('-', 'fixtures used by {0}'.format(item.name)) - tw.sep('-', '({0})'.format(bestrel)) - for argname, fixture_defs in sorted(name2fixturedefs.items()): - assert fixture_defs is not None - if not fixture_defs: + tw.sep('-', '({0})'.format(get_best_relpath(item.function))) + # dict key not used in loop but needed for sorting + for _, fixturedefs in sorted(info.name2fixturedefs.items()): + assert fixturedefs is not None + if not fixturedefs: continue - # The last fixture def item in the list is expected - # to be the one used by the test item - write_fixture(fixture_defs[-1]) + # last item is expected to be the one used by the test item + write_fixture(fixturedefs[-1]) - for item in session.items: - write_item(item) + for session_item in session.items: + write_item(session_item) def showfixtures(config): from _pytest.main import wrap_session return wrap_session(config, _showfixtures_main) + def _showfixtures_main(config, session): import _pytest.config session.perform_collect() @@ -1067,444 +1060,46 @@ def _showfixtures_main(config, session): if currentmodule != module: if not module.startswith("_pytest."): tw.line() - tw.sep("-", "fixtures defined from %s" %(module,)) + tw.sep("-", "fixtures defined from %s" % (module,)) currentmodule = module if verbose <= 0 and argname[0] == "_": continue if verbose > 0: - funcargspec = "%s -- %s" %(argname, bestrel,) + funcargspec = "%s -- %s" % (argname, bestrel,) else: funcargspec = argname tw.line(funcargspec, green=True) loc = getlocation(fixturedef.func, curdir) doc = fixturedef.func.__doc__ or "" if doc: - for line in doc.strip().split("\n"): - tw.line(" " + line.strip()) + write_docstring(tw, doc) else: - tw.line(" %s: no docstring available" %(loc,), - red=True) + tw.line(" %s: no docstring available" % (loc,), + red=True) -# builtin pytest.raises helper - -def raises(expected_exception, *args, **kwargs): - """ - Assert that a code block/function call raises ``expected_exception`` - and raise a failure exception otherwise. - - This helper produces a ``ExceptionInfo()`` object (see below). - - If using Python 2.5 or above, you may use this function as a - context manager:: - - >>> with raises(ZeroDivisionError): - ... 1/0 - - .. versionchanged:: 2.10 - - In the context manager form you may use the keyword argument - ``message`` to specify a custom failure message:: - - >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"): - ... pass - Traceback (most recent call last): - ... - Failed: Expecting ZeroDivisionError - - - .. note:: - - When using ``pytest.raises`` as a context manager, it's worthwhile to - note that normal context manager rules apply and that the exception - raised *must* be the final line in the scope of the context manager. 
- Lines of code after that, within the scope of the context manager will - not be executed. For example:: - - >>> value = 15 - >>> with raises(ValueError) as exc_info: - ... if value > 10: - ... raise ValueError("value must be <= 10") - ... assert str(exc_info.value) == "value must be <= 10" # this will not execute - - Instead, the following approach must be taken (note the difference in - scope):: - - >>> with raises(ValueError) as exc_info: - ... if value > 10: - ... raise ValueError("value must be <= 10") - ... - >>> assert str(exc_info.value) == "value must be <= 10" - - - Or you can specify a callable by passing a to-be-called lambda:: - - >>> raises(ZeroDivisionError, lambda: 1/0) - - - or you can specify an arbitrary callable with arguments:: - - >>> def f(x): return 1/x - ... - >>> raises(ZeroDivisionError, f, 0) - - >>> raises(ZeroDivisionError, f, x=0) - - - A third possibility is to use a string to be executed:: - - >>> raises(ZeroDivisionError, "f(0)") - - - .. autoclass:: _pytest._code.ExceptionInfo - :members: - - .. note:: - Similar to caught exception objects in Python, explicitly clearing - local references to returned ``ExceptionInfo`` objects can - help the Python interpreter speed up its garbage collection. - - Clearing those references breaks a reference cycle - (``ExceptionInfo`` --> caught exception --> frame stack raising - the exception --> current frame stack --> local variables --> - ``ExceptionInfo``) which makes Python keep all objects referenced - from that cycle (including all local variables in the current - frame) alive until the next cyclic garbage collection run. See the - official Python ``try`` statement documentation for more detailed - information. - - """ - __tracebackhide__ = True - if expected_exception is AssertionError: - # we want to catch a AssertionError - # replace our subclass with the builtin one - # see https://github.com/pytest-dev/pytest/issues/176 - from _pytest.assertion.util import BuiltinAssertionError \ - as expected_exception - msg = ("exceptions must be old-style classes or" - " derived from BaseException, not %s") - if isinstance(expected_exception, tuple): - for exc in expected_exception: - if not isclass(exc): - raise TypeError(msg % type(exc)) - elif not isclass(expected_exception): - raise TypeError(msg % type(expected_exception)) - - message = "DID NOT RAISE {0}".format(expected_exception) - - if not args: - if "message" in kwargs: - message = kwargs.pop("message") - return RaisesContext(expected_exception, message) - elif isinstance(args[0], str): - code, = args - assert isinstance(code, str) - frame = sys._getframe(1) - loc = frame.f_locals.copy() - loc.update(kwargs) - #print "raises frame scope: %r" % frame.f_locals - try: - code = _pytest._code.Source(code).compile() - py.builtin.exec_(code, frame.f_globals, loc) - # XXX didn'T mean f_globals == f_locals something special? - # this is destroyed here ... 
- except expected_exception: - return _pytest._code.ExceptionInfo() +def write_docstring(tw, doc): + INDENT = " " + doc = doc.rstrip() + if "\n" in doc: + firstline, rest = doc.split("\n", 1) else: - func = args[0] - try: - func(*args[1:], **kwargs) - except expected_exception: - return _pytest._code.ExceptionInfo() - pytest.fail(message) + firstline, rest = doc, "" -class RaisesContext(object): - def __init__(self, expected_exception, message): - self.expected_exception = expected_exception - self.message = message - self.excinfo = None + if firstline.strip(): + tw.line(INDENT + firstline.strip()) - def __enter__(self): - self.excinfo = object.__new__(_pytest._code.ExceptionInfo) - return self.excinfo - - def __exit__(self, *tp): - __tracebackhide__ = True - if tp[0] is None: - pytest.fail(self.message) - if sys.version_info < (2, 7): - # py26: on __exit__() exc_value often does not contain the - # exception value. - # http://bugs.python.org/issue7853 - if not isinstance(tp[1], BaseException): - exc_type, value, traceback = tp - tp = exc_type, exc_type(value), traceback - self.excinfo.__init__(tp) - suppress_exception = issubclass(self.excinfo.type, self.expected_exception) - if sys.version_info[0] == 2 and suppress_exception: - sys.exc_clear() - return suppress_exception + if rest: + for line in dedent(rest).split("\n"): + tw.write(INDENT + line + "\n") -# builtin pytest.approx helper - -class approx(object): - """ - Assert that two numbers (or two sets of numbers) are equal to each other - within some tolerance. - - Due to the `intricacies of floating-point arithmetic`__, numbers that we - would intuitively expect to be equal are not always so:: - - >>> 0.1 + 0.2 == 0.3 - False - - __ https://docs.python.org/3/tutorial/floatingpoint.html - - This problem is commonly encountered when writing tests, e.g. when making - sure that floating-point values are what you expect them to be. One way to - deal with this problem is to assert that two floating-point numbers are - equal to within some appropriate tolerance:: - - >>> abs((0.1 + 0.2) - 0.3) < 1e-6 - True - - However, comparisons like this are tedious to write and difficult to - understand. Furthermore, absolute comparisons like the one above are - usually discouraged because there's no tolerance that works well for all - situations. ``1e-6`` is good for numbers around ``1``, but too small for - very big numbers and too big for very small ones. It's better to express - the tolerance as a fraction of the expected value, but relative comparisons - like that are even more difficult to write correctly and concisely. - - The ``approx`` class performs floating-point comparisons using a syntax - that's as intuitive as possible:: - - >>> from pytest import approx - >>> 0.1 + 0.2 == approx(0.3) - True - - The same syntax also works on sequences of numbers:: - - >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6)) - True - - By default, ``approx`` considers numbers within a relative tolerance of - ``1e-6`` (i.e. one part in a million) of its expected value to be equal. - This treatment would lead to surprising results if the expected value was - ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``. - To handle this case less surprisingly, ``approx`` also considers numbers - within an absolute tolerance of ``1e-12`` of its expected value to be - equal. Infinite numbers are another special case. They are only - considered equal to themselves, regardless of the relative tolerance. 
Both - the relative and absolute tolerances can be changed by passing arguments to - the ``approx`` constructor:: - - >>> 1.0001 == approx(1) - False - >>> 1.0001 == approx(1, rel=1e-3) - True - >>> 1.0001 == approx(1, abs=1e-3) - True - - If you specify ``abs`` but not ``rel``, the comparison will not consider - the relative tolerance at all. In other words, two numbers that are within - the default relative tolerance of ``1e-6`` will still be considered unequal - if they exceed the specified absolute tolerance. If you specify both - ``abs`` and ``rel``, the numbers will be considered equal if either - tolerance is met:: - - >>> 1 + 1e-8 == approx(1) - True - >>> 1 + 1e-8 == approx(1, abs=1e-12) - False - >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12) - True - - If you're thinking about using ``approx``, then you might want to know how - it compares to other good ways of comparing floating-point numbers. All of - these algorithms are based on relative and absolute tolerances and should - agree for the most part, but they do have meaningful differences: - - - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``: True if the relative - tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute - tolerance is met. Because the relative tolerance is calculated w.r.t. - both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor - ``b`` is a "reference value"). You have to specify an absolute tolerance - if you want to compare to ``0.0`` because there is no tolerance by - default. Only available in python>=3.5. `More information...`__ - - __ https://docs.python.org/3/library/math.html#math.isclose - - - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference - between ``a`` and ``b`` is less that the sum of the relative tolerance - w.r.t. ``b`` and the absolute tolerance. Because the relative tolerance - is only calculated w.r.t. ``b``, this test is asymmetric and you can - think of ``b`` as the reference value. Support for comparing sequences - is provided by ``numpy.allclose``. `More information...`__ - - __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html - - - ``unittest.TestCase.assertAlmostEqual(a, b)``: True if ``a`` and ``b`` - are within an absolute tolerance of ``1e-7``. No relative tolerance is - considered and the absolute tolerance cannot be changed, so this function - is not appropriate for very large or very small numbers. Also, it's only - available in subclasses of ``unittest.TestCase`` and it's ugly because it - doesn't follow PEP8. `More information...`__ - - __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual - - - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``: True if the relative - tolerance is met w.r.t. ``b`` or if the absolute tolerance is met. - Because the relative tolerance is only calculated w.r.t. ``b``, this test - is asymmetric and you can think of ``b`` as the reference value. In the - special case that you explicitly specify an absolute tolerance but not a - relative tolerance, only the absolute tolerance is considered. 
- """ - - def __init__(self, expected, rel=None, abs=None): - self.expected = expected - self.abs = abs - self.rel = rel - - def __repr__(self): - return ', '.join(repr(x) for x in self.expected) - - def __eq__(self, actual): - from collections import Iterable - if not isinstance(actual, Iterable): - actual = [actual] - if len(actual) != len(self.expected): - return False - return all(a == x for a, x in zip(actual, self.expected)) - - __hash__ = None - - def __ne__(self, actual): - return not (actual == self) - - @property - def expected(self): - # Regardless of whether the user-specified expected value is a number - # or a sequence of numbers, return a list of ApproxNotIterable objects - # that can be compared against. - from collections import Iterable - approx_non_iter = lambda x: ApproxNonIterable(x, self.rel, self.abs) - if isinstance(self._expected, Iterable): - return [approx_non_iter(x) for x in self._expected] - else: - return [approx_non_iter(self._expected)] - - @expected.setter - def expected(self, expected): - self._expected = expected - - -class ApproxNonIterable(object): - """ - Perform approximate comparisons for single numbers only. - - In other words, the ``expected`` attribute for objects of this class must - be some sort of number. This is in contrast to the ``approx`` class, where - the ``expected`` attribute can either be a number of a sequence of numbers. - This class is responsible for making comparisons, while ``approx`` is - responsible for abstracting the difference between numbers and sequences of - numbers. Although this class can stand on its own, it's only meant to be - used within ``approx``. - """ - - def __init__(self, expected, rel=None, abs=None): - self.expected = expected - self.abs = abs - self.rel = rel - - def __repr__(self): - if isinstance(self.expected, complex): - return str(self.expected) - - # Infinities aren't compared using tolerances, so don't show a - # tolerance. - if math.isinf(self.expected): - return str(self.expected) - - # If a sensible tolerance can't be calculated, self.tolerance will - # raise a ValueError. In this case, display '???'. - try: - vetted_tolerance = '{:.1e}'.format(self.tolerance) - except ValueError: - vetted_tolerance = '???' - - if sys.version_info[0] == 2: - return '{0} +- {1}'.format(self.expected, vetted_tolerance) - else: - return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance) - - def __eq__(self, actual): - # Short-circuit exact equality. - if actual == self.expected: - return True - - # Infinity shouldn't be approximately equal to anything but itself, but - # if there's a relative tolerance, it will be infinite and infinity - # will seem approximately equal to everything. The equal-to-itself - # case would have been short circuited above, so here we can just - # return false if the expected value is infinite. The abs() call is - # for compatibility with complex numbers. - if math.isinf(abs(self.expected)): - return False - - # Return true if the two numbers are within the tolerance. - return abs(self.expected - actual) <= self.tolerance - - __hash__ = None - - def __ne__(self, actual): - return not (actual == self) - - @property - def tolerance(self): - set_default = lambda x, default: x if x is not None else default - - # Figure out what the absolute tolerance should be. ``self.abs`` is - # either None or a value specified by the user. 
-        absolute_tolerance = set_default(self.abs, 1e-12)
-
-        if absolute_tolerance < 0:
-            raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance))
-        if math.isnan(absolute_tolerance):
-            raise ValueError("absolute tolerance can't be NaN.")
-
-        # If the user specified an absolute tolerance but not a relative one,
-        # just return the absolute tolerance.
-        if self.rel is None:
-            if self.abs is not None:
-                return absolute_tolerance
-
-        # Figure out what the relative tolerance should be. ``self.rel`` is
-        # either None or a value specified by the user. This is done after
-        # we've made sure the user didn't ask for an absolute tolerance only,
-        # because we don't want to raise errors about the relative tolerance if
-        # we aren't even going to use it.
-        relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)
-
-        if relative_tolerance < 0:
-            raise ValueError("relative tolerance can't be negative: {}".format(absolute_tolerance))
-        if math.isnan(relative_tolerance):
-            raise ValueError("relative tolerance can't be NaN.")
-
-        # Return the larger of the relative and absolute tolerances.
-        return max(relative_tolerance, absolute_tolerance)
-
-
-#
-# the basic pytest Function item
-#
-
-class Function(FunctionMixin, pytest.Item, fixtures.FuncargnamesCompatAttr):
+class Function(FunctionMixin, main.Item, fixtures.FuncargnamesCompatAttr):
     """ a Function Item is responsible for setting up and executing a
     Python test function.
     """
     _genid = None
+
     def __init__(self, name, parent, args=None, config=None,
                  callspec=None, callobj=NOTSET, keywords=None, session=None,
                  fixtureinfo=None, originalname=None):
@@ -1556,7 +1151,7 @@ class Function(FunctionMixin, pytest.Item, fixtures.FuncargnamesCompatAttr):
 
     def _getobj(self):
         name = self.name
-        i = name.find("[") # parametrization
+        i = name.find("[")  # parametrization
         if i != -1:
             name = name[:i]
         return getattr(self.parent.obj, name)
diff --git a/_pytest/python_api.py b/_pytest/python_api.py
new file mode 100644
index 000000000..80684c131
--- /dev/null
+++ b/_pytest/python_api.py
@@ -0,0 +1,626 @@
+import math
+import sys
+
+import py
+
+from _pytest.compat import isclass, izip
+from _pytest.outcomes import fail
+import _pytest._code
+
+
+def _cmp_raises_type_error(self, other):
+    """__cmp__ implementation which raises TypeError. Used
+    by Approx base classes to implement only == and != and raise a
+    TypeError for other comparisons.
+
+    Needed in Python 2 only; in Python 3 it is enough simply not to
+    implement the other operators at all.
+    """
+    __tracebackhide__ = True
+    raise TypeError('Comparison operators other than == and != not supported by approx objects')
+
+
+# builtin pytest.approx helper
+
+
+class ApproxBase(object):
+    """
+    Provide shared utilities for making approximate comparisons between numbers
+    or sequences of numbers.
+    """
+
+    def __init__(self, expected, rel=None, abs=None, nan_ok=False):
+        self.expected = expected
+        self.abs = abs
+        self.rel = rel
+        self.nan_ok = nan_ok
+
+    def __repr__(self):
+        raise NotImplementedError
+
+    def __eq__(self, actual):
+        return all(
+            a == self._approx_scalar(x)
+            for a, x in self._yield_comparisons(actual))
+
+    __hash__ = None
+
+    def __ne__(self, actual):
+        return not (actual == self)
+
+    if sys.version_info[0] == 2:
+        __cmp__ = _cmp_raises_type_error
+
+    def _approx_scalar(self, x):
+        return ApproxScalar(x, rel=self.rel, abs=self.abs, nan_ok=self.nan_ok)
+
+    def _yield_comparisons(self, actual):
+        """
+        Yield all the pairs of numbers to be compared.  This is used to
+        implement the `__eq__` method.
+        """
+        raise NotImplementedError
+
+
+class ApproxNumpy(ApproxBase):
+    """
+    Perform approximate comparisons for numpy arrays.
+    """
+
+    # Tell numpy to use our `__eq__` operator instead of its own.
+    __array_priority__ = 100
+
+    def __repr__(self):
+        # It might be nice to rewrite this function to account for the
+        # shape of the array...
+        return "approx({0!r})".format(list(
+            self._approx_scalar(x) for x in self.expected))
+
+    if sys.version_info[0] == 2:
+        __cmp__ = _cmp_raises_type_error
+
+    def __eq__(self, actual):
+        import numpy as np
+
+        try:
+            actual = np.asarray(actual)
+        except:  # noqa
+            raise TypeError("cannot compare '{0}' to numpy.ndarray".format(actual))
+
+        if actual.shape != self.expected.shape:
+            return False
+
+        return ApproxBase.__eq__(self, actual)
+
+    def _yield_comparisons(self, actual):
+        import numpy as np
+
+        # We can be sure that `actual` is a numpy array, because it's
+        # cast in `__eq__` before being passed to `ApproxBase.__eq__`,
+        # which is the only method that calls this one.
+        for i in np.ndindex(self.expected.shape):
+            yield actual[i], self.expected[i]
+
+
+class ApproxMapping(ApproxBase):
+    """
+    Perform approximate comparisons for mappings where the values are numbers
+    (the keys can be anything).
+    """
+
+    def __repr__(self):
+        return "approx({0!r})".format(dict(
+            (k, self._approx_scalar(v))
+            for k, v in self.expected.items()))
+
+    def __eq__(self, actual):
+        if set(actual.keys()) != set(self.expected.keys()):
+            return False
+
+        return ApproxBase.__eq__(self, actual)
+
+    def _yield_comparisons(self, actual):
+        for k in self.expected.keys():
+            yield actual[k], self.expected[k]
+
+
+class ApproxSequence(ApproxBase):
+    """
+    Perform approximate comparisons for sequences of numbers.
+    """
+
+    # Tell numpy to use our `__eq__` operator instead of its own.
+    __array_priority__ = 100
+
+    def __repr__(self):
+        seq_type = type(self.expected)
+        if seq_type not in (tuple, list, set):
+            seq_type = list
+        return "approx({0!r})".format(seq_type(
+            self._approx_scalar(x) for x in self.expected))
+
+    def __eq__(self, actual):
+        if len(actual) != len(self.expected):
+            return False
+        return ApproxBase.__eq__(self, actual)
+
+    def _yield_comparisons(self, actual):
+        return izip(actual, self.expected)
+
+
+class ApproxScalar(ApproxBase):
+    """
+    Perform approximate comparisons for single numbers only.
+    """
+
+    def __repr__(self):
+        """
+        Return a string communicating both the expected value and the tolerance
+        for the comparison being made, e.g. '1.0 +- 1e-6'.  Use the unicode
+        plus/minus symbol if this is python3 (it's too hard to get right for
+        python2).
+        """
+        if isinstance(self.expected, complex):
+            return str(self.expected)
+
+        # Infinities aren't compared using tolerances, so don't show a
+        # tolerance.
+        if math.isinf(self.expected):
+            return str(self.expected)
+
+        # If a sensible tolerance can't be calculated, self.tolerance will
+        # raise a ValueError.  In this case, display '???'.
+        try:
+            vetted_tolerance = '{:.1e}'.format(self.tolerance)
+        except ValueError:
+            vetted_tolerance = '???'
+
+        if sys.version_info[0] == 2:
+            return '{0} +- {1}'.format(self.expected, vetted_tolerance)
+        else:
+            return u'{0} \u00b1 {1}'.format(self.expected, vetted_tolerance)
+
+    def __eq__(self, actual):
+        """
+        Return true if the given value is equal to the expected value within
+        the pre-specified tolerance.
+        """
+
+        # Short-circuit exact equality.
+        if actual == self.expected:
+            return True
+
+        # Allow the user to control whether NaNs are considered equal to each
+        # other or not.  The abs() calls are for compatibility with complex
+        # numbers.
+        if math.isnan(abs(self.expected)):
+            return self.nan_ok and math.isnan(abs(actual))
+
+        # Infinity shouldn't be approximately equal to anything but itself, but
+        # if there's a relative tolerance, it will be infinite and infinity
+        # will seem approximately equal to everything.  The equal-to-itself
+        # case would have been short circuited above, so here we can just
+        # return false if the expected value is infinite.  The abs() call is
+        # for compatibility with complex numbers.
+        if math.isinf(abs(self.expected)):
+            return False
+
+        # Return true if the two numbers are within the tolerance.
+        return abs(self.expected - actual) <= self.tolerance
+
+    __hash__ = None
+
+    @property
+    def tolerance(self):
+        """
+        Return the tolerance for the comparison.  This could be either an
+        absolute tolerance or a relative tolerance, depending on what the user
+        specified or which would be larger.
+        """
+        def set_default(x, default):
+            return x if x is not None else default
+
+        # Figure out what the absolute tolerance should be.  ``self.abs`` is
+        # either None or a value specified by the user.
+        absolute_tolerance = set_default(self.abs, 1e-12)
+
+        if absolute_tolerance < 0:
+            raise ValueError("absolute tolerance can't be negative: {}".format(absolute_tolerance))
+        if math.isnan(absolute_tolerance):
+            raise ValueError("absolute tolerance can't be NaN.")
+
+        # If the user specified an absolute tolerance but not a relative one,
+        # just return the absolute tolerance.
+        if self.rel is None:
+            if self.abs is not None:
+                return absolute_tolerance
+
+        # Figure out what the relative tolerance should be.  ``self.rel`` is
+        # either None or a value specified by the user.  This is done after
+        # we've made sure the user didn't ask for an absolute tolerance only,
+        # because we don't want to raise errors about the relative tolerance if
+        # we aren't even going to use it.
+        relative_tolerance = set_default(self.rel, 1e-6) * abs(self.expected)
+
+        if relative_tolerance < 0:
+            raise ValueError("relative tolerance can't be negative: {}".format(relative_tolerance))
+        if math.isnan(relative_tolerance):
+            raise ValueError("relative tolerance can't be NaN.")
+
+        # Return the larger of the relative and absolute tolerances.
+        return max(relative_tolerance, absolute_tolerance)
+
+
+def approx(expected, rel=None, abs=None, nan_ok=False):
+    """
+    Assert that two numbers (or two sets of numbers) are equal to each other
+    within some tolerance.
+
+    Due to the `intricacies of floating-point arithmetic`__, numbers that we
+    would intuitively expect to be equal are not always so::
+
+        >>> 0.1 + 0.2 == 0.3
+        False
+
+    __ https://docs.python.org/3/tutorial/floatingpoint.html
+
+    This problem is commonly encountered when writing tests, e.g. when making
+    sure that floating-point values are what you expect them to be.  One way to
+    deal with this problem is to assert that two floating-point numbers are
+    equal to within some appropriate tolerance::
+
+        >>> abs((0.1 + 0.2) - 0.3) < 1e-6
+        True
+
+    However, comparisons like this are tedious to write and difficult to
+    understand.  Furthermore, absolute comparisons like the one above are
+    usually discouraged because there's no tolerance that works well for all
+    situations.  ``1e-6`` is good for numbers around ``1``, but too small for
+    very big numbers and too big for very small ones.  It's better to express
+    the tolerance as a fraction of the expected value, but relative comparisons
+    like that are even more difficult to write correctly and concisely.
+
+    The ``approx`` class performs floating-point comparisons using a syntax
+    that's as intuitive as possible::
+
+        >>> from pytest import approx
+        >>> 0.1 + 0.2 == approx(0.3)
+        True
+
+    The same syntax also works for sequences of numbers::
+
+        >>> (0.1 + 0.2, 0.2 + 0.4) == approx((0.3, 0.6))
+        True
+
+    Dictionary *values*::
+
+        >>> {'a': 0.1 + 0.2, 'b': 0.2 + 0.4} == approx({'a': 0.3, 'b': 0.6})
+        True
+
+    And ``numpy`` arrays::
+
+        >>> import numpy as np  # doctest: +SKIP
+        >>> np.array([0.1, 0.2]) + np.array([0.2, 0.4]) == approx(np.array([0.3, 0.6]))  # doctest: +SKIP
+        True
+
+    By default, ``approx`` considers numbers within a relative tolerance of
+    ``1e-6`` (i.e. one part in a million) of its expected value to be equal.
+    This treatment would lead to surprising results if the expected value was
+    ``0.0``, because nothing but ``0.0`` itself is relatively close to ``0.0``.
+    To handle this case less surprisingly, ``approx`` also considers numbers
+    within an absolute tolerance of ``1e-12`` of its expected value to be
+    equal.  Infinity and NaN are special cases.  Infinity is only considered
+    equal to itself, regardless of the relative tolerance.  NaN is not
+    considered equal to anything by default, but you can make it be equal to
+    itself by setting the ``nan_ok`` argument to True.  (This is meant to
+    facilitate comparing arrays that use NaN to mean "no data".)
+
+    Both the relative and absolute tolerances can be changed by passing
+    arguments to the ``approx`` constructor::
+
+        >>> 1.0001 == approx(1)
+        False
+        >>> 1.0001 == approx(1, rel=1e-3)
+        True
+        >>> 1.0001 == approx(1, abs=1e-3)
+        True
+
+    If you specify ``abs`` but not ``rel``, the comparison will not consider
+    the relative tolerance at all.  In other words, two numbers that are within
+    the default relative tolerance of ``1e-6`` will still be considered unequal
+    if they exceed the specified absolute tolerance.  If you specify both
+    ``abs`` and ``rel``, the numbers will be considered equal if either
+    tolerance is met::
+
+        >>> 1 + 1e-8 == approx(1)
+        True
+        >>> 1 + 1e-8 == approx(1, abs=1e-12)
+        False
+        >>> 1 + 1e-8 == approx(1, rel=1e-6, abs=1e-12)
+        True
+
+    If you're thinking about using ``approx``, then you might want to know how
+    it compares to other good ways of comparing floating-point numbers.  All of
+    these algorithms are based on relative and absolute tolerances and should
+    agree for the most part, but they do have meaningful differences:
+
+    - ``math.isclose(a, b, rel_tol=1e-9, abs_tol=0.0)``:  True if the relative
+      tolerance is met w.r.t. either ``a`` or ``b`` or if the absolute
+      tolerance is met.  Because the relative tolerance is calculated w.r.t.
+      both ``a`` and ``b``, this test is symmetric (i.e. neither ``a`` nor
+      ``b`` is a "reference value").  You have to specify an absolute tolerance
+      if you want to compare to ``0.0`` because there is no tolerance by
+      default.  Only available in python>=3.5.  `More information...`__
+
+      __ https://docs.python.org/3/library/math.html#math.isclose
+
+    - ``numpy.isclose(a, b, rtol=1e-5, atol=1e-8)``: True if the difference
+      between ``a`` and ``b`` is less than the sum of the relative tolerance
+      w.r.t. ``b`` and the absolute tolerance.  Because the relative tolerance
+      is only calculated w.r.t. ``b``, this test is asymmetric and you can
+      think of ``b`` as the reference value.  Support for comparing sequences
+      is provided by ``numpy.allclose``.  `More information...`__
+
+      __ http://docs.scipy.org/doc/numpy-1.10.0/reference/generated/numpy.isclose.html
+
+    - ``unittest.TestCase.assertAlmostEqual(a, b)``:  True if ``a`` and ``b``
+      are within an absolute tolerance of ``1e-7``.  No relative tolerance is
+      considered and the absolute tolerance cannot be changed, so this function
+      is not appropriate for very large or very small numbers.  Also, it's only
+      available in subclasses of ``unittest.TestCase`` and it's ugly because it
+      doesn't follow PEP8.  `More information...`__
+
+      __ https://docs.python.org/3/library/unittest.html#unittest.TestCase.assertAlmostEqual
+
+    - ``a == pytest.approx(b, rel=1e-6, abs=1e-12)``:  True if the relative
+      tolerance is met w.r.t. ``b`` or if the absolute tolerance is met.
+      Because the relative tolerance is only calculated w.r.t. ``b``, this test
+      is asymmetric and you can think of ``b`` as the reference value.  In the
+      special case that you explicitly specify an absolute tolerance but not a
+      relative tolerance, only the absolute tolerance is considered.
+
+    .. warning::
+
+       .. versionchanged:: 3.2
+
+       In order to avoid inconsistent behavior, ``TypeError`` is
+       raised for ``>``, ``>=``, ``<`` and ``<=`` comparisons.
+       The example below illustrates the problem::
+
+           assert approx(0.1) > 0.1 + 1e-10  # calls approx(0.1).__gt__(0.1 + 1e-10)
+           assert 0.1 + 1e-10 > approx(0.1)  # calls approx(0.1).__lt__(0.1 + 1e-10)
+
+       In the second example one expects ``approx(0.1).__le__(0.1 + 1e-10)``
+       to be called.  But instead, ``approx(0.1).__lt__(0.1 + 1e-10)`` is used
+       for the comparison.  This is because the call hierarchy of rich comparisons
+       follows a fixed behavior.  `More information...`__
+
+       __ https://docs.python.org/3/reference/datamodel.html#object.__ge__
+    """
+
+    from collections import Mapping, Sequence
+    from _pytest.compat import STRING_TYPES as String
+
+    # Delegate the comparison to a class that knows how to deal with the type
+    # of the expected value (e.g. int, float, list, dict, numpy.array, etc).
+    #
+    # This architecture is really driven by the need to support numpy arrays.
+    # The only way to override `==` for arrays without requiring that approx be
+    # the left operand is to inherit the approx object from `numpy.ndarray`.
+    # But that can't be a general solution, because it requires (1) numpy to be
+    # installed and (2) the expected value to be a numpy array.  So the general
+    # solution is to delegate each type of expected value to a different class.
+    #
+    # This has the advantage that it made it easy to support mapping types
+    # (i.e. dict).  The old code accepted mapping types, but would only compare
+    # their keys, which is probably not what most people would expect.
+
+    if _is_numpy_array(expected):
+        cls = ApproxNumpy
+    elif isinstance(expected, Mapping):
+        cls = ApproxMapping
+    elif isinstance(expected, Sequence) and not isinstance(expected, String):
+        cls = ApproxSequence
+    else:
+        cls = ApproxScalar
+
+    return cls(expected, rel, abs, nan_ok)
+
+
+def _is_numpy_array(obj):
+    """
+    Return true if the given object is a numpy array.  Make a special effort to
+    avoid importing numpy unless it's really necessary.
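+
+    For instance (an illustrative doctest, not part of the public API; the
+    plain-list case returns without ever importing numpy, since no class in
+    the list's MRO comes from the ``numpy`` module)::
+
+        >>> _is_numpy_array([0.1, 0.2])
+        False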
+    """
+    import inspect
+
+    for cls in inspect.getmro(type(obj)):
+        if cls.__module__ == 'numpy':
+            try:
+                import numpy as np
+                return isinstance(obj, np.ndarray)
+            except ImportError:
+                pass
+
+    return False
+
+
+# builtin pytest.raises helper
+
+def raises(expected_exception, *args, **kwargs):
+    """
+    Assert that a code block/function call raises ``expected_exception``
+    and raise a failure exception otherwise.
+
+    This helper produces an ``ExceptionInfo()`` object (see below).
+
+    If using Python 2.5 or above, you may use this function as a
+    context manager::
+
+        >>> with raises(ZeroDivisionError):
+        ...    1/0
+
+    .. versionchanged:: 2.10
+
+    In the context manager form you may use the keyword argument
+    ``message`` to specify a custom failure message::
+
+        >>> with raises(ZeroDivisionError, message="Expecting ZeroDivisionError"):
+        ...    pass
+        Traceback (most recent call last):
+          ...
+        Failed: Expecting ZeroDivisionError
+
+    .. note::
+
+       When using ``pytest.raises`` as a context manager, it's worthwhile to
+       note that normal context manager rules apply and that the exception
+       raised *must* be the final line in the scope of the context manager.
+       Lines of code after that, within the scope of the context manager will
+       not be executed.  For example::
+
+           >>> value = 15
+           >>> with raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...     assert exc_info.type == ValueError  # this will not execute
+
+       Instead, the following approach must be taken (note the difference in
+       scope)::
+
+           >>> with raises(ValueError) as exc_info:
+           ...     if value > 10:
+           ...         raise ValueError("value must be <= 10")
+           ...
+           >>> assert exc_info.type == ValueError
+
+
+    Since version ``3.1`` you can use the keyword argument ``match`` to assert that the
+    exception matches a text or regex::
+
+        >>> with raises(ValueError, match='must be 0 or None'):
+        ...     raise ValueError("value must be 0 or None")
+
+        >>> with raises(ValueError, match=r'must be \d+$'):
+        ...     raise ValueError("value must be 42")
+
+    **Legacy forms**
+
+    The forms below are fully supported but are discouraged for new code because the
+    context manager form is regarded as more readable and less error-prone.
+
+    It is possible to specify a callable by passing a to-be-called lambda::
+
+        >>> raises(ZeroDivisionError, lambda: 1/0)
+        <ExceptionInfo ...>
+
+    or you can specify an arbitrary callable with arguments::
+
+        >>> def f(x): return 1/x
+        ...
+        >>> raises(ZeroDivisionError, f, 0)
+        <ExceptionInfo ...>
+        >>> raises(ZeroDivisionError, f, x=0)
+        <ExceptionInfo ...>
+
+    It is also possible to pass a string to be evaluated at runtime::
+
+        >>> raises(ZeroDivisionError, "f(0)")
+        <ExceptionInfo ...>
+
+    The string will be evaluated using the same ``locals()`` and ``globals()``
+    at the moment of the ``raises`` call.
+
+    .. autoclass:: _pytest._code.ExceptionInfo
+       :members:
+
+    .. note::
+        Similar to caught exception objects in Python, explicitly clearing
+        local references to returned ``ExceptionInfo`` objects can
+        help the Python interpreter speed up its garbage collection.
+
+        Clearing those references breaks a reference cycle
+        (``ExceptionInfo`` --> caught exception --> frame stack raising
+        the exception --> current frame stack --> local variables -->
+        ``ExceptionInfo``) which makes Python keep all objects referenced
+        from that cycle (including all local variables in the current
+        frame) alive until the next cyclic garbage collection run.  See the
+        official Python ``try`` statement documentation for more detailed
+        information.
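+
+        A minimal sketch of clearing such a reference explicitly (the names
+        and message here are illustrative, not taken from pytest's own test
+        suite)::
+
+            with raises(ValueError) as exc_info:
+                raise ValueError("boom")
+            assert "boom" in str(exc_info.value)
+            del exc_info  # drop the reference once it is no longer needed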
+
+    """
+    __tracebackhide__ = True
+    msg = ("exceptions must be old-style classes or"
+           " derived from BaseException, not %s")
+    if isinstance(expected_exception, tuple):
+        for exc in expected_exception:
+            if not isclass(exc):
+                raise TypeError(msg % type(exc))
+    elif not isclass(expected_exception):
+        raise TypeError(msg % type(expected_exception))
+
+    message = "DID NOT RAISE {0}".format(expected_exception)
+    match_expr = None
+
+    if not args:
+        if "message" in kwargs:
+            message = kwargs.pop("message")
+        if "match" in kwargs:
+            match_expr = kwargs.pop("match")
+            message += " matching '{0}'".format(match_expr)
+        return RaisesContext(expected_exception, message, match_expr)
+    elif isinstance(args[0], str):
+        code, = args
+        assert isinstance(code, str)
+        frame = sys._getframe(1)
+        loc = frame.f_locals.copy()
+        loc.update(kwargs)
+        # print "raises frame scope: %r" % frame.f_locals
+        try:
+            code = _pytest._code.Source(code).compile()
+            py.builtin.exec_(code, frame.f_globals, loc)
+            # XXX didn't mean f_globals == f_locals something special?
+            #     this is destroyed here ...
+        except expected_exception:
+            return _pytest._code.ExceptionInfo()
+    else:
+        func = args[0]
+        try:
+            func(*args[1:], **kwargs)
+        except expected_exception:
+            return _pytest._code.ExceptionInfo()
+    fail(message)
+
+
+raises.Exception = fail.Exception
+
+
+class RaisesContext(object):
+    def __init__(self, expected_exception, message, match_expr):
+        self.expected_exception = expected_exception
+        self.message = message
+        self.match_expr = match_expr
+        self.excinfo = None
+
+    def __enter__(self):
+        self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
+        return self.excinfo
+
+    def __exit__(self, *tp):
+        __tracebackhide__ = True
+        if tp[0] is None:
+            fail(self.message)
+        if sys.version_info < (2, 7):
+            # py26: on __exit__() exc_value often does not contain the
+            # exception value.
+            # http://bugs.python.org/issue7853
+            if not isinstance(tp[1], BaseException):
+                exc_type, value, traceback = tp
+                tp = exc_type, exc_type(value), traceback
+        self.excinfo.__init__(tp)
+        suppress_exception = issubclass(self.excinfo.type, self.expected_exception)
+        if sys.version_info[0] == 2 and suppress_exception:
+            sys.exc_clear()
+        if self.match_expr:
+            self.excinfo.match(self.match_expr)
+        return suppress_exception
diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py
index 87823bfbc..c9fa872c0 100644
--- a/_pytest/recwarn.py
+++ b/_pytest/recwarn.py
@@ -1,4 +1,5 @@
 """ recording warnings during test function execution. """
+from __future__ import absolute_import, division, print_function
 
 import inspect
 
@@ -6,11 +7,13 @@ import _pytest._code
 import py
 import sys
 import warnings
-import pytest
+
+from _pytest.fixtures import yield_fixture
+from _pytest.outcomes import fail
 
 
-@pytest.yield_fixture
-def recwarn(request):
+@yield_fixture
+def recwarn():
     """Return a WarningsRecorder instance that provides these methods:
 
     * ``pop(category=None)``: return last warning matching the category.
@@ -25,16 +28,9 @@ def recwarn(request):
     yield wrec
 
 
-def pytest_namespace():
-    return {'deprecated_call': deprecated_call,
-            'warns': warns}
-
-
 def deprecated_call(func=None, *args, **kwargs):
-    """ assert that calling ``func(*args, **kwargs)`` triggers a
-    ``DeprecationWarning`` or ``PendingDeprecationWarning``.
-
-    This function can be used as a context manager::
+    """context manager that can be used to ensure a block of code triggers a
+    ``DeprecationWarning`` or ``PendingDeprecationWarning``::
 
         >>> import warnings
         >>> def api_call_v2():
@@ -44,40 +40,47 @@ def deprecated_call(func=None, *args, **kwargs):
         >>> with deprecated_call():
         ...    assert api_call_v2() == 200
 
-    Note: we cannot use WarningsRecorder here because it is still subject
-    to the mechanism that prevents warnings of the same type from being
-    triggered twice for the same module. See #1190.
+    ``deprecated_call`` can also be used by passing a function and ``*args`` and ``**kwargs``,
+    in which case it will ensure calling ``func(*args, **kwargs)`` produces one of the warning
+    types above.
    """
    if not func:
-        return WarningsChecker(expected_warning=DeprecationWarning)
-
-    categories = []
-
-    def warn_explicit(message, category, *args, **kwargs):
-        categories.append(category)
-        old_warn_explicit(message, category, *args, **kwargs)
-
-    def warn(message, category=None, *args, **kwargs):
-        if isinstance(message, Warning):
-            categories.append(message.__class__)
-        else:
-            categories.append(category)
-        old_warn(message, category, *args, **kwargs)
-
-    old_warn = warnings.warn
-    old_warn_explicit = warnings.warn_explicit
-    warnings.warn_explicit = warn_explicit
-    warnings.warn = warn
-    try:
-        ret = func(*args, **kwargs)
-    finally:
-        warnings.warn_explicit = old_warn_explicit
-        warnings.warn = old_warn
-    deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
-    if not any(issubclass(c, deprecation_categories) for c in categories):
+        return _DeprecatedCallContext()
+    else:
        __tracebackhide__ = True
-        raise AssertionError("%r did not produce DeprecationWarning" % (func,))
-    return ret
+        with _DeprecatedCallContext():
+            return func(*args, **kwargs)
+
+
+class _DeprecatedCallContext(object):
+    """Implements the logic to capture deprecation warnings as a context manager."""
+
+    def __enter__(self):
+        self._captured_categories = []
+        self._old_warn = warnings.warn
+        self._old_warn_explicit = warnings.warn_explicit
+        warnings.warn_explicit = self._warn_explicit
+        warnings.warn = self._warn
+
+    def _warn_explicit(self, message, category, *args, **kwargs):
+        self._captured_categories.append(category)
+
+    def _warn(self, message, category=None, *args, **kwargs):
+        if isinstance(message, Warning):
+            self._captured_categories.append(message.__class__)
+        else:
+            self._captured_categories.append(category)
+
+    def __exit__(self, exc_type, exc_val, exc_tb):
+        warnings.warn_explicit = self._old_warn_explicit
+        warnings.warn = self._old_warn
+
+        if exc_type is None:
+            deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
+            if not any(issubclass(c, deprecation_categories) for c in self._captured_categories):
+                __tracebackhide__ = True
+                msg = "Did not produce DeprecationWarning or PendingDeprecationWarning"
+                raise AssertionError(msg)
 
 
 def warns(expected_warning, *args, **kwargs):
@@ -115,24 +118,14 @@ def warns(expected_warning, *args, **kwargs):
         return func(*args[1:], **kwargs)
 
 
-class RecordedWarning(object):
-    def __init__(self, message, category, filename, lineno, file, line):
-        self.message = message
-        self.category = category
-        self.filename = filename
-        self.lineno = lineno
-        self.file = file
-        self.line = line
-
-
-class WarningsRecorder(object):
+class WarningsRecorder(warnings.catch_warnings):
     """A context manager to record raised warnings.
 
     Adapted from `warnings.catch_warnings`.
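+
+    A minimal usage sketch (``pop`` is the helper described in the ``recwarn``
+    fixture docstring above; the warning text is illustrative)::
+
+        with WarningsRecorder() as rec:
+            warnings.warn("deprecated", DeprecationWarning)
+        assert rec.pop(DeprecationWarning).category is DeprecationWarning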
""" - def __init__(self, module=None): - self._module = sys.modules['warnings'] if module is None else module + def __init__(self): + super(WarningsRecorder, self).__init__(record=True) self._entered = False self._list = [] @@ -169,38 +162,20 @@ class WarningsRecorder(object): if self._entered: __tracebackhide__ = True raise RuntimeError("Cannot enter %r twice" % self) - self._entered = True - self._filters = self._module.filters - self._module.filters = self._filters[:] - self._showwarning = self._module.showwarning - - def showwarning(message, category, filename, lineno, - file=None, line=None): - self._list.append(RecordedWarning( - message, category, filename, lineno, file, line)) - - # still perform old showwarning functionality - self._showwarning( - message, category, filename, lineno, file=file, line=line) - - self._module.showwarning = showwarning - - # allow the same warning to be raised more than once - - self._module.simplefilter('always') + self._list = super(WarningsRecorder, self).__enter__() + warnings.simplefilter('always') return self def __exit__(self, *exc_info): if not self._entered: __tracebackhide__ = True raise RuntimeError("Cannot exit %r without entering first" % self) - self._module.filters = self._filters - self._module.showwarning = self._showwarning + super(WarningsRecorder, self).__exit__(*exc_info) class WarningsChecker(WarningsRecorder): - def __init__(self, expected_warning=None, module=None): - super(WarningsChecker, self).__init__(module=module) + def __init__(self, expected_warning=None): + super(WarningsChecker, self).__init__() msg = ("exceptions must be old-style classes or " "derived from Warning, not %s") @@ -221,6 +196,10 @@ class WarningsChecker(WarningsRecorder): # only check if we're not currently handling an exception if all(a is None for a in exc_info): if self.expected_warning is not None: - if not any(r.category in self.expected_warning for r in self): + if not any(issubclass(r.category, self.expected_warning) + for r in self): __tracebackhide__ = True - pytest.fail("DID NOT WARN") + fail("DID NOT WARN. No warnings of type {0} was emitted. " + "The list of emitted warnings is: {1}.".format( + self.expected_warning, + [each.message for each in self])) diff --git a/_pytest/resultlog.py b/_pytest/resultlog.py index fc0025983..9f9c2d1f6 100644 --- a/_pytest/resultlog.py +++ b/_pytest/resultlog.py @@ -1,15 +1,18 @@ """ log machine-parseable test session result information in a plain text file. 
""" +from __future__ import absolute_import, division, print_function import py import os + def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "resultlog plugin options") group.addoption('--resultlog', '--result-log', action="store", - metavar="path", default=None, - help="DEPRECATED path for machine-readable result log.") + metavar="path", default=None, + help="DEPRECATED path for machine-readable result log.") + def pytest_configure(config): resultlog = config.option.resultlog @@ -18,13 +21,14 @@ def pytest_configure(config): dirname = os.path.dirname(os.path.abspath(resultlog)) if not os.path.isdir(dirname): os.makedirs(dirname) - logfile = open(resultlog, 'w', 1) # line buffered + logfile = open(resultlog, 'w', 1) # line buffered config._resultlog = ResultLog(config, logfile) config.pluginmanager.register(config._resultlog) from _pytest.deprecated import RESULT_LOG config.warn('C1', RESULT_LOG) + def pytest_unconfigure(config): resultlog = getattr(config, '_resultlog', None) if resultlog: @@ -32,6 +36,7 @@ def pytest_unconfigure(config): del config._resultlog config.pluginmanager.unregister(resultlog) + def generic_path(item): chain = item.listchain() gpath = [chain[0].name] @@ -55,15 +60,16 @@ def generic_path(item): fspath = newfspath return ''.join(gpath) + class ResultLog(object): def __init__(self, config, logfile): self.config = config - self.logfile = logfile # preferably line buffered + self.logfile = logfile # preferably line buffered def write_log_entry(self, testpath, lettercode, longrepr): - py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile) + print("%s %s" % (lettercode, testpath), file=self.logfile) for line in longrepr.splitlines(): - py.builtin.print_(" %s" % line, file=self.logfile) + print(" %s" % line, file=self.logfile) def log_outcome(self, report, lettercode, longrepr): testpath = getattr(report, 'nodeid', None) diff --git a/_pytest/runner.py b/_pytest/runner.py index eb29e7370..b643fa3c9 100644 --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -1,29 +1,26 @@ """ basic collect and runtest protocol implementations """ +from __future__ import absolute_import, division, print_function + import bdb +import os import sys from time import time import py -import pytest +from _pytest.compat import _PY2 from _pytest._code.code import TerminalRepr, ExceptionInfo - - -def pytest_namespace(): - return { - 'fail' : fail, - 'skip' : skip, - 'importorskip' : importorskip, - 'exit' : exit, - } +from _pytest.outcomes import skip, Skipped, TEST_OUTCOME # # pytest plugin hooks + def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") group.addoption('--durations', - action="store", type=int, default=None, metavar="N", - help="show N slowest setup/test durations (N=0 for all)."), + action="store", type=int, default=None, metavar="N", + help="show N slowest setup/test durations (N=0 for all)."), + def pytest_terminal_summary(terminalreporter): durations = terminalreporter.config.option.durations @@ -48,16 +45,16 @@ def pytest_terminal_summary(terminalreporter): for rep in dlist: nodeid = rep.nodeid.replace("::()::", "::") tr.write_line("%02.2fs %-8s %s" % - (rep.duration, rep.when, nodeid)) + (rep.duration, rep.when, nodeid)) + def pytest_sessionstart(session): session._setupstate = SetupState() + + def pytest_sessionfinish(session): session._setupstate.teardown_all() -class NodeInfo: - def __init__(self, location): - self.location = location def pytest_runtest_protocol(item, nextitem): 
item.ihook.pytest_runtest_logstart( @@ -66,6 +63,7 @@ def pytest_runtest_protocol(item, nextitem): runtestprotocol(item, nextitem=nextitem) return True + def runtestprotocol(item, log=True, nextitem=None): hasrequest = hasattr(item, "_request") if hasrequest and not item._request: @@ -78,7 +76,7 @@ def runtestprotocol(item, log=True, nextitem=None): if not item.config.option.setuponly: reports.append(call_and_report(item, "call", log)) reports.append(call_and_report(item, "teardown", log, - nextitem=nextitem)) + nextitem=nextitem)) # after all teardown hooks have been called # want funcargs and request info to go away if hasrequest: @@ -86,6 +84,7 @@ def runtestprotocol(item, log=True, nextitem=None): item.funcargs = None return reports + def show_test_item(item): """Show test function, parameters and the fixtures of the test item.""" tw = item.config.get_terminal_writer() @@ -96,10 +95,14 @@ def show_test_item(item): if used_fixtures: tw.write(' (fixtures used: {0})'.format(', '.join(used_fixtures))) + def pytest_runtest_setup(item): + _update_current_test_var(item, 'setup') item.session._setupstate.prepare(item) + def pytest_runtest_call(item): + _update_current_test_var(item, 'call') try: item.runtest() except Exception: @@ -112,8 +115,29 @@ def pytest_runtest_call(item): del tb # Get rid of it in this namespace raise + def pytest_runtest_teardown(item, nextitem): + _update_current_test_var(item, 'teardown') item.session._setupstate.teardown_exact(item, nextitem) + _update_current_test_var(item, None) + + +def _update_current_test_var(item, when): + """ + Update PYTEST_CURRENT_TEST to reflect the current item and stage. + + If ``when`` is None, delete PYTEST_CURRENT_TEST from the environment. + """ + var_name = 'PYTEST_CURRENT_TEST' + if when: + value = '{0} ({1})'.format(item.nodeid, when) + if _PY2: + # python 2 doesn't like null bytes on environment variables (see #2644) + value = value.replace('\x00', '(null)') + os.environ[var_name] = value + else: + os.environ.pop(var_name) + def pytest_report_teststatus(report): if report.when in ("setup", "teardown"): @@ -139,21 +163,25 @@ def call_and_report(item, when, log=True, **kwds): hook.pytest_exception_interact(node=item, call=call, report=report) return report + def check_interactive_exception(call, report): return call.excinfo and not ( - hasattr(report, "wasxfail") or - call.excinfo.errisinstance(skip.Exception) or - call.excinfo.errisinstance(bdb.BdbQuit)) + hasattr(report, "wasxfail") or + call.excinfo.errisinstance(skip.Exception) or + call.excinfo.errisinstance(bdb.BdbQuit)) + def call_runtest_hook(item, when, **kwds): hookname = "pytest_runtest_" + when ihook = getattr(item.ihook, hookname) return CallInfo(lambda: ihook(item=item, **kwds), when=when) + class CallInfo: """ Result/Exception info a function invocation. """ #: None or ExceptionInfo object. 
     excinfo = None
+
     def __init__(self, func, when):
         #: context of invocation: one of "setup", "call",
         #: "teardown", "memocollect"
@@ -164,7 +192,7 @@ class CallInfo:
         except KeyboardInterrupt:
             self.stop = time()
             raise
-        except:
+        except:  # noqa
             self.excinfo = ExceptionInfo()
         self.stop = time()
@@ -175,6 +203,7 @@ class CallInfo:
             status = "result: %r" % (self.result,)
         return "<CallInfo when=%r %s>" % (self.when, status)
 
+
 def getslaveinfoline(node):
     try:
         return node._slaveinfocache
     except AttributeError:
         d = node.slaveinfo
         ver = "%s.%s.%s" % d['version_info'][:3]
         node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
             d['id'], d['sysplatform'], ver, d['executable'])
         return s
 
+
 class BaseReport(object):
 
     def __init__(self, **kw):
@@ -249,10 +279,11 @@ class BaseReport(object):
     def fspath(self):
         return self.nodeid.split("::")[0]
 
+
 def pytest_runtest_makereport(item, call):
     when = call.when
-    duration = call.stop-call.start
-    keywords = dict([(x,1) for x in item.keywords])
+    duration = call.stop - call.start
+    keywords = dict([(x, 1) for x in item.keywords])
     excinfo = call.excinfo
     sections = []
     if not call.excinfo:
@@ -262,7 +293,7 @@ def pytest_runtest_makereport(item, call):
         if not isinstance(excinfo, ExceptionInfo):
             outcome = "failed"
             longrepr = excinfo
-        elif excinfo.errisinstance(pytest.skip.Exception):
+        elif excinfo.errisinstance(skip.Exception):
             outcome = "skipped"
             r = excinfo._getreprcrash()
             longrepr = (str(r.path), r.lineno, r.message)
@@ -270,19 +301,21 @@ def pytest_runtest_makereport(item, call):
             outcome = "failed"
             if call.when == "call":
                 longrepr = item.repr_failure(excinfo)
-            else: # exception in setup or teardown
+            else:  # exception in setup or teardown
                 longrepr = item._repr_failure_py(excinfo,
-                                            style=item.config.option.tbstyle)
+                                                 style=item.config.option.tbstyle)
     for rwhen, key, content in item._report_sections:
-        sections.append(("Captured %s %s" %(key, rwhen), content))
+        sections.append(("Captured %s %s" % (key, rwhen), content))
     return TestReport(item.nodeid, item.location,
                       keywords, outcome, longrepr, when,
                       sections, duration)
 
+
 class TestReport(BaseReport):
     """ Basic test report object (also used for setup and teardown calls if
     they fail).
     """
+
     def __init__(self, nodeid, location, keywords, outcome,
                  longrepr, when, sections=(), duration=0, **extra):
         #: normalized collection node id
@@ -321,16 +354,21 @@ class TestReport(BaseReport):
         return "<TestReport %r when=%r outcome=%r>" % (
             self.nodeid, self.when, self.outcome)
 
+
 class TeardownErrorReport(BaseReport):
     outcome = "failed"
     when = "teardown"
+
     def __init__(self, longrepr, **extra):
         self.longrepr = longrepr
         self.sections = []
         self.__dict__.update(extra)
 
+
 def pytest_make_collect_report(collector):
-    call = CallInfo(collector._memocollect, "memocollect")
+    call = CallInfo(
+        lambda: list(collector.collect()),
+        'collect')
     longrepr = None
     if not call.excinfo:
         outcome = "passed"
@@ -348,7 +386,7 @@ def pytest_make_collect_report(collector):
         errorinfo = CollectErrorRepr(errorinfo)
         longrepr = errorinfo
     rep = CollectReport(collector.nodeid, outcome, longrepr,
-        getattr(call, 'result', None))
+                        getattr(call, 'result', None))
     rep.call = call  # see collect_one_node
     return rep
@@ -369,16 +407,20 @@ class CollectReport(BaseReport):
 
     def __repr__(self):
         return "<CollectReport %r lenresult=%s outcome=%r>" % (
-                self.nodeid, len(self.result), self.outcome)
+            self.nodeid, len(self.result), self.outcome)
+
 
 class CollectErrorRepr(TerminalRepr):
     def __init__(self, msg):
         self.longrepr = msg
+
     def toterminal(self, out):
         out.line(self.longrepr, red=True)
 
+
 class SetupState(object):
     """ shared state for setting up/tearing down test items or collectors.
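+
+    Finalizers registered via ``addfinalizer`` run last-in, first-out when the
+    corresponding node is torn down (``_pop_and_teardown`` below pops them off
+    a per-node list).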
""" + def __init__(self): self.stack = [] self._finalizers = {} @@ -390,7 +432,7 @@ class SetupState(object): """ assert colitem and not isinstance(colitem, tuple) assert py.builtin.callable(finalizer) - #assert colitem in self.stack # some unit tests don't setup stack :/ + # assert colitem in self.stack # some unit tests don't setup stack :/ self._finalizers.setdefault(colitem, []).append(finalizer) def _pop_and_teardown(self): @@ -404,7 +446,7 @@ class SetupState(object): fin = finalizers.pop() try: fin() - except Exception: + except TEST_OUTCOME: # XXX Only first exception will be seen by user, # ideally all should be reported. if exc is None: @@ -418,7 +460,7 @@ class SetupState(object): colitem.teardown() for colitem in self._finalizers: assert colitem is None or colitem in self.stack \ - or isinstance(colitem, tuple) + or isinstance(colitem, tuple) def teardown_all(self): while self.stack: @@ -451,10 +493,11 @@ class SetupState(object): self.stack.append(col) try: col.setup() - except Exception: + except TEST_OUTCOME: col._prepare_exc = sys.exc_info() raise + def collect_one_node(collector): ihook = collector.ihook ihook.pytest_collectstart(collector=collector) @@ -463,116 +506,3 @@ def collect_one_node(collector): if call and check_interactive_exception(call, rep): ihook.pytest_exception_interact(node=collector, call=call, report=rep) return rep - - -# ============================================================= -# Test OutcomeExceptions and helpers for creating them. - - -class OutcomeException(Exception): - """ OutcomeException and its subclass instances indicate and - contain info about test and collection outcomes. - """ - def __init__(self, msg=None, pytrace=True): - Exception.__init__(self, msg) - self.msg = msg - self.pytrace = pytrace - - def __repr__(self): - if self.msg: - val = self.msg - if isinstance(val, bytes): - val = py._builtin._totext(val, errors='replace') - return val - return "<%s instance>" %(self.__class__.__name__,) - __str__ = __repr__ - -class Skipped(OutcomeException): - # XXX hackish: on 3k we fake to live in the builtins - # in order to have Skipped exception printing shorter/nicer - __module__ = 'builtins' - - def __init__(self, msg=None, pytrace=True, allow_module_level=False): - OutcomeException.__init__(self, msg=msg, pytrace=pytrace) - self.allow_module_level = allow_module_level - - -class Failed(OutcomeException): - """ raised from an explicit call to pytest.fail() """ - __module__ = 'builtins' - - -class Exit(KeyboardInterrupt): - """ raised for immediate program exits (no tracebacks/summaries)""" - def __init__(self, msg="unknown reason"): - self.msg = msg - KeyboardInterrupt.__init__(self, msg) - -# exposed helper methods - -def exit(msg): - """ exit testing process as if KeyboardInterrupt was triggered. """ - __tracebackhide__ = True - raise Exit(msg) - - -exit.Exception = Exit - - -def skip(msg=""): - """ skip an executing test with the given message. Note: it's usually - better to use the pytest.mark.skipif marker to declare a test to be - skipped under certain conditions like mismatching platforms or - dependencies. See the pytest_skipping plugin for details. - """ - __tracebackhide__ = True - raise Skipped(msg=msg) - - -skip.Exception = Skipped - - -def fail(msg="", pytrace=True): - """ explicitly fail an currently-executing test with the given Message. - - :arg pytrace: if false the msg represents the full failure information - and no python traceback will be reported. 
- """ - __tracebackhide__ = True - raise Failed(msg=msg, pytrace=pytrace) - - -fail.Exception = Failed - - -def importorskip(modname, minversion=None): - """ return imported module if it has at least "minversion" as its - __version__ attribute. If no minversion is specified the a skip - is only triggered if the module can not be imported. - """ - __tracebackhide__ = True - compile(modname, '', 'eval') # to catch syntaxerrors - should_skip = False - try: - __import__(modname) - except ImportError: - # Do not raise chained exception here(#1485) - should_skip = True - if should_skip: - raise Skipped("could not import %r" %(modname,), allow_module_level=True) - mod = sys.modules[modname] - if minversion is None: - return mod - verattr = getattr(mod, '__version__', None) - if minversion is not None: - try: - from pkg_resources import parse_version as pv - except ImportError: - raise Skipped("we have a required version for %r but can not import " - "pkg_resources to parse version strings." % (modname,), - allow_module_level=True) - if verattr is None or pv(verattr) < pv(minversion): - raise Skipped("module %r has __version__ %r, required is: %r" %( - modname, verattr, minversion), allow_module_level=True) - return mod - diff --git a/_pytest/setuponly.py b/_pytest/setuponly.py index 1752c575f..15e195ad5 100644 --- a/_pytest/setuponly.py +++ b/_pytest/setuponly.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import, division, print_function + import pytest import sys diff --git a/_pytest/setupplan.py b/_pytest/setupplan.py index f0853dee5..e11bd4069 100644 --- a/_pytest/setupplan.py +++ b/_pytest/setupplan.py @@ -1,3 +1,5 @@ +from __future__ import absolute_import, division, print_function + import pytest diff --git a/_pytest/skipping.py b/_pytest/skipping.py index a8eaea98a..b92800d10 100644 --- a/_pytest/skipping.py +++ b/_pytest/skipping.py @@ -1,18 +1,21 @@ """ support for skip/xfail functions and markers. """ +from __future__ import absolute_import, division, print_function + import os import sys import traceback import py -import pytest +from _pytest.config import hookimpl from _pytest.mark import MarkInfo, MarkDecorator +from _pytest.outcomes import fail, skip, xfail, TEST_OUTCOME def pytest_addoption(parser): group = parser.getgroup("general") group.addoption('--runxfail', - action="store_true", dest="runxfail", default=False, - help="run tests even if they are marked xfail") + action="store_true", dest="runxfail", default=False, + help="run tests even if they are marked xfail") parser.addini("xfail_strict", "default for the strict parameter of xfail " "markers when not given explicitly (default: " @@ -23,53 +26,38 @@ def pytest_addoption(parser): def pytest_configure(config): if config.option.runxfail: + # yay a hack + import pytest old = pytest.xfail config._cleanup.append(lambda: setattr(pytest, "xfail", old)) def nop(*args, **kwargs): pass - nop.Exception = XFailed + nop.Exception = xfail.Exception setattr(pytest, "xfail", nop) config.addinivalue_line("markers", - "skip(reason=None): skip the given test function with an optional reason. " - "Example: skip(reason=\"no way of currently testing this\") skips the " - "test." - ) + "skip(reason=None): skip the given test function with an optional reason. " + "Example: skip(reason=\"no way of currently testing this\") skips the " + "test." + ) config.addinivalue_line("markers", - "skipif(condition): skip the given test function if eval(condition) " - "results in a True value. Evaluation happens within the " - "module global context. 
Example: skipif('sys.platform == \"win32\"') " - "skips the test if we are on the win32 platform. see " - "http://pytest.org/latest/skipping.html" - ) + "skipif(condition): skip the given test function if eval(condition) " + "results in a True value. Evaluation happens within the " + "module global context. Example: skipif('sys.platform == \"win32\"') " + "skips the test if we are on the win32 platform. see " + "http://pytest.org/latest/skipping.html" + ) config.addinivalue_line("markers", - "xfail(condition, reason=None, run=True, raises=None, strict=False): " - "mark the the test function as an expected failure if eval(condition) " - "has a True value. Optionally specify a reason for better reporting " - "and run=False if you don't even want to execute the test function. " - "If only specific exception(s) are expected, you can list them in " - "raises, and if the test fails in other ways, it will be reported as " - "a true failure. See http://pytest.org/latest/skipping.html" - ) - - -def pytest_namespace(): - return dict(xfail=xfail) - - -class XFailed(pytest.fail.Exception): - """ raised from an explicit call to pytest.xfail() """ - - -def xfail(reason=""): - """ xfail an executing test or setup functions with the given reason.""" - __tracebackhide__ = True - raise XFailed(reason) - - -xfail.Exception = XFailed + "xfail(condition, reason=None, run=True, raises=None, strict=False): " + "mark the test function as an expected failure if eval(condition) " + "has a True value. Optionally specify a reason for better reporting " + "and run=False if you don't even want to execute the test function. " + "If only specific exception(s) are expected, you can list them in " + "raises, and if the test fails in other ways, it will be reported as " + "a true failure. 
See http://pytest.org/latest/skipping.html" + ) class MarkEvaluator: @@ -97,51 +85,50 @@ class MarkEvaluator: def istrue(self): try: return self._istrue() - except Exception: + except TEST_OUTCOME: self.exc = sys.exc_info() if isinstance(self.exc[1], SyntaxError): - msg = [" " * (self.exc[1].offset + 4) + "^",] + msg = [" " * (self.exc[1].offset + 4) + "^", ] msg.append("SyntaxError: invalid syntax") else: msg = traceback.format_exception_only(*self.exc[:2]) - pytest.fail("Error evaluating %r expression\n" - " %s\n" - "%s" - %(self.name, self.expr, "\n".join(msg)), - pytrace=False) + fail("Error evaluating %r expression\n" + " %s\n" + "%s" + % (self.name, self.expr, "\n".join(msg)), + pytrace=False) def _getglobals(self): d = {'os': os, 'sys': sys, 'config': self.item.config} - d.update(self.item.obj.__globals__) + if hasattr(self.item, 'obj'): + d.update(self.item.obj.__globals__) return d def _istrue(self): if hasattr(self, 'result'): return self.result if self.holder: - d = self._getglobals() if self.holder.args or 'condition' in self.holder.kwargs: self.result = False # "holder" might be a MarkInfo or a MarkDecorator; only # MarkInfo keeps track of all parameters it received in an # _arglist attribute - if hasattr(self.holder, '_arglist'): - arglist = self.holder._arglist - else: - arglist = [(self.holder.args, self.holder.kwargs)] - for args, kwargs in arglist: + marks = getattr(self.holder, '_marks', None) \ + or [self.holder.mark] + for _, args, kwargs in marks: if 'condition' in kwargs: args = (kwargs['condition'],) for expr in args: self.expr = expr if isinstance(expr, py.builtin._basestring): + d = self._getglobals() result = cached_eval(self.item.config, expr, d) else: if "reason" not in kwargs: # XXX better be checked at collection time msg = "you need to specify reason=STRING " \ "when using booleans as conditions." 
- pytest.fail(msg) + fail(msg) result = bool(expr) if result: self.result = True @@ -165,7 +152,7 @@ class MarkEvaluator: return expl -@pytest.hookimpl(tryfirst=True) +@hookimpl(tryfirst=True) def pytest_runtest_setup(item): # Check if skip or skipif are specified as pytest marks @@ -174,23 +161,23 @@ def pytest_runtest_setup(item): eval_skipif = MarkEvaluator(item, 'skipif') if eval_skipif.istrue(): item._evalskip = eval_skipif - pytest.skip(eval_skipif.getexplanation()) + skip(eval_skipif.getexplanation()) skip_info = item.keywords.get('skip') if isinstance(skip_info, (MarkInfo, MarkDecorator)): item._evalskip = True if 'reason' in skip_info.kwargs: - pytest.skip(skip_info.kwargs['reason']) + skip(skip_info.kwargs['reason']) elif skip_info.args: - pytest.skip(skip_info.args[0]) + skip(skip_info.args[0]) else: - pytest.skip("unconditional skip") + skip("unconditional skip") item._evalxfail = MarkEvaluator(item, 'xfail') check_xfail_no_run(item) -@pytest.mark.hookwrapper +@hookimpl(hookwrapper=True) def pytest_pyfunc_call(pyfuncitem): check_xfail_no_run(pyfuncitem) outcome = yield @@ -205,7 +192,7 @@ def check_xfail_no_run(item): evalxfail = item._evalxfail if evalxfail.istrue(): if not evalxfail.get('run', True): - pytest.xfail("[NOTRUN] " + evalxfail.getexplanation()) + xfail("[NOTRUN] " + evalxfail.getexplanation()) def check_strict_xfail(pyfuncitem): @@ -217,10 +204,10 @@ def check_strict_xfail(pyfuncitem): if is_strict_xfail: del pyfuncitem._evalxfail explanation = evalxfail.getexplanation() - pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False) + fail('[XPASS(strict)] ' + explanation, pytrace=False) -@pytest.hookimpl(hookwrapper=True) +@hookimpl(hookwrapper=True) def pytest_runtest_makereport(item, call): outcome = yield rep = outcome.get_result() @@ -240,11 +227,11 @@ def pytest_runtest_makereport(item, call): rep.wasxfail = rep.longrepr elif item.config.option.runxfail: pass # don't interefere - elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception): + elif call.excinfo and call.excinfo.errisinstance(xfail.Exception): rep.wasxfail = "reason: " + call.excinfo.value.msg rep.outcome = "skipped" elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \ - evalxfail.istrue(): + evalxfail.istrue(): if call.excinfo: if evalxfail.invalidraise(call.excinfo.value): rep.outcome = "failed" @@ -270,6 +257,8 @@ def pytest_runtest_makereport(item, call): rep.longrepr = filename, line, reason # called by terminalreporter progress reporting + + def pytest_report_teststatus(report): if hasattr(report, "wasxfail"): if report.skipped: @@ -278,10 +267,12 @@ def pytest_report_teststatus(report): return "xpassed", "X", ("XPASS", {'yellow': True}) # called by the terminalreporter instance/plugin + + def pytest_terminal_summary(terminalreporter): tr = terminalreporter if not tr.reportchars: - #for name in "xfailed skipped failed xpassed": + # for name in "xfailed skipped failed xpassed": # if not tr.stats.get(name, 0): # tr.write_line("HINT: use '-r' option to see extra " # "summary info about tests") @@ -308,12 +299,14 @@ def pytest_terminal_summary(terminalreporter): for line in lines: tr._tw.line(line) + def show_simple(terminalreporter, lines, stat, format): failed = terminalreporter.stats.get(stat) if failed: for rep in failed: pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) - lines.append(format %(pos,)) + lines.append(format % (pos,)) + def show_xfailed(terminalreporter, lines): xfailed = terminalreporter.stats.get("xfailed") @@ -325,13 +318,15 @@ def 
show_xfailed(terminalreporter, lines): if reason: lines.append(" " + str(reason)) + def show_xpassed(terminalreporter, lines): xpassed = terminalreporter.stats.get("xpassed") if xpassed: for rep in xpassed: pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid) reason = rep.wasxfail - lines.append("XPASS %s %s" %(pos, reason)) + lines.append("XPASS %s %s" % (pos, reason)) + def cached_eval(config, expr, d): if not hasattr(config, '_evalcache'): @@ -351,25 +346,27 @@ def folded_skips(skipped): key = event.longrepr assert len(key) == 3, (event, key) d.setdefault(key, []).append(event) - l = [] + values = [] for key, events in d.items(): - l.append((len(events),) + key) - return l + values.append((len(events),) + key) + return values + def show_skipped(terminalreporter, lines): tr = terminalreporter skipped = tr.stats.get('skipped', []) if skipped: - #if not tr.hasopt('skipped'): + # if not tr.hasopt('skipped'): # tr.write_line( # "%d skipped tests, specify -rs for more info" % # len(skipped)) # return fskips = folded_skips(skipped) if fskips: - #tr.write_sep("_", "skipped test summary") + # tr.write_sep("_", "skipped test summary") for num, fspath, lineno, reason in fskips: if reason.startswith("Skipped: "): reason = reason[9:] - lines.append("SKIP [%d] %s:%d: %s" % - (num, fspath, lineno, reason)) + lines.append( + "SKIP [%d] %s:%d: %s" % + (num, fspath, lineno + 1, reason)) diff --git a/_pytest/terminal.py b/_pytest/terminal.py index 16bf75733..9da94d0c9 100644 --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -2,6 +2,9 @@ This is a good source for looking at the various reporting hooks. """ +from __future__ import absolute_import, division, print_function + +import itertools from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \ EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED import pytest @@ -10,39 +13,41 @@ import sys import time import platform +from _pytest import nodes import _pytest._pluggy as pluggy def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") group._addoption('-v', '--verbose', action="count", - dest="verbose", default=0, help="increase verbosity."), + dest="verbose", default=0, help="increase verbosity."), group._addoption('-q', '--quiet', action="count", - dest="quiet", default=0, help="decrease verbosity."), + dest="quiet", default=0, help="decrease verbosity."), group._addoption('-r', - action="store", dest="reportchars", default='', metavar="chars", - help="show extra test summary info as specified by chars (f)ailed, " - "(E)error, (s)skipped, (x)failed, (X)passed, " - "(p)passed, (P)passed with output, (a)all except pP. " - "The pytest warnings are displayed at all times except when " - "--disable-pytest-warnings is set") - group._addoption('--disable-pytest-warnings', default=False, - dest='disablepytestwarnings', action='store_true', - help='disable warnings summary, overrides -r w flag') + action="store", dest="reportchars", default='', metavar="chars", + help="show extra test summary info as specified by chars (f)ailed, " + "(E)error, (s)skipped, (x)failed, (X)passed, " + "(p)passed, (P)passed with output, (a)all except pP. 
" + "Warnings are displayed at all times except when " + "--disable-warnings is set") + group._addoption('--disable-warnings', '--disable-pytest-warnings', default=False, + dest='disable_warnings', action='store_true', + help='disable warnings summary') group._addoption('-l', '--showlocals', - action="store_true", dest="showlocals", default=False, - help="show locals in tracebacks (disabled by default).") + action="store_true", dest="showlocals", default=False, + help="show locals in tracebacks (disabled by default).") group._addoption('--tb', metavar="style", - action="store", dest="tbstyle", default='auto', - choices=['auto', 'long', 'short', 'no', 'line', 'native'], - help="traceback print mode (auto/long/short/line/native/no).") + action="store", dest="tbstyle", default='auto', + choices=['auto', 'long', 'short', 'no', 'line', 'native'], + help="traceback print mode (auto/long/short/line/native/no).") group._addoption('--fulltrace', '--full-trace', - action="store_true", default=False, - help="don't cut any tracebacks (default is to cut).") + action="store_true", default=False, + help="don't cut any tracebacks (default is to cut).") group._addoption('--color', metavar="color", - action="store", dest="color", default='auto', - choices=['yes', 'no', 'auto'], - help="color terminal output (yes/no/auto).") + action="store", dest="color", default='auto', + choices=['yes', 'no', 'auto'], + help="color terminal output (yes/no/auto).") + def pytest_configure(config): config.option.verbose -= config.option.quiet @@ -54,12 +59,13 @@ def pytest_configure(config): reporter.write_line("[traceconfig] " + msg) config.trace.root.setprocessor("pytest:config", mywriter) + def getreportopt(config): reportopts = "" reportchars = config.option.reportchars - if not config.option.disablepytestwarnings and 'w' not in reportchars: + if not config.option.disable_warnings and 'w' not in reportchars: reportchars += 'w' - elif config.option.disablepytestwarnings and 'w' in reportchars: + elif config.option.disable_warnings and 'w' in reportchars: reportchars = reportchars.replace('w', '') if reportchars: for char in reportchars: @@ -69,6 +75,7 @@ def getreportopt(config): reportopts = 'fEsxXw' return reportopts + def pytest_report_teststatus(report): if report.passed: letter = "." @@ -80,13 +87,41 @@ def pytest_report_teststatus(report): letter = "f" return report.outcome, letter, report.outcome.upper() + class WarningReport: + """ + Simple structure to hold warnings information captured by ``pytest_logwarning``. + """ + def __init__(self, code, message, nodeid=None, fslocation=None): + """ + :param code: unused + :param str message: user friendly message about the warning + :param str|None nodeid: node id that generated the warning (see ``get_location``). + :param tuple|py.path.local fslocation: + file system location of the source of the warning (see ``get_location``). + """ self.code = code self.message = message self.nodeid = nodeid self.fslocation = fslocation + def get_location(self, config): + """ + Returns the more user-friendly information about the location + of a warning, or None. 
+ """ + if self.nodeid: + return self.nodeid + if self.fslocation: + if isinstance(self.fslocation, tuple) and len(self.fslocation) >= 2: + filename, linenum = self.fslocation[:2] + relpath = py.path.local(filename).relto(config.invocation_dir) + return '%s:%s' % (relpath, linenum) + else: + return str(self.fslocation) + return None + class TerminalReporter: def __init__(self, config, file=None): @@ -146,8 +181,22 @@ class TerminalReporter: self._tw.line(line, **markup) def rewrite(self, line, **markup): + """ + Rewinds the terminal cursor to the beginning and writes the given line. + + :kwarg erase: if True, will also add spaces until the full terminal width to ensure + previous lines are properly erased. + + The rest of the keyword arguments are markup instructions. + """ + erase = markup.pop('erase', False) + if erase: + fill_count = self._tw.fullwidth - len(line) + fill = ' ' * fill_count + else: + fill = '' line = str(line) - self._tw.write("\r" + line, **markup) + self._tw.write("\r" + line + fill, **markup) def write_sep(self, sep, title=None, **markup): self.ensure_newline() @@ -166,8 +215,6 @@ class TerminalReporter: def pytest_logwarning(self, code, fslocation, message, nodeid): warnings = self.stats.setdefault("warnings", []) - if isinstance(fslocation, tuple): - fslocation = "%s:%d" % fslocation warning = WarningReport(code=code, fslocation=fslocation, message=message, nodeid=nodeid) warnings.append(warning) @@ -212,15 +259,15 @@ class TerminalReporter: word, markup = word else: if rep.passed: - markup = {'green':True} + markup = {'green': True} elif rep.failed: - markup = {'red':True} + markup = {'red': True} elif rep.skipped: - markup = {'yellow':True} + markup = {'yellow': True} line = self._locationline(rep.nodeid, *rep.location) if not hasattr(rep, 'node'): self.write_ensure_prefix(line, word, **markup) - #self._tw.write(word, **markup) + # self._tw.write(word, **markup) else: self.ensure_newline() if hasattr(rep, 'node'): @@ -241,7 +288,7 @@ class TerminalReporter: items = [x for x in report.result if isinstance(x, pytest.Item)] self._numcollected += len(items) if self.isatty: - #self.write_fspath_result(report.nodeid, 'E') + # self.write_fspath_result(report.nodeid, 'E') self.report_collect() def report_collect(self, final=False): @@ -254,15 +301,15 @@ class TerminalReporter: line = "collected " else: line = "collecting " - line += str(self._numcollected) + " items" + line += str(self._numcollected) + " item" + ('' if self._numcollected == 1 else 's') if errors: line += " / %d errors" % errors if skipped: line += " / %d skipped" % skipped if self.isatty: + self.rewrite(line, bold=True, erase=True) if final: - line += " \n" - self.rewrite(line, bold=True) + self.write('\n') else: self.write_line(line) @@ -288,6 +335,9 @@ class TerminalReporter: self.write_line(msg) lines = self.config.hook.pytest_report_header( config=self.config, startdir=self.startdir) + self._write_report_lines_from_hooks(lines) + + def _write_report_lines_from_hooks(self, lines): lines.reverse() for line in flatten(lines): self.write_line(line) @@ -295,8 +345,8 @@ class TerminalReporter: def pytest_report_header(self, config): inifile = "" if config.inifile: - inifile = config.rootdir.bestrelpath(config.inifile) - lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)] + inifile = " " + config.rootdir.bestrelpath(config.inifile) + lines = ["rootdir: %s, inifile:%s" % (config.rootdir, inifile)] plugininfo = config.pluginmanager.list_plugin_distinfo() if plugininfo: @@ -314,10 +364,9 @@ class 
TerminalReporter: rep.toterminal(self._tw) return 1 return 0 - if not self.showheader: - return - #for i, testarg in enumerate(self.config.args): - # self.write_line("test path %d: %s" %(i+1, testarg)) + lines = self.config.hook.pytest_report_collectionfinish( + config=self.config, startdir=self.startdir, items=session.items) + self._write_report_lines_from_hooks(lines) def _printcollecteditems(self, items): # to print out items and their parent collectors @@ -340,14 +389,14 @@ class TerminalReporter: stack = [] indent = "" for item in items: - needed_collectors = item.listchain()[1:] # strip root node + needed_collectors = item.listchain()[1:] # strip root node while stack: if stack == needed_collectors[:len(stack)]: break stack.pop() for col in needed_collectors[len(stack):]: stack.append(col) - #if col.name == "()": + # if col.name == "()": # continue indent = (len(stack) - 1) * " " self._tw.line("%s%s" % (indent, col)) @@ -396,15 +445,15 @@ class TerminalReporter: line = self.config.cwd_relative_nodeid(nodeid) if domain and line.endswith(domain): line = line[:-len(domain)] - l = domain.split("[") - l[0] = l[0].replace('.', '::') # don't replace '.' in params - line += "[".join(l) + values = domain.split("[") + values[0] = values[0].replace('.', '::') # don't replace '.' in params + line += "[".join(values) return line # collect_fspath comes from testid which has a "/"-normalized path if fspath: res = mkrel(nodeid).replace("::()", "") # parens-normalization - if nodeid.split("::")[0] != fspath.replace("\\", "/"): + if nodeid.split("::")[0] != fspath.replace("\\", nodes.SEP): res += " <- " + self.startdir.bestrelpath(fspath) else: res = "[location]" @@ -415,7 +464,7 @@ class TerminalReporter: fspath, lineno, domain = rep.location return domain else: - return "test session" # XXX? + return "test session" # XXX? 
def _getcrashline(self, rep): try: @@ -430,21 +479,29 @@ class TerminalReporter: # summaries for sessionfinish # def getreports(self, name): - l = [] + values = [] for x in self.stats.get(name, []): if not hasattr(x, '_pdbshown'): - l.append(x) - return l + values.append(x) + return values def summary_warnings(self): if self.hasopt("w"): - warnings = self.stats.get("warnings") - if not warnings: + all_warnings = self.stats.get("warnings") + if not all_warnings: return - self.write_sep("=", "pytest-warning summary") - for w in warnings: - self._tw.line("W%s %s %s" % (w.code, - w.fslocation, w.message)) + + grouped = itertools.groupby(all_warnings, key=lambda wr: wr.get_location(self.config)) + + self.write_sep("=", "warnings summary", yellow=True, bold=False) + for location, warnings in grouped: + self._tw.line(str(location) or '') + for w in warnings: + lines = w.message.splitlines() + indented = '\n'.join(' ' + x for x in lines) + self._tw.line(indented) + self._tw.line() + self._tw.line('-- Docs: http://doc.pytest.org/en/latest/warnings.html') def summary_passes(self): if self.config.option.tbstyle != "no": @@ -466,7 +523,6 @@ class TerminalReporter: content = content[:-1] self._tw.line(content) - def summary_failures(self): if self.config.option.tbstyle != "no": reports = self.getreports('failed') @@ -528,6 +584,7 @@ class TerminalReporter: self.write_sep("=", "%d tests deselected" % ( len(self.stats['deselected'])), bold=True) + def repr_pythonversion(v=None): if v is None: v = sys.version_info @@ -536,30 +593,30 @@ def repr_pythonversion(v=None): except (TypeError, ValueError): return str(v) -def flatten(l): - for x in l: + +def flatten(values): + for x in values: if isinstance(x, (list, tuple)): for y in flatten(x): yield y else: yield x + def build_summary_stats_line(stats): keys = ("failed passed skipped deselected " - "xfailed xpassed warnings error").split() - key_translation = {'warnings': 'pytest-warnings'} + "xfailed xpassed warnings error").split() unknown_key_seen = False for key in stats.keys(): if key not in keys: - if key: # setup/teardown reports have an empty key, ignore them + if key: # setup/teardown reports have an empty key, ignore them keys.append(key) unknown_key_seen = True parts = [] for key in keys: val = stats.get(key, None) if val: - key_name = key_translation.get(key, key) - parts.append("%d %s" % (len(val), key_name)) + parts.append("%d %s" % (len(val), key)) if parts: line = ", ".join(parts) @@ -579,7 +636,7 @@ def build_summary_stats_line(stats): def _plugin_nameversions(plugininfo): - l = [] + values = [] for plugin, dist in plugininfo: # gets us name and version! name = '{dist.project_name}-{dist.version}'.format(dist=dist) @@ -588,6 +645,6 @@ def _plugin_nameversions(plugininfo): name = name[7:] # we decided to print python package names # they can have more than one plugin - if name not in l: - l.append(name) - return l + if name not in values: + values.append(name) + return values diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py index 28a6b0636..da1b03223 100644 --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -1,4 +1,6 @@ """ support for providing temporary directories to test functions. """ +from __future__ import absolute_import, division, print_function + import re import pytest @@ -23,7 +25,7 @@ class TempdirFactory: provides an empty unique-per-test-invocation directory and is guaranteed to be empty. 
""" - #py.log._apiwarn(">1.1", "use tmpdir function argument") + # py.log._apiwarn(">1.1", "use tmpdir function argument") return self.getbasetemp().ensure(string, dir=dir) def mktemp(self, basename, numbered=True): @@ -36,7 +38,7 @@ class TempdirFactory: p = basetemp.mkdir(basename) else: p = py.path.local.make_numbered_dir(prefix=basename, - keep=0, rootdir=basetemp, lock_timeout=None) + keep=0, rootdir=basetemp, lock_timeout=None) self.trace("mktemp", p) return p @@ -116,7 +118,7 @@ def tmpdir(request, tmpdir_factory): path object. """ name = request.node.name - name = re.sub("[\W]", "_", name) + name = re.sub(r"[\W]", "_", name) MAXVAL = 30 if len(name) > MAXVAL: name = name[:MAXVAL] diff --git a/_pytest/unittest.py b/_pytest/unittest.py index 73224010b..52c9813e8 100644 --- a/_pytest/unittest.py +++ b/_pytest/unittest.py @@ -1,13 +1,14 @@ """ discovery and running of std-library "unittest" style tests. """ -from __future__ import absolute_import +from __future__ import absolute_import, division, print_function import sys import traceback -import pytest -# for transfering markers +# for transferring markers import _pytest._code -from _pytest.python import transfer_markers +from _pytest.config import hookimpl +from _pytest.outcomes import fail, skip, xfail +from _pytest.python import transfer_markers, Class, Module, Function from _pytest.skipping import MarkEvaluator @@ -22,11 +23,11 @@ def pytest_pycollect_makeitem(collector, name, obj): return UnitTestCase(name, parent=collector) -class UnitTestCase(pytest.Class): +class UnitTestCase(Class): # marker for fixturemanger.getfixtureinfo() # to declare that our children do not support funcargs nofuncargs = True - + def setup(self): cls = self.obj if getattr(cls, '__unittest_skip__', False): @@ -46,7 +47,7 @@ class UnitTestCase(pytest.Class): return self.session._fixturemanager.parsefactories(self, unittest=True) loader = TestLoader() - module = self.getparent(pytest.Module).obj + module = self.getparent(Module).obj foundsomething = False for name in loader.getTestCaseNames(self.obj): x = getattr(self.obj, name) @@ -65,8 +66,7 @@ class UnitTestCase(pytest.Class): yield TestCaseFunction('runTest', parent=self) - -class TestCaseFunction(pytest.Function): +class TestCaseFunction(Function): _excinfo = None def setup(self): @@ -109,38 +109,39 @@ class TestCaseFunction(pytest.Function): except TypeError: try: try: - l = traceback.format_exception(*rawexcinfo) - l.insert(0, "NOTE: Incompatible Exception Representation, " - "displaying natively:\n\n") - pytest.fail("".join(l), pytrace=False) - except (pytest.fail.Exception, KeyboardInterrupt): + values = traceback.format_exception(*rawexcinfo) + values.insert(0, "NOTE: Incompatible Exception Representation, " + "displaying natively:\n\n") + fail("".join(values), pytrace=False) + except (fail.Exception, KeyboardInterrupt): raise - except: - pytest.fail("ERROR: Unknown Incompatible Exception " - "representation:\n%r" %(rawexcinfo,), pytrace=False) + except: # noqa + fail("ERROR: Unknown Incompatible Exception " + "representation:\n%r" % (rawexcinfo,), pytrace=False) except KeyboardInterrupt: raise - except pytest.fail.Exception: + except fail.Exception: excinfo = _pytest._code.ExceptionInfo() self.__dict__.setdefault('_excinfo', []).append(excinfo) def addError(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) + def addFailure(self, testcase, rawexcinfo): self._addexcinfo(rawexcinfo) def addSkip(self, testcase, reason): try: - pytest.skip(reason) - except pytest.skip.Exception: + skip(reason) 
+ except skip.Exception: self._evalskip = MarkEvaluator(self, 'SkipTest') self._evalskip.result = True self._addexcinfo(sys.exc_info()) def addExpectedFailure(self, testcase, rawexcinfo, reason=""): try: - pytest.xfail(str(reason)) - except pytest.xfail.Exception: + xfail(str(reason)) + except xfail.Exception: self._addexcinfo(sys.exc_info()) def addUnexpectedSuccess(self, testcase, reason=""): @@ -152,22 +153,42 @@ class TestCaseFunction(pytest.Function): def stopTest(self, testcase): pass + def _handle_skip(self): + # implements the skipping machinery (see #2137) + # analog to pythons Lib/unittest/case.py:run + testMethod = getattr(self._testcase, self._testcase._testMethodName) + if (getattr(self._testcase.__class__, "__unittest_skip__", False) or + getattr(testMethod, "__unittest_skip__", False)): + # If the class or method was skipped. + skip_why = (getattr(self._testcase.__class__, '__unittest_skip_why__', '') or + getattr(testMethod, '__unittest_skip_why__', '')) + try: # PY3, unittest2 on PY2 + self._testcase._addSkip(self, self._testcase, skip_why) + except TypeError: # PY2 + if sys.version_info[0] != 2: + raise + self._testcase._addSkip(self, skip_why) + return True + return False + def runtest(self): if self.config.pluginmanager.get_plugin("pdbinvoke") is None: self._testcase(result=self) else: # disables tearDown and cleanups for post mortem debugging (see #1890) + if self._handle_skip(): + return self._testcase.debug() - def _prunetraceback(self, excinfo): - pytest.Function._prunetraceback(self, excinfo) + Function._prunetraceback(self, excinfo) traceback = excinfo.traceback.filter( - lambda x:not x.frame.f_globals.get('__unittest')) + lambda x: not x.frame.f_globals.get('__unittest')) if traceback: excinfo.traceback = traceback -@pytest.hookimpl(tryfirst=True) + +@hookimpl(tryfirst=True) def pytest_runtest_makereport(item, call): if isinstance(item, TestCaseFunction): if item._excinfo: @@ -179,7 +200,8 @@ def pytest_runtest_makereport(item, call): # twisted trial support -@pytest.hookimpl(hookwrapper=True) + +@hookimpl(hookwrapper=True) def pytest_runtest_protocol(item): if isinstance(item, TestCaseFunction) and \ 'twisted.trial.unittest' in sys.modules: @@ -188,7 +210,7 @@ def pytest_runtest_protocol(item): check_testcase_implements_trial_reporter() def excstore(self, exc_value=None, exc_type=None, exc_tb=None, - captureVars=None): + captureVars=None): if exc_value is None: self._rawexcinfo = sys.exc_info() else: @@ -197,7 +219,7 @@ def pytest_runtest_protocol(item): self._rawexcinfo = (exc_type, exc_value, exc_tb) try: Failure__init__(self, exc_value, exc_type, exc_tb, - captureVars=captureVars) + captureVars=captureVars) except TypeError: Failure__init__(self, exc_value, exc_type, exc_tb) diff --git a/_pytest/vendored_packages/pluggy.py b/_pytest/vendored_packages/pluggy.py index 9c13932b3..aebddad01 100644 --- a/_pytest/vendored_packages/pluggy.py +++ b/_pytest/vendored_packages/pluggy.py @@ -540,7 +540,7 @@ class PluginManager(object): of HookImpl instances and the keyword arguments for the hook call. ``after(outcome, hook_name, hook_impls, kwargs)`` receives the - same arguments as ``before`` but also a :py:class:`_CallOutcome`` object + same arguments as ``before`` but also a :py:class:`_CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` object which represents the result of the overall hook call. 
""" return _TracedHookExecution(self, before, after).undo diff --git a/_pytest/warnings.py b/_pytest/warnings.py new file mode 100644 index 000000000..926b1f581 --- /dev/null +++ b/_pytest/warnings.py @@ -0,0 +1,94 @@ +from __future__ import absolute_import, division, print_function + +import warnings +from contextlib import contextmanager + +import pytest + +from _pytest import compat + + +def _setoption(wmod, arg): + """ + Copy of the warning._setoption function but does not escape arguments. + """ + parts = arg.split(':') + if len(parts) > 5: + raise wmod._OptionError("too many fields (max 5): %r" % (arg,)) + while len(parts) < 5: + parts.append('') + action, message, category, module, lineno = [s.strip() + for s in parts] + action = wmod._getaction(action) + category = wmod._getcategory(category) + if lineno: + try: + lineno = int(lineno) + if lineno < 0: + raise ValueError + except (ValueError, OverflowError): + raise wmod._OptionError("invalid lineno %r" % (lineno,)) + else: + lineno = 0 + wmod.filterwarnings(action, message, category, module, lineno) + + +def pytest_addoption(parser): + group = parser.getgroup("pytest-warnings") + group.addoption( + '-W', '--pythonwarnings', action='append', + help="set which warnings to report, see -W option of python itself.") + parser.addini("filterwarnings", type="linelist", + help="Each line specifies a pattern for " + "warnings.filterwarnings. " + "Processed after -W and --pythonwarnings.") + + +@contextmanager +def catch_warnings_for_item(item): + """ + catches the warnings generated during setup/call/teardown execution + of the given item and after it is done posts them as warnings to this + item. + """ + args = item.config.getoption('pythonwarnings') or [] + inifilters = item.config.getini("filterwarnings") + with warnings.catch_warnings(record=True) as log: + for arg in args: + warnings._setoption(arg) + + for arg in inifilters: + _setoption(warnings, arg) + + mark = item.get_marker('filterwarnings') + if mark: + for arg in mark.args: + warnings._setoption(arg) + + yield + + for warning in log: + warn_msg = warning.message + unicode_warning = False + + if compat._PY2 and any(isinstance(m, compat.UNICODE_TYPES) for m in warn_msg.args): + new_args = [compat.safe_str(m) for m in warn_msg.args] + unicode_warning = warn_msg.args != new_args + warn_msg.args = new_args + + msg = warnings.formatwarning( + warn_msg, warning.category, + warning.filename, warning.lineno, warning.line) + item.warn("unused", msg) + + if unicode_warning: + warnings.warn( + "Warning is using unicode non convertible to ascii, " + "converting to a safe representation:\n %s" % msg, + UnicodeWarning) + + +@pytest.hookimpl(hookwrapper=True) +def pytest_runtest_protocol(item): + with catch_warnings_for_item(item): + yield diff --git a/appveyor.yml b/appveyor.yml index a42aa16dc..3a11700e3 100644 --- a/appveyor.yml +++ b/appveyor.yml @@ -15,16 +15,19 @@ environment: - TOXENV: "py33" - TOXENV: "py34" - TOXENV: "py35" + - TOXENV: "py36" - TOXENV: "pypy" - TOXENV: "py27-pexpect" - TOXENV: "py27-xdist" - TOXENV: "py27-trial" - - TOXENV: "py35-pexpect" - - TOXENV: "py35-xdist" - - TOXENV: "py35-trial" + - TOXENV: "py27-numpy" + - TOXENV: "py36-pexpect" + - TOXENV: "py36-xdist" + - TOXENV: "py36-trial" + - TOXENV: "py36-numpy" - TOXENV: "py27-nobyte" - TOXENV: "doctesting" - - TOXENV: "freeze" + - TOXENV: "py35-freeze" - TOXENV: "docs" install: @@ -33,7 +36,7 @@ install: - if "%TOXENV%" == "pypy" call scripts\install-pypy.bat - - C:\Python35\python -m pip install tox + - 
C:\Python36\python -m pip install --upgrade --pre tox build: false # Not a C# project, build stuff at the test step instead. diff --git a/changelog/1505.doc b/changelog/1505.doc new file mode 100644 index 000000000..1b303d1bd --- /dev/null +++ b/changelog/1505.doc @@ -0,0 +1 @@ +Introduce a dedicated section about conftest.py. diff --git a/changelog/1997.doc b/changelog/1997.doc new file mode 100644 index 000000000..0fa110dc9 --- /dev/null +++ b/changelog/1997.doc @@ -0,0 +1 @@ +Explicitly mention ``xpass`` in the documentation of ``xfail``. diff --git a/changelog/2658.doc b/changelog/2658.doc new file mode 100644 index 000000000..2da7f3d6c --- /dev/null +++ b/changelog/2658.doc @@ -0,0 +1 @@ +Add an example for ``pytest.param`` in the example/parametrize document. \ No newline at end of file diff --git a/changelog/2775.bugfix b/changelog/2775.bugfix new file mode 100644 index 000000000..8123522ac --- /dev/null +++ b/changelog/2775.bugfix @@ -0,0 +1 @@ +Fix the bug where running pytest with "--pyargs" would result in Items with empty "parent.nodeid" if run from a different root directory. diff --git a/changelog/2819.bugfix b/changelog/2819.bugfix new file mode 100644 index 000000000..303903cf7 --- /dev/null +++ b/changelog/2819.bugfix @@ -0,0 +1 @@ +Fix issue with ``@pytest.mark.parametrize`` if argnames was specified as kwarg. \ No newline at end of file diff --git a/changelog/2836.bug b/changelog/2836.bug new file mode 100644 index 000000000..afa1961d7 --- /dev/null +++ b/changelog/2836.bug @@ -0,0 +1 @@ +Match fixture paths against actual path segments in order to avoid matching folders which share a prefix. diff --git a/changelog/2856.bugfix b/changelog/2856.bugfix new file mode 100644 index 000000000..7e5fc8fc7 --- /dev/null +++ b/changelog/2856.bugfix @@ -0,0 +1 @@ +Strip whitespace from marker names when reading them from INI config. diff --git a/changelog/2882.bugfix b/changelog/2882.bugfix new file mode 100644 index 000000000..2bda24c01 --- /dev/null +++ b/changelog/2882.bugfix @@ -0,0 +1 @@ +Show full context of doctest source in the pytest output, if the lineno of the failed example in the docstring is < 9. \ No newline at end of file diff --git a/changelog/2893.doc b/changelog/2893.doc new file mode 100644 index 000000000..a305f1890 --- /dev/null +++ b/changelog/2893.doc @@ -0,0 +1 @@ +Clarify the language of the proposal for fixture parameters. diff --git a/changelog/2903.doc b/changelog/2903.doc new file mode 100644 index 000000000..492a8c685 --- /dev/null +++ b/changelog/2903.doc @@ -0,0 +1 @@ +List Python 3.6 in the documented supported versions in the getting started document. diff --git a/changelog/538.doc b/changelog/538.doc new file mode 100644 index 000000000..bc5fb712f --- /dev/null +++ b/changelog/538.doc @@ -0,0 +1 @@ +Clarify the documentation of available fixture scopes. diff --git a/changelog/911.doc b/changelog/911.doc new file mode 100644 index 000000000..e9d94f21c --- /dev/null +++ b/changelog/911.doc @@ -0,0 +1 @@ +Add documentation about the ``python -m pytest`` invocation adding the current directory to sys.path.
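A quick usage sketch of the per-test warning filtering added in ``_pytest/warnings.py`` above (the test module name and filter string are illustrative, not part of the patch)::

    # content of test_warning_filters.py (hypothetical example)
    import warnings

    import pytest

    @pytest.mark.filterwarnings('ignore::DeprecationWarning')
    def test_marker_filter():
        # catch_warnings_for_item() applies this filter only around this
        # test's setup/call/teardown, then restores the previous filters
        warnings.warn("old api", DeprecationWarning)

The same ``action:message:category:module:lineno`` strings accepted by ``-W`` can be listed under the new ``filterwarnings`` ini option, one filter per line.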
diff --git a/changelog/_template.rst b/changelog/_template.rst new file mode 100644 index 000000000..a898abc15 --- /dev/null +++ b/changelog/_template.rst @@ -0,0 +1,40 @@ +{% for section in sections %} +{% set underline = "-" %} +{% if section %} +{{section}} +{{ underline * section|length }}{% set underline = "~" %} + +{% endif %} +{% if sections[section] %} +{% for category, val in definitions.items() if category in sections[section] %} + +{{ definitions[category]['name'] }} +{{ underline * definitions[category]['name']|length }} + +{% if definitions[category]['showcontent'] %} +{% for text, values in sections[section][category]|dictsort(by='value') %} +{% set issue_joiner = joiner(', ') %} +- {{ text }}{% if category != 'vendor' %} ({% for value in values|sort %}{{ issue_joiner() }}`{{ value }} `_{% endfor %}){% endif %} + + +{% endfor %} +{% else %} +- {{ sections[section][category]['']|sort|join(', ') }} + + +{% endif %} +{% if sections[section][category]|length == 0 %} + +No significant changes. + + +{% else %} +{% endif %} +{% endfor %} +{% else %} + +No significant changes. + + +{% endif %} +{% endfor %} diff --git a/doc/en/Makefile b/doc/en/Makefile index 5499c405e..286bbd8e7 100644 --- a/doc/en/Makefile +++ b/doc/en/Makefile @@ -17,7 +17,12 @@ REGENDOC_ARGS := \ --normalize "/_{8,} (.*) _{8,}/_______ \1 ________/" \ --normalize "/in \d+.\d+ seconds/in 0.12 seconds/" \ --normalize "@/tmp/pytest-of-.*/pytest-\d+@PYTEST_TMPDIR@" \ - + --normalize "@pytest-(\d+)\\.[^ ,]+@pytest-\1.x.y@" \ + --normalize "@(This is pytest version )(\d+)\\.[^ ,]+@\1\2.x.y@" \ + --normalize "@py-(\d+)\\.[^ ,]+@py-\1.x.y@" \ + --normalize "@pluggy-(\d+)\\.[.\d,]+@pluggy-\1.x.y@" \ + --normalize "@hypothesis-(\d+)\\.[.\d,]+@hypothesis-\1.x.y@" \ + --normalize "@Python (\d+)\\.[^ ,]+@Python \1.x.y@" .PHONY: help clean html dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest @@ -36,7 +41,7 @@ clean: -rm -rf $(BUILDDIR)/* regen: - PYTHONDONTWRITEBYTECODE=1 COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS} + PYTHONDONTWRITEBYTECODE=1 PYTEST_ADDOPT=-pno:hypothesis COLUMNS=76 regendoc --update *.rst */*.rst ${REGENDOC_ARGS} html: $(SPHINXBUILD) -b html $(ALLSPHINXOPTS) $(BUILDDIR)/html diff --git a/doc/en/_templates/globaltoc.html b/doc/en/_templates/globaltoc.html index af427198a..fdd4dd59b 100644 --- a/doc/en/_templates/globaltoc.html +++ b/doc/en/_templates/globaltoc.html @@ -9,6 +9,7 @@
  • Contact
  • Talks/Posts
  • Changelog
+ • Backwards Compatibility
  • License
diff --git a/doc/en/announce/index.rst b/doc/en/announce/index.rst index 44b29cae1..58b9aeec7 100644 --- a/doc/en/announce/index.rst +++ b/doc/en/announce/index.rst @@ -6,6 +6,16 @@ Release announcements :maxdepth: 2 + release-3.2.3 + release-3.2.2 + release-3.2.1 + release-3.2.0 + release-3.1.3 + release-3.1.2 + release-3.1.1 + release-3.1.0 + release-3.0.7 + release-3.0.6 release-3.0.5 release-3.0.4 release-3.0.3 diff --git a/doc/en/announce/release-2.0.2.rst b/doc/en/announce/release-2.0.2.rst index 733a9f7bd..f1f44f34f 100644 --- a/doc/en/announce/release-2.0.2.rst +++ b/doc/en/announce/release-2.0.2.rst @@ -63,9 +63,9 @@ Changes between 2.0.1 and 2.0.2 this. - fixed typos in the docs (thanks Victor Garcia, Brianna Laugher) and particular - thanks to Laura Creighton who also revieved parts of the documentation. + thanks to Laura Creighton who also reviewed parts of the documentation. -- fix slighly wrong output of verbose progress reporting for classes +- fix slightly wrong output of verbose progress reporting for classes (thanks Amaury) - more precise (avoiding of) deprecation warnings for node.Class|Function accesses diff --git a/doc/en/announce/release-2.0.3.rst b/doc/en/announce/release-2.0.3.rst index ed746e851..9bbfdaab3 100644 --- a/doc/en/announce/release-2.0.3.rst +++ b/doc/en/announce/release-2.0.3.rst @@ -13,7 +13,7 @@ If you want to install or upgrade pytest, just type one of:: easy_install -U pytest There also is a bugfix release 1.6 of pytest-xdist, the plugin -that enables seemless distributed and "looponfail" testing for Python. +that enables seamless distributed and "looponfail" testing for Python. best, holger krekel @@ -33,7 +33,7 @@ Changes between 2.0.2 and 2.0.3 - don't require zlib (and other libs) for genscript plugin without --genscript actually being used. -- speed up skips (by not doing a full traceback represenation +- speed up skips (by not doing a full traceback representation internally) - fix issue37: avoid invalid characters in junitxml's output diff --git a/doc/en/announce/release-2.2.1.rst b/doc/en/announce/release-2.2.1.rst index f9764634c..5d28bcb01 100644 --- a/doc/en/announce/release-2.2.1.rst +++ b/doc/en/announce/release-2.2.1.rst @@ -2,7 +2,7 @@ pytest-2.2.1: bug fixes, perfect teardowns =========================================================================== -pytest-2.2.1 is a minor backward-compatible release of the the py.test +pytest-2.2.1 is a minor backward-compatible release of the py.test testing tool. It contains bug fixes and little improvements, including documentation fixes. If you are using the distributed testing plugin make sure to upgrade it to pytest-xdist-1.8.
diff --git a/doc/en/announce/release-2.2.4.rst b/doc/en/announce/release-2.2.4.rst index 8720bdb28..67f0feb27 100644 --- a/doc/en/announce/release-2.2.4.rst +++ b/doc/en/announce/release-2.2.4.rst @@ -29,7 +29,7 @@ Changes between 2.2.3 and 2.2.4 - fix issue with unittest: now @unittest.expectedFailure markers should be processed correctly (you can also use @pytest.mark markers) - document integration with the extended distribute/setuptools test commands -- fix issue 140: propperly get the real functions +- fix issue 140: properly get the real functions of bound classmethods for setup/teardown_class - fix issue #141: switch from the deceased paste.pocoo.org to bpaste.net - fix issue #143: call unconfigure/sessionfinish always when diff --git a/doc/en/announce/release-2.3.0.rst b/doc/en/announce/release-2.3.0.rst index 54fe3961f..f863aad0a 100644 --- a/doc/en/announce/release-2.3.0.rst +++ b/doc/en/announce/release-2.3.0.rst @@ -89,7 +89,7 @@ Changes between 2.2.4 and 2.3.0 - fix issue128: show captured output when capsys/capfd are used -- fix issue179: propperly show the dependency chain of factories +- fix issue179: properly show the dependency chain of factories - pluginmanager.register(...) now raises ValueError if the plugin has been already registered or the name is taken @@ -130,5 +130,5 @@ Changes between 2.2.4 and 2.3.0 - don't show deselected reason line if there is none - - py.test -vv will show all of assert comparisations instead of truncating + - py.test -vv will show all of assert comparisons instead of truncating diff --git a/doc/en/announce/release-2.3.2.rst b/doc/en/announce/release-2.3.2.rst index 948b374d4..75312b429 100644 --- a/doc/en/announce/release-2.3.2.rst +++ b/doc/en/announce/release-2.3.2.rst @@ -1,7 +1,7 @@ pytest-2.3.2: some fixes and more traceback-printing speed =========================================================================== -pytest-2.3.2 is a another stabilization release: +pytest-2.3.2 is another stabilization release: - issue 205: fixes a regression with conftest detection - issue 208/29: fixes traceback-printing speed in some bad cases diff --git a/doc/en/announce/release-2.3.3.rst b/doc/en/announce/release-2.3.3.rst index 1d7c7027b..3a48b6ac4 100644 --- a/doc/en/announce/release-2.3.3.rst +++ b/doc/en/announce/release-2.3.3.rst @@ -1,7 +1,7 @@ -pytest-2.3.3: integration fixes, py24 suport, ``*/**`` shown in traceback +pytest-2.3.3: integration fixes, py24 support, ``*/**`` shown in traceback =========================================================================== -pytest-2.3.3 is a another stabilization release of the py.test tool +pytest-2.3.3 is another stabilization release of the py.test tool which offers uebersimple assertions, scalable fixture mechanisms and deep customization for testing with Python. 
Particularly, this release provides: @@ -46,7 +46,7 @@ Changes between 2.3.2 and 2.3.3 - fix issue209 - reintroduce python2.4 support by depending on newer pylib which re-introduced statement-finding for pre-AST interpreters -- nose support: only call setup if its a callable, thanks Andrew +- nose support: only call setup if it's a callable, thanks Andrew Taumoefolau - fix issue219 - add py2.4-3.3 classifiers to TROVE list diff --git a/doc/en/announce/release-2.3.5.rst b/doc/en/announce/release-2.3.5.rst index c4e91e0e6..112399ef3 100644 --- a/doc/en/announce/release-2.3.5.rst +++ b/doc/en/announce/release-2.3.5.rst @@ -44,11 +44,11 @@ Changes between 2.3.4 and 2.3.5 (thanks Adam Goucher) - Issue 265 - integrate nose setup/teardown with setupstate - so it doesnt try to teardown if it did not setup + so it doesn't try to teardown if it did not setup -- issue 271 - dont write junitxml on slave nodes +- issue 271 - don't write junitxml on slave nodes -- Issue 274 - dont try to show full doctest example +- Issue 274 - don't try to show full doctest example when doctest does not know the example location - issue 280 - disable assertion rewriting on buggy CPython 2.6.0 @@ -84,7 +84,7 @@ Changes between 2.3.4 and 2.3.5 - allow to specify prefixes starting with "_" when customizing python_functions test discovery. (thanks Graham Horler) -- improve PYTEST_DEBUG tracing output by puting +- improve PYTEST_DEBUG tracing output by putting extra data on a new lines with additional indent - ensure OutcomeExceptions like skip/fail have initialized exception attributes diff --git a/doc/en/announce/release-2.4.0.rst b/doc/en/announce/release-2.4.0.rst index 88130c481..be3aaedb0 100644 --- a/doc/en/announce/release-2.4.0.rst +++ b/doc/en/announce/release-2.4.0.rst @@ -36,7 +36,7 @@ a full list of details. A few feature highlights: - reporting: color the last line red or green depending if failures/errors occurred or everything passed. -The documentation has been updated to accomodate the changes, +The documentation has been updated to accommodate the changes, see `http://pytest.org `_ To install or upgrade pytest:: @@ -118,7 +118,7 @@ new features: - fix issue322: tearDownClass is not run if setUpClass failed. Thanks Mathieu Agopian for the initial fix. Also make all of pytest/nose - finalizer mimick the same generic behaviour: if a setupX exists and + finalizer mimic the same generic behaviour: if a setupX exists and fails, don't run teardownX. This internally introduces a new method "node.addfinalizer()" helper which can only be called during the setup phase of a node. diff --git a/doc/en/announce/release-2.5.0.rst b/doc/en/announce/release-2.5.0.rst index b8f28d6fd..b04a825cd 100644 --- a/doc/en/announce/release-2.5.0.rst +++ b/doc/en/announce/release-2.5.0.rst @@ -70,7 +70,7 @@ holger krekel to problems for more than >966 non-function scoped parameters). - fix issue290 - there is preliminary support now for parametrizing - with repeated same values (sometimes useful to to test if calling + with repeated same values (sometimes useful to test if calling a second time works as with the first time). - close issue240 - document precisely how pytest module importing @@ -149,7 +149,7 @@ holger krekel would not work correctly because pytest assumes @pytest.mark.some gets a function to be decorated already. We now at least detect if this - arg is an lambda and thus the example will work. Thanks Alex Gaynor + arg is a lambda and thus the example will work. Thanks Alex Gaynor for bringing it up. 
- xfail a test on pypy that checks wrong encoding/ascii (pypy does diff --git a/doc/en/announce/release-2.5.2.rst b/doc/en/announce/release-2.5.2.rst index 9308ffdd6..d5cfca2db 100644 --- a/doc/en/announce/release-2.5.2.rst +++ b/doc/en/announce/release-2.5.2.rst @@ -60,5 +60,5 @@ holger krekel - fix issue429: comparing byte strings with non-ascii chars in assert expressions now work better. Thanks Floris Bruynooghe. -- make capfd/capsys.capture private, its unused and shouldnt be exposed +- make capfd/capsys.capture private, its unused and shouldn't be exposed diff --git a/doc/en/announce/release-2.6.3.rst b/doc/en/announce/release-2.6.3.rst index c1d0ad278..ee0d2692c 100644 --- a/doc/en/announce/release-2.6.3.rst +++ b/doc/en/announce/release-2.6.3.rst @@ -42,7 +42,7 @@ Changes 2.6.3 - fix conftest related fixture visibility issue: when running with a CWD outside of a test package pytest would get fixture discovery wrong. - Thanks to Wolfgang Schnerring for figuring out a reproducable example. + Thanks to Wolfgang Schnerring for figuring out a reproducible example. - Introduce pytest_enter_pdb hook (needed e.g. by pytest_timeout to cancel the timeout when interactively entering pdb). Thanks Wolfgang Schnerring. diff --git a/doc/en/announce/release-2.7.1.rst b/doc/en/announce/release-2.7.1.rst index cd37cad0c..fdc71eebb 100644 --- a/doc/en/announce/release-2.7.1.rst +++ b/doc/en/announce/release-2.7.1.rst @@ -32,7 +32,7 @@ The py.test Development Team explanations. Thanks Carl Meyer for the report and test case. - fix issue553: properly handling inspect.getsourcelines failures in - FixtureLookupError which would lead to to an internal error, + FixtureLookupError which would lead to an internal error, obfuscating the original problem. Thanks talljosh for initial diagnose/patch and Bruno Oliveira for final patch. diff --git a/doc/en/announce/release-2.9.2.rst b/doc/en/announce/release-2.9.2.rst index b71ae85dd..8f274cdf3 100644 --- a/doc/en/announce/release-2.9.2.rst +++ b/doc/en/announce/release-2.9.2.rst @@ -46,7 +46,7 @@ The py.test Development Team Thanks `@astraw38`_ for reporting the issue (`#1496`_) and `@tomviner`_ for PR the (`#1524`_). -* Fix win32 path issue when puttinging custom config file with absolute path +* Fix win32 path issue when putting custom config file with absolute path in ``pytest.main("-c your_absolute_path")``. * Fix maximum recursion depth detection when raised error class is not aware diff --git a/doc/en/announce/release-3.0.6.rst b/doc/en/announce/release-3.0.6.rst new file mode 100644 index 000000000..2988b9cb3 --- /dev/null +++ b/doc/en/announce/release-3.0.6.rst @@ -0,0 +1,33 @@ +pytest-3.0.6 +============ + +pytest 3.0.6 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. 
+ + +Thanks to all who contributed to this release, among them: + +* Andreas Pelme +* Bruno Oliveira +* Dmitry Malinovsky +* Eli Boyarski +* Jakub Wilk +* Jeff Widman +* Loïc Estève +* Luke Murphy +* Miro Hrončok +* Oscar Hellström +* Peter Heatwole +* Philippe Ombredanne +* Ronny Pfannschmidt +* Rutger Prins +* Stefan Scherfke + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.0.7.rst b/doc/en/announce/release-3.0.7.rst new file mode 100644 index 000000000..591557aa7 --- /dev/null +++ b/doc/en/announce/release-3.0.7.rst @@ -0,0 +1,33 @@ +pytest-3.0.7 +============ + +pytest 3.0.7 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Anthony Sottile +* Barney Gale +* Bruno Oliveira +* Florian Bruhin +* Floris Bruynooghe +* Ionel Cristian Mărieș +* Katerina Koukiou +* NODA, Kai +* Omer Hadari +* Patrick Hayes +* Ran Benita +* Ronny Pfannschmidt +* Victor Uriarte +* Vidar Tonaas Fauske +* Ville Skyttä +* fbjorn +* mbyt + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.1.0.rst b/doc/en/announce/release-3.1.0.rst new file mode 100644 index 000000000..99cc6bdbe --- /dev/null +++ b/doc/en/announce/release-3.1.0.rst @@ -0,0 +1,61 @@ +pytest-3.1.0 +======================================= + +The pytest team is proud to announce the 3.1.0 release! + +pytest is a mature Python testing tool with more than 1600 tests +against itself, passing on many different interpreters and platforms. + +This release contains bug fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + +http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Anthony Sottile +* Ben Lloyd +* Bruno Oliveira +* David Giese +* David Szotten +* Dmitri Pribysh +* Florian Bruhin +* Florian Schulze +* Floris Bruynooghe +* John Towler +* Jonas Obrist +* Katerina Koukiou +* Kodi Arfer +* Krzysztof Szularz +* Lev Maximov +* Loïc Estève +* Luke Murphy +* Manuel Krebber +* Matthew Duck +* Matthias Bussonnier +* Michael Howitz +* Michal Wajszczuk +* Paweł Adamczak +* Rafael Bertoldi +* Ravi Chandra +* Ronny Pfannschmidt +* Skylar Downes +* Thomas Kriechbaumer +* Vitaly Lashmanov +* Vlad Dragos +* Wheerd +* Xander Johnson +* mandeep +* reut + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/announce/release-3.1.1.rst b/doc/en/announce/release-3.1.1.rst new file mode 100644 index 000000000..370b8fd73 --- /dev/null +++ b/doc/en/announce/release-3.1.1.rst @@ -0,0 +1,23 @@ +pytest-3.1.1 +======================================= + +pytest 3.1.1 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Bruno Oliveira +* Florian Bruhin +* Floris Bruynooghe +* Jason R. Coombs
+* Ronny Pfannschmidt +* wanghui + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.1.2.rst b/doc/en/announce/release-3.1.2.rst new file mode 100644 index 000000000..60168a857 --- /dev/null +++ b/doc/en/announce/release-3.1.2.rst @@ -0,0 +1,23 @@ +pytest-3.1.2 +======================================= + +pytest 3.1.2 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Andreas Pelme +* ApaDoctor +* Bruno Oliveira +* Florian Bruhin +* Ronny Pfannschmidt +* Segev Finer + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.1.3.rst b/doc/en/announce/release-3.1.3.rst new file mode 100644 index 000000000..a55280626 --- /dev/null +++ b/doc/en/announce/release-3.1.3.rst @@ -0,0 +1,23 @@ +pytest-3.1.3 +======================================= + +pytest 3.1.3 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Antoine Legrand +* Bruno Oliveira +* Max Moroz +* Raphael Pierzina +* Ronny Pfannschmidt +* Ryan Fitzpatrick + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.2.0.rst b/doc/en/announce/release-3.2.0.rst new file mode 100644 index 000000000..4d2830edd --- /dev/null +++ b/doc/en/announce/release-3.2.0.rst @@ -0,0 +1,48 @@ +pytest-3.2.0 +======================================= + +The pytest team is proud to announce the 3.2.0 release! + +pytest is a mature Python testing tool with more than 1600 tests +against itself, passing on many different interpreters and platforms. + +This release contains a number of bug fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + + http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from pypi via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +* Alex Hartoto +* Andras Tim +* Bruno Oliveira +* Daniel Hahler +* Florian Bruhin +* Floris Bruynooghe +* John Still +* Jordan Moldow +* Kale Kundert +* Lawrence Mitchell +* Llandy Riveron Del Risco +* Maik Figura +* Martin Altmayer +* Mihai Capotă +* Nathaniel Waisbrot +* Nguyễn Hồng Quân +* Pauli Virtanen +* Raphael Pierzina +* Ronny Pfannschmidt +* Segev Finer +* V.Kuznetsov + + +Happy testing, +The Pytest Development Team diff --git a/doc/en/announce/release-3.2.1.rst b/doc/en/announce/release-3.2.1.rst new file mode 100644 index 000000000..899ffcd4b --- /dev/null +++ b/doc/en/announce/release-3.2.1.rst @@ -0,0 +1,22 @@ +pytest-3.2.1 +======================================= + +pytest 3.2.1 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html.
+ +Thanks to all who contributed to this release, among them: + +* Alex Gaynor +* Bruno Oliveira +* Florian Bruhin +* Ronny Pfannschmidt +* Srinivas Reddy Thatiparthy + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.2.2.rst b/doc/en/announce/release-3.2.2.rst new file mode 100644 index 000000000..599bf8727 --- /dev/null +++ b/doc/en/announce/release-3.2.2.rst @@ -0,0 +1,28 @@ +pytest-3.2.2 +======================================= + +pytest 3.2.2 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Andreas Pelme +* Antonio Hidalgo +* Bruno Oliveira +* Felipe Dau +* Fernando Macedo +* Jesús Espino +* Joan Massich +* Joe Talbott +* Kirill Pinchuk +* Ronny Pfannschmidt +* Xuan Luong + + +Happy testing, +The pytest Development Team diff --git a/doc/en/announce/release-3.2.3.rst b/doc/en/announce/release-3.2.3.rst new file mode 100644 index 000000000..589374974 --- /dev/null +++ b/doc/en/announce/release-3.2.3.rst @@ -0,0 +1,23 @@ +pytest-3.2.3 +======================================= + +pytest 3.2.3 has just been released to PyPI. + +This is a bug-fix release, being a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +* Bruno Oliveira +* Evan +* Joe Hamman +* Oliver Bestwalter +* Ronny Pfannschmidt +* Xuan Luong + + +Happy testing, +The pytest Development Team diff --git a/doc/en/assert.rst b/doc/en/assert.rst index f898391df..d9e044356 100644 --- a/doc/en/assert.rst +++ b/doc/en/assert.rst @@ -26,9 +26,9 @@ you will see the return value of the function call:: $ pytest test_assert1.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item test_assert1.py F @@ -119,9 +119,9 @@ exceptions your own code is deliberately raising, whereas using like documenting unfixed bugs (where the test describes what "should" happen) or bugs in dependencies. -If you want to test that a regular expression matches on the string -representation of an exception (like the ``TestCase.assertRaisesRegexp`` method -from ``unittest``) you can use the ``ExceptionInfo.match`` method:: +Also, the context manager form accepts a ``match`` keyword parameter to test +that a regular expression matches on the string representation of an exception +(like the ``TestCase.assertRaisesRegexp`` method from ``unittest``):: import pytest @@ -129,12 +129,11 @@ from ``unittest``) you can use the ``ExceptionInfo.match`` method:: raise ValueError("Exception 123 raised") def test_match(): - with pytest.raises(ValueError) as excinfo: + with pytest.raises(ValueError, match=r'.* 123 .*'): myfunc() - excinfo.match(r'.* 123 .*') The regexp parameter of the ``match`` method is matched with the ``re.search`` -function. So in the above example ``excinfo.match('123')`` would have worked as +function. So in the above example ``match='123'`` would have worked as well. 
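As a usage note for the ``match`` keyword documented above, a minimal sketch (the test name is illustrative): because the pattern is applied with ``re.search``, an unanchored substring is enough::

    # content of test_raises_match.py (hypothetical example)
    import pytest

    def test_substring_match():
        # re.search looks for the pattern anywhere in str(excinfo.value),
        # so '123' matches "Exception 123 raised" without anchors
        with pytest.raises(ValueError, match='123'):
            raise ValueError("Exception 123 raised")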
@@ -170,9 +169,9 @@ if you run this module:: $ pytest test_assert2.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item test_assert2.py F @@ -183,7 +182,7 @@ if you run this module:: set1 = set("1308") set2 = set("8035") > assert set1 == set2 - E assert {'0', '1', '3', '8'} == {'0', '3', '5', '8'} + E AssertionError: assert {'0', '1', '3', '8'} == {'0', '3', '5', '8'} E Extra items in the left set: E '1' E Extra items in the right set: @@ -210,8 +209,8 @@ the ``pytest_assertrepr_compare`` hook. .. autofunction:: _pytest.hookspec.pytest_assertrepr_compare :noindex: -As an example consider adding the following hook in a conftest.py which -provides an alternative explanation for ``Foo`` objects:: +As an example consider adding the following hook in a :ref:`conftest.py ` +file which provides an alternative explanation for ``Foo`` objects:: # content of conftest.py from test_foocompare import Foo @@ -223,7 +222,7 @@ provides an alternative explanation for ``Foo`` objects:: now, given this test module:: # content of test_foocompare.py - class Foo: + class Foo(object): def __init__(self, val): self.val = val @@ -262,50 +261,29 @@ Advanced assertion introspection .. versionadded:: 2.1 -Reporting details about a failing assertion is achieved either by rewriting -assert statements before they are run or re-evaluating the assert expression and -recording the intermediate values. Which technique is used depends on the -location of the assert, ``pytest`` configuration, and Python version being used -to run ``pytest``. - -By default, ``pytest`` rewrites assert statements in test modules. -Rewritten assert statements put introspection information into the assertion failure message. -``pytest`` only rewrites test modules directly discovered by its test collection process, so -asserts in supporting modules which are not themselves test modules will not be -rewritten. +Reporting details about a failing assertion is achieved by rewriting assert +statements before they are run. Rewritten assert statements put introspection +information into the assertion failure message. ``pytest`` only rewrites test +modules directly discovered by its test collection process, so asserts in +supporting modules which are not themselves test modules will not be rewritten. .. note:: - ``pytest`` rewrites test modules on import. It does this by using an import - hook to write a new pyc files. Most of the time this works transparently. + ``pytest`` rewrites test modules on import by using an import + hook to write new ``pyc`` files. Most of the time this works transparently. However, if you are messing with import yourself, the import hook may - interfere. If this is the case, simply use ``--assert=reinterp`` or - ``--assert=plain``. Additionally, rewriting will fail silently if it cannot - write new pycs, i.e. in a read-only filesystem or a zipfile. + interfere. -If an assert statement has not been rewritten or the Python version is less than -2.6, ``pytest`` falls back on assert reinterpretation. In assert -reinterpretation, ``pytest`` walks the frame of the function containing the -assert statement to discover sub-expression results of the failing assert -statement. You can force ``pytest`` to always use assertion reinterpretation by -passing the ``--assert=reinterp`` option. 
+ If this is the case you have two options: -Assert reinterpretation has a caveat not present with assert rewriting: If -evaluating the assert expression has side effects you may get a warning that the -intermediate values could not be determined safely. A common example of this -issue is an assertion which reads from a file:: + * Disable rewriting for a specific module by adding the string + ``PYTEST_DONT_REWRITE`` to its docstring. - assert f.read() != '...' + * Disable rewriting for all modules by using ``--assert=plain``. -If this assertion fails then the re-evaluation will probably succeed! -This is because ``f.read()`` will return an empty string when it is -called the second time during the re-evaluation. However, it is -easy to rewrite the assertion and avoid any trouble:: + Additionally, rewriting will fail silently if it cannot write new ``.pyc`` files, + i.e. in a read-only filesystem or a zipfile. - content = f.read() - assert content != '...' - -All assert introspection can be turned off by passing ``--assert=plain``. For further information, Benjamin Peterson wrote up `Behind the scenes of pytest's new assertion rewriting `_. @@ -317,4 +295,5 @@ For further information, Benjamin Peterson wrote up `Behind the scenes of pytest ``--nomagic``. .. versionchanged:: 3.0 - Removes the ``--no-assert`` and``--nomagic`` options. + Removes the ``--no-assert`` and ``--nomagic`` options. + Removes the ``--assert=reinterp`` option. diff --git a/doc/en/builtin.rst b/doc/en/builtin.rst index 26dbd44cb..b59399a79 100644 --- a/doc/en/builtin.rst +++ b/doc/en/builtin.rst @@ -38,7 +38,7 @@ Examples at :ref:`assertraises`. Comparing floating point numbers -------------------------------- -.. autoclass:: approx +.. autofunction:: approx Raising a specific test outcome -------------------------------------- @@ -47,11 +47,11 @@ You can use the following functions in your test, fixture or setup functions to force a certain test outcome. Note that most often you can rather use declarative marks, see :ref:`skipping`. -.. autofunction:: _pytest.runner.fail -.. autofunction:: _pytest.runner.skip -.. autofunction:: _pytest.runner.importorskip -.. autofunction:: _pytest.skipping.xfail -.. autofunction:: _pytest.runner.exit +.. autofunction:: _pytest.outcomes.fail +.. autofunction:: _pytest.outcomes.skip +.. autofunction:: _pytest.outcomes.importorskip +.. autofunction:: _pytest.outcomes.xfail +.. autofunction:: _pytest.outcomes.exit Fixtures and requests ----------------------------------------------------- @@ -108,14 +108,14 @@ You can ask for available builtin or project-custom The returned ``monkeypatch`` fixture provides these helper methods to modify objects, dictionaries or os.environ:: - monkeypatch.setattr(obj, name, value, raising=True) - monkeypatch.delattr(obj, name, raising=True) - monkeypatch.setitem(mapping, name, value) - monkeypatch.delitem(obj, name, raising=True) - monkeypatch.setenv(name, value, prepend=False) - monkeypatch.delenv(name, value, raising=True) - monkeypatch.syspath_prepend(path) - monkeypatch.chdir(path) + monkeypatch.setattr(obj, name, value, raising=True) + monkeypatch.delattr(obj, name, raising=True) + monkeypatch.setitem(mapping, name, value) + monkeypatch.delitem(obj, name, raising=True) + monkeypatch.setenv(name, value, prepend=False) + monkeypatch.delenv(name, value, raising=True) + monkeypatch.syspath_prepend(path) + monkeypatch.chdir(path) All modifications will be undone after the requesting test function or fixture has finished. 
The ``raising`` diff --git a/doc/en/cache.rst b/doc/en/cache.rst index dc1f91286..d5d6b653b 100644 --- a/doc/en/cache.rst +++ b/doc/en/cache.rst @@ -1,16 +1,12 @@ +.. _`cache_provider`: +.. _cache: + + Cache: working with cross-testrun state ======================================= .. versionadded:: 2.8 -.. warning:: - - The functionality of this core plugin was previously distributed - as a third party plugin named ``pytest-cache``. The core plugin - is compatible regarding command line options and API usage except that you - can only store/receive data between test runs that is json-serializable. - - Usage --------- @@ -80,10 +76,10 @@ If you then run it with ``--lf``:: $ pytest --lf ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - run-last-failure: rerun last 2 failures - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items + run-last-failure: rerun previous 2 failures test_50.py FF @@ -122,10 +118,10 @@ of ``FF`` and dots):: $ pytest --ff ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - run-last-failure: rerun last 2 failures first - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 50 items + run-last-failure: rerun previous 2 failures first test_50.py FF................................................ @@ -227,14 +223,14 @@ You can always peek at the content of the cache using the $ py.test --cache-show ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: cachedir: $REGENDOC_TMPDIR/.cache ------------------------------- cache values ------------------------------- - example/value contains: - 42 cache/lastfailed contains: {'test_caching.py::test_function': True} + example/value contains: + 42 ======= no tests ran in 0.12 seconds ======== @@ -246,7 +242,7 @@ by adding the ``--cache-clear`` option like this:: pytest --cache-clear -This is recommended for invocations from Continous Integration +This is recommended for invocations from Continuous Integration servers where isolation and correctness is more important than speed. 
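As a minimal sketch of the cross-testrun store described above (the key name is made up; any json-serializable value can be stored)::

    # content of test_cache_sketch.py (hypothetical example)
    def test_expensive_value(request):
        cache = request.config.cache
        value = cache.get("example/value", None)
        if value is None:
            value = 42  # stand-in for an expensive computation
            cache.set("example/value", value)
        assert value == 42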
diff --git a/doc/en/capture.rst b/doc/en/capture.rst index 7ee73b992..58ebdf840 100644 --- a/doc/en/capture.rst +++ b/doc/en/capture.rst @@ -64,8 +64,8 @@ of the failing function and hide the other one:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py .F diff --git a/doc/en/conf.py b/doc/en/conf.py index f3b8d7d1e..40f1e4165 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -303,7 +303,7 @@ texinfo_documents = [ ('Holger Krekel@*Benjamin Peterson@*Ronny Pfannschmidt@*' 'Floris Bruynooghe@*others'), 'pytest', - 'simple powerful testing with Pytho', + 'simple powerful testing with Python', 'Programming', 1), ] diff --git a/doc/en/contact.rst b/doc/en/contact.rst index d4a1a03de..83d496640 100644 --- a/doc/en/contact.rst +++ b/doc/en/contact.rst @@ -19,9 +19,9 @@ Contact channels - `pytest-commit at python.org (mailing list)`_: for commits and new issues - :doc:`contribution guide ` for help on submitting pull - requests to bitbucket (including using git via gitifyhg). + requests to GitHub. -- #pylib on irc.freenode.net IRC channel for random questions. +- ``#pylib`` on irc.freenode.net IRC channel for random questions. - private mail to Holger.Krekel at gmail com if you want to communicate sensitive issues @@ -46,6 +46,5 @@ Contact channels .. _`py-dev`: .. _`development mailing list`: .. _`pytest-dev at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-dev -.. _`py-svn`: .. _`pytest-commit at python.org (mailing list)`: http://mail.python.org/mailman/listinfo/pytest-commit diff --git a/doc/en/contents.rst b/doc/en/contents.rst index d7f900810..6b9eed010 100644 --- a/doc/en/contents.rst +++ b/doc/en/contents.rst @@ -12,13 +12,14 @@ Full pytest documentation getting-started usage + existingtestsuite assert builtin fixture monkeypatch tmpdir capture - recwarn + warnings doctest mark skipping @@ -30,14 +31,17 @@ Full pytest documentation plugins writing_plugins - example/index goodpractices + pythonpath customize + example/index bash-completion backwards-compatibility + historical-notes license contributing + development_guide talks projects faq diff --git a/doc/en/customize.rst b/doc/en/customize.rst index d12a49037..21deb582e 100644 --- a/doc/en/customize.rst +++ b/doc/en/customize.rst @@ -1,5 +1,5 @@ -Basic test configuration -=================================== +Configuration +============= Command line options and configuration file settings ----------------------------------------------------------------- @@ -15,17 +15,31 @@ which were registered by installed plugins. .. _rootdir: .. _inifiles: -initialization: determining rootdir and inifile +Initialization: determining rootdir and inifile ----------------------------------------------- .. versionadded:: 2.7 -pytest determines a "rootdir" for each test run which depends on +pytest determines a ``rootdir`` for each test run which depends on the command line arguments (specified test files, paths) and on -the existence of inifiles. The determined rootdir and ini-file are -printed as part of the pytest header. The rootdir is used for constructing -"nodeids" during collection and may also be used by plugins to store -project/testrun-specific information. +the existence of *ini-files*. 
The determined ``rootdir`` and *ini-file* are +printed as part of the pytest header during startup. + +Here's a summary of what ``pytest`` uses ``rootdir`` for: + +* Construct *nodeids* during collection; each test is assigned + a unique *nodeid* which is rooted at the ``rootdir`` and takes into account the full path, + class name, function name and parametrization (if any). + +* Provide a stable location for plugins to store project/test run specific information; + for example, the internal :ref:`cache <cache>` plugin creates a ``.cache`` subdirectory + in ``rootdir`` to store its cross-test run state. + +It is important to emphasize that ``rootdir`` is **NOT** used to modify ``sys.path``/``PYTHONPATH`` or +influence how modules are imported. See :ref:`pythonpath` for more details. + +Finding the ``rootdir`` +~~~~~~~~~~~~~~~~~~~~~~~ Here is the algorithm which finds the rootdir from ``args``: @@ -45,11 +59,11 @@ Here is the algorithm which finds the rootdir from ``args``: matched, it becomes the ini-file and its directory becomes the rootdir. - if no ini-file was found, use the already determined common ancestor as root - directory. This allows to work with pytest in structures that are not part of + directory. This allows the use of pytest in structures that are not part of a package and don't have any particular ini-file configuration. If no ``args`` are given, pytest collects tests below the current working -directory and also starts determining the rootdir from there. +directory and also starts determining the rootdir from there. :warning: custom pytest plugin commandline arguments may include a path, as in ``pytest --log-output ../../test.log args``. Then ``args`` is mandatory, @@ -97,6 +111,8 @@ check for ini-files as follows:: .. _`how to change command line options defaults`: .. _`adding default options`: + + How to change command line options defaults ------------------------------------------------ @@ -110,15 +126,27 @@ progress output, you can write it into a configuration file: # content of pytest.ini # (or tox.ini or setup.cfg) [pytest] - addopts = -rsxX -q + addopts = -ra -q -Alternatively, you can set a PYTEST_ADDOPTS environment variable to add command +Alternatively, you can set a ``PYTEST_ADDOPTS`` environment variable to add command line options while the environment is in use:: - export PYTEST_ADDOPTS="-rsxX -q" + export PYTEST_ADDOPTS="-v" -From now on, running ``pytest`` will add the specified options. +Here's how the command-line is built in the presence of ``addopts`` or the environment variable:: + + <pytest.ini:addopts> $PYTEST_ADDOPTS <extra command-line arguments> + +So if the user executes in the command-line:: + + pytest -m slow + +The actual command line executed is:: + + pytest -ra -q -v -m slow + +Note that, as is usual for command-line applications, in case of conflicting options the last one wins, so the example +above will show verbose output because ``-v`` overwrites ``-q``. Builtin configuration file options @@ -158,7 +186,7 @@ Builtin configuration file options [seq] matches any character in seq [!seq] matches any char not in seq - Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg'``. + Default patterns are ``'.*', 'build', 'dist', 'CVS', '_darcs', '{arch}', '*.egg', 'venv'``. Setting a ``norecursedirs`` replaces the default.
Here is an example of how to avoid certain directories: @@ -169,7 +197,16 @@ Builtin configuration file options norecursedirs = .svn _build tmp* This would tell ``pytest`` to not look into typical subversion or - sphinx-build directories or into any ``tmp`` prefixed directory. + sphinx-build directories or into any ``tmp`` prefixed directory. + + Additionally, ``pytest`` will attempt to intelligently identify and ignore a + virtualenv by the presence of an activation script. Any directory deemed to + be the root of a virtual environment will not be considered during test + collection unless ``‑‑collect‑in‑virtualenv`` is given. Note also that + ``norecursedirs`` takes precedence over ``‑‑collect‑in‑virtualenv``; e.g. if + you intend to run tests in a virtualenv with a base directory that matches + ``'.*'`` you *must* override ``norecursedirs`` in addition to using the + ``‑‑collect‑in‑virtualenv`` flag. .. confval:: testpaths @@ -193,13 +230,16 @@ Builtin configuration file options .. confval:: python_files One or more Glob-style file patterns determining which python files - are considered as test modules. + are considered as test modules. By default, pytest will consider + any file matching the ``test_*.py`` and ``*_test.py`` globs as a test + module. .. confval:: python_classes One or more name prefixes or glob-style patterns determining which classes - are considered for test collection. Here is an example of how to collect - tests from classes that end in ``Suite``: + are considered for test collection. By default, pytest will consider any + class prefixed with ``Test`` for test collection. Here is an example of how + to collect tests from classes that end in ``Suite``: .. code-block:: ini @@ -214,7 +254,8 @@ Builtin configuration file options .. confval:: python_functions One or more name prefixes or glob-patterns determining which test functions - and methods are considered tests. Here is an example of how + and methods are considered tests. By default, pytest will consider any + function prefixed with ``test`` as a test. Here is an example of how to collect test functions and methods that end in ``_test``: .. code-block:: ini @@ -240,3 +281,34 @@ Builtin configuration file options By default, pytest will stop searching for ``conftest.py`` files upwards from ``pytest.ini``/``tox.ini``/``setup.cfg`` of the project if any, or up to the file-system root. + + +.. confval:: filterwarnings + + .. versionadded:: 3.1 + + Sets a list of filters and actions that should be taken for matched + warnings. By default, all warnings emitted during the test session + will be displayed in a summary at the end of the test session. + + .. code-block:: ini + + # content of pytest.ini + [pytest] + filterwarnings = + error + ignore::DeprecationWarning + + This tells pytest to ignore deprecation warnings and turn all other warnings + into errors. For more information please refer to :ref:`warnings`. + +.. confval:: cache_dir + + .. versionadded:: 3.2 + + Sets the directory where the content of the cache plugin is stored. The default + directory is ``.cache``, which is created in the :ref:`rootdir <rootdir>`. The directory may be + a relative or an absolute path. If a relative path is given, the directory is created + relative to the :ref:`rootdir <rootdir>`. Additionally, the path may contain environment + variables, which will be expanded. For more information about the cache plugin + please refer to :ref:`cache_provider`.
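For example, a hypothetical configuration relocating the cache through an environment variable (assuming ``$TMPDIR`` is set; the path is made up) might look like this:

.. code-block:: ini

    # content of pytest.ini (hypothetical example)
    [pytest]
    cache_dir = $TMPDIR/pytest_cache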
diff --git a/doc/en/development_guide.rst b/doc/en/development_guide.rst new file mode 100644 index 000000000..465e97de0 --- /dev/null +++ b/doc/en/development_guide.rst @@ -0,0 +1,108 @@ +================= +Development Guide +================= + +Some general guidelines regarding development in pytest for core maintainers and general contributors. Nothing here +is set in stone; feel free to suggest improvements or changes to the workflow. + + +Code Style +---------- + +* `PEP-8 <https://www.python.org/dev/peps/pep-0008/>`_ +* `flake8 <https://flake8.readthedocs.io/>`_ for quality checks +* `invoke <http://www.pyinvoke.org/>`_ to automate development tasks + + +Branches +-------- + +We have two long term branches: + +* ``master``: contains the code for the next bugfix release. +* ``features``: contains the code with new features for the next minor release. + +The official repository usually does not contain topic branches; developers and contributors should create topic +branches in their own forks. + +Exceptions can be made for cases where more than one contributor is working on the same +topic or where it makes sense to use some automatic capability of the main repository, such as automatic docs from +`readthedocs <https://readthedocs.org>`_ for a branch dealing with documentation refactoring. + +Issues +------ + +Any question, feature, bug or proposal is welcome as an issue. Users are encouraged to open issues whenever they need to. + +GitHub issues should use labels to categorize them. Labels should be created sporadically, to fill a niche; we should +avoid creating labels just for the sake of creating them. + +Here is a list of labels and a brief description mentioning their intent. + + +**Type** + +* ``type: backward compatibility``: issue that will cause problems with old pytest versions. +* ``type: bug``: problem that needs to be addressed. +* ``type: deprecation``: feature that will be deprecated in the future. +* ``type: docs``: documentation missing or needing clarification. +* ``type: enhancement``: new feature or API change, should be merged into ``features``. +* ``type: feature-branch``: new feature or API change, should be merged into ``features``. +* ``type: infrastructure``: improvement to development/releases/CI structure. +* ``type: performance``: performance or memory problem/improvement. +* ``type: proposal``: proposal for a new feature, often to gather opinions or design the API around the new feature. +* ``type: question``: question regarding usage, installation, internals or how to test something. +* ``type: refactoring``: internal improvements to the code. +* ``type: regression``: indicates a problem that was introduced in a release which was working previously. + +**Status** + +* ``status: critical``: grave problem or usability issue that affects lots of users. +* ``status: easy``: easy issue that is friendly to new contributors. +* ``status: help wanted``: core developers need help from experts on this topic. +* ``status: needs information``: reporter needs to provide more information; can be closed after 2 or more weeks of inactivity. +
+**Topic** + +* ``topic: collection`` +* ``topic: fixtures`` +* ``topic: parametrize`` +* ``topic: reporting`` +* ``topic: selection`` +* ``topic: tracebacks`` + +**Plugin (internal or external)** + +* ``plugin: cache`` +* ``plugin: capture`` +* ``plugin: doctests`` +* ``plugin: junitxml`` +* ``plugin: monkeypatch`` +* ``plugin: nose`` +* ``plugin: pastebin`` +* ``plugin: pytester`` +* ``plugin: tmpdir`` +* ``plugin: unittest`` +* ``plugin: warnings`` +* ``plugin: xdist`` + + +**OS** + +Issues specific to a single operating system.
Do not use these labels to indicate where an issue originated; use them only +for problems that happen exclusively on that system. + +* ``os: linux`` +* ``os: mac`` +* ``os: windows`` + +**Temporary** + +Used to classify issues for a limited time, for example to help find issues related to an event such as a sprint. +They should be removed once they are no longer relevant. + +* ``temporary: EP2017 sprint`` +* ``temporary: sprint-candidate`` + + +.. include:: ../../HOWTORELEASE.rst diff --git a/doc/en/doctest.rst b/doc/en/doctest.rst index 513d9aed9..f5800fec2 100644 --- a/doc/en/doctest.rst +++ b/doc/en/doctest.rst @@ -11,6 +11,19 @@ can change the pattern by issuing:: on the command line. Since version ``2.9``, ``--doctest-glob`` can be given multiple times in the command-line. +.. versionadded:: 3.1 + + You can specify the encoding that will be used for those doctest files + using the ``doctest_encoding`` ini option: + + .. code-block:: ini + + # content of pytest.ini + [pytest] + doctest_encoding = latin1 + + The default encoding is UTF-8. + You can also trigger running of doctests from docstrings in all python modules (including regular python test modules):: @@ -49,9 +62,9 @@ then you can just invoke ``pytest`` without command line options:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini - collected 1 items + collected 1 item mymodule.py . diff --git a/doc/en/example/assertion/failure_demo.py b/doc/en/example/assertion/failure_demo.py index a4ff758b1..d31fba2ad 100644 --- a/doc/en/example/assertion/failure_demo.py +++ b/doc/en/example/assertion/failure_demo.py @@ -128,7 +128,7 @@ def test_attribute_multiple(): def globf(x): return x+1 -class TestRaises: +class TestRaises(object): def test_raises(self): s = 'qwe' raises(TypeError, "int(s)") @@ -167,7 +167,7 @@ def test_dynamic_compile_shows_nicely(): -class TestMoreErrors: +class TestMoreErrors(object): def test_complex_error(self): def f(): return 44 @@ -213,23 +213,23 @@ class TestMoreErrors: x = 0 -class TestCustomAssertMsg: +class TestCustomAssertMsg(object): def test_single_line(self): - class A: + class A(object): a = 1 b = 2 assert A.a == b, "A.a appears not to be b" def test_multiline(self): - class A: + class A(object): a = 1 b = 2 assert A.a == b, "A.a appears not to be b\n" \ "or does not appear to be b\none of those" def test_custom_repr(self): - class JSON: + class JSON(object): a = 1 def __repr__(self): return "This is JSON\n{\n 'foo': 'bar'\n}" diff --git a/doc/en/example/assertion/test_setup_flow_example.py b/doc/en/example/assertion/test_setup_flow_example.py index 512330cb4..100effa49 100644 --- a/doc/en/example/assertion/test_setup_flow_example.py +++ b/doc/en/example/assertion/test_setup_flow_example.py @@ -1,7 +1,7 @@ def setup_module(module): module.TestStateFullThing.classcount = 0 -class TestStateFullThing: +class TestStateFullThing(object): def setup_class(cls): cls.classcount += 1 diff --git a/doc/en/example/attic.rst b/doc/en/example/attic.rst index 1bc32b283..9e124a5d0 100644 --- a/doc/en/example/attic.rst +++ b/doc/en/example/attic.rst @@ -15,9 +15,9 @@ example: specifying and selecting acceptance tests def pytest_funcarg__accept(request): return AcceptFixture(request) - class AcceptFixture: + class AcceptFixture(object): def __init__(self, request): - if not request.config.option.acceptance: + if not request.config.getoption('acceptance'):
pytest.skip("specify -A to run acceptance tests") self.tmpdir = request.config.mktemp(request.function.__name__, numbered=True) @@ -61,7 +61,7 @@ extend the `accept example`_ by putting this in our test module: arg.tmpdir.mkdir("special") return arg - class TestSpecialAcceptance: + class TestSpecialAcceptance(object): def test_sometest(self, accept): assert accept.tmpdir.join("special").check() diff --git a/doc/en/example/costlysetup/conftest.py b/doc/en/example/costlysetup/conftest.py index c8b9a257e..ea3c1cffb 100644 --- a/doc/en/example/costlysetup/conftest.py +++ b/doc/en/example/costlysetup/conftest.py @@ -7,7 +7,7 @@ def setup(request): yield setup setup.finalize() -class CostlySetup: +class CostlySetup(object): def __init__(self): import time print ("performing costly setup") diff --git a/doc/en/example/index.rst b/doc/en/example/index.rst index 363de5ab7..f63cb822a 100644 --- a/doc/en/example/index.rst +++ b/doc/en/example/index.rst @@ -1,8 +1,8 @@ .. _examples: -Usages and Examples -=========================================== +Examples and customization tricks +================================= Here is a (growing) list of examples. :ref:`Contact ` us if you need more examples or have questions. Also take a look at the diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst index 7e48e6f2e..e3082f279 100644 --- a/doc/en/example/markers.rst +++ b/doc/en/example/markers.rst @@ -21,7 +21,7 @@ You can "mark" a test function with custom metadata like this:: pass def test_another(): pass - class TestClass: + class TestClass(object): def test_method(self): pass @@ -31,9 +31,9 @@ You can then restrict a test run to only run tests marked with ``webtest``:: $ pytest -v -m webtest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_send_http PASSED @@ -45,9 +45,9 @@ Or the inverse, running all tests except the webtest ones:: $ pytest -v -m "not webtest" ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_something_quick PASSED @@ -66,10 +66,10 @@ tests based on their module, class, method, or function name:: $ pytest -v test_server.py::TestClass::test_method ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 5 items + rootdir: $REGENDOC_TMPDIR, inifile: + collecting ... 
collected 1 item test_server.py::TestClass::test_method PASSED @@ -79,10 +79,10 @@ You can also select on the class:: $ pytest -v test_server.py::TestClass ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 4 items + rootdir: $REGENDOC_TMPDIR, inifile: + collecting ... collected 1 item test_server.py::TestClass::test_method PASSED @@ -92,10 +92,10 @@ Or select multiple nodes:: $ pytest -v test_server.py::TestClass test_server.py::test_send_http ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: - collecting ... collected 8 items + rootdir: $REGENDOC_TMPDIR, inifile: + collecting ... collected 2 items test_server.py::TestClass::test_method PASSED test_server.py::test_send_http PASSED @@ -130,9 +130,9 @@ select tests based on their names:: $ pytest -v -k http # running with the above defined example module ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_send_http PASSED @@ -144,9 +144,9 @@ And you can also run all tests except the ones that match the keyword:: $ pytest -k "not send_http" -v ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_something_quick PASSED @@ -160,9 +160,9 @@ Or to select "http" and "quick" tests:: $ pytest -k "http or quick" -v ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 4 items test_server.py::test_send_http PASSED @@ -173,14 +173,18 @@ Or to select "http" and "quick" tests:: .. note:: - If you are using expressions such as "X and Y" then both X and Y - need to be simple non-keyword names. For example, "pass" or "from" - will result in SyntaxErrors because "-k" evaluates the expression. + If you are using expressions such as ``"X and Y"`` then both ``X`` and ``Y`` + need to be simple non-keyword names. For example, ``"pass"`` or ``"from"`` + will result in SyntaxErrors because ``"-k"`` evaluates the expression using + Python's `eval`_ function. - However, if the "-k" argument is a simple string, no such restrictions - apply. Also "-k 'not STRING'" has no restrictions. 
You can also - specify numbers like "-k 1.3" to match tests which are parametrized - with the float "1.3". +.. _`eval`: https://docs.python.org/3.6/library/functions.html#eval + + + However, if the ``"-k"`` argument is a simple string, no such restrictions + apply. Also ``"-k 'not STRING'"`` has no restrictions. You can also + specify numbers like ``"-k 1.3"`` to match tests which are parametrized + with the float ``"1.3"``. Registering markers ------------------------------------- @@ -205,7 +209,7 @@ You can ask which markers exist for your test suite - the list includes our just @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. @@ -223,13 +227,12 @@ For an example on how to add and work with markers from a plugin, see It is recommended to explicitly register markers so that: - * there is one place in your test suite defining your markers + * There is one place in your test suite defining your markers - * asking for existing markers via ``pytest --markers`` gives good output + * Asking for existing markers via ``pytest --markers`` gives good output - * typos in function markers are treated as an error if you use - the ``--strict`` option. Future versions of ``pytest`` are probably - going to start treating non-registered markers as errors at some point. + * Typos in function markers are treated as an error if you use + the ``--strict`` option. .. 
_`scoped-marking`: @@ -242,7 +245,7 @@ its test methods:: # content of test_mark_classlevel.py import pytest @pytest.mark.webtest - class TestClass: + class TestClass(object): def test_startup(self): pass def test_startup_and_more(self): @@ -256,14 +259,14 @@ To remain backward-compatible with Python 2.4 you can also set a import pytest - class TestClass: + class TestClass(object): pytestmark = pytest.mark.webtest or if you need to use multiple markers you can use a list:: import pytest - class TestClass: + class TestClass(object): pytestmark = [pytest.mark.webtest, pytest.mark.slowtest] You can also set a module level marker:: @@ -352,9 +355,9 @@ the test needs:: $ pytest -E stage2 ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item test_someenv.py s @@ -364,9 +367,9 @@ and here is one that specifies exactly the environment needed:: $ pytest -E stage1 ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item test_someenv.py . @@ -381,7 +384,7 @@ The ``--markers`` option always gives you a list of available markers:: @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html + @pytest.mark.xfail(condition, reason=None, run=True, raises=None, strict=False): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. @@ -392,6 +395,49 @@ The ``--markers`` option always gives you a list of available markers:: @pytest.mark.trylast: mark a hook implementation function such that the plugin machinery will try to call it last/as late as possible. +.. 
_`passing callables to custom markers`: + +Passing a callable to custom markers +-------------------------------------------- + +.. regendoc:wipe + +Below is the config file that will be used in the next examples:: + + # content of conftest.py + import sys + + def pytest_runtest_setup(item): + marker = item.get_marker('my_marker') + if marker is not None: + for info in marker: + print('Marker info name={} args={} kwargs={}'.format(info.name, info.args, info.kwargs)) + sys.stdout.flush() + +A custom marker can have its argument set, i.e. ``args`` and ``kwargs`` properties, defined by either invoking it as a callable or using ``pytest.mark.MARKER_NAME.with_args``. These two methods achieve the same effect most of the time. + +However, if there is a callable as the single positional argument with no keyword arguments, using ``pytest.mark.MARKER_NAME(c)`` will not pass ``c`` as a positional argument but will decorate ``c`` with the custom marker (see :ref:`MarkDecorator <mark>`). Fortunately, ``pytest.mark.MARKER_NAME.with_args`` comes to the rescue:: + + # content of test_custom_marker.py + import pytest + + def hello_world(*args, **kwargs): + return 'Hello World' + + @pytest.mark.my_marker.with_args(hello_world) + def test_with_args(): + pass + +The output is as follows:: + + $ pytest -q -s + Marker info name=my_marker args=(<function hello_world at 0xdeadbeef>,) kwargs={} + . + 1 passed in 0.12 seconds + +We can see that the custom marker has its argument set extended with the function ``hello_world``. This is the key difference between creating a custom marker as a callable, which invokes ``__call__`` behind the scenes, and using ``with_args``. + + Reading markers which were set from multiple places ---------------------------------------------------- @@ -407,7 +453,7 @@ code you can read over all such settings. Example:: pytestmark = pytest.mark.glob("module", x=1) @pytest.mark.glob("class", x=2) - class TestClass: + class TestClass(object): @pytest.mark.glob("function", x=3) def test_something(self): pass @@ -450,7 +496,7 @@ for your particular platform, you could use the following plugin:: import sys import pytest - ALL = set("darwin linux2 win32".split()) + ALL = set("darwin linux win32".split()) def pytest_runtest_setup(item): if isinstance(item, item.Function): @@ -470,7 +516,7 @@ Let's do a little test file to show how this looks like:: def test_if_apple_is_evil(): pass - @pytest.mark.linux2 + @pytest.mark.linux def test_if_linux_works(): pass @@ -481,32 +527,32 @@ Let's do a little test file to show how this looks like:: def test_runs_everywhere(): pass -then you will see two test skipped and two executed tests as expected:: +then you will see two tests skipped and two executed tests as expected:: $ pytest -rs # this option reports skip reasons ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_plat.py sss. + test_plat.py s.s.
======= short test summary info ======== - SKIP [3] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux + SKIP [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux - ======= 1 passed, 3 skipped in 0.12 seconds ======== + ======= 2 passed, 2 skipped in 0.12 seconds ======== Note that if you specify a platform via the marker-command line option like this:: - $ pytest -m linux2 + $ pytest -m linux ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items - test_plat.py s + test_plat.py . ======= 3 tests deselected ======== - ======= 1 skipped, 3 deselected in 0.12 seconds ======== + ======= 1 passed, 3 deselected in 0.12 seconds ======== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -551,8 +597,8 @@ We can now use the ``-m option`` to select one set:: $ pytest -m interface --tb=short ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_module.py FF @@ -573,8 +619,8 @@ or to select both "event" and "interface" tests:: $ pytest -m "interface or event" --tb=short ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_module.py FFF diff --git a/doc/en/example/multipython.py b/doc/en/example/multipython.py index 1f5e976ef..586f44184 100644 --- a/doc/en/example/multipython.py +++ b/doc/en/example/multipython.py @@ -16,7 +16,7 @@ def python1(request, tmpdir): def python2(request, python1): return Python(request.param, python1.picklefile) -class Python: +class Python(object): def __init__(self, version, picklefile): self.pythonpath = py.path.local.sysfind(version) if not self.pythonpath: diff --git a/doc/en/example/nonpython.rst b/doc/en/example/nonpython.rst index 817e5693f..5784f6ed6 100644 --- a/doc/en/example/nonpython.rst +++ b/doc/en/example/nonpython.rst @@ -27,8 +27,8 @@ now execute the test specification:: nonpython $ pytest test_simple.yml ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR/nonpython, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items test_simple.yml F. @@ -59,9 +59,9 @@ consulted when reporting in ``verbose`` mode:: nonpython $ pytest -v ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR/nonpython, inifile: + rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collecting ... 
collected 2 items test_simple.yml::hello FAILED @@ -81,8 +81,8 @@ interesting to just look at the collection tree:: nonpython $ pytest --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR/nonpython, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR/nonpython, inifile: collected 2 items diff --git a/doc/en/example/parametrize.rst b/doc/en/example/parametrize.rst index dac070c84..1a8de235a 100644 --- a/doc/en/example/parametrize.rst +++ b/doc/en/example/parametrize.rst @@ -36,7 +36,7 @@ Now we add a test configuration like this:: def pytest_generate_tests(metafunc): if 'param1' in metafunc.fixturenames: - if metafunc.config.option.all: + if metafunc.config.getoption('all'): end = 5 else: end = 2 @@ -116,6 +116,15 @@ the argument name:: diff = a - b assert diff == expected + @pytest.mark.parametrize("a,b,expected", [ + pytest.param(datetime(2001, 12, 12), datetime(2001, 12, 11), + timedelta(1), id='forward'), + pytest.param(datetime(2001, 12, 11), datetime(2001, 12, 12), + timedelta(-1), id='backward'), + ]) + def test_timedistance_v3(a, b, expected): + diff = a - b + assert diff == expected In ``test_timedistance_v0``, we let pytest generate the test IDs. @@ -130,9 +139,9 @@ objects, they are still using the default pytest representation:: $ pytest test_time.py --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 6 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 8 items @@ -140,9 +149,14 @@ objects, they are still using the default pytest representation:: + + ======= no tests ran in 0.12 seconds ======== +In ``test_timedistance_v3``, we used ``pytest.param`` to specify the test IDs +together with the actual data, instead of listing them separately. + A quick port of "testscenarios" ------------------------------------ @@ -168,7 +182,7 @@ only have to work a bit to construct the correct arguments for pytest's scenario1 = ('basic', {'attribute': 'value'}) scenario2 = ('advanced', {'attribute': 'value2'}) - class TestSampleWithScenarios: + class TestSampleWithScenarios(object): scenarios = [scenario1, scenario2] def test_demo1(self, attribute): @@ -181,8 +195,8 @@ this is a fully self-contained example which you can run with:: $ pytest test_scenarios.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_scenarios.py .... 
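The ``pytest_generate_tests`` glue that turns the ``scenarios`` lists into parametrized calls is not shown in this excerpt; a sketch of what it could look like, assuming every test class defines a ``scenarios`` list of ``(id, funcargs)`` tuples sharing the same funcarg names, is::

    # content of conftest.py (sketch)
    def pytest_generate_tests(metafunc):
        idlist = []
        argvalues = []
        for scenario in metafunc.cls.scenarios:
            idlist.append(scenario[0])
            items = scenario[1].items()
            # all scenarios are assumed to provide identical funcarg names
            argnames = [x[0] for x in items]
            argvalues.append([x[1] for x in items])
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")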
@@ -194,8 +208,8 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia $ pytest --collect-only test_scenarios.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items @@ -241,9 +255,9 @@ creates a database object for the actual test invocations:: if 'db' in metafunc.fixturenames: metafunc.parametrize("db", ['d1', 'd2'], indirect=True) - class DB1: + class DB1(object): "one database object" - class DB2: + class DB2(object): "alternative database object" @pytest.fixture @@ -259,8 +273,8 @@ Let's first see how it looks like at collection time:: $ pytest test_backends.py --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items @@ -320,9 +334,9 @@ The result of this test will be successful:: $ pytest test_indirect_list.py --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item @@ -336,7 +350,7 @@ Parametrizing test methods through per-class configuration .. _`unittest parametrizer`: https://github.com/testing-cabal/unittest-ext/blob/master/params.py -Here is an example ``pytest_generate_function`` function implementing a +Here is an example ``pytest_generate_tests`` function implementing a parametrization scheme similar to Michael Foord's `unittest parametrizer`_ but in a lot less code:: @@ -350,7 +364,7 @@ parametrizer`_ but in a lot less code:: metafunc.parametrize(argnames, [[funcargs[name] for name in argnames] for funcargs in funcarglist]) - class TestClass: + class TestClass(object): # a map specifying multiple argument sets for a test method params = { 'test_equals': [dict(a=1, b=2), dict(a=3, b=3), ], @@ -399,7 +413,7 @@ Running it results in some skips if we don't have all the python interpreters in . $ pytest -rs -q multipython.py sssssssssssssss.........sss.........sss......... ======= short test summary info ======== - SKIP [21] $REGENDOC_TMPDIR/CWD/multipython.py:23: 'python2.6' not found + SKIP [21] $REGENDOC_TMPDIR/CWD/multipython.py:24: 'python2.6' not found 27 passed, 21 skipped in 0.12 seconds Indirect parametrization of optional implementations/imports @@ -447,13 +461,13 @@ If you run this with reporting for skips enabled:: $ pytest -rs test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py .s ======= short test summary info ======== - SKIP [1] $REGENDOC_TMPDIR/conftest.py:10: could not import 'opt2' + SKIP [1] $REGENDOC_TMPDIR/conftest.py:11: could not import 'opt2' ======= 1 passed, 1 skipped in 0.12 seconds ======== @@ -471,4 +485,54 @@ of our ``test_func1`` was skipped. A few notes: values as well. 
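To make the indirect mechanism above concrete, here is a minimal self-contained sketch (the fixture and parameter names are made up); the fixture receives each parametrized value through ``request.param`` before the test runs::

    # content of test_indirect_sketch.py (hypothetical example)
    import pytest

    @pytest.fixture
    def backend(request):
        # turn the plain parameter into a richer object for the test
        return {"name": request.param}

    @pytest.mark.parametrize("backend", ["d1", "d2"], indirect=True)
    def test_backend_name(backend):
        assert backend["name"] in ("d1", "d2")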
+Set marks or test IDs for individual parametrized tests +-------------------------------------------------------------------- +Use ``pytest.param`` to apply marks or set the test ID for an individual parametrized test. +For example:: + + # content of test_pytest_param_example.py + import pytest + @pytest.mark.parametrize('test_input,expected', [ + ('3+5', 8), + pytest.param('1+7', 8, + marks=pytest.mark.basic), + pytest.param('2+4', 6, + marks=pytest.mark.basic, + id='basic_2+4'), + pytest.param('6*9', 42, + marks=[pytest.mark.basic, pytest.mark.xfail], + id='basic_6*9'), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + +In this example, we have 4 parametrized tests. Except for the first test, +we mark the remaining three parametrized tests with the custom marker ``basic``, +and for the fourth test we also use the built-in mark ``xfail`` to indicate this +test is expected to fail. For explicitness, we set test ids for some tests. + +Then run ``pytest`` with verbose mode and with only the ``basic`` marker:: + + pytest -v -m basic + ============================================ test session starts ============================================= + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 4 items + + test_pytest_param_example.py::test_eval[1+7-8] PASSED + test_pytest_param_example.py::test_eval[basic_2+4] PASSED + test_pytest_param_example.py::test_eval[basic_6*9] xfail + ========================================== short test summary info =========================================== + XFAIL test_pytest_param_example.py::test_eval[basic_6*9] + + ============================================= 1 tests deselected ============================================= + +As a result: + +- Four tests were collected. +- One test was deselected because it doesn't have the ``basic`` mark. +- Three tests with the ``basic`` mark were selected. +- The test ``test_eval[1+7-8]`` passed, but the name is autogenerated and confusing. +- The test ``test_eval[basic_2+4]`` passed. +- The test ``test_eval[basic_6*9]`` was expected to fail and did fail. diff --git a/doc/en/example/pythoncollection.py b/doc/en/example/pythoncollection.py index 0b9e35df4..9c4bd31ce 100644 --- a/doc/en/example/pythoncollection.py +++ b/doc/en/example/pythoncollection.py @@ -4,7 +4,7 @@ def test_function(): pass -class TestClass: +class TestClass(object): def test_method(self): pass def test_anothermethod(self): pass diff --git a/doc/en/example/pythoncollection.rst b/doc/en/example/pythoncollection.rst index 3fa8834bd..5fb63035a 100644 --- a/doc/en/example/pythoncollection.rst +++ b/doc/en/example/pythoncollection.rst @@ -95,7 +95,7 @@ the :confval:`python_files`, :confval:`python_classes` and :confval:`python_functions` configuration options. Example:: # content of pytest.ini - # can also be defined in in tox.ini or setup.cfg file, although the section + # can also be defined in tox.ini or setup.cfg file, although the section name in setup.cfg files should be "tool:pytest" [pytest] python_files=check_*.py @@ -107,7 +107,7 @@ This would make ``pytest`` look for tests in files that match the ``check_* that match ``*_check``.
For example, if we have:: # content of check_myapp.py - class CheckMyApp: + class CheckMyApp(object): def simple_check(self): pass def complex_check(self): pass @@ -117,7 +117,7 @@ then the test collection looks like this:: $ pytest --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 2 items @@ -163,7 +163,7 @@ You can always peek at the collection tree without running tests like this:: . $ pytest --collect-only pythoncollection.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 3 items @@ -175,21 +175,23 @@ You can always peek at the collection tree without running tests like this:: ======= no tests ran in 0.12 seconds ======== -customizing test collection to find all .py files --------------------------------------------------------- +.. _customizing-test-collection: + +Customizing test collection +--------------------------- .. regendoc:wipe -You can easily instruct ``pytest`` to discover tests from every python file:: - +You can easily instruct ``pytest`` to discover tests from every Python file:: # content of pytest.ini [pytest] python_files = *.py -However, many projects will have a ``setup.py`` which they don't want to be imported. Moreover, there may files only importable by a specific python version. -For such cases you can dynamically define files to be ignored by listing -them in a ``conftest.py`` file:: +However, many projects will have a ``setup.py`` which they don't want to be +imported. Moreover, there may be files only importable by a specific Python +version.
For such cases you can dynamically define files to be ignored by +listing them in a ``conftest.py`` file:: # content of conftest.py import sys @@ -198,7 +200,7 @@ them in a ``conftest.py`` file:: if sys.version_info[0] > 2: collect_ignore.append("pkg/module_py2.py") -And then if you have a module file like this:: +and then if you have a module file like this:: # content of pkg/module_py2.py def test_only_on_python2(): @@ -207,13 +209,13 @@ And then if you have a module file like this:: except Exception, e: pass -and a setup.py dummy file like this:: +and a ``setup.py`` dummy file like this:: # content of setup.py 0/0 # will raise exception if imported -then a pytest run on Python2 will find the one test and will leave out the -setup.py file:: +If you run with a Python 2 interpreter, pytest will find the one test and will +leave out the ``setup.py`` file:: #$ pytest --collect-only ====== test session starts ====== @@ -225,13 +227,13 @@ setup.py file:: ====== no tests ran in 0.04 seconds ====== -If you run with a Python3 interpreter both the one test and the setup.py file -will be left out:: +If you run with a Python 3 interpreter both the one test and the ``setup.py`` +file will be left out:: $ pytest --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y rootdir: $REGENDOC_TMPDIR, inifile: pytest.ini collected 0 items - + ======= no tests ran in 0.12 seconds ======== diff --git a/doc/en/example/reportingdemo.rst b/doc/en/example/reportingdemo.rst index 0817458ad..288b57160 100644 --- a/doc/en/example/reportingdemo.rst +++ b/doc/en/example/reportingdemo.rst @@ -11,8 +11,8 @@ get on the terminal - we are working on that):: assertion $ pytest failure_demo.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR/assertion, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR/assertion, inifile: collected 42 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF @@ -81,7 +81,7 @@ get on the terminal - we are working on that):: def test_eq_text(self): > assert 'spam' == 'eggs' - E assert 'spam' == 'eggs' + E AssertionError: assert 'spam' == 'eggs' E - spam E + eggs @@ -92,7 +92,7 @@ get on the terminal - we are working on that):: def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' - E assert 'foo 1 bar' == 'foo 2 bar' + E AssertionError: assert 'foo 1 bar' == 'foo 2 bar' E - foo 1 bar E ?
^ E + foo 2 bar @@ -105,7 +105,7 @@ get on the terminal - we are working on that):: def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' - E assert 'foo\nspam\nbar' == 'foo\neggs\nbar' + E AssertionError: assert 'foo\nspam\nbar' == 'foo\neggs\nbar' E foo E - spam E + eggs @@ -120,7 +120,7 @@ get on the terminal - we are working on that):: a = '1'*100 + 'a' + '2'*100 b = '1'*100 + 'b' + '2'*100 > assert a == b - E assert '111111111111...2222222222222' == '1111111111111...2222222222222' + E AssertionError: assert '111111111111...2222222222222' == '1111111111111...2222222222222' E Skipping 90 identical leading characters in diff, use -v to show E Skipping 91 identical trailing characters in diff, use -v to show E - 1111111111a222222222 @@ -137,20 +137,16 @@ get on the terminal - we are working on that):: a = '1\n'*100 + 'a' + '2\n'*100 b = '1\n'*100 + 'b' + '2\n'*100 > assert a == b - E assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n' + E AssertionError: assert '1\n1\n1\n1\n...n2\n2\n2\n2\n' == '1\n1\n1\n1\n1...n2\n2\n2\n2\n' E Skipping 190 identical leading characters in diff, use -v to show E Skipping 191 identical trailing characters in diff, use -v to show E 1 E 1 E 1 E 1 - E 1 - E - a2 - E + b2 - E 2 - E 2 - E 2 - E 2 + E 1... + E + E ...Full output truncated (7 lines hidden), use '-vv' to show failure_demo.py:59: AssertionError _______ TestSpecialisedExplanations.test_eq_list ________ @@ -183,15 +179,16 @@ get on the terminal - we are working on that):: def test_eq_dict(self): > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} - E assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} - E Omitting 1 identical items, use -v to show + E AssertionError: assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} + E Omitting 1 identical items, use -vv to show E Differing items: E {'b': 1} != {'b': 2} E Left contains more items: E {'c': 0} E Right contains more items: - E {'d': 0} - E Use -v to get the full diff + E {'d': 0}... + E + E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:70: AssertionError _______ TestSpecialisedExplanations.test_eq_set ________ @@ -200,15 +197,16 @@ get on the terminal - we are working on that):: def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) - E assert {0, 10, 11, 12} == {0, 20, 21} + E AssertionError: assert {0, 10, 11, 12} == {0, 20, 21} E Extra items in the left set: E 10 E 11 E 12 E Extra items in the right set: E 20 - E 21 - E Use -v to get the full diff + E 21... + E + E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:73: AssertionError _______ TestSpecialisedExplanations.test_eq_longer_list ________ @@ -238,15 +236,16 @@ get on the terminal - we are working on that):: def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' > assert 'foo' not in text - E assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail' + E AssertionError: assert 'foo' not in 'some multiline\ntext\nw...ncludes foo\nand a\ntail' E 'foo' is contained here: E some multiline E text E which E includes foo E ? +++ - E and a - E tail + E and a... 
+ E + E ...Full output truncated (2 lines hidden), use '-vv' to show failure_demo.py:83: AssertionError _______ TestSpecialisedExplanations.test_not_in_text_single ________ @@ -256,7 +255,7 @@ get on the terminal - we are working on that):: def test_not_in_text_single(self): text = 'single foo line' > assert 'foo' not in text - E assert 'foo' not in 'single foo line' + E AssertionError: assert 'foo' not in 'single foo line' E 'foo' is contained here: E single foo line E ? +++ @@ -269,7 +268,7 @@ get on the terminal - we are working on that):: def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 > assert 'foo' not in text - E assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' + E AssertionError: assert 'foo' not in 'head head head head hea...ail tail tail tail tail ' E 'foo' is contained here: E head head foo tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? +++ @@ -282,7 +281,7 @@ get on the terminal - we are working on that):: def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 > assert 'f'*70 not in text - E assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' + E AssertionError: assert 'fffffffffff...ffffffffffff' not in 'head head he...l tail tail ' E 'ffffffffffffffffff...fffffffffffffffffff' is contained here: E head head fffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffffftail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail tail E ? ++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ @@ -305,7 +304,7 @@ get on the terminal - we are working on that):: class Foo(object): b = 1 > assert Foo().b == 2 - E assert 1 == 2 + E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() @@ -338,7 +337,7 @@ get on the terminal - we are working on that):: class Bar(object): b = 2 > assert Foo().b == Bar().b - E assert 1 == 2 + E AssertionError: assert 1 == 2 E + where 1 = .Foo object at 0xdeadbeef>.b E + where .Foo object at 0xdeadbeef> = .Foo'>() E + and 2 = .Bar object at 0xdeadbeef>.b @@ -359,7 +358,7 @@ get on the terminal - we are working on that):: > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python.py:1207>:1: ValueError + <0-codegen $PYTHON_PREFIX/lib/python3.5/site-packages/_pytest/python_api.py:579>:1: ValueError _______ TestRaises.test_raises_doesnt ________ self = @@ -480,7 +479,7 @@ get on the terminal - we are working on that):: s = "123" g = "456" > assert s.startswith(g) - E assert False + E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith @@ -495,7 +494,7 @@ get on the terminal - we are working on that):: def g(): return "456" > assert f().startswith(g()) - E assert False + E AssertionError: assert False E + where False = ('456') E + where = '123'.startswith E + where '123' = .f at 0xdeadbeef>() @@ -550,7 +549,7 @@ get on the terminal - we are working on that):: self = def test_single_line(self): - class A: + class A(object): a = 1 b = 2 > assert A.a == b, "A.a appears not to be b" @@ -564,7 +563,7 @@ get on the terminal - we are working on that):: self = def test_multiline(self): - class A: + class A(object): a = 1 b = 2 > assert A.a == b, "A.a appears not to be b\n" \ @@ -581,7 +580,7 @@ get on the terminal - we are working on that):: self = def 
test_custom_repr(self): - class JSON: + class JSON(object): a = 1 def __repr__(self): return "This is JSON\n{\n 'foo': 'bar'\n}" diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst index 3dd6fe92e..823474095 100644 --- a/doc/en/example/simple.rst +++ b/doc/en/example/simple.rst @@ -113,8 +113,8 @@ directory with the above conftest.py:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items ======= no tests ran in 0.12 seconds ======== @@ -127,7 +127,7 @@ Control skipping of tests according to command line option .. regendoc:wipe Here is a ``conftest.py`` file adding a ``--runslow`` command -line option to control skipping of ``slow`` marked tests: +line option to control skipping of ``pytest.mark.slow`` marked tests: .. code-block:: python @@ -136,7 +136,16 @@ line option to control skipping of ``slow`` marked tests: import pytest def pytest_addoption(parser): parser.addoption("--runslow", action="store_true", - help="run slow tests") + default=False, help="run slow tests") + + def pytest_collection_modifyitems(config, items): + if config.getoption("--runslow"): + # --runslow given in cli: do not skip slow tests + return + skip_slow = pytest.mark.skip(reason="need --runslow option to run") + for item in items: + if "slow" in item.keywords: + item.add_marker(skip_slow) We can now write a test module like this: @@ -146,17 +155,11 @@ We can now write a test module like this: import pytest - slow = pytest.mark.skipif( - not pytest.config.getoption("--runslow"), - reason="need --runslow option to run" - ) - - def test_func_fast(): pass - @slow + @pytest.mark.slow def test_func_slow(): pass @@ -164,13 +167,13 @@ and when running it will see a skipped "slow" test:: $ pytest -rs # "-rs" means report details on the little 's' ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py .s ======= short test summary info ======== - SKIP [1] test_module.py:13: need --runslow option to run + SKIP [1] test_module.py:8: need --runslow option to run ======= 1 passed, 1 skipped in 0.12 seconds ======== @@ -178,8 +181,8 @@ Or run it including the ``slow`` marked test:: $ pytest --runslow ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py .. 
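If slow tests should disappear from the report entirely rather than show up as skipped, the same hook can deselect them instead. The following is a hypothetical variant, not part of the example above, reusing the same ``--runslow`` option and ``slow`` marker:

.. code-block:: python

    # content of conftest.py -- a sketch, assuming the --runslow option
    # and the pytest.mark.slow marker from the example above
    def pytest_collection_modifyitems(config, items):
        if config.getoption("--runslow"):
            return
        selected, deselected = [], []
        for item in items:
            (deselected if "slow" in item.keywords else selected).append(item)
        if deselected:
            # report the deselected items and shrink the collected list in place
            config.hook.pytest_deselected(items=deselected)
            items[:] = selected

With this variant a plain ``pytest`` run only collects the fast tests, instead of listing the slow ones as skipped.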
@@ -269,6 +272,7 @@ running from a test you can do something like this: sys._called_from_test = True def pytest_unconfigure(config): + import sys del sys._called_from_test and then check for the ``sys._called_from_test`` flag: @@ -302,9 +306,9 @@ which will add the string to the test header accordingly:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y project deps: mylib-1.1 - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items ======= no tests ran in 0.12 seconds ======== @@ -327,11 +331,11 @@ which will add info only when run with "--v":: $ pytest -v ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache info1: did you know that ... did you? - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 0 items ======= no tests ran in 0.12 seconds ======== @@ -340,8 +344,8 @@ and nothing when run plainly:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 0 items ======= no tests ran in 0.12 seconds ======== @@ -362,28 +366,28 @@ out which tests are the slowest. Let's make an artificial test suite: import time def test_funcfast(): - pass - - def test_funcslow1(): time.sleep(0.1) - def test_funcslow2(): + def test_funcslow1(): time.sleep(0.2) + def test_funcslow2(): + time.sleep(0.3) + Now we can profile which test functions execute the slowest:: $ pytest --durations=3 ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items test_some_are_slow.py ... ======= slowest 3 test durations ======== - 0.20s call test_some_are_slow.py::test_funcslow2 - 0.10s call test_some_are_slow.py::test_funcslow1 - 0.00s setup test_some_are_slow.py::test_funcfast + 0.30s call test_some_are_slow.py::test_funcslow2 + 0.20s call test_some_are_slow.py::test_funcslow1 + 0.10s call test_some_are_slow.py::test_funcfast ======= 3 passed in 0.12 seconds ======== incremental testing - test steps @@ -425,7 +429,7 @@ tests in a class. Here is a test module example: import pytest @pytest.mark.incremental - class TestUserHandling: + class TestUserHandling(object): def test_login(self): pass def test_modification(self): @@ -440,8 +444,8 @@ If we run this:: $ pytest -rx ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 4 items test_step.py .Fx. @@ -476,14 +480,14 @@ concept. It's however recommended to have explicit fixture references in your tests or test classes rather than relying on implicitly executing setup/teardown functions, especially if they are far away from the actual tests. 
-Here is a an example for making a ``db`` fixture available in a directory: +Here is an example for making a ``db`` fixture available in a directory: .. code-block:: python # content of a/conftest.py import pytest - class DB: + class DB(object): pass @pytest.fixture(scope="session") @@ -519,8 +523,8 @@ We can run this:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 7 items test_step.py .Fx. @@ -585,7 +589,7 @@ environment you can implement a hook that gets called when the test "report" object is about to be created. Here we write out all failing test calls and also access a fixture (if it was used by the test) in case you want to query/look at it during your post processing. In our -case we just write some informations out to a ``failures`` file: +case we just write some information out to a ``failures`` file: .. code-block:: python @@ -627,8 +631,8 @@ and run them:: $ pytest test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py FF @@ -678,7 +682,7 @@ here is a little example implemented via a local plugin: outcome = yield rep = outcome.get_result() - # set an report attribute for each phase of a call, which can + # set a report attribute for each phase of a call, which can # be "setup", "call", "teardown" setattr(item, "rep_" + rep.when, rep) @@ -721,8 +725,8 @@ and run it:: $ pytest -s test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items test_module.py Esetting up a test failed! test_module.py::test_setup_fails @@ -760,6 +764,47 @@ and run it:: You'll see that the fixture finalizers could use the precise reporting information. +``PYTEST_CURRENT_TEST`` environment variable +-------------------------------------------- + +.. versionadded:: 3.2 + +Sometimes a test session might get stuck and there might be no easy way to figure out +which test got stuck, for example if pytest was run in quiet mode (``-q``) or you don't have access to the console +output. This is particularly a problem if the problem happens only sporadically, the famous "flaky" kind of tests. + +``pytest`` sets a ``PYTEST_CURRENT_TEST`` environment variable when running tests, which can be inspected +by process monitoring utilities or libraries like `psutil `_ to discover which +test got stuck if necessary: + +.. code-block:: python + + import psutil + + for pid in psutil.pids(): + environ = psutil.Process(pid).environ() + if 'PYTEST_CURRENT_TEST' in environ: + print(f'pytest process {pid} running: {environ["PYTEST_CURRENT_TEST"]}') + +During the test session pytest will set ``PYTEST_CURRENT_TEST`` to the current test +:ref:`nodeid ` and the current stage, which can be ``setup``, ``call`` +and ``teardown``. + +For example, when running a single test function named ``test_foo`` from ``foo_module.py``, +``PYTEST_CURRENT_TEST`` will be set to: + +#. ``foo_module.py::test_foo (setup)`` +#. ``foo_module.py::test_foo (call)`` +#.
``foo_module.py::test_foo (teardown)`` + +In that order. + +.. note:: + + The contents of ``PYTEST_CURRENT_TEST`` are meant to be human readable and the actual format + can be changed between releases (even bug fixes) so it shouldn't be relied on for scripting + or automation. + Freezing pytest --------------- diff --git a/doc/en/example/special.rst b/doc/en/example/special.rst index fdffef089..1fc32f6c8 100644 --- a/doc/en/example/special.rst +++ b/doc/en/example/special.rst @@ -28,7 +28,7 @@ will be called ahead of running any tests:: # content of test_module.py - class TestHello: + class TestHello(object): @classmethod def callme(cls): print ("callme called!") @@ -39,7 +39,7 @@ will be called ahead of running any tests:: def test_method2(self): print ("test_method1 called") - class TestOther: + class TestOther(object): @classmethod def callme(cls): print ("callme other called") diff --git a/doc/en/existingtestsuite.rst b/doc/en/existingtestsuite.rst new file mode 100644 index 000000000..d304b30c9 --- /dev/null +++ b/doc/en/existingtestsuite.rst @@ -0,0 +1,34 @@ +.. _existingtestsuite: + +Using pytest with an existing test suite +=========================================== + +Pytest can be used with most existing test suites, but its +behavior differs from other test runners such as :ref:`nose ` or +Python's default unittest framework. + +Before using this section you will want to :ref:`install pytest `. + +Running an existing test suite with pytest +--------------------------------------------- + +Say you want to contribute to an existing repository somewhere. +After pulling the code into your development space using some +flavor of version control and (optionally) setting up a virtualenv +you will want to run:: + + cd + pip install -e . # Environment dependent alternatives include + # 'python setup.py develop' and 'conda develop' + +in your project root. This will set up a symlink to your code in +site-packages, allowing you to edit your code while your tests +run against it as if it were installed. + +Setting up your project in development mode lets you avoid having to +reinstall every time you want to run your tests, and is less brittle than +mucking about with sys.path to point your tests at local code. + +Also consider using :ref:`tox `. + +.. include:: links.inc diff --git a/doc/en/faq.rst b/doc/en/faq.rst index 774998b14..27d74e114 100644 --- a/doc/en/faq.rst +++ b/doc/en/faq.rst @@ -66,14 +66,6 @@ This completely avoids previous issues of confusing assertion-reporting. It also means, that you can use Python's ``-O`` optimization without losing assertions in test modules. -``pytest`` contains a second, mostly obsolete, assert debugging technique -invoked via ``--assert=reinterpret``: When an ``assert`` statement fails, ``pytest`` re-interprets -the expression part to show intermediate values. This technique suffers -from a caveat that the rewriting does not: If your expression has side -effects (better to avoid them anyway!) the intermediate values may not -be the same, confusing the reinterpreter and obfuscating the initial -error (this is also explained at the command line if it happens). - You can also turn off all assertion interaction using the ``--assert=plain`` option. diff --git a/doc/en/fixture.rst b/doc/en/fixture.rst index bbc4a95c4..1d7ba8640 100644 --- a/doc/en/fixture.rst +++ b/doc/en/fixture.rst @@ -11,7 +11,7 @@ pytest fixtures: explicit, modular, scalable .. _`xUnit`: http://en.wikipedia.org/wiki/XUnit ..
_`purpose of test fixtures`: http://en.wikipedia.org/wiki/Test_fixture#Software -.. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection#Definition +.. _`Dependency injection`: http://en.wikipedia.org/wiki/Dependency_injection The `purpose of test fixtures`_ is to provide a fixed baseline upon which tests can reliably and repeatedly execute. pytest fixtures @@ -27,7 +27,7 @@ functions: * fixture management scales from simple unit to complex functional testing, allowing to parametrize fixtures and tests according to configuration and component options, or to re-use fixtures - across class, module or whole test session scopes. + across function, class, module or whole test session scopes. In addition, pytest continues to support :ref:`xunitsetup`. You can mix both styles, moving incrementally from classic to new style, as you @@ -57,7 +57,7 @@ using it:: @pytest.fixture def smtp(): import smtplib - return smtplib.SMTP("smtp.gmail.com") + return smtplib.SMTP("smtp.gmail.com", 587, timeout=5) def test_ehlo(smtp): response, msg = smtp.ehlo() @@ -70,9 +70,9 @@ marked ``smtp`` fixture function. Running the test looks like this:: $ pytest test_smtpsimple.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item test_smtpsimple.py F @@ -109,7 +109,7 @@ Note that if you misspell a function argument or want to use one that isn't available, you'll see an error with a list of available function arguments. -.. Note:: +.. note:: You can always issue:: @@ -117,33 +117,49 @@ with a list of available function arguments. to see available fixtures. - In versions prior to 2.3 there was no ``@pytest.fixture`` marker - and you had to use a magic ``pytest_funcarg__NAME`` prefix - for the fixture factory. This remains and will remain supported - but is not anymore advertised as the primary means of declaring fixture - functions. - -"Funcargs" a prime example of dependency injection +Fixtures: a prime example of dependency injection --------------------------------------------------- -When injecting fixtures to test functions, pytest-2.0 introduced the -term "funcargs" or "funcarg mechanism" which continues to be present -also in docs today. It now refers to the specific case of injecting -fixture values as arguments to test functions. With pytest-2.3 there are -more possibilities to use fixtures but "funcargs" remain as the main way -as they allow to directly state the dependencies of a test function. - -As the following examples show in more detail, funcargs allow test -functions to easily receive and work against specific pre-initialized -application objects without having to care about import/setup/cleanup -details. It's a prime example of `dependency injection`_ where fixture +Fixtures allow test functions to easily receive and work +against specific pre-initialized application objects without having +to care about import/setup/cleanup details. +It's a prime example of `dependency injection`_ where fixture functions take the role of the *injector* and test functions are the *consumers* of fixture objects. +.. _`conftest.py`: +.. 
_`conftest`: + +``conftest.py``: sharing fixture functions +------------------------------------------ + +If, while implementing your tests, you realize that you +want to use a fixture function from multiple test files, you can move it +to a ``conftest.py`` file. +You don't need to import the fixture you want to use in a test; it +automatically gets discovered by pytest. The discovery of +fixture functions starts at test classes, then test modules, then +``conftest.py`` files and finally builtin and third party plugins. + +You can also use the ``conftest.py`` file to implement +:ref:`local per-directory plugins `. + +Sharing test data +----------------- + +If you want to make test data from files available to your tests, a good way +to do this is by loading this data in a fixture for use by your tests. +This makes use of the automatic caching mechanisms of pytest. + +Another good approach is adding the data files to the ``tests`` folder. +There are also community plugins available to help manage this aspect of +testing, e.g. `pytest-datadir `__ +and `pytest-datafiles `__. + .. _smtpshared: -Sharing a fixture across tests in a module (or class/session) ------------------------------------------------------------------ +Scope: sharing a fixture instance across tests in a class, module or session +---------------------------------------------------------------------------- .. regendoc:wipe @@ -152,10 +168,12 @@ usually time-expensive to create. Extending the previous example, we can add a ``scope='module'`` parameter to the :py:func:`@pytest.fixture <_pytest.python.fixture>` invocation to cause the decorated ``smtp`` fixture function to only be invoked once -per test module. Multiple test functions in a test module will thus -each receive the same ``smtp`` fixture instance. The next example puts -the fixture function into a separate ``conftest.py`` file so -that tests from multiple test modules in the directory can +per test *module* (the default is to invoke once per test *function*). +Multiple test functions in a test module will thus +each receive the same ``smtp`` fixture instance, saving time.
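To see the effect of ``scope="module"`` in isolation, here is a small self-contained sketch (a hypothetical ``test_scope.py``, separate from the ``smtp`` example) that records every fixture instantiation:

.. code-block:: python

    # content of test_scope.py -- hypothetical illustration
    import pytest

    created = []

    @pytest.fixture(scope="module")
    def resource():
        created.append(object())  # record each instantiation
        return created[-1]

    def test_first(resource):
        assert len(created) == 1

    def test_second(resource):
        # the fixture did not run again: both tests share one instance
        assert len(created) == 1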
+ +The next example puts the fixture function into a separate ``conftest.py`` file +so that tests from multiple test modules in the directory can access the fixture function:: # content of conftest.py @@ -164,7 +182,7 @@ access the fixture function:: @pytest.fixture(scope="module") def smtp(): - return smtplib.SMTP("smtp.gmail.com") + return smtplib.SMTP("smtp.gmail.com", 587, timeout=5) The name of the fixture again is ``smtp`` and you can access its result by listing the name ``smtp`` as an input parameter in any test or fixture @@ -176,7 +194,7 @@ function (in or below the directory where ``conftest.py`` is located):: response, msg = smtp.ehlo() assert response == 250 assert b"smtp.gmail.com" in msg - assert 0 # for demo purposes + assert 0 # for demo purposes def test_noop(smtp): response, msg = smtp.noop() @@ -188,8 +206,8 @@ inspect what is going on and can now run the tests:: $ pytest test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_module.py FF @@ -236,6 +254,8 @@ instance, you can simply declare it: # the returned fixture value will be shared for # all tests needing it +Finally, the ``class`` scope will invoke the fixture once per test *class*. + .. _`finalization`: Fixture finalization / executing teardown code @@ -243,7 +263,9 @@ Fixture finalization / executing teardown code pytest supports execution of fixture specific finalization code when the fixture goes out of scope. By using a ``yield`` statement instead of ``return``, all -the code after the *yield* statement serves as the teardown code.:: +the code after the *yield* statement serves as the teardown code: + +.. code-block:: python # content of conftest.py @@ -251,8 +273,8 @@ the code after the *yield* statement serves as the teardown code.:: import pytest @pytest.fixture(scope="module") - def smtp(request): - smtp = smtplib.SMTP("smtp.gmail.com") + def smtp(): + smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5) yield smtp # provide the fixture value print("teardown smtp") smtp.close() @@ -275,54 +297,72 @@ occur around each single test. In either case the test module itself does not need to change or know about these details of fixture setup. -Note that we can also seamlessly use the ``yield`` syntax with ``with`` statements:: +Note that we can also seamlessly use the ``yield`` syntax with ``with`` statements: + +.. code-block:: python # content of test_yield2.py + import smtplib import pytest - @pytest.fixture - def passwd(): - with open("/etc/passwd") as f: - yield f.readlines() + @pytest.fixture(scope="module") + def smtp(): + with smtplib.SMTP("smtp.gmail.com", 587, timeout=5) as smtp: + yield smtp # provide the fixture value - def test_has_lines(passwd): - assert len(passwd) >= 1 -The file ``f`` will be closed after the test finished execution -because the Python ``file`` object supports finalization when +The ``smtp`` connection will be closed after the test finished execution +because the ``smtp`` object automatically closes when the ``with`` statement ends. +Note that if an exception happens during the *setup* code (before the ``yield`` keyword), the +*teardown* code (after the ``yield``) will not be called. -.. note:: - Prior to version 2.10, in order to use a ``yield`` statement to execute teardown code one - had to mark a fixture using the ``yield_fixture`` marker. 
From 2.10 onward, normal - fixtures can use ``yield`` directly so the ``yield_fixture`` decorator is no longer needed - and considered deprecated. +An alternative option for executing *teardown* code is to +make use of the ``addfinalizer`` method of the `request-context`_ object to register +finalization functions. -.. note:: - As historical note, another way to write teardown code is - by accepting a ``request`` object into your fixture function and can call its - ``request.addfinalizer`` one or multiple times:: +Here's the ``smtp`` fixture changed to use ``addfinalizer`` for cleanup: - # content of conftest.py +.. code-block:: python - import smtplib - import pytest + # content of conftest.py + import smtplib + import pytest - @pytest.fixture(scope="module") - def smtp(request): - smtp = smtplib.SMTP("smtp.gmail.com") - def fin(): - print ("teardown smtp") - smtp.close() - request.addfinalizer(fin) - return smtp # provide the fixture value + @pytest.fixture(scope="module") + def smtp(request): + smtp = smtplib.SMTP("smtp.gmail.com", 587, timeout=5) + def fin(): + print ("teardown smtp") + smtp.close() + request.addfinalizer(fin) + return smtp # provide the fixture value - The ``fin`` function will execute when the last test in the module has finished execution. - This method is still fully supported, but ``yield`` is recommended from 2.10 onward because - it is considered simpler and better describes the natural code flow. +Both ``yield`` and ``addfinalizer`` methods work similarly by calling their code after the test +ends, but ``addfinalizer`` has two key differences over ``yield``: + +1. It is possible to register multiple finalizer functions. + +2. Finalizers will always be called regardless if the fixture *setup* code raises an exception. + This is handy to properly close all resources created by a fixture even if one of them + fails to be created/acquired:: + + @pytest.fixture + def equipments(request): + r = [] + for port in ('C1', 'C3', 'C28'): + equip = connect(port) + request.addfinalizer(equip.disconnect) + r.append(equip) + return r + + In the example above, if ``"C28"`` fails with an exception, ``"C1"`` and ``"C3"`` will still + be properly closed. Of course, if an exception happens before the finalize function is + registered then it will not be executed. + .. _`request-context`: @@ -341,7 +381,7 @@ read an optional server URL from the test module which uses our fixture:: @pytest.fixture(scope="module") def smtp(request): server = getattr(request.module, "smtpserver", "smtp.gmail.com") - smtp = smtplib.SMTP(server) + smtp = smtplib.SMTP(server, 587, timeout=5) yield smtp print ("finalizing %s (%s)" % (smtp, server)) smtp.close() @@ -352,8 +392,8 @@ again, nothing much has changed:: $ pytest -s -q --tb=no FFfinalizing (smtp.gmail.com) - . 
- 2 failed, 1 passed in 0.12 seconds + + 2 failed in 0.12 seconds Let's quickly create another test module that actually sets the server URL in its module namespace:: @@ -405,7 +445,7 @@ through the special :py:class:`request ` object:: @pytest.fixture(scope="module", params=["smtp.gmail.com", "mail.python.org"]) def smtp(request): - smtp = smtplib.SMTP(request.param) + smtp = smtplib.SMTP(request.param, 587, timeout=5) yield smtp print ("finalizing %s" % smtp) smtp.close() @@ -450,7 +490,7 @@ So let's just do another run:: response, msg = smtp.ehlo() assert response == 250 > assert b"smtp.gmail.com" in msg - E assert b'smtp.gmail.com' in b'mail.python.org\nSIZE 51200000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' + E AssertionError: assert b'smtp.gmail.com' in b'mail.python.org\nPIPELINING\nSIZE 51200000\nETRN\nSTARTTLS\nAUTH DIGEST-MD5 NTLM CRAM-MD5\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' test_module.py:5: AssertionError -------------------------- Captured stdout setup --------------------------- @@ -520,9 +560,9 @@ Running the above tests results in the following test IDs being used:: $ pytest --collect-only ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 11 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 10 items @@ -536,8 +576,6 @@ Running the above tests results in the following test IDs being used:: - - ======= no tests ran in 0.12 seconds ======== @@ -557,7 +595,7 @@ and instantiate an object ``app`` where we stick the already defined import pytest - class App: + class App(object): def __init__(self, smtp): self.smtp = smtp @@ -573,9 +611,9 @@ Here we declare an ``app`` fixture which receives the previously defined $ pytest -v test_appsetup.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 2 items test_appsetup.py::test_smtp_exists[smtp.gmail.com] PASSED @@ -642,9 +680,9 @@ Let's run the tests in verbose mode and with looking at the print-output:: $ pytest -v -s test_module.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 -- $PYTHON_PREFIX/bin/python3.5 + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y -- $PYTHON_PREFIX/bin/python3.5 cachedir: .cache - rootdir: $REGENDOC_TMPDIR, inifile: + rootdir: $REGENDOC_TMPDIR, inifile: collecting ... collected 8 items test_module.py::test_0[1] SETUP otherarg 1 @@ -728,7 +766,7 @@ and declare its use in a test module via a ``usefixtures`` marker:: import pytest @pytest.mark.usefixtures("cleandir") - class TestDirectoryInit: + class TestDirectoryInit(object): def test_cwd_starts_empty(self): assert os.listdir(os.getcwd()) == [] with open("myfile", "w") as f: @@ -781,8 +819,8 @@ Autouse fixtures (xUnit setup on steroids) .. regendoc:wipe Occasionally, you may want to have fixtures get invoked automatically -without a `usefixtures`_ or `funcargs`_ reference. As a practical -example, suppose we have a database fixture which has a +without declaring a function argument explicitly or a `usefixtures`_ decorator. 
+As a practical example, suppose we have a database fixture which has a begin/rollback/commit architecture and we want to automatically surround each test method by a transaction and a rollback. Here is a dummy self-contained implementation of this idea:: @@ -791,7 +829,7 @@ self-contained implementation of this idea:: import pytest - class DB: + class DB(object): def __init__(self): self.intransaction = [] def begin(self, name): @@ -803,7 +841,7 @@ self-contained implementation of this idea:: def db(): return DB() - class TestClass: + class TestClass(object): @pytest.fixture(autouse=True) def transact(self, request, db): db.begin(request.function.__name__) @@ -853,7 +891,7 @@ into a conftest.py file **without** using ``autouse``:: # content of conftest.py @pytest.fixture - def transact(self, request, db): + def transact(request, db): db.begin() yield db.rollback() @@ -861,7 +899,7 @@ into a conftest.py file **without** using ``autouse``:: and then e.g. have a TestClass using it by declaring the need:: @pytest.mark.usefixtures("transact") - class TestClass: + class TestClass(object): def test_method1(self): ... @@ -869,17 +907,6 @@ All test methods in this TestClass will use the transaction fixture while other test classes or functions in the module will not use it unless they also add a ``transact`` reference. - -Shifting (visibility of) fixture functions ----------------------------------------------------- - -If during implementing your tests you realize that you -want to use a fixture function from multiple test files you can move it -to a :ref:`conftest.py ` file or even separately installable -:ref:`plugins ` without changing test code. The discovery of -fixtures functions starts at test classes, then test modules, then -``conftest.py`` files and finally builtin and third party plugins. - Overriding fixtures on various levels ------------------------------------- @@ -1002,7 +1029,7 @@ Given the tests file structure is: @pytest.mark.parametrize('username', ['directly-overridden-username-other']) def test_username_other(other_username): - assert username == 'other-directly-overridden-username-other' + assert other_username == 'other-directly-overridden-username-other' In the example above, a fixture value is overridden by the test parameter value. Note that the value of the fixture can be overridden this way even if the test doesn't use it directly (doesn't mention it in the function prototype). diff --git a/doc/en/funcarg_compare.rst b/doc/en/funcarg_compare.rst index 88a34215f..b857a014d 100644 --- a/doc/en/funcarg_compare.rst +++ b/doc/en/funcarg_compare.rst @@ -24,7 +24,7 @@ resources. Here is a basic example how we could implement a per-session Database object:: # content of conftest.py - class Database: + class Database(object): def __init__(self): print ("database instance created") def destroy(self): @@ -97,7 +97,7 @@ sets. pytest-2.3 introduces a decorator for use on the factory itself:: ... # use request.param Here the factory will be invoked twice (with the respective "mysql" -and "pg" values set as ``request.param`` attributes) and and all of +and "pg" values set as ``request.param`` attributes) and all of the tests requiring "db" will run twice as well. The "mysql" and "pg" values will also be used for reporting the test-invocation variants. 
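Concretely, such a parametrized factory could look like the sketch below, where ``DummyDB`` is a hypothetical stand-in for a real database connection:

.. code-block:: python

    # content of conftest.py -- a sketch of the parametrized factory
    import pytest

    class DummyDB(object):
        # hypothetical stand-in for a real database connection
        def __init__(self, backend):
            self.backend = backend
        def destroy(self):
            pass

    @pytest.fixture(scope="session", params=["mysql", "pg"])
    def db(request):
        db = DummyDB(request.param)
        request.addfinalizer(db.destroy)
        return db

    # content of test_db.py
    def test_db_initialized(db):
        # runs twice: once with "mysql", once with "pg"
        assert db.backend in ("mysql", "pg")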
diff --git a/doc/en/genapi.py b/doc/en/genapi.py index 89ddc8731..0ede44fa2 100644 --- a/doc/en/genapi.py +++ b/doc/en/genapi.py @@ -1,7 +1,7 @@ import textwrap import inspect -class Writer: +class Writer(object): def __init__(self, clsname): self.clsname = clsname diff --git a/doc/en/getting-started.rst b/doc/en/getting-started.rst index e398e4db5..0b336a41f 100644 --- a/doc/en/getting-started.rst +++ b/doc/en/getting-started.rst @@ -1,7 +1,7 @@ Installation and Getting Started =================================== -**Pythons**: Python 2.6,2.7,3.3,3.4,3.5, Jython, PyPy-2.3 +**Pythons**: Python 2.6,2.7,3.3,3.4,3.5,3.6, Jython, PyPy-2.3 **Platforms**: Unix/Posix and Windows @@ -9,7 +9,8 @@ Installation and Getting Started **dependencies**: `py `_, `colorama (Windows) `_, -`argparse (py26) `_. +`argparse (py26) `_, +`ordereddict (py26) `_. **documentation as PDF**: `download latest `_ @@ -26,7 +27,7 @@ Installation:: To check your installation has installed the correct version:: $ pytest --version - This is pytest version 3.0.5, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py + This is pytest version 3.x.y, imported from $PYTHON_PREFIX/lib/python3.5/site-packages/pytest.py .. _`simpletest`: @@ -46,9 +47,9 @@ That's it. You can execute the test function now:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item test_sample.py F @@ -111,7 +112,7 @@ to group tests logically, in classes and modules. Let's write a class containing two tests:: # content of test_class.py - class TestClass: + class TestClass(object): def test_one(self): x = "this" assert 'h' in x @@ -134,7 +135,7 @@ run the module by passing its filename:: def test_two(self): x = "hello" > assert hasattr(x, 'check') - E assert False + E AssertionError: assert False E + where False = hasattr('hello', 'check') test_class.py:8: AssertionError @@ -192,6 +193,7 @@ Here are a few suggestions where to go next: * :ref:`cmdline` for command line invocation examples * :ref:`good practices ` for virtualenv, test layout +* :ref:`existingtestsuite` for working with pre-existing tests * :ref:`fixtures` for providing a functional baseline to your tests * :ref:`plugins` managing and writing plugins diff --git a/doc/en/goodpractices.rst b/doc/en/goodpractices.rst index 43b15a077..16fdd24c3 100644 --- a/doc/en/goodpractices.rst +++ b/doc/en/goodpractices.rst @@ -30,68 +30,106 @@ Within Python modules, ``pytest`` also discovers tests using the standard Choosing a test layout / import rules ------------------------------------------- +------------------------------------- ``pytest`` supports two common test layouts: -* putting tests into an extra directory outside your actual application - code, useful if you have many functional tests or for other reasons - want to keep tests separate from actual application code (often a good - idea):: +Tests outside application code +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ - setup.py # your setuptools Python package metadata +Putting tests into an extra directory outside your actual application code +might be useful if you have many functional tests or for other reasons want +to keep tests separate from actual application code (often a good idea):: + + setup.py mypkg/ __init__.py - appmodule.py + app.py + view.py tests/ test_app.py + test_view.py ...
+This way your tests can run easily against an installed version +of ``mypkg``. -* inlining test directories into your application package, useful if you - have direct relation between (unit-)test and application modules and - want to distribute your tests along with your application:: +Note that using this scheme your test files must have **unique names**, because +``pytest`` will import them as *top-level* modules since there are no packages +to derive a full package name from. In other words, the test files in the example above will +be imported as ``test_app`` and ``test_view`` top-level modules by adding ``tests/`` to +``sys.path``. - setup.py # your setuptools Python package metadata +If you need to have test modules with the same name, you might add ``__init__.py`` files to your +``tests`` folder and subfolders, changing them to packages:: + + setup.py + mypkg/ + ... + tests/ + __init__.py + foo/ + __init__.py + test_view.py + bar/ + __init__.py + test_view.py + +Now pytest will load the modules as ``tests.foo.test_view`` and ``tests.bar.test_view``, allowing +you to have modules with the same name. But now this introduces a subtle problem: in order to load +the test modules from the ``tests`` directory, pytest prepends the root of the repository to +``sys.path``, which adds the side-effect that now ``mypkg`` is also importable. +This is problematic if you are using a tool like `tox`_ to test your package in a virtual environment, +because you want to test the *installed* version of your package, not the local code from the repository. + +In this situation, it is **strongly** suggested to use a ``src`` layout where application root package resides in a +sub-directory of your root:: + + setup.py + src/ + mypkg/ + __init__.py + app.py + view.py + tests/ + __init__.py + foo/ + __init__.py + test_view.py + bar/ + __init__.py + test_view.py + + +This layout prevents a lot of common pitfalls and has many benefits, which are better explained in this excellent +`blog post by Ionel Cristian Mărieș `_. + +Tests as part of application code +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Inlining test directories into your application package +is useful if you have direct relation between tests and application modules and +want to distribute them along with your application:: + + setup.py mypkg/ __init__.py - appmodule.py - ... + app.py + view.py test/ + __init__.py test_app.py + test_view.py ... -Important notes relating to both schemes: +In this scheme, it is easy to run your tests using the ``--pyargs`` option:: -- **make sure that "mypkg" is importable**, for example by typing once:: + pytest --pyargs mypkg - pip install -e . # install package using setup.py in editable mode +``pytest`` will discover where ``mypkg`` is installed and collect tests from there. -- **avoid "__init__.py" files in your test directories**. - This way your tests can run easily against an installed version - of ``mypkg``, independently from the installed package if it contains - the tests or not. +Note that this layout also works in conjunction with the ``src`` layout mentioned in the previous section. -- With inlined tests you might put ``__init__.py`` into test - directories and make them installable as part of your application. - Using the ``pytest --pyargs mypkg`` invocation pytest will - discover where mypkg is installed and collect tests from there. - With the "external" test you can still distribute tests but they - will not be installed or become importable. 
- -Typically you can run tests by pointing to test directories or modules:: - - pytest tests/test_app.py # for external test dirs - pytest mypkg/test/test_app.py # for inlined test dirs - pytest mypkg # run tests in all below test directories - pytest # run all tests below current dir - ... - -Because of the above ``editable install`` mode you can change your -source code (both tests and the app) and rerun tests at will. -Once you are done with your work, you can `use tox`_ to make sure -that the package is really correct and tests pass in all -required configurations. .. note:: @@ -144,21 +182,24 @@ for installing your application and any dependencies as well as the ``pytest`` package itself. This ensures your code and dependencies are isolated from the system Python installation. -If you frequently release code and want to make sure that your actual +You can then install your package in "editable" mode:: + + pip install -e . + +which lets you change your source code (both tests and application) and rerun tests at will. +This is similar to running `python setup.py develop` or `conda develop` in that it installs +your package using a symlink to your development code. + +Once you are done with your work and want to make sure that your actual package passes all tests you may want to look into `tox`_, the virtualenv test automation tool and its `pytest support -`_. +`_. Tox helps you to setup virtualenv environments with pre-defined dependencies and then executing a pre-configured test command with options. It will run tests against the installed package and not against your source code checkout, helping to detect packaging glitches. -Continuous integration services such as Jenkins_ can make use of the -``--junitxml=PATH`` option to create a JUnitXML file and generate reports (e.g. -by publishing the results in a nice format with the `Jenkins xUnit Plugin -`_). - Integrating with setuptools / ``python setup.py test`` / ``pytest-runner`` -------------------------------------------------------------------------- @@ -208,15 +249,6 @@ by putting them into a ``[tool:pytest]`` section: python_files = testing/*/*.py -.. note:: - Prior to 3.0, the supported section name was ``[pytest]``. Due to how - this may collide with some distutils commands, the recommended - section name for ``setup.cfg`` files is now ``[tool:pytest]``. - - Note that for ``pytest.ini`` and ``tox.ini`` files the section - name is ``[pytest]``. - - Manual Integration ^^^^^^^^^^^^^^^^^^ @@ -235,7 +267,7 @@ your own setuptools Test command for invoking pytest. def initialize_options(self): TestCommand.initialize_options(self) - self.pytest_args = [] + self.pytest_args = '' def run_tests(self): import shlex diff --git a/doc/en/historical-notes.rst b/doc/en/historical-notes.rst new file mode 100644 index 000000000..028ceff9b --- /dev/null +++ b/doc/en/historical-notes.rst @@ -0,0 +1,177 @@ +Historical Notes +================ + +This page lists features or behavior from previous versions of pytest which have changed over the years. They are +kept here as a historical note so users looking at old code can find documentation related to them. + +cache plugin integrated into the core +------------------------------------- + +.. versionadded:: 2.8 + +The functionality of the :ref:`core cache ` plugin was previously distributed +as a third party plugin named ``pytest-cache``. The core plugin +is compatible regarding command line options and API usage except that you +can only store/receive data between test runs that is json-serializable. 
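For instance, a test can persist json-serializable values across runs through the ``cache`` fixture provided by the core plugin; a minimal sketch (the cache key is arbitrary):

.. code-block:: python

    # a minimal sketch using the core plugin's ``cache`` fixture
    def test_count_runs(cache):
        runs = cache.get("examples/runs", 0)  # default is used on the first run
        cache.set("examples/runs", runs + 1)  # values must be json-serializable
        assert runs >= 0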
+ + +funcargs and ``pytest_funcarg__`` +--------------------------------- + +.. versionchanged:: 2.3 + +In versions prior to 2.3 there was no ``@pytest.fixture`` marker +and you had to use a magic ``pytest_funcarg__NAME`` prefix +for the fixture factory. This remains and will remain supported +but is no longer advertised as the primary means of declaring fixture +functions. + + +``@pytest.yield_fixture`` decorator +----------------------------------- + +.. versionchanged:: 2.10 + +Prior to version 2.10, in order to use a ``yield`` statement to execute teardown code one +had to mark a fixture using the ``yield_fixture`` marker. From 2.10 onward, normal +fixtures can use ``yield`` directly so the ``yield_fixture`` decorator is no longer needed +and considered deprecated. + + +``[pytest]`` header in ``setup.cfg`` +------------------------------------ + +.. versionchanged:: 3.0 + +Prior to 3.0, the supported section name was ``[pytest]``. Due to how +this may collide with some distutils commands, the recommended +section name for ``setup.cfg`` files is now ``[tool:pytest]``. + +Note that for ``pytest.ini`` and ``tox.ini`` files the section +name is ``[pytest]``. + + +Applying marks to ``@pytest.mark.parametrize`` parameters +--------------------------------------------------------- + +.. versionchanged:: 3.1 + +Prior to version 3.1 the supported mechanism for marking values +used the syntax:: + + import pytest + @pytest.mark.parametrize("test_input,expected", [ + ("3+5", 8), + ("2+4", 6), + pytest.mark.xfail(("6*9", 42),), + ]) + def test_eval(test_input, expected): + assert eval(test_input) == expected + + +This was an initial hack to support the feature but soon was demonstrated to be incomplete, +broken for passing functions or applying multiple marks with the same name but different parameters. + +The old syntax is planned to be removed in pytest-4.0. + + +``@pytest.mark.parametrize`` argument names as a tuple +------------------------------------------------------ + +.. versionchanged:: 2.4 + +In versions prior to 2.4 one needed to specify the argument +names as a tuple. This remains valid but the simpler ``"name1,name2,..."`` +comma-separated-string syntax is now advertised first because +it's easier to write and produces less line noise. + + +setup: is now an "autouse fixture" +---------------------------------- + +.. versionchanged:: 2.3 + +During development prior to the pytest-2.3 release the name +``pytest.setup`` was used but before the release it was renamed +and moved to become part of the general fixture mechanism, +namely :ref:`autouse fixtures`. + +.. _string conditions: + +Conditions as strings instead of booleans +----------------------------------------- + +.. versionchanged:: 2.4 + +Prior to pytest-2.4 the only way to specify skipif/xfail conditions was +to use strings:: + + import sys + @pytest.mark.skipif("sys.version_info >= (3,3)") + def test_function(): + ... + +During test function setup the skipif condition is evaluated by calling +``eval('sys.version_info >= (3,3)', namespace)``. The namespace contains +all the module globals, and ``os`` and ``sys`` as a minimum. + +Since pytest-2.4 :ref:`boolean conditions ` are considered preferable +because markers can then be freely imported between test modules. +With strings you need to import not only the marker but all variables +used by the marker, which violates encapsulation.
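For comparison, the boolean form of the string example above could be written as follows (note the now-required ``reason``):

.. code-block:: python

    import sys
    import pytest

    @pytest.mark.skipif(sys.version_info >= (3, 3),
                        reason="requires Python older than 3.3")
    def test_function():
        ...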
+ +The reason for specifying the condition as a string was that ``pytest`` can +report a summary of skip conditions based purely on the condition string. +With conditions as booleans you are required to specify a ``reason`` string. + +Note that string conditions will remain fully supported and you are free +to use them if you have no need for cross-importing markers. + +The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)`` +or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace +dictionary which is constructed as follows: + +* the namespace is initialized by putting the ``sys`` and ``os`` modules + and the pytest ``config`` object into it. + +* updated with the module globals of the test function for which the + expression is applied. + +The pytest ``config`` object allows you to skip based on a test +configuration value which you might have added:: + + @pytest.mark.skipif("not config.getvalue('db')") + def test_function(...): + ... + +The equivalent with "boolean conditions" is:: + + @pytest.mark.skipif(not pytest.config.getvalue("db"), + reason="--db was not specified") + def test_function(...): + pass + +.. note:: + + You cannot use ``pytest.config.getvalue()`` in code + imported before pytest's argument parsing takes place. For example, + ``conftest.py`` files are imported before command line parsing and thus + ``config.getvalue()`` will not execute correctly. + +``pytest.set_trace()`` +---------------------- + +.. versionchanged:: 2.4 + +Prior to version 2.4, to set a breakpoint in code one needed to use ``pytest.set_trace()``:: + + import pytest + def test_function(): + ... + pytest.set_trace() # invoke PDB debugger and tracing + + +This is no longer needed and one can use the native ``import pdb;pdb.set_trace()`` call directly. + +For more details see :ref:`breakpoints`. diff --git a/doc/en/index.rst b/doc/en/index.rst index ce1618e66..1d2ca57ef 100644 --- a/doc/en/index.rst +++ b/doc/en/index.rst @@ -25,9 +25,9 @@ To execute it:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item test_sample.py F @@ -59,7 +59,7 @@ Features - Python2.6+, Python3.3+, PyPy-2.3, Jython-2.5 (untested); -- Rich plugin architecture, with over 150+ :ref:`external plugins ` and thriving community; +- Rich plugin architecture, with 315+ `external plugins `_ and a thriving community; Documentation @@ -83,8 +83,8 @@ Consult the :ref:`Changelog ` page for fixes and enhancements of each License ------- -Copyright Holger Krekel and others, 2004-2016. +Copyright Holger Krekel and others, 2004-2017. Distributed under the terms of the `MIT`_ license, pytest is free and open source software. -.. _`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE \ No newline at end of file +..
_`MIT`: https://github.com/pytest-dev/pytest/blob/master/LICENSE diff --git a/doc/en/license.rst b/doc/en/license.rst index 3fc1dad52..b8c0dce1b 100644 --- a/doc/en/license.rst +++ b/doc/en/license.rst @@ -9,7 +9,7 @@ Distributed under the terms of the `MIT`_ license, pytest is free and open sourc The MIT License (MIT) - Copyright (c) 2004-2016 Holger Krekel and others + Copyright (c) 2004-2017 Holger Krekel and others Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in diff --git a/doc/en/mark.rst b/doc/en/mark.rst index ab9546d31..0b0e072a0 100644 --- a/doc/en/mark.rst +++ b/doc/en/mark.rst @@ -10,6 +10,7 @@ By using the ``pytest.mark`` helper you can easily set metadata on your test functions. There are some builtin markers, for example: +* :ref:`skip ` - always skip a test function * :ref:`skipif ` - skip a test function if a certain condition is met * :ref:`xfail ` - produce an "expected failure" outcome if a certain condition is met diff --git a/doc/en/monkeypatch.rst b/doc/en/monkeypatch.rst index 806e910bd..0c07b2f44 100644 --- a/doc/en/monkeypatch.rst +++ b/doc/en/monkeypatch.rst @@ -35,7 +35,7 @@ patch this function before calling into a function which uses it:: assert x == '/abc/.ssh' Here our test function monkeypatches ``os.path.expanduser`` and -then calls into an function that calls it. After the test function +then calls into a function that calls it. After the test function finishes the ``os.path.expanduser`` modification will be undone. example: preventing "requests" from remote operations @@ -60,7 +60,7 @@ so that any attempts within tests to create http requests will fail. Be advised that it is not recommended to patch builtin functions such as ``open``, ``compile``, etc., because it might break pytest's internals. If that's unavoidable, passing ``--tb=native``, ``--assert=plain`` and ``--capture=no`` might - help althought there's no guarantee. + help although there's no guarantee. Method reference of the monkeypatch fixture diff --git a/doc/en/nose.rst b/doc/en/nose.rst index a785ecfaa..10a10633a 100644 --- a/doc/en/nose.rst +++ b/doc/en/nose.rst @@ -26,7 +26,7 @@ Supported nose Idioms * setup and teardown at module/class/method level * SkipTest exceptions and markers * setup/teardown decorators -* ``yield``-based tests and their setup +* ``yield``-based tests and their setup (considered deprecated as of pytest 3.0) * ``__test__`` attribute on modules/classes/functions * general usage of nose utilities @@ -47,9 +47,19 @@ Unsupported idioms / known issues ``tests.test_mod``) but different file system paths (e.g. ``tests/test_mode.py`` and ``other/tests/test_mode.py``) by extending sys.path/import semantics. pytest does not do that - but there is discussion in `issue268 `_ for adding some support. Note that + but there is discussion in `#268 `_ for adding some support. Note that `nose2 choose to avoid this sys.path/import hackery `_. + If you place a conftest.py file in the root directory of your project + (as determined by pytest) pytest will run tests "nose style" against + the code below that directory by adding it to your ``sys.path`` instead of + running against your installed code. + + You may find yourself wanting to do this if you ran ``python setup.py install`` + to set up your project, as opposed to ``python setup.py develop`` or any of + the package manager equivalents. 
Installing with develop in a + virtual environment like Tox is recommended over this pattern. + - nose-style doctests are not collected and executed correctly, also doctest fixtures don't work. @@ -62,3 +72,4 @@ Unsupported idioms / known issues being the recommended alternative. + diff --git a/doc/en/parametrize.rst b/doc/en/parametrize.rst index 052d0e72e..ebbae31b2 100644 --- a/doc/en/parametrize.rst +++ b/doc/en/parametrize.rst @@ -9,17 +9,16 @@ Parametrizing fixtures and test functions ========================================================================== -pytest supports test parametrization in several well-integrated ways: +pytest enables test parametrization at several levels: -- :py:func:`pytest.fixture` allows to define :ref:`parametrization - at the level of fixture functions `. +- :py:func:`pytest.fixture` allows one to :ref:`parametrize fixture + functions `. -* `@pytest.mark.parametrize`_ allows to define parametrization at the - function or class level, provides multiple argument/fixture sets - for a particular test function or class. +* `@pytest.mark.parametrize`_ allows one to define multiple sets of + arguments and fixtures at the test function or class. -* `pytest_generate_tests`_ enables implementing your own custom - dynamic parametrization scheme or extensions. +* `pytest_generate_tests`_ allows one to define custom parametrization + schemes or extensions. .. _parametrizemark: .. _`@pytest.mark.parametrize`: @@ -55,8 +54,8 @@ them in turn:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items test_expectation.py ..F @@ -73,7 +72,7 @@ them in turn:: ]) def test_eval(test_input, expected): > assert eval(test_input) == expected - E assert 54 == 42 + E AssertionError: assert 54 == 42 E + where 54 = eval('6*9') test_expectation.py:8: AssertionError @@ -94,7 +93,8 @@ for example with the builtin ``mark.xfail``:: @pytest.mark.parametrize("test_input,expected", [ ("3+5", 8), ("2+4", 6), - pytest.mark.xfail(("6*9", 42)), + pytest.param("6*9", 42, + marks=pytest.mark.xfail), ]) def test_eval(test_input, expected): assert eval(test_input) == expected @@ -103,8 +103,8 @@ Let's run this:: $ pytest ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 3 items test_expectation.py ..x @@ -123,15 +123,8 @@ To get all combinations of multiple parametrized arguments you can stack def test_foo(x, y): pass -This will run the test with the arguments set to x=0/y=2, x=0/y=3, x=1/y=2 and -x=1/y=3. - -.. note:: - - In versions prior to 2.4 one needed to specify the argument - names as a tuple. This remains valid but the simpler ``"name1,name2,..."`` - comma-separated-string syntax is now advertised first because - it's easier to write and produces less line noise. +This will run the test with the arguments set to ``x=0/y=2``, ``x=0/y=3``, ``x=1/y=2`` and +``x=1/y=3``. .. 
_`pytest_generate_tests`: @@ -167,7 +160,7 @@ command line option and the parametrization of our test function:: def pytest_generate_tests(metafunc): if 'stringinput' in metafunc.fixturenames: metafunc.parametrize("stringinput", - metafunc.config.option.stringinput) + metafunc.config.getoption('stringinput')) If we now pass two stringinput values, our test will run twice:: @@ -186,7 +179,7 @@ Let's also run with a stringinput that will lead to a failing test:: def test_valid_string(stringinput): > assert stringinput.isalpha() - E assert False + E AssertionError: assert False E + where False = () E + where = '!'.isalpha @@ -202,9 +195,15 @@ list:: $ pytest -q -rs test_strings.py s ======= short test summary info ======== - SKIP [1] test_strings.py:1: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 + SKIP [1] test_strings.py:2: got empty parameter set ['stringinput'], function test_valid_string at $REGENDOC_TMPDIR/test_strings.py:1 1 skipped in 0.12 seconds +Note that when calling ``metafunc.parametrize`` multiple times with different parameter sets, all parameter names across +those sets cannot be duplicated, otherwise an error will be raised. + +More examples +------------- + For further examples, you might want to look at :ref:`more parametrization examples `. diff --git a/doc/en/plugins.rst b/doc/en/plugins.rst index ec031e9e0..4a6772ca3 100644 --- a/doc/en/plugins.rst +++ b/doc/en/plugins.rst @@ -94,7 +94,7 @@ environment you can type:: and will get an extended test header which shows activated plugins and their names. It will also print local plugins aka -:ref:`conftest.py ` files when they are loaded. +:ref:`conftest.py ` files when they are loaded. .. _`cmdunregister`: @@ -155,4 +155,3 @@ in the `pytest repository `_. _pytest.terminal _pytest.tmpdir _pytest.unittest - diff --git a/doc/en/projects.rst b/doc/en/projects.rst index fa7a2f29a..a2edbf68f 100644 --- a/doc/en/projects.rst +++ b/doc/en/projects.rst @@ -58,7 +58,7 @@ Here are some examples of projects using ``pytest`` (please send notes via :ref: * `katcp `_ Telescope communication protocol over Twisted * `kss plugin timer `_ * `pyudev `_ a pure Python binding to the Linux library libudev -* `pytest-localserver `_ a plugin for pytest that provides a httpserver and smtpserver +* `pytest-localserver `_ a plugin for pytest that provides an httpserver and smtpserver * `pytest-monkeyplus `_ a plugin that extends monkeypatch These projects help integrate ``pytest`` into other Python frameworks: diff --git a/doc/en/proposals/parametrize_with_fixtures.rst b/doc/en/proposals/parametrize_with_fixtures.rst index 381bc98f1..146032aa4 100644 --- a/doc/en/proposals/parametrize_with_fixtures.rst +++ b/doc/en/proposals/parametrize_with_fixtures.rst @@ -1,8 +1,13 @@ :orphan: -========================= -Parametrize with fixtures -========================= +=================================== +PROPOSAL: Parametrize with fixtures +=================================== + +.. warning:: + + This document outlines a proposal around using fixtures as input + of parametrized tests or fixtures. Problem ------- @@ -108,8 +113,13 @@ the following values. Alternative approach -------------------- -A new helper function named ``fixture_request`` tells pytest to yield all -parameters of a fixture. +A new helper function named ``fixture_request`` would tell pytest to yield +all parameters marked as a fixture. + +.. 
note:: + + The `pytest-lazy-fixture `_ plugin implements a very + similar solution to the proposal below; make sure to check it out. .. code-block:: python diff --git a/doc/en/pythonpath.rst b/doc/en/pythonpath.rst new file mode 100644 index 000000000..b64742768 --- /dev/null +++ b/doc/en/pythonpath.rst @@ -0,0 +1,76 @@ +.. _pythonpath: + +pytest import mechanisms and ``sys.path``/``PYTHONPATH`` +======================================================== + +Here's a list of scenarios where pytest may need to change ``sys.path`` in order +to import test modules or ``conftest.py`` files. + +Test modules / ``conftest.py`` files inside packages +---------------------------------------------------- + +Consider this file and directory layout:: + + root/ + |- foo/ + |- __init__.py + |- conftest.py + |- bar/ + |- __init__.py + |- tests/ + |- __init__.py + |- test_foo.py + + +When executing:: + + pytest root/ + + + +pytest will find ``foo/bar/tests/test_foo.py`` and realize it is part of a package given that +there's an ``__init__.py`` file in the same folder. It will then search upwards until it can find the +last folder which still contains an ``__init__.py`` file in order to find the package *root* (in +this case ``foo/``). To load the module, it will insert ``root/`` at the front of +``sys.path`` (if not there already) in order to load +``test_foo.py`` as the *module* ``foo.bar.tests.test_foo``. + +The same logic applies to the ``conftest.py`` file: it will be imported as the ``foo.conftest`` module. + +Preserving the full package name is important when tests live in a package to avoid problems +and allow test modules to have duplicated names. This is also discussed in detail in +:ref:`test discovery`. + +Standalone test modules / ``conftest.py`` files +----------------------------------------------- + +Consider this file and directory layout:: + + root/ + |- foo/ + |- conftest.py + |- bar/ + |- tests/ + |- test_foo.py + + +When executing:: + + pytest root/ + +pytest will find ``foo/bar/tests/test_foo.py`` and realize it is NOT part of a package given that +there's no ``__init__.py`` file in the same folder. It will then add ``root/foo/bar/tests`` to +``sys.path`` in order to import ``test_foo.py`` as the *module* ``test_foo``. The same is done +with the ``conftest.py`` file by adding ``root/foo`` to ``sys.path`` to import it as ``conftest``. + +For this reason this layout cannot have test modules with the same name, as they will all be +imported into the global import namespace. + +This is also discussed in detail in :ref:`test discovery`. + +Invoking ``pytest`` versus ``python -m pytest`` +----------------------------------------------- + +Running pytest with ``python -m pytest [...]`` instead of ``pytest [...]`` yields nearly +equivalent behaviour, except that the former call will add the current directory to ``sys.path``. +See also :ref:`cmdline`. diff --git a/doc/en/recwarn.rst b/doc/en/recwarn.rst index 7bb193c99..513af0d45 100644 --- a/doc/en/recwarn.rst +++ b/doc/en/recwarn.rst @@ -1,139 +1,3 @@ -.. _`asserting warnings`: +:orphan: -.. _assertwarnings: - -Asserting Warnings -===================================================== - -.. _`asserting warnings with the warns function`: - -.. _warns: - -Asserting warnings with the warns function ------------------------------------------------ - -..
versionadded:: 2.8 - -You can check that code raises a particular warning using ``pytest.warns``, -which works in a similar manner to :ref:`raises `:: - - import warnings - import pytest - - def test_warning(): - with pytest.warns(UserWarning): - warnings.warn("my warning", UserWarning) - -The test will fail if the warning in question is not raised. - -You can also call ``pytest.warns`` on a function or code string:: - - pytest.warns(expected_warning, func, *args, **kwargs) - pytest.warns(expected_warning, "func(*args, **kwargs)") - -The function also returns a list of all raised warnings (as -``warnings.WarningMessage`` objects), which you can query for -additional information:: - - with pytest.warns(RuntimeWarning) as record: - warnings.warn("another warning", RuntimeWarning) - - # check that only one warning was raised - assert len(record) == 1 - # check that the message matches - assert record[0].message.args[0] == "another warning" - -Alternatively, you can examine raised warnings in detail using the -:ref:`recwarn ` fixture (see below). - -.. note:: - ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated - differently; see :ref:`ensuring_function_triggers`. - -.. _`recording warnings`: - -.. _recwarn: - -Recording warnings ------------------------- - -You can record raised warnings either using ``pytest.warns`` or with -the ``recwarn`` fixture. - -To record with ``pytest.warns`` without asserting anything about the warnings, -pass ``None`` as the expected warning type:: - - with pytest.warns(None) as record: - warnings.warn("user", UserWarning) - warnings.warn("runtime", RuntimeWarning) - - assert len(record) == 2 - assert str(record[0].message) == "user" - assert str(record[1].message) == "runtime" - -The ``recwarn`` fixture will record warnings for the whole function:: - - import warnings - - def test_hello(recwarn): - warnings.warn("hello", UserWarning) - assert len(recwarn) == 1 - w = recwarn.pop(UserWarning) - assert issubclass(w.category, UserWarning) - assert str(w.message) == "hello" - assert w.filename - assert w.lineno - -Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded -warnings: a WarningsRecorder instance. To view the recorded warnings, you can -iterate over this instance, call ``len`` on it to get the number of recorded -warnings, or index into it to get a particular recorded warning. It also -provides these methods: - -.. autoclass:: _pytest.recwarn.WarningsRecorder() - :members: - -Each recorded warning has the attributes ``message``, ``category``, -``filename``, ``lineno``, ``file``, and ``line``. The ``category`` is the -class of the warning. The ``message`` is the warning itself; calling -``str(message)`` will return the actual message of the warning. - -.. note:: - ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated - differently; see :ref:`ensuring_function_triggers`. - -.. _`ensuring a function triggers a deprecation warning`: - -.. _ensuring_function_triggers: - -Ensuring a function triggers a deprecation warning -------------------------------------------------------- - -You can also call a global helper for checking -that a certain function call triggers a ``DeprecationWarning`` or -``PendingDeprecationWarning``:: - - import pytest - - def test_global(): - pytest.deprecated_call(myfunction, 17) - -By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be -caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide -them. 
If you wish to record them in your own code, use the -command ``warnings.simplefilter('always')``:: - - import warnings - import pytest - - def test_deprecation(recwarn): - warnings.simplefilter('always') - warnings.warn("deprecated", DeprecationWarning) - assert len(recwarn) == 1 - assert recwarn.pop(DeprecationWarning) - -You can also use it as a contextmanager:: - - def test_global(): - with pytest.deprecated_call(): - myobject.deprecated_method() +This page has been moved; please see :ref:`assertwarnings`. diff --git a/doc/en/requirements.txt b/doc/en/requirements.txt new file mode 100644 index 000000000..72bb60a81 --- /dev/null +++ b/doc/en/requirements.txt @@ -0,0 +1,3 @@ +# pinning sphinx to 1.4.* due to search issues with rtd: +# https://github.com/rtfd/readthedocs-sphinx-ext/issues/25 +sphinx ==1.4.* diff --git a/doc/en/setup.rst b/doc/en/setup.rst deleted file mode 100644 index fe2353465..000000000 --- a/doc/en/setup.rst +++ /dev/null @@ -1,10 +0,0 @@ - -setup: is now an "autouse fixture" -======================================================== - -During development prior to the pytest-2.3 release the name -``pytest.setup`` was used but before the release it was renamed -and moved to become part of the general fixture mechanism, -namely :ref:`autouse fixtures` - - diff --git a/doc/en/skipping.rst b/doc/en/skipping.rst index afa33444e..d6d1e2414 100644 --- a/doc/en/skipping.rst +++ b/doc/en/skipping.rst @@ -2,32 +2,40 @@ .. _skipping: -Skip and xfail: dealing with tests that can not succeed -===================================================================== +Skip and xfail: dealing with tests that cannot succeed +====================================================== -If you have test functions that cannot be run on certain platforms -or that you expect to fail you can mark them accordingly or you -may call helper functions during execution of setup or test functions. +You can mark test functions that cannot be run on certain platforms +or that you expect to fail so pytest can deal with them accordingly and +present a summary of the test session, while keeping the test suite *green*. -A *skip* means that you expect your test to pass unless the environment -(e.g. wrong Python interpreter, missing dependency) prevents it to run. -And *xfail* means that your test can run but you expect it to fail -because there is an implementation problem. +A **skip** means that you expect your test to pass only if some conditions are met; +otherwise pytest should skip running the test altogether. Common examples are skipping +Windows-only tests on non-Windows platforms, or skipping tests that depend on an external +resource which is not available at the moment (for example a database). + +An **xfail** means that you expect a test to fail for some reason. +A common example is a test for a feature not yet implemented, or a bug not yet fixed. +When a test passes despite being expected to fail (marked with ``pytest.mark.xfail``), +it's an **xpass** and will be reported in the test summary. ``pytest`` counts and lists *skip* and *xfail* tests separately. Detailed information about skipped/xfailed tests is not shown by default to avoid cluttering the output. You can use the ``-r`` option to see details corresponding to the "short" letters shown in the test progress:: - pytest -rxs # show extra info on skips and xfails + pytest -rxXs # show extra info on xfailed, xpassed, and skipped tests + +More details on the ``-r`` option can be found by running ``pytest -h``.
(See :ref:`how to change command line options defaults`) .. _skipif: +.. _skip: .. _`condition booleans`: -Marking a test function to be skipped -------------------------------------------- +Skipping test functions +----------------------- .. versionadded:: 2.9 @@ -40,10 +48,23 @@ which may be passed an optional ``reason``: def test_the_unknown(): ... + +Alternatively, it is also possible to skip imperatively during test execution or setup +by calling the ``pytest.skip(reason)`` function: + +.. code-block:: python + + def test_function(): + if not valid_config(): + pytest.skip("unsupported configuration") + +The imperative method is useful when it is not possible to evaluate the skip condition +during import time. + ``skipif`` ~~~~~~~~~~ -.. versionadded:: 2.0, 2.4 +.. versionadded:: 2.0 If you wish to skip something conditionally then you can use ``skipif`` instead. Here is an example of marking a test function to be skipped @@ -55,16 +76,12 @@ when run on a Python3.3 interpreter:: def test_function(): ... -During test function setup the condition ("sys.version_info >= (3,3)") is -checked. If it evaluates to True, the test function will be skipped -with the specified reason. Note that pytest enforces specifying a reason -in order to report meaningful "skip reasons" (e.g. when using ``-rs``). -If the condition is a string, it will be evaluated as python expression. +If the condition evaluates to ``True`` during collection, the test function will be skipped, +with the specified reason appearing in the summary when using ``-rs``. -You can share skipif markers between modules. Consider this test module:: +You can share ``skipif`` markers between modules. Consider this test module:: # content of test_mymodule.py - import mymodule minversion = pytest.mark.skipif(mymodule.__versioninfo__ < (1,1), reason="at least mymodule-1.1 required") @@ -72,7 +89,7 @@ You can share skipif markers between modules. Consider this test module:: def test_function(): ... -You can import it from another test module:: +You can import the marker and reuse it in another test module:: # test_myothermodule.py from test_mymodule import minversion @@ -85,28 +102,33 @@ For larger test suites it's usually a good idea to have one file where you define the markers which you then consistently apply throughout your test suite. -Alternatively, the pre pytest-2.4 way to specify :ref:`condition strings -` instead of booleans will remain fully supported in future -versions of pytest. It couldn't be easily used for importing markers -between test modules so it's no longer advertised as the primary method. +Alternatively, you can use :ref:`condition strings +` instead of booleans, but they can't be shared between modules easily +so they are supported mainly for backward compatibility reasons. Skip all test functions of a class or module ---------------------------------------------- +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ -You can use the ``skipif`` decorator (and any other marker) on classes:: +You can use the ``skipif`` marker (as any other marker) on classes:: @pytest.mark.skipif(sys.platform == 'win32', reason="does not run on windows") - class TestPosixCalls: + class TestPosixCalls(object): def test_function(self): "will not be setup or run under 'win32' platform" -If the condition is true, this marker will produce a skip result for -each of the test methods. +If the condition is ``True``, this marker will produce a skip result for +each of the test methods of that class. 
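To make the class-level behaviour concrete, here is a small runnable sketch; the module name and the condition are illustrative, and under ``win32`` both methods would be reported as skipped with the given reason:

.. code-block:: python

    # content of test_posix_only.py -- illustrative sketch, not part of this PR
    import sys

    import pytest

    @pytest.mark.skipif(sys.platform == 'win32',
                        reason="does not run on windows")
    class TestPosixCalls(object):

        def test_function(self):
            assert True

        def test_another_function(self):
            # skipped together with test_function on win32,
            # with the reason shown under ``pytest -rs``
            assert True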
-If you want to skip all test functions of a module, you must use +.. warning:: + + The use of ``skipif`` on classes that use inheritance is strongly + discouraged. `A Known bug `_ + in pytest's markers may cause unexpected behavior in super classes. + +If you want to skip all test functions of a module, you may use the ``pytestmark`` name on the global level: .. code-block:: python @@ -114,15 +136,67 @@ the ``pytestmark`` name on the global level: # test_module.py pytestmark = pytest.mark.skipif(...) -If multiple "skipif" decorators are applied to a test function, it +If multiple ``skipif`` decorators are applied to a test function, it will be skipped if any of the skip conditions is true. .. _`whole class- or module level`: mark.html#scoped-marking + +Skipping files or directories +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Sometimes you may need to skip an entire file or directory, for example if the +tests rely on Python version-specific features or contain code that you do not +wish pytest to run. In this case, you must exclude the files and directories +from collection. Refer to :ref:`customizing-test-collection` for more +information. + + +Skipping on a missing import dependency +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +You can use the following helper at module level +or within a test or test setup function:: + + docutils = pytest.importorskip("docutils") + +If ``docutils`` cannot be imported here, this will lead to a +skip outcome of the test. You can also skip based on the +version number of a library:: + + docutils = pytest.importorskip("docutils", minversion="0.3") + +The version will be read from the specified +module's ``__version__`` attribute. + +Summary +~~~~~~~ + +Here's a quick guide on how to skip tests in a module in different situations: + +1. Skip all tests in a module unconditionally: + + .. code-block:: python + + pytestmark = pytest.mark.skip('all tests still WIP') + +2. Skip all tests in a module based on some condition: + + .. code-block:: python + + pytestmark = pytest.mark.skipif(sys.platform == 'win32', 'tests for linux only') + +3. Skip all tests in a module if some import is missing: + + .. code-block:: python + + pexpect = pytest.importorskip('pexpect') + + .. _xfail: -Mark a test function as expected to fail -------------------------------------------------------- +XFail: mark test functions as expected to fail +---------------------------------------------- You can use the ``xfail`` marker to indicate that you expect a test to fail:: @@ -135,6 +209,29 @@ This test will be run but no traceback will be reported when it fails. Instead terminal reporting will list it in the "expected to fail" (``XFAIL``) or "unexpectedly passing" (``XPASS``) sections. +Alternatively, you can also mark a test as ``XFAIL`` from within a test or setup function +imperatively: + +.. code-block:: python + + def test_function(): + if not valid_config(): + pytest.xfail("failing configuration (but should work)") + +This will unconditionally make ``test_function`` ``XFAIL``. Note that no other code is executed +after ``pytest.xfail`` call, differently from the marker. That's because it is implemented +internally by raising a known exception. + +Here's the signature of the ``xfail`` **marker** (not the function), using Python 3 keyword-only +arguments syntax: + +.. 
code-block:: python + + def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False): + + + + ``strict`` parameter ~~~~~~~~~~~~~~~~~~~~ @@ -200,18 +297,19 @@ even executed, use the ``run`` parameter as ``False``: def test_function(): ... -This is specially useful for marking crashing tests for later inspection. +This is specially useful for xfailing tests that are crashing the interpreter and should be +investigated later. -Ignoring xfail marks -~~~~~~~~~~~~~~~~~~~~ +Ignoring xfail +~~~~~~~~~~~~~~ By specifying on the commandline:: pytest --runxfail you can force the running and reporting of an ``xfail`` marked test -as if it weren't marked at all. +as if it weren't marked at all. This also causes ``pytest.xfail`` to produce no effect. Examples ~~~~~~~~ @@ -224,15 +322,15 @@ Running it with the report-on-xfail option gives this output:: example $ pytest -rx xfail_demo.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR/example, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR/example, inifile: collected 7 items - + xfail_demo.py xxxxxxx ======= short test summary info ======== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 - reason: [NOTRUN] + reason: [NOTRUN] XFAIL xfail_demo.py::test_hello3 condition: hasattr(os, 'sep') XFAIL xfail_demo.py::test_hello4 @@ -242,170 +340,29 @@ Running it with the report-on-xfail option gives this output:: XFAIL xfail_demo.py::test_hello6 reason: reason XFAIL xfail_demo.py::test_hello7 - + ======= 7 xfailed in 0.12 seconds ======== -xfail signature summary -~~~~~~~~~~~~~~~~~~~~~~~ - -Here's the signature of the ``xfail`` marker, using Python 3 keyword-only -arguments syntax: - -.. code-block:: python - - def xfail(condition=None, *, reason=None, raises=None, run=True, strict=False): - - - .. _`skip/xfail with parametrize`: Skip/xfail with parametrize --------------------------- It is possible to apply markers like skip and xfail to individual -test instances when using parametrize:: +test instances when using parametrize: + +.. code-block:: python import pytest @pytest.mark.parametrize(("n", "expected"), [ (1, 2), - pytest.mark.xfail((1, 0)), - pytest.mark.xfail(reason="some bug")((1, 3)), + pytest.param(1, 0, marks=pytest.mark.xfail), + pytest.param(1, 3, marks=pytest.mark.xfail(reason="some bug")), (2, 3), (3, 4), (4, 5), - pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)), + pytest.param(10, 11, marks=pytest.mark.skipif(sys.version_info >= (3, 0), reason="py2k")), ]) def test_increment(n, expected): assert n + 1 == expected - - -Imperative xfail from within a test or setup function ------------------------------------------------------- - -If you cannot declare xfail- of skipif conditions at import -time you can also imperatively produce an according outcome -imperatively, in test or setup code:: - - def test_function(): - if not valid_config(): - pytest.xfail("failing configuration (but should work)") - # or - pytest.skip("unsupported configuration") - -Note that calling ``pytest.skip`` at the module level -is not allowed since pytest 3.0. If you are upgrading -and ``pytest.skip`` was being used at the module level, you can set a -``pytestmark`` variable: - -.. 
code-block:: python - - # before pytest 3.0 - pytest.skip('skipping all tests because of reasons') - # after pytest 3.0 - pytestmark = pytest.mark.skip('skipping all tests because of reasons') - -``pytestmark`` applies a mark or list of marks to all tests in a module. - - -Skipping on a missing import dependency --------------------------------------------------- - -You can use the following import helper at module level -or within a test or test setup function:: - - docutils = pytest.importorskip("docutils") - -If ``docutils`` cannot be imported here, this will lead to a -skip outcome of the test. You can also skip based on the -version number of a library:: - - docutils = pytest.importorskip("docutils", minversion="0.3") - -The version will be read from the specified -module's ``__version__`` attribute. - - -.. _string conditions: - -specifying conditions as strings versus booleans ----------------------------------------------------------- - -Prior to pytest-2.4 the only way to specify skipif/xfail conditions was -to use strings:: - - import sys - @pytest.mark.skipif("sys.version_info >= (3,3)") - def test_function(): - ... - -During test function setup the skipif condition is evaluated by calling -``eval('sys.version_info >= (3,0)', namespace)``. The namespace contains -all the module globals, and ``os`` and ``sys`` as a minimum. - -Since pytest-2.4 `condition booleans`_ are considered preferable -because markers can then be freely imported between test modules. -With strings you need to import not only the marker but all variables -everything used by the marker, which violates encapsulation. - -The reason for specifying the condition as a string was that ``pytest`` can -report a summary of skip conditions based purely on the condition string. -With conditions as booleans you are required to specify a ``reason`` string. - -Note that string conditions will remain fully supported and you are free -to use them if you have no need for cross-importing markers. - -The evaluation of a condition string in ``pytest.mark.skipif(conditionstring)`` -or ``pytest.mark.xfail(conditionstring)`` takes place in a namespace -dictionary which is constructed as follows: - -* the namespace is initialized by putting the ``sys`` and ``os`` modules - and the pytest ``config`` object into it. - -* updated with the module globals of the test function for which the - expression is applied. - -The pytest ``config`` object allows you to skip based on a test -configuration value which you might have added:: - - @pytest.mark.skipif("not config.getvalue('db')") - def test_function(...): - ... - -The equivalent with "boolean conditions" is:: - - @pytest.mark.skipif(not pytest.config.getvalue("db"), - reason="--db was not specified") - def test_function(...): - pass - -.. note:: - - You cannot use ``pytest.config.getvalue()`` in code - imported before pytest's argument parsing takes place. For example, - ``conftest.py`` files are imported before command line parsing and thus - ``config.getvalue()`` will not execute correctly. - - -Summary -------- - -Here's a quick guide on how to skip tests in a module in different situations: - -1. Skip all tests in a module unconditionally: - - .. code-block:: python - - pytestmark = pytest.mark.skip('all tests still WIP') - -2. Skip all tests in a module based on some condition: - - .. code-block:: python - - pytestmark = pytest.mark.skipif(sys.platform == 'win32', 'tests for linux only') - -3. Skip all tests in a module if some import is missing: - - .. 
code-block:: python - - pexpect = pytest.importorskip('pexpect') diff --git a/doc/en/talks.rst b/doc/en/talks.rst index c35fba0b0..bf593db4b 100644 --- a/doc/en/talks.rst +++ b/doc/en/talks.rst @@ -2,12 +2,21 @@ Talks and Tutorials ========================== -.. sidebar:: Next Open Trainings +.. + .. sidebar:: Next Open Trainings - `pytest workshop `_, 8th December 2016, Bern, Switzerland + `Professional Testing with Python + `_, + 26-28 April 2017, Leipzig, Germany. .. _`funcargs`: funcargs.html +Books +--------------------------------------------- + +- `Python Testing with pytest, by Brian Okken (2017) + `_. + Talks and blog postings --------------------------------------------- diff --git a/doc/en/test/attic.rst b/doc/en/test/attic.rst index 11140db2c..06944661c 100644 --- a/doc/en/test/attic.rst +++ b/doc/en/test/attic.rst @@ -110,7 +110,7 @@ If you want to disable a complete test class you can set the class-level attribute ``disabled``. For example, in order to avoid running some tests on Win32:: - class TestPosixOnly: + class TestPosixOnly(object): disabled = sys.platform == 'win32' def test_xxx(self): diff --git a/doc/en/test/plugin/xdist.rst b/doc/en/test/plugin/xdist.rst index 79440998b..506d240ae 100644 --- a/doc/en/test/plugin/xdist.rst +++ b/doc/en/test/plugin/xdist.rst @@ -71,7 +71,7 @@ you can ad-hoc distribute your tests by typing:: pytest -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg This will synchronize your ``mypkg`` package directory -to an remote ssh account and then locally collect tests +to a remote ssh account and then locally collect tests and send them to remote places for execution. You can specify multiple ``--rsyncdir`` directories diff --git a/doc/en/tmpdir.rst b/doc/en/tmpdir.rst index 56e4190c5..56a347619 100644 --- a/doc/en/tmpdir.rst +++ b/doc/en/tmpdir.rst @@ -29,9 +29,9 @@ Running this would result in a passed test except for the last $ pytest test_tmpdir.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: - collected 1 items + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item test_tmpdir.py F diff --git a/doc/en/unittest.rst b/doc/en/unittest.rst index 179347eb9..db1692029 100644 --- a/doc/en/unittest.rst +++ b/doc/en/unittest.rst @@ -2,50 +2,77 @@ .. _`unittest.TestCase`: .. _`unittest`: -Support for unittest.TestCase / Integration of fixtures -===================================================================== +unittest.TestCase Support +========================= -.. _`unittest.py style`: http://docs.python.org/library/unittest.html +``pytest`` supports running Python ``unittest``-based tests out of the box. +It's meant for leveraging existing ``unittest``-based test suites +to use pytest as a test runner and also allow to incrementally adapt +the test suite to take full advantage of pytest's features. -``pytest`` has support for running Python `unittest.py style`_ tests. -It's meant for leveraging existing unittest-style projects -to use pytest features. Concretely, pytest will automatically -collect ``unittest.TestCase`` subclasses and their ``test`` methods in -test files. It will invoke typical setup/teardown methods and -generally try to make test suites written to run on unittest, to also -run using ``pytest``. We assume here that you are familiar with writing -``unittest.TestCase`` style tests and rather focus on -integration aspects. 
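For reference in what follows, here is a minimal sketch of the kind of ``unittest``-style module pytest collects out of the box (file and names are illustrative, not from this PR):

.. code-block:: python

    # content of test_legacy.py -- illustrative sketch
    import unittest

    class TestExample(unittest.TestCase):

        def setUp(self):
            # invoked for each test method, just as under the unittest runner
            self.data = [1, 2, 3]

        def test_length(self):
            self.assertEqual(len(self.data), 3)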
+To run an existing ``unittest``-style test suite using ``pytest``, type:: -Usage -------------------------------------------------------------------- + pytest tests -After :ref:`installation` type:: - pytest +pytest will automatically collect ``unittest.TestCase`` subclasses and +their ``test`` methods in ``test_*.py`` or ``*_test.py`` files. -and you should be able to run your unittest-style tests if they -are contained in ``test_*`` modules. If that works for you then -you can make use of most :ref:`pytest features `, for example -``--pdb`` debugging in failures, using :ref:`plain assert-statements `, -:ref:`more informative tracebacks `, stdout-capturing or -distributing tests to multiple CPUs via the ``-nNUM`` option if you -installed the ``pytest-xdist`` plugin. Please refer to -the general ``pytest`` documentation for many more examples. +Almost all ``unittest`` features are supported: -.. note:: +* ``@unittest.skip`` style decorators; +* ``setUp/tearDown``; +* ``setUpClass/tearDownClass()``; - Running tests from ``unittest.TestCase`` subclasses with ``--pdb`` will - disable tearDown and cleanup methods for the case that an Exception - occurs. This allows proper post mortem debugging for all applications - which have significant logic in their tearDown machinery. However, - supporting this feature has the following side effect: If people - overwrite ``unittest.TestCase`` ``__call__`` or ``run``, they need to - to overwrite ``debug`` in the same way (this is also true for standard - unittest). +.. _`load_tests protocol`: https://docs.python.org/3/library/unittest.html#load-tests-protocol +.. _`setUpModule/tearDownModule`: https://docs.python.org/3/library/unittest.html#setupmodule-and-teardownmodule +.. _`subtests`: https://docs.python.org/3/library/unittest.html#distinguishing-test-iterations-using-subtests -Mixing pytest fixtures into unittest.TestCase style tests ------------------------------------------------------------ +Up to this point pytest does not have support for the following features: + +* `load_tests protocol`_; +* `setUpModule/tearDownModule`_; +* `subtests`_; + +Benefits out of the box +----------------------- + +By running your test suite with pytest you can make use of several features, +in most cases without having to modify existing code: + +* Obtain :ref:`more informative tracebacks `; +* :ref:`stdout and stderr ` capturing; +* :ref:`Test selection options ` using ``-k`` and ``-m`` flags; +* :ref:`maxfail`; +* :ref:`--pdb ` command-line option for debugging on test failures + (see :ref:`note ` below); +* Distribute tests to multiple CPUs using the `pytest-xdist `_ plugin; +* Use :ref:`plain assert-statements ` instead of ``self.assert*`` functions (`unittest2pytest + `__ is immensely helpful in this); + + +pytest features in ``unittest.TestCase`` subclasses +--------------------------------------------------- + +The following pytest features work in ``unittest.TestCase`` subclasses: + +* :ref:`Marks `: :ref:`skip `, :ref:`skipif `, :ref:`xfail `; +* :ref:`Auto-use fixtures `; + +The following pytest features **do not** work, and probably +never will due to different design philosophies: + +* :ref:`Fixtures ` (except for ``autouse`` fixtures, see :ref:`below `); +* :ref:`Parametrization `; +* :ref:`Custom hooks `; + + +Third party plugins may or may not work well, depending on the plugin and the test suite. + +.. 
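As a sketch of the supported marks listed above applied to ``unittest.TestCase`` methods (condition and names are illustrative, not taken from the PR):

.. code-block:: python

    import sys
    import unittest

    import pytest

    class TestWithMarks(unittest.TestCase):

        @pytest.mark.skipif(sys.platform == 'win32', reason="posix only")
        def test_posix_behaviour(self):
            self.assertTrue(True)

        @pytest.mark.xfail(reason="known bug, not fixed yet")
        def test_known_bug(self):
            self.assertEqual(1, 2)

..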
_mixing-fixtures: + +Mixing pytest fixtures into ``unittest.TestCase`` subclasses using marks +------------------------------------------------------------------------ Running your unittest with ``pytest`` allows you to use its :ref:`fixture mechanism ` with ``unittest.TestCase`` style @@ -63,7 +90,7 @@ it from a unittest-style test:: @pytest.fixture(scope="class") def db_class(request): - class DummyDB: + class DummyDB(object): pass # set a class attribute on the invoking test context request.cls.db = DummyDB() @@ -100,8 +127,8 @@ the ``self.db`` values in the traceback:: $ pytest test_unittest_db.py ======= test session starts ======== - platform linux -- Python 3.5.2, pytest-3.0.5, py-1.4.31, pluggy-0.4.0 - rootdir: $REGENDOC_TMPDIR, inifile: + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: collected 2 items test_unittest_db.py FF @@ -135,8 +162,8 @@ share the same ``self.db`` instance which was our intention when writing the class-scoped fixture function above. -autouse fixtures and accessing other fixtures -------------------------------------------------------------------- +Using autouse fixtures and accessing other fixtures +--------------------------------------------------- Although it's usually better to explicitly declare use of fixtures you need for a given test, you may sometimes want to have fixtures that are @@ -157,13 +184,15 @@ creation of a per-test temporary directory:: import unittest class MyTest(unittest.TestCase): + @pytest.fixture(autouse=True) def initdir(self, tmpdir): tmpdir.chdir() # change to pytest-provided temporary directory tmpdir.join("samplefile.ini").write("# testdata") def test_method(self): - s = open("samplefile.ini").read() + with open("samplefile.ini") as f: + s = f.read() assert "testdata" in s Due to the ``autouse`` flag the ``initdir`` fixture function will be @@ -182,21 +211,35 @@ was executed ahead of the ``test_method``. .. note:: - While pytest supports receiving fixtures via :ref:`test function arguments ` for non-unittest test methods, ``unittest.TestCase`` methods cannot directly receive fixture - function arguments as implementing that is likely to inflict + ``unittest.TestCase`` methods cannot directly receive fixture + arguments as implementing that is likely to inflict on the ability to run general unittest.TestCase test suites. - Maybe optional support would be possible, though. If unittest finally - grows a plugin system that should help as well. In the meanwhile, the - above ``usefixtures`` and ``autouse`` examples should help to mix in - pytest fixtures into unittest suites. And of course you can also start - to selectively leave away the ``unittest.TestCase`` subclassing, use - plain asserts and get the unlimited pytest feature set. + The above ``usefixtures`` and ``autouse`` examples should help to mix in + pytest fixtures into unittest suites. -Converting from unittest to pytest ---------------------------------------- + You can also gradually move away from subclassing from ``unittest.TestCase`` to *plain asserts* + and then start to benefit from the full pytest feature set step by step. -If you want to convert your unittest testcases to pytest, there are -some helpers like `unittest2pytest -`__, which uses lib2to3 -and introspection for the transformation. +.. _pdb-unittest-note: + +.. note:: + + Running tests from ``unittest.TestCase`` subclasses with ``--pdb`` will + disable tearDown and cleanup methods for the case that an Exception + occurs. 
This allows proper post mortem debugging for all applications + which have significant logic in their tearDown machinery. However, + supporting this feature has the following side effect: If people + overwrite ``unittest.TestCase`` ``__call__`` or ``run``, they need to + overwrite ``debug`` in the same way (this is also true for standard + unittest). + +.. note:: + + Due to architectural differences between the two frameworks, setup and + teardown for ``unittest``-based tests is performed during the ``call`` phase + of testing instead of in ``pytest``'s standard ``setup`` and ``teardown`` + stages. This can be important to understand in some situations, particularly + when reasoning about errors. For example, if a ``unittest``-based suite + exhibits errors during setup, ``pytest`` will report no errors during its + ``setup`` phase and will instead raise the error during ``call``. diff --git a/doc/en/usage.rst b/doc/en/usage.rst index ef63a8e06..c5b919fe9 100644 --- a/doc/en/usage.rst +++ b/doc/en/usage.rst @@ -16,8 +16,20 @@ You can invoke testing through the Python interpreter from the command line:: python -m pytest [...] -This is equivalent to invoking the command line script ``pytest [...]`` -directly. +This is almost equivalent to invoking the command line script ``pytest [...]`` +directly, except that calling via ``python`` will also add the current directory to ``sys.path``. + +Possible exit codes +-------------------------------------------------------------- + +Running ``pytest`` can result in six different exit codes: + +:Exit code 0: All tests were collected and passed successfully +:Exit code 1: Tests were collected and run but some of the tests failed +:Exit code 2: Test execution was interrupted by the user +:Exit code 3: Internal error happened while executing tests +:Exit code 4: pytest command line usage error +:Exit code 5: No tests were collected
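These codes are also what ``pytest.main()`` returns when invoking pytest in-process, so the table can be checked programmatically. A small hedged sketch (the target directory name is illustrative and assumed to exist but contain no tests):

.. code-block:: python

    import pytest

    # run pytest in-process against a directory with no tests;
    # "No tests were collected" maps to exit code 5 in the table above
    exit_code = pytest.main(['-q', 'tests_empty'])
    print('pytest exited with code', exit_code)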
"MyClass and not method" - # will select TestMyClass.test_something - # but not TestMyClass.test_method_simple - pytest test_mod.py::test_func # only run tests that match the "node ID", - # e.g "test_mod.py::test_func" will select - # only test_func in test_mod.py - pytest test_mod.py::TestClass::test_method # run a single method in - # a single class +**Run tests in a module** -Import 'pkg' and use its filesystem location to find and run tests:: +:: - pytest --pyargs pkg # run all tests found below directory of pkg + pytest test_mod.py + +**Run tests in a directory** + +:: + + pytest testing/ + +**Run tests by keyword expressions** + +:: + + pytest -k "MyClass and not method" + +This will run tests which contain names that match the given *string expression*, which can +include Python operators that use filenames, class names and function names as variables. +The example above will run ``TestMyClass.test_something`` but not ``TestMyClass.test_method_simple``. + +.. _nodeids: + +**Run tests by node ids** + +Each collected test is assigned a unique ``nodeid`` which consist of the module filename followed +by specifiers like class names, function names and parameters from parametrization, separated by ``::`` characters. + +To run a specific test within a module:: + + pytest test_mod.py::test_func + + +Another example specifying a test method in the command line:: + + pytest test_mod.py::TestClass::test_method + +**Run tests by marker expressions** + +:: + + pytest -m slow + +Will run all tests which are decorated with the ``@pytest.mark.slow`` decorator. + +For more information see :ref:`marks `. + +**Run tests from packages** + +:: + + pytest --pyargs pkg.testing + +This will import ``pkg.testing`` and use its filesystem location to find and run tests from. + Modifying Python traceback printing ---------------------------------------------- @@ -76,12 +133,15 @@ Examples for modifying traceback printing:: The ``--full-trace`` causes very long traces to be printed on error (longer than ``--tb=long``). It also ensures that a stack trace is printed on -**KeyboardInterrrupt** (Ctrl+C). +**KeyboardInterrupt** (Ctrl+C). This is very useful if the tests are taking too long and you interrupt them with Ctrl+C to find out where the tests are *hanging*. By default no output will be shown (because KeyboardInterrupt is caught by pytest). By using this option you make sure a trace is shown. + +.. _pdb-option: + Dropping to PDB_ (Python Debugger) on failures ----------------------------------------------- @@ -111,22 +171,15 @@ for example:: >>> sys.last_value AssertionError('assert result == "ok"',) -Setting a breakpoint / aka ``set_trace()`` ----------------------------------------------------- +.. _breakpoints: -If you want to set a breakpoint and enter the ``pdb.set_trace()`` you -can use a helper:: +Setting breakpoints +------------------- - import pytest - def test_function(): - ... - pytest.set_trace() # invoke PDB debugger and tracing +.. versionadded: 2.4.0 -.. versionadded: 2.0.0 - -Prior to pytest version 2.0.0 you could only enter PDB_ tracing if you disabled -capturing on the command line via ``pytest -s``. In later versions, pytest -automatically disables its output capture when you enter PDB_ tracing: +To set a breakpoint in your code use the native Python ``import pdb;pdb.set_trace()`` call +in your code and pytest automatically disables its output capture for that test: * Output capture in other tests is not affected. 
* Any prior test output that has already been captured and will be processed as @@ -136,12 +189,6 @@ automatically disables its output capture when you enter PDB_ tracing: for test output occurring after you exit the interactive PDB_ tracing session and continue with the regular test run. -.. versionadded: 2.4.0 - -Since pytest version 2.4.0 you can also use the native Python -``import pdb;pdb.set_trace()`` call to enter PDB_ tracing without having to use -the ``pytest.set_trace()`` wrapper or explicitly disable pytest's output -capturing via ``pytest -s``. .. _durations: @@ -165,6 +212,15 @@ integration servers, use this invocation:: to create an XML file at ``path``. +.. versionadded:: 3.1 + +To set the name of the root test suite xml item, you can configure the ``junit_suite_name`` option in your config file: + +.. code-block:: ini + + [pytest] + junit_suite_name = my_suite + record_xml_property ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ @@ -192,7 +248,7 @@ This will add an extra property ``example_key="1"`` to the generated .. warning:: - This is an experimental feature, and its interface might be replaced + ``record_xml_property`` is an experimental feature, and its interface might be replaced by something more powerful and general in future versions. The functionality per-se will be kept, however. @@ -226,7 +282,7 @@ to all testcases you can use ``LogXML.add_global_properties`` def start_and_prepare_env(): pass - class TestMe: + class TestMe(object): def test_foo(self): assert True @@ -255,6 +311,13 @@ Creating resultlog format files This option is rarely used and is scheduled for removal in 4.0. + An alternative for users which still need similar functionality is to use the + `pytest-tap `_ plugin which provides + a stream of test data. + + If you have any concerns, please don't hesitate to + `open an issue `_. + To create plain-text machine-readable result files you can issue:: pytest --resultlog=path @@ -314,7 +377,7 @@ You can specify additional plugins to ``pytest.main``:: # content of myinvoke.py import pytest - class MyPlugin: + class MyPlugin(object): def pytest_sessionfinish(self): print("*** test run reporting finishing") diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst new file mode 100644 index 000000000..de8456af0 --- /dev/null +++ b/doc/en/warnings.rst @@ -0,0 +1,286 @@ +.. _`warnings`: + +Warnings Capture +================ + +.. versionadded:: 3.1 + +Starting from version ``3.1``, pytest now automatically catches warnings during test execution +and displays them at the end of the session:: + + # content of test_show_warnings.py + import warnings + + def api_v1(): + warnings.warn(UserWarning("api v1, should use functions from v2")) + return 1 + + def test_one(): + assert api_v1() == 1 + +Running pytest now produces this output:: + + $ pytest test_show_warnings.py + ======= test session starts ======== + platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y + rootdir: $REGENDOC_TMPDIR, inifile: + collected 1 item + + test_show_warnings.py . + + ======= warnings summary ======== + test_show_warnings.py::test_one + $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2 + warnings.warn(UserWarning("api v1, should use functions from v2")) + + -- Docs: http://doc.pytest.org/en/latest/warnings.html + ======= 1 passed, 1 warnings in 0.12 seconds ======== + +Pytest by default catches all warnings except for ``DeprecationWarning`` and ``PendingDeprecationWarning``. 
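To see the default filtering in action, here is a small self-contained sketch (file and helper names are illustrative, not part of this PR); running ``pytest`` on it shows the ``UserWarning`` in the warnings summary while the ``DeprecationWarning`` stays hidden:

.. code-block:: python

    # content of test_default_filters.py -- illustrative sketch
    import warnings

    def noisy():
        warnings.warn("consider using the v2 api", UserWarning)
        return 1

    def legacy():
        warnings.warn("legacy() is deprecated", DeprecationWarning)
        return 1

    def test_noisy():
        # this UserWarning shows up in the warnings summary
        assert noisy() == 1

    def test_legacy():
        # this DeprecationWarning is hidden by the default filters
        assert legacy() == 1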
+ +The ``-W`` flag can be passed to control which warnings will be displayed or even turn +them into errors:: + + $ pytest -q test_show_warnings.py -W error::UserWarning + F + ======= FAILURES ======== + _______ test_one ________ + + def test_one(): + > assert api_v1() == 1 + + test_show_warnings.py:8: + _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ + + def api_v1(): + > warnings.warn(UserWarning("api v1, should use functions from v2")) + E UserWarning: api v1, should use functions from v2 + + test_show_warnings.py:4: UserWarning + 1 failed in 0.12 seconds +The same option can be set in the ``pytest.ini`` file using the ``filterwarnings`` ini option. +For example, the configuration below will ignore all user warnings, but will transform +all other warnings into errors. + +.. code-block:: ini + + [pytest] + filterwarnings = + error + ignore::UserWarning + + +When a warning matches more than one option in the list, the action for the last matching option +is performed. + +Both the ``-W`` command-line option and the ``filterwarnings`` ini option are based on Python's own +`-W option`_ and `warnings.simplefilter`_, so please refer to those sections in the Python +documentation for other examples and advanced usage. + +``@pytest.mark.filterwarnings`` +------------------------------- + +.. versionadded:: 3.2 + +You can use the ``@pytest.mark.filterwarnings`` mark to add warning filters to specific test items, +allowing you to have finer control over which warnings should be captured at test, class or +even module level: + +.. code-block:: python + + import warnings + + def api_v1(): + warnings.warn(UserWarning("api v1, should use functions from v2")) + return 1 + + @pytest.mark.filterwarnings('ignore:api v1') + def test_one(): + assert api_v1() == 1 + + +Filters applied using a mark take precedence over filters passed on the command line or configured +by the ``filterwarnings`` ini option. + +You may apply a filter to all tests of a class by using the ``filterwarnings`` mark as a class +decorator or to all tests in a module by setting the ``pytestmark`` variable: + +.. code-block:: python + + # turns all warnings into errors for this module + pytestmark = pytest.mark.filterwarnings('error') + + +.. note:: + + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are hidden by the standard library + by default, so you have to explicitly configure them to be displayed in your ``pytest.ini``: + + .. code-block:: ini + + [pytest] + filterwarnings = + once::DeprecationWarning + once::PendingDeprecationWarning + + +*Credits go to Florian Schulze for the reference implementation in the* `pytest-warnings`_ +*plugin.* + +.. _`-W option`: https://docs.python.org/3/using/cmdline.html?highlight=#cmdoption-W +.. _warnings.simplefilter: https://docs.python.org/3/library/warnings.html#warnings.simplefilter +.. _`pytest-warnings`: https://github.com/fschulze/pytest-warnings + + +Disabling warning capture +------------------------- + +This feature is enabled by default but can be disabled entirely in your ``pytest.ini`` file with: + + .. code-block:: ini + + [pytest] + addopts = -p no:warnings + +Or by passing ``-p no:warnings`` on the command line. + +.. _`asserting warnings`: + +.. _assertwarnings: + +.. _`asserting warnings with the warns function`: + +.. _warns: + +Asserting warnings with the warns function +----------------------------------------------- + +..
versionadded:: 2.8 + +You can check that code raises a particular warning using ``pytest.warns``, +which works in a similar manner to :ref:`raises `:: + + import warnings + import pytest + + def test_warning(): + with pytest.warns(UserWarning): + warnings.warn("my warning", UserWarning) + +The test will fail if the warning in question is not raised. + +You can also call ``pytest.warns`` on a function or code string:: + + pytest.warns(expected_warning, func, *args, **kwargs) + pytest.warns(expected_warning, "func(*args, **kwargs)") + +The function also returns a list of all raised warnings (as +``warnings.WarningMessage`` objects), which you can query for +additional information:: + + with pytest.warns(RuntimeWarning) as record: + warnings.warn("another warning", RuntimeWarning) + + # check that only one warning was raised + assert len(record) == 1 + # check that the message matches + assert record[0].message.args[0] == "another warning" + +Alternatively, you can examine raised warnings in detail using the +:ref:`recwarn ` fixture (see below). + +.. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. + +.. _`recording warnings`: + +.. _recwarn: + +Recording warnings +------------------------ + +You can record raised warnings either using ``pytest.warns`` or with +the ``recwarn`` fixture. + +To record with ``pytest.warns`` without asserting anything about the warnings, +pass ``None`` as the expected warning type:: + + with pytest.warns(None) as record: + warnings.warn("user", UserWarning) + warnings.warn("runtime", RuntimeWarning) + + assert len(record) == 2 + assert str(record[0].message) == "user" + assert str(record[1].message) == "runtime" + +The ``recwarn`` fixture will record warnings for the whole function:: + + import warnings + + def test_hello(recwarn): + warnings.warn("hello", UserWarning) + assert len(recwarn) == 1 + w = recwarn.pop(UserWarning) + assert issubclass(w.category, UserWarning) + assert str(w.message) == "hello" + assert w.filename + assert w.lineno + +Both ``recwarn`` and ``pytest.warns`` return the same interface for recorded +warnings: a WarningsRecorder instance. To view the recorded warnings, you can +iterate over this instance, call ``len`` on it to get the number of recorded +warnings, or index into it to get a particular recorded warning. It also +provides these methods: + +.. autoclass:: _pytest.recwarn.WarningsRecorder() + :members: + +Each recorded warning has the attributes ``message``, ``category``, +``filename``, ``lineno``, ``file``, and ``line``. The ``category`` is the +class of the warning. The ``message`` is the warning itself; calling +``str(message)`` will return the actual message of the warning. + +.. note:: + :class:`RecordedWarning` was changed from a plain class to a namedtuple in pytest 3.1 + +.. note:: + ``DeprecationWarning`` and ``PendingDeprecationWarning`` are treated + differently; see :ref:`ensuring_function_triggers`. + +.. _`ensuring a function triggers a deprecation warning`: + +.. 
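As a quick sketch of the recorder interface described above (illustrative, not from this PR):

.. code-block:: python

    import warnings
    import pytest

    def test_inspect_recorded_warnings():
        with pytest.warns(UserWarning) as record:
            warnings.warn("first", UserWarning)
            warnings.warn("second", UserWarning)

        # the WarningsRecorder supports len(), indexing and iteration
        assert len(record) == 2
        assert str(record[0].message) == "first"
        assert [w.category for w in record] == [UserWarning, UserWarning]

..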
_ensuring_function_triggers: + +Ensuring a function triggers a deprecation warning +------------------------------------------------------- + +You can also call a global helper for checking +that a certain function call triggers a ``DeprecationWarning`` or +``PendingDeprecationWarning``:: + + import pytest + + def test_global(): + pytest.deprecated_call(myfunction, 17) + +By default, ``DeprecationWarning`` and ``PendingDeprecationWarning`` will not be +caught when using ``pytest.warns`` or ``recwarn`` because default Python warnings filters hide +them. If you wish to record them in your own code, use the +command ``warnings.simplefilter('always')``:: + + import warnings + import pytest + + def test_deprecation(recwarn): + warnings.simplefilter('always') + warnings.warn("deprecated", DeprecationWarning) + assert len(recwarn) == 1 + assert recwarn.pop(DeprecationWarning) + +You can also use it as a contextmanager:: + + def test_global(): + with pytest.deprecated_call(): + myobject.deprecated_method() diff --git a/doc/en/writing_plugins.rst b/doc/en/writing_plugins.rst index 8a46648cd..5dccdb884 100644 --- a/doc/en/writing_plugins.rst +++ b/doc/en/writing_plugins.rst @@ -49,7 +49,7 @@ Plugin discovery order at tool startup Note that pytest does not find ``conftest.py`` files in deeper nested sub directories at tool startup. It is usually a good idea to keep - your conftest.py file in the top level test or project root directory. + your ``conftest.py`` file in the top level test or project root directory. * by recursively loading all plugins specified by the ``pytest_plugins`` variable in ``conftest.py`` files @@ -57,9 +57,7 @@ Plugin discovery order at tool startup .. _`pytest/plugin`: http://bitbucket.org/pytest-dev/pytest/src/tip/pytest/plugin/ .. _`conftest.py plugins`: -.. _`conftest.py`: .. _`localplugin`: -.. _`conftest`: .. _`local conftest plugins`: conftest.py: local per-directory plugins @@ -90,14 +88,16 @@ Here is how you might run it:: pytest test_flat.py # will not show "setting up" pytest a/test_sub.py # will show "setting up" -.. Note:: +.. note:: If you have ``conftest.py`` files which do not reside in a python package directory (i.e. one containing an ``__init__.py``) then "import conftest" can be ambiguous because there might be other - ``conftest.py`` files as well on your PYTHONPATH or ``sys.path``. + ``conftest.py`` files as well on your ``PYTHONPATH`` or ``sys.path``. It is thus good practice for projects to either put ``conftest.py`` under a package scope or to never import anything from a - conftest.py file. + ``conftest.py`` file. + + See also: :ref:`pythonpath`. Writing your own plugin @@ -122,8 +122,8 @@ to extend and add functionality. for authoring plugins. The template provides an excellent starting point with a working plugin, - tests running with tox, comprehensive README and - entry-pointy already pre-configured. + tests running with tox, a comprehensive README file as well as a + pre-configured entry-point. Also consider :ref:`contributing your plugin to pytest-dev` once it has some happy users other than yourself. @@ -172,7 +172,7 @@ If a package is installed this way, ``pytest`` will load .. note:: Make sure to include ``Framework :: Pytest`` in your list of - `PyPI classifiers `_ + `PyPI classifiers `_ to make it easy for users to find your plugin. @@ -236,22 +236,33 @@ import ``helper.py`` normally. 
The contents of Requiring/Loading plugins in a test module or conftest file ----------------------------------------------------------- -You can require plugins in a test module or a conftest file like this:: +You can require plugins in a test module or a ``conftest.py`` file like this: - pytest_plugins = "name1", "name2", +.. code-block:: python + + pytest_plugins = ["name1", "name2"] When the test module or conftest plugin is loaded the specified plugins -will be loaded as well. You can also use dotted path like this:: +will be loaded as well. Any module can be blessed as a plugin, including internal +application modules: + +.. code-block:: python pytest_plugins = "myapp.testsupport.myplugin" -which will import the specified module as a ``pytest`` plugin. +``pytest_plugins`` variables are processed recursively, so note that in the example above +if ``myapp.testsupport.myplugin`` also declares ``pytest_plugins``, the contents +of the variable will also be loaded as plugins, and so on. -Plugins imported like this will automatically be marked to require -assertion rewriting using the :func:`pytest.register_assert_rewrite` -mechanism. However for this to have any effect the module must not be -imported already, it it was already imported at the time the -``pytest_plugins`` statement is processed a warning will result and +This mechanism makes it easy to share fixtures within applications or even +external applications without the need to create external plugins using +the ``setuptools`` entry point technique. + +Plugins imported by ``pytest_plugins`` will also automatically be marked +for assertion rewriting (see :func:`pytest.register_assert_rewrite`). +However, for this to have any effect the module must not be +imported already; if it was already imported at the time the +``pytest_plugins`` statement is processed, a warning will result and assertions inside the plugin will not be re-written. To fix this you can either call :func:`pytest.register_assert_rewrite` yourself before the module is imported, or you can arrange the code to delay the @@ -275,34 +286,101 @@ the ``--trace-config`` option. Testing plugins --------------- -pytest comes with some facilities that you can enable for testing your -plugin. Given that you have an installed plugin you can enable the -:py:class:`testdir <_pytest.pytester.Testdir>` fixture via specifying a -command line option to include the pytester plugin (``-p pytester``) or -by putting ``pytest_plugins = "pytester"`` into your test or -``conftest.py`` file. You then will have a ``testdir`` fixture which you -can use like this:: +pytest comes with a plugin named ``pytester`` that helps you write tests for +your plugin code. The plugin is disabled by default, so you will have to enable +it before you can use it. - # content of test_myplugin.py +You can do so by adding the following line to a ``conftest.py`` file in your +testing directory: - pytest_plugins = "pytester" # to get testdir fixture +.. code-block:: python - def test_myplugin(testdir): + # content of conftest.py + + pytest_plugins = ["pytester"] + +Alternatively you can invoke pytest with the ``-p pytester`` command line +option. + +This will allow you to use the :py:class:`testdir <_pytest.pytester.Testdir>` +fixture for testing your plugin code. + +Let's demonstrate what you can do with the plugin with an example. Imagine we +developed a plugin that provides a fixture ``hello`` which yields a function +that we can invoke with one optional parameter. It will return a
It will return a +string value of ``Hello World!`` if we do not supply a value or ``Hello +{value}!`` if we do supply a string value. + +.. code-block:: python + + # -*- coding: utf-8 -*- + + import pytest + + def pytest_addoption(parser): + group = parser.getgroup('helloworld') + group.addoption( + '--name', + action='store', + dest='name', + default='World', + help='Default "name" for hello().' + ) + + @pytest.fixture + def hello(request): + name = request.config.getoption('name') + + def _hello(name=None): + if not name: + name = request.config.getoption('name') + return "Hello {name}!".format(name=name) + + return _hello + + +Now the ``testdir`` fixture provides a convenient API for creating temporary +``conftest.py`` files and test files. It also allows us to run the tests and +return a result object, with which we can assert the tests' outcomes. + +.. code-block:: python + + def test_hello(testdir): + """Make sure that our plugin works.""" + + # create a temporary conftest.py file + testdir.makeconftest(""" + import pytest + + @pytest.fixture(params=[ + "Brianna", + "Andreas", + "Floris", + ]) + def name(request): + return request.param + """) + + # create a temporary pytest test file testdir.makepyfile(""" - def test_example(): - pass - """) - result = testdir.runpytest("--verbose") - result.stdout.fnmatch_lines(""" - test_example* + def test_hello_default(hello): + assert hello() == "Hello World!" + + def test_hello_name(hello, name): + assert hello(name) == "Hello {0}!".format(name) """) -Note that by default ``testdir.runpytest()`` will perform a pytest -in-process. You can pass the command line option ``--runpytest=subprocess`` -to have it happen in a subprocess. + # run all tests with pytest + result = testdir.runpytest() + + # check that all 4 tests passed + result.assert_outcomes(passed=4) + + +For more information about the result object that ``runpytest()`` returns, and +the methods that it provides please check out the :py:class:`RunResult +<_pytest.pytester.RunResult>` documentation. -Also see the :py:class:`RunResult <_pytest.pytester.RunResult>` for more -methods of the result object that you get from a call to ``runpytest``. .. _`writinghooks`: @@ -346,6 +424,8 @@ allowed to raise exceptions. Doing so will break the pytest run. +.. _firstresult: + firstresult: stop at first non-None result ------------------------------------------- @@ -372,7 +452,7 @@ hook wrappers and passes the same arguments as to the regular hooks. At the yield point of the hook wrapper pytest will execute the next hook implementations and return their result to the yield point in the form of -a :py:class:`CallOutcome` instance which encapsulates a result or +a :py:class:`CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` instance which encapsulates a result or exception info. The yield point itself will thus typically not raise exceptions (unless there are bugs). @@ -437,7 +517,7 @@ Here is the order of execution: Plugin1). 4. Plugin3's pytest_collection_modifyitems then executing the code after the yield - point. The yield receives a :py:class:`CallOutcome` instance which encapsulates + point. The yield receives a :py:class:`CallOutcome <_pytest.vendored_packages.pluggy._CallOutcome>` instance which encapsulates the result from calling the non-wrappers. Wrappers shall not modify the result. It's possible to use ``tryfirst`` and ``trylast`` also in conjunction with @@ -506,7 +586,6 @@ Initialization, command line and configuration hooks .. autofunction:: pytest_load_initial_conftests .. 
autofunction:: pytest_cmdline_preparse .. autofunction:: pytest_cmdline_parse -.. autofunction:: pytest_namespace .. autofunction:: pytest_addoption .. autofunction:: pytest_cmdline_main .. autofunction:: pytest_configure @@ -515,7 +594,7 @@ Initialization, command line and configuration hooks Generic "runtest" hooks ----------------------- -All runtest related hooks receive a :py:class:`pytest.Item` object. +All runtest related hooks receive a :py:class:`pytest.Item <_pytest.main.Item>` object. .. autofunction:: pytest_runtest_protocol .. autofunction:: pytest_runtest_setup @@ -563,6 +642,7 @@ Session related reporting hooks: .. autofunction:: pytest_collectreport .. autofunction:: pytest_deselected .. autofunction:: pytest_report_header +.. autofunction:: pytest_report_collectionfinish .. autofunction:: pytest_report_teststatus .. autofunction:: pytest_terminal_summary .. autofunction:: pytest_fixture_setup diff --git a/pyproject.toml b/pyproject.toml new file mode 100644 index 000000000..88571e208 --- /dev/null +++ b/pyproject.toml @@ -0,0 +1,35 @@ +[tool.towncrier] +package = "pytest" +filename = "CHANGELOG.rst" +directory = "changelog/" +template = "changelog/_template.rst" + + [[tool.towncrier.type]] + directory = "removal" + name = "Deprecations and Removals" + showcontent = true + + [[tool.towncrier.type]] + directory = "feature" + name = "Features" + showcontent = true + + [[tool.towncrier.type]] + directory = "bugfix" + name = "Bug Fixes" + showcontent = true + + [[tool.towncrier.type]] + directory = "vendor" + name = "Vendored Libraries" + showcontent = true + + [[tool.towncrier.type]] + directory = "doc" + name = "Improved Documentation" + showcontent = true + + [[tool.towncrier.type]] + directory = "trivial" + name = "Trivial/Internal Changes" + showcontent = true diff --git a/pytest.py b/pytest.py index e376e417e..1c914a6ed 100644 --- a/pytest.py +++ b/pytest.py @@ -2,19 +2,7 @@ """ pytest: unit and functional testing with Python. 
""" -__all__ = [ - 'main', - 'UsageError', - 'cmdline', - 'hookspec', - 'hookimpl', - '__version__', -] -if __name__ == '__main__': # if run as a script or by 'python -m pytest' - # we trigger the below "else" condition by the following import - import pytest - raise SystemExit(pytest.main()) # else we are imported @@ -22,7 +10,69 @@ from _pytest.config import ( main, UsageError, _preloadplugins, cmdline, hookspec, hookimpl ) +from _pytest.fixtures import fixture, yield_fixture +from _pytest.assertion import register_assert_rewrite +from _pytest.freeze_support import freeze_includes from _pytest import __version__ +from _pytest.debugging import pytestPDB as __pytestPDB +from _pytest.recwarn import warns, deprecated_call +from _pytest.outcomes import fail, skip, importorskip, exit, xfail +from _pytest.mark import MARK_GEN as mark, param +from _pytest.main import Item, Collector, File, Session +from _pytest.fixtures import fillfixtures as _fillfuncargs +from _pytest.python import ( + Module, Class, Instance, Function, Generator, +) -_preloadplugins() # to populate pytest.* namespace so help(pytest) works +from _pytest.python_api import approx, raises +set_trace = __pytestPDB.set_trace + +__all__ = [ + 'main', + 'UsageError', + 'cmdline', + 'hookspec', + 'hookimpl', + '__version__', + 'register_assert_rewrite', + 'freeze_includes', + 'set_trace', + 'warns', + 'deprecated_call', + 'fixture', + 'yield_fixture', + 'fail', + 'skip', + 'xfail', + 'importorskip', + 'exit', + 'mark', + 'param', + 'approx', + '_fillfuncargs', + + 'Item', + 'File', + 'Collector', + 'Session', + 'Module', + 'Class', + 'Instance', + 'Function', + 'Generator', + 'raises', + + +] + +if __name__ == '__main__': + # if run as a script or by 'python -m pytest' + # we trigger the below "else" condition by the following import + import pytest + raise SystemExit(pytest.main()) +else: + + from _pytest.compat import _setup_collect_fakemodule + _preloadplugins() # to populate pytest.* namespace so help(pytest) works + _setup_collect_fakemodule() diff --git a/scripts/call-tox.bat b/scripts/call-tox.bat index 3ca9eb6d7..86fb25c1d 100644 --- a/scripts/call-tox.bat +++ b/scripts/call-tox.bat @@ -5,4 +5,4 @@ if "%TOXENV%" == "coveralls" ( exit /b 0 ) ) -C:\Python35\python -m tox +C:\Python36\python -m tox diff --git a/scripts/check-manifest.py b/scripts/check-manifest.py deleted file mode 100644 index 5911a84fe..000000000 --- a/scripts/check-manifest.py +++ /dev/null @@ -1,21 +0,0 @@ -""" -Script used by tox.ini to check the manifest file if we are under version control, or skip the -check altogether if not. - -"check-manifest" will needs a vcs to work, which is not available when testing the package -instead of the source code (with ``devpi test`` for example). 
-""" - -from __future__ import print_function - -import os -import subprocess -import sys - - -if os.path.isdir('.git'): - sys.exit(subprocess.call('check-manifest', shell=True)) -else: - print('No .git directory found, skipping checking the manifest file') - sys.exit(0) - diff --git a/scripts/check-rst.py b/scripts/check-rst.py new file mode 100644 index 000000000..57f717501 --- /dev/null +++ b/scripts/check-rst.py @@ -0,0 +1,11 @@ + +from __future__ import print_function + +import subprocess +import glob +import sys + +sys.exit(subprocess.call([ + 'rst-lint', '--encoding', 'utf-8', + 'CHANGELOG.rst', 'HOWTORELEASE.rst', 'README.rst', +] + glob.glob('changelog/[0-9]*.*'))) diff --git a/setup.cfg b/setup.cfg index 1ab4fd059..816539e2e 100644 --- a/setup.cfg +++ b/setup.cfg @@ -9,5 +9,12 @@ upload-dir = doc/en/build/html [bdist_wheel] universal = 1 +[check-manifest] +ignore = + _pytest/_version.py + +[metadata] +license_file = LICENSE + [devpi:upload] formats = sdist.tgz,bdist_wheel diff --git a/setup.py b/setup.py index cdcf3b3bb..55607912b 100644 --- a/setup.py +++ b/setup.py @@ -1,32 +1,27 @@ -import os, sys +import os +import sys import setuptools import pkg_resources from setuptools import setup, Command -classifiers = ['Development Status :: 6 - Mature', - 'Intended Audience :: Developers', - 'License :: OSI Approved :: MIT License', - 'Operating System :: POSIX', - 'Operating System :: Microsoft :: Windows', - 'Operating System :: MacOS :: MacOS X', - 'Topic :: Software Development :: Testing', - 'Topic :: Software Development :: Libraries', - 'Topic :: Utilities'] + [ - ('Programming Language :: Python :: %s' % x) for x in - '2 2.6 2.7 3 3.3 3.4 3.5'.split()] +classifiers = [ + 'Development Status :: 6 - Mature', + 'Intended Audience :: Developers', + 'License :: OSI Approved :: MIT License', + 'Operating System :: POSIX', + 'Operating System :: Microsoft :: Windows', + 'Operating System :: MacOS :: MacOS X', + 'Topic :: Software Development :: Testing', + 'Topic :: Software Development :: Libraries', + 'Topic :: Utilities', +] + [ + ('Programming Language :: Python :: %s' % x) + for x in '2 2.6 2.7 3 3.3 3.4 3.5 3.6'.split() +] with open('README.rst') as fd: long_description = fd.read() -def get_version(): - p = os.path.join(os.path.dirname( - os.path.abspath(__file__)), "_pytest", "__init__.py") - with open(p) as f: - for line in f.readlines(): - if "__version__" in line: - return line.strip().split("=")[-1].strip(" '") - raise ValueError("could not read version") - def has_environment_marker_support(): """ @@ -48,14 +43,15 @@ def has_environment_marker_support(): def main(): - install_requires = ['py>=1.4.29'] # pluggy is vendored in _pytest.vendored_packages + install_requires = ['py>=1.4.33', 'setuptools'] # pluggy is vendored in _pytest.vendored_packages extras_require = {} if has_environment_marker_support(): - extras_require[':python_version=="2.6"'] = ['argparse'] + extras_require[':python_version=="2.6"'] = ['argparse', 'ordereddict'] extras_require[':sys_platform=="win32"'] = ['colorama'] else: if sys.version_info < (2, 7): install_requires.append('argparse') + install_requires.append('ordereddict') if sys.platform == 'win32': install_requires.append('colorama') @@ -63,18 +59,20 @@ def main(): name='pytest', description='pytest: simple powerful testing with Python', long_description=long_description, - version=get_version(), + use_scm_version={ + 'write_to': '_pytest/_version.py', + }, url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 
'cygwin', 'win32'], author='Holger Krekel, Bruno Oliveira, Ronny Pfannschmidt, Floris Bruynooghe, Brianna Laugher, Florian Bruhin and others', - author_email='holger at merlinux.eu', entry_points={'console_scripts': ['pytest=pytest:main', 'py.test=pytest:main']}, classifiers=classifiers, keywords="test unittest", cmdclass={'test': PyTest}, # the following should be enabled for release + setup_requires=['setuptools-scm'], install_requires=install_requires, extras_require=extras_require, packages=['_pytest', '_pytest.assertion', '_pytest._code', '_pytest.vendored_packages'], diff --git a/tasks/__init__.py b/tasks/__init__.py new file mode 100644 index 000000000..992f4a4ad --- /dev/null +++ b/tasks/__init__.py @@ -0,0 +1,13 @@ +""" +Invoke tasks to help with pytest development and release process. +""" + +import invoke + +from . import generate, vendoring + + +ns = invoke.Collection( + generate, + vendoring +) diff --git a/tasks/generate.py b/tasks/generate.py new file mode 100644 index 000000000..fa8ee6557 --- /dev/null +++ b/tasks/generate.py @@ -0,0 +1,162 @@ +import os +from pathlib import Path +from subprocess import check_output, check_call + +import invoke + + +@invoke.task(help={ + 'version': 'version being released', +}) +def announce(ctx, version): + """Generates a new release announcement entry in the docs.""" + # Get our list of authors + stdout = check_output(["git", "describe", "--abbrev=0", '--tags']) + stdout = stdout.decode('utf-8') + last_version = stdout.strip() + + stdout = check_output(["git", "log", "{}..HEAD".format(last_version), "--format=%aN"]) + stdout = stdout.decode('utf-8') + + contributors = set(stdout.splitlines()) + + template_name = 'release.minor.rst' if version.endswith('.0') else 'release.patch.rst' + template_text = Path(__file__).parent.joinpath(template_name).read_text(encoding='UTF-8') + + contributors_text = '\n'.join('* {}'.format(name) for name in sorted(contributors)) + '\n' + text = template_text.format(version=version, contributors=contributors_text) + + target = Path(__file__).parent.joinpath('../doc/en/announce/release-{}.rst'.format(version)) + target.write_text(text, encoding='UTF-8') + print("[generate.announce] Generated {}".format(target.name)) + + # Update index with the new release entry + index_path = Path(__file__).parent.joinpath('../doc/en/announce/index.rst') + lines = index_path.read_text(encoding='UTF-8').splitlines() + indent = ' ' + for index, line in enumerate(lines): + if line.startswith('{}release-'.format(indent)): + new_line = indent + target.stem + if line != new_line: + lines.insert(index, new_line) + index_path.write_text('\n'.join(lines) + '\n', encoding='UTF-8') + print("[generate.announce] Updated {}".format(index_path.name)) + else: + print("[generate.announce] Skip {} (already contains release)".format(index_path.name)) + break + + check_call(['git', 'add', str(target)]) + + +@invoke.task() +def regen(ctx): + """Call regendoc tool to update examples and pytest output in the docs.""" + print("[generate.regen] Updating docs") + check_call(['tox', '-e', 'regen']) + + +@invoke.task() +def make_tag(ctx, version): + """Create a new (local) tag for the release, only if the repository is clean.""" + from git import Repo + + repo = Repo('.') + if repo.is_dirty(): + print('Current repository is dirty. 
Please commit any changes and try again.') + raise invoke.Exit(code=2) + + tag_names = [x.name for x in repo.tags] + if version in tag_names: + print("[generate.make_tag] Delete existing tag {}".format(version)) + repo.delete_tag(version) + + print("[generate.make_tag] Create tag {}".format(version)) + repo.create_tag(version) + + +@invoke.task() +def devpi_upload(ctx, version, user, password=None): + """Creates and uploads a package to devpi for testing.""" + if password: + print("[generate.devpi_upload] devpi login {}".format(user)) + check_call(['devpi', 'login', user, '--password', password]) + + check_call(['devpi', 'use', 'https://devpi.net/{}/dev'.format(user)]) + + env = os.environ.copy() + env['SETUPTOOLS_SCM_PRETEND_VERSION'] = version + check_call(['devpi', 'upload', '--formats', 'sdist,bdist_wheel'], env=env) + print("[generate.devpi_upload] package uploaded") + + +@invoke.task(help={ + 'version': 'version being released', + 'user': 'name of the user on devpi to stage the generated package', + 'password': 'user password on devpi to stage the generated package ' + '(if not given assumed logged in)', +}) +def pre_release(ctx, version, user, password=None): + """Generates new docs, release announcements and uploads a new release to devpi for testing.""" + announce(ctx, version) + regen(ctx) + changelog(ctx, version, write_out=True) + + msg = 'Preparing release version {}'.format(version) + check_call(['git', 'commit', '-a', '-m', msg]) + + make_tag(ctx, version) + + devpi_upload(ctx, version=version, user=user, password=password) + + print() + print('[generate.pre_release] Please push your branch and open a PR.') + + +@invoke.task(help={ + 'version': 'version being released', + 'user': 'name of the user on devpi to stage the generated package', + 'pypi_name': 'name of the pypi configuration section in your ~/.pypirc', +}) +def publish_release(ctx, version, user, pypi_name): + """Publishes a package previously created by the 'pre_release' command.""" + from git import Repo + repo = Repo('.') + tag_names = [x.name for x in repo.tags] + if version not in tag_names: + print('Could not find tag for version {}, exiting...'.format(version)) + raise invoke.Exit(code=2) + + check_call(['devpi', 'use', 'https://devpi.net/{}/dev'.format(user)]) + check_call(['devpi', 'push', 'pytest=={}'.format(version), 'pypi:{}'.format(pypi_name)]) + check_call(['git', 'push', 'git@github.com:pytest-dev/pytest.git', version]) + + emails = [ + 'pytest-dev@python.org', + 'python-announce-list@python.org' + ] + if version.endswith('.0'): + emails.append('testing-in-python@lists.idyll.org') + print('Version {} has been published to PyPI!'.format(version)) + print() + print('Please send an email announcement with the contents from:') + print() + print(' doc/en/announce/release-{}.rst'.format(version)) + print() + print('To the following mailing lists:') + print() + print(' ', ','.join(emails)) + print() + print('And announce it on Twitter adding the #pytest hashtag.') + + +@invoke.task(help={ + 'version': 'version being released', + 'write_out': 'write changes to the actual changelog' +}) +def changelog(ctx, version, write_out=False): + if write_out: + addopts = [] + else: + addopts = ['--draft'] + check_call(['towncrier', '--version', version] + addopts) + diff --git a/tasks/release.minor.rst b/tasks/release.minor.rst new file mode 100644 index 000000000..3c0b7d718 --- /dev/null +++ b/tasks/release.minor.rst @@ -0,0 +1,27 @@ +pytest-{version} +======================================= + +The pytest team is proud
to announce the {version} release! + +pytest is a mature Python testing tool with more than 1600 tests +against itself, passing on many different interpreters and platforms. + +This release contains a number of bug fixes and improvements, so users are encouraged +to take a look at the CHANGELOG: + + http://doc.pytest.org/en/latest/changelog.html + +For complete documentation, please visit: + + http://docs.pytest.org + +As usual, you can upgrade from PyPI via: + + pip install -U pytest + +Thanks to all who contributed to this release, among them: + +{contributors} + +Happy testing, +The pytest Development Team diff --git a/tasks/release.patch.rst b/tasks/release.patch.rst new file mode 100644 index 000000000..56764b913 --- /dev/null +++ b/tasks/release.patch.rst @@ -0,0 +1,17 @@ +pytest-{version} +======================================= + +pytest {version} has just been released to PyPI. + +This is a bug-fix release and a drop-in replacement. To upgrade:: + + pip install --upgrade pytest + +The full changelog is available at http://doc.pytest.org/en/latest/changelog.html. + +Thanks to all who contributed to this release, among them: + +{contributors} + +Happy testing, +The pytest Development Team diff --git a/tasks/requirements.txt b/tasks/requirements.txt new file mode 100644 index 000000000..6392de0cc --- /dev/null +++ b/tasks/requirements.txt @@ -0,0 +1,5 @@ +invoke +tox +gitpython +towncrier +wheel diff --git a/tasks/vendoring.py b/tasks/vendoring.py new file mode 100644 index 000000000..867f2946b --- /dev/null +++ b/tasks/vendoring.py @@ -0,0 +1,23 @@ +from __future__ import absolute_import, print_function +import py +import invoke + +VENDOR_TARGET = py.path.local("_pytest/vendored_packages") +GOOD_FILES = 'README.md', '__init__.py' + +@invoke.task() +def remove_libs(ctx): + print("removing vendored libs") + for path in VENDOR_TARGET.listdir(): + if path.basename not in GOOD_FILES: + print(" ", path) + path.remove() + +@invoke.task(pre=[remove_libs]) +def update_libs(ctx): + print("installing libs") + ctx.run("pip install -t {target} pluggy".format(target=VENDOR_TARGET)) + ctx.run("git add {target}".format(target=VENDOR_TARGET)) + print("Please commit to finish the update after running the tests:") + print() + print(' git commit -am "Updated vendored libs"') diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 88e3fa449..8a8c32762 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function import os import sys @@ -8,7 +9,7 @@ import pytest from _pytest.main import EXIT_NOTESTSCOLLECTED, EXIT_USAGEERROR -class TestGeneralUsage: +class TestGeneralUsage(object): def test_config_error(self, testdir): testdir.makeconftest(""" def pytest_configure(config): @@ -73,14 +74,13 @@ class TestGeneralUsage: print("---unconfigure") """) result = testdir.runpytest("-s", "asd") - assert result.ret == 4 # EXIT_USAGEERROR + assert result.ret == 4 # EXIT_USAGEERROR result.stderr.fnmatch_lines(["ERROR: file not found*asd"]) result.stdout.fnmatch_lines([ "*---configure", "*---unconfigure", ]) - def test_config_preparse_plugin_option(self, testdir): testdir.makepyfile(pytest_xyz=""" def pytest_addoption(parser): @@ -118,7 +118,7 @@ class TestGeneralUsage: testdir.makepyfile(import_fails="import does_not_work") result = testdir.runpytest(p) result.stdout.fnmatch_lines([ - #XXX on jython this fails: "> import import_fails", + # XXX on jython this fails: "> import
import_fails", "ImportError while importing test module*", "*No module named *does_not_work*", ]) @@ -130,7 +130,7 @@ class TestGeneralUsage: result = testdir.runpytest(p1, p2) assert result.ret result.stderr.fnmatch_lines([ - "*ERROR: not found:*%s" %(p2.basename,) + "*ERROR: not found:*%s" % (p2.basename,) ]) def test_issue486_better_reporting_on_conftest_load_failure(self, testdir): @@ -146,7 +146,6 @@ class TestGeneralUsage: *ERROR*could not load*conftest.py* """) - def test_early_skip(self, testdir): testdir.mkdir("xyz") testdir.makeconftest(""" @@ -254,7 +253,7 @@ class TestGeneralUsage: if path.basename.startswith("conftest"): return MyCollector(path, parent) """) - result = testdir.runpytest(c.basename+"::"+"xyz") + result = testdir.runpytest(c.basename + "::" + "xyz") assert result.ret == 0 result.stdout.fnmatch_lines([ "*1 pass*", @@ -309,15 +308,15 @@ class TestGeneralUsage: x """) result = testdir.runpytest() - assert result.ret == 3 # internal error + assert result.ret == 3 # internal error result.stderr.fnmatch_lines([ "INTERNAL*pytest_configure*", "INTERNAL*x*", ]) assert 'sessionstarttime' not in result.stderr.str() - @pytest.mark.parametrize('lookfor', ['test_fun.py', 'test_fun.py::test_a']) - def test_issue134_report_syntaxerror_when_collecting_member(self, testdir, lookfor): + @pytest.mark.parametrize('lookfor', ['test_fun.py::test_a']) + def test_issue134_report_error_when_collecting_member(self, testdir, lookfor): testdir.makepyfile(test_fun=""" def test_a(): pass @@ -338,10 +337,16 @@ class TestGeneralUsage: "*ERROR*test_b.py::b*", ]) + @pytest.mark.usefixtures('recwarn') def test_namespace_import_doesnt_confuse_import_hook(self, testdir): - # Ref #383. Python 3.3's namespace package messed with our import hooks - # Importing a module that didn't exist, even if the ImportError was - # gracefully handled, would make our test crash. + """ + Ref #383. Python 3.3's namespace package messed with our import hooks + Importing a module that didn't exist, even if the ImportError was + gracefully handled, would make our test crash. 
+ + Use recwarn here to silence this warning in Python 2.6 and 2.7: + ImportWarning: Not importing directory '...\not_a_package': missing __init__.py + """ testdir.mkdir('not_a_package') p = testdir.makepyfile(""" try: @@ -395,7 +400,7 @@ class TestGeneralUsage: monkeypatch.setitem(sys.modules, 'myplugin', mod) assert pytest.main(args=[str(tmpdir)], plugins=['myplugin']) == 0 - def test_parameterized_with_bytes_regex(self, testdir): + def test_parametrized_with_bytes_regex(self, testdir): p = testdir.makepyfile(""" import re import pytest @@ -403,14 +408,27 @@ class TestGeneralUsage: def test_stuff(r): pass """ - ) + ) res = testdir.runpytest(p) res.stdout.fnmatch_lines([ '*1 passed*' ]) + def test_parametrized_with_null_bytes(self, testdir): + """Test parametrization with values that contain null bytes and unicode characters (#2644)""" + p = testdir.makepyfile(u""" + # encoding: UTF-8 + import pytest -class TestInvocationVariants: + @pytest.mark.parametrize("data", ["\\x00", u'ação']) + def test_foo(data): + assert data + """) + res = testdir.runpytest(p) + res.assert_outcomes(passed=2) + + +class TestInvocationVariants(object): def test_earlyinit(self, testdir): p = testdir.makepyfile(""" import pytest @@ -433,8 +451,8 @@ class TestInvocationVariants: #collect #cmdline #Item - #assert collect.Item is Item - #assert collect.Collector is Collector + # assert collect.Item is Item + # assert collect.Collector is Collector main skip xfail @@ -502,7 +520,7 @@ class TestInvocationVariants: out, err = capsys.readouterr() def test_invoke_plugin_api(self, testdir, capsys): - class MyPlugin: + class MyPlugin(object): def pytest_addoption(self, parser): parser.addoption("--myopt") @@ -523,6 +541,7 @@ class TestInvocationVariants: ]) def test_cmdline_python_package(self, testdir, monkeypatch): + import warnings monkeypatch.delenv('PYTHONDONTWRITEBYTECODE', False) path = testdir.mkpydir("tpkg") path.join("test_hello.py").write("def test_hello(): pass") @@ -545,7 +564,11 @@ class TestInvocationVariants: return what empty_package = testdir.mkpydir("empty_package") monkeypatch.setenv('PYTHONPATH', join_pythonpath(empty_package)) - result = testdir.runpytest("--pyargs", ".") + # the path which is not a package raises a warning on pypy; + # no idea why only pypy and not normal python warn about it here + with warnings.catch_warnings(): + warnings.simplefilter('ignore', ImportWarning) + result = testdir.runpytest("--pyargs", ".") assert result.ret == 0 result.stdout.fnmatch_lines([ "*2 passed*" @@ -601,8 +624,10 @@ class TestInvocationVariants: for p in search_path: monkeypatch.syspath_prepend(p) + os.chdir('world') # mixed module and filenames: - result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "world/ns_pkg") + result = testdir.runpytest("--pyargs", "-v", "ns_pkg.hello", "ns_pkg/world") + testdir.chdir() assert result.ret == 0 result.stdout.fnmatch_lines([ "*test_hello.py::test_hello*PASSED", @@ -664,13 +689,12 @@ class TestInvocationVariants: import _pytest.config assert type(_pytest.config.get_plugin_manager()) is _pytest.config.PytestPluginManager - def test_has_plugin(self, request): """Test hasplugin function of the plugin manager (#932).""" assert request.config.pluginmanager.hasplugin('python') -class TestDurations: +class TestDurations(object): source = """ import time frag = 0.002 @@ -707,12 +731,12 @@ class TestDurations: result = testdir.runpytest("--durations=0") assert result.ret == 0 for x in "123": - for y in 'call',: #'setup', 'call', 'teardown': + for y in 'call', : # 'setup', 
'call', 'teardown': for line in result.stdout.lines: if ("test_%s" % x) in line and y in line: break else: - raise AssertionError("not found %s %s" % (x,y)) + raise AssertionError("not found %s %s" % (x, y)) def test_with_deselected(self, testdir): testdir.makepyfile(self.source) @@ -741,7 +765,7 @@ class TestDurations: assert result.ret == 0 -class TestDurationWithFixture: +class TestDurationWithFixture(object): source = """ import time frag = 0.001 @@ -752,6 +776,7 @@ class TestDurationWithFixture: def test_2(): time.sleep(frag) """ + def test_setup_function(self, testdir): testdir.makepyfile(self.source) result = testdir.runpytest("--durations=10") @@ -781,3 +806,45 @@ def test_zipimport_hook(testdir, tmpdir): assert result.ret == 0 result.stderr.fnmatch_lines(['*not found*foo*']) assert 'INTERNALERROR>' not in result.stdout.str() + + +def test_import_plugin_unicode_name(testdir): + testdir.makepyfile( + myplugin='', + ) + testdir.makepyfile(""" + def test(): pass + """) + testdir.makeconftest(""" + pytest_plugins = [u'myplugin'] + """) + r = testdir.runpytest() + assert r.ret == 0 + + +def test_deferred_hook_checking(testdir): + """ + Check hooks as late as possible (#1821). + """ + testdir.syspathinsert() + testdir.makepyfile(**{ + 'plugin.py': """ + class Hooks: + def pytest_my_hook(self, config): + pass + + def pytest_configure(config): + config.pluginmanager.add_hookspecs(Hooks) + """, + 'conftest.py': """ + pytest_plugins = ['plugin'] + def pytest_my_hook(config): + return 40 + """, + 'test_foo.py': """ + def test(request): + assert request.config.hook.pytest_my_hook(config=request.config) == [40] + """ + }) + result = testdir.runpytest() + result.stdout.fnmatch_lines(['* 1 passed *']) diff --git a/testing/code/test_code.py b/testing/code/test_code.py index ad9db6d2e..209a8ef19 100644 --- a/testing/code/test_code.py +++ b/testing/code/test_code.py @@ -1,8 +1,11 @@ +# coding: utf-8 +from __future__ import absolute_import, division, print_function import sys import _pytest._code import py import pytest +from test_excinfo import TWMock def test_ne(): @@ -11,6 +14,7 @@ def test_ne(): code2 = _pytest._code.Code(compile('foo = "baz"', '', 'exec')) assert code2 != code1 + def test_code_gives_back_name_for_not_existing_file(): name = 'abc-123' co_code = compile("pass\n", name, 'exec') @@ -19,8 +23,9 @@ def test_code_gives_back_name_for_not_existing_file(): assert str(code.path) == name assert code.fullsource is None + def test_code_with_class(): - class A: + class A(object): pass pytest.raises(TypeError, "_pytest._code.Code(A)") @@ -29,11 +34,13 @@ if True: def x(): pass + def test_code_fullsource(): code = _pytest._code.Code(x) full = code.fullsource assert 'test_code_fullsource()' in str(full) + def test_code_source(): code = _pytest._code.Code(x) src = code.source() @@ -41,6 +48,7 @@ def test_code_source(): pass""" assert str(src) == expected + def test_frame_getsourcelineno_myself(): def func(): return sys._getframe(0) @@ -49,6 +57,7 @@ def test_frame_getsourcelineno_myself(): source, lineno = f.code.fullsource, f.lineno assert source[lineno].startswith(" return sys._getframe(0)") + def test_getstatement_empty_fullsource(): def func(): return sys._getframe(0) @@ -61,6 +70,7 @@ def test_getstatement_empty_fullsource(): finally: f.code.__class__.fullsource = prop + def test_code_from_func(): co = _pytest._code.Code(test_frame_getsourcelineno_myself) assert co.firstlineno @@ -91,6 +101,7 @@ def test_unicode_handling_syntax_error(): if sys.version_info[0] < 3: unicode(excinfo) + def 
test_code_getargs(): def f1(x): pass @@ -136,26 +147,50 @@ def test_frame_getargs(): ('z', {'c': 'd'})] -class TestExceptionInfo: +class TestExceptionInfo(object): def test_bad_getsource(self): try: - if False: pass - else: assert False + if False: + pass + else: + assert False except AssertionError: exci = _pytest._code.ExceptionInfo() assert exci.getrepr() -class TestTracebackEntry: +class TestTracebackEntry(object): def test_getsource(self): try: - if False: pass - else: assert False + if False: + pass + else: + assert False except AssertionError: exci = _pytest._code.ExceptionInfo() entry = exci.traceback[0] source = entry.getsource() - assert len(source) == 4 - assert 'else: assert False' in source[3] + assert len(source) == 6 + assert 'assert False' in source[5] + + +class TestReprFuncArgs(object): + + def test_not_raise_exception_with_mixed_encoding(self): + from _pytest._code.code import ReprFuncArgs + + tw = TWMock() + + args = [ + ('unicode_string', u"São Paulo"), + ('utf8_string', 'S\xc3\xa3o Paulo'), + ] + + r = ReprFuncArgs(args) + r.toterminal(tw) + if sys.version_info[0] >= 3: + assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo' + else: + assert tw.lines[0] == 'unicode_string = São Paulo, utf8_string = São Paulo' diff --git a/testing/code/test_excinfo.py b/testing/code/test_excinfo.py index 23b0a985e..263d053b5 100644 --- a/testing/code/test_excinfo.py +++ b/testing/code/test_excinfo.py @@ -1,5 +1,7 @@ # -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function +import sys import operator import _pytest import py @@ -10,9 +12,6 @@ from _pytest._code.code import ( ReprExceptionInfo, ExceptionChainRepr) -queue = py.builtin._tryimport('queue', 'Queue') - -failsonjython = pytest.mark.xfail("sys.platform.startswith('java')") from test_source import astonly try: @@ -22,23 +21,32 @@ except ImportError: else: invalidate_import_caches = getattr(importlib, "invalidate_caches", None) -import pytest +queue = py.builtin._tryimport('queue', 'Queue') + +failsonjython = pytest.mark.xfail("sys.platform.startswith('java')") + pytest_version_info = tuple(map(int, pytest.__version__.split(".")[:3])) -class TWMock: + +class TWMock(object): WRITE = object() def __init__(self): self.lines = [] self.is_writing = False + def sep(self, sep, line=None): self.lines.append((sep, line)) + def write(self, msg, **kw): self.lines.append((TWMock.WRITE, msg)) + def line(self, line, **kw): self.lines.append(line) + def markup(self, text, **kw): return text + def get_write_msg(self, idx): flag, msg = self.lines[idx] assert flag == TWMock.WRITE @@ -46,6 +54,7 @@ class TWMock: fullwidth = 80 + def test_excinfo_simple(): try: raise ValueError @@ -53,6 +62,7 @@ def test_excinfo_simple(): info = _pytest._code.ExceptionInfo() assert info.type == ValueError + def test_excinfo_getstatement(): def g(): raise ValueError @@ -67,29 +77,36 @@ def test_excinfo_getstatement(): linenumbers = [_pytest._code.getrawcode(f).co_firstlineno - 1 + 4, _pytest._code.getrawcode(f).co_firstlineno - 1 + 1, _pytest._code.getrawcode(g).co_firstlineno - 1 + 1, ] - l = list(excinfo.traceback) - foundlinenumbers = [x.lineno for x in l] + values = list(excinfo.traceback) + foundlinenumbers = [x.lineno for x in values] assert foundlinenumbers == linenumbers - #for x in info: + # for x in info: # print "%s:%d %s" %(x.path.relto(root), x.lineno, x.statement) - #xxx + # xxx # testchain for getentries test below + + def f(): # raise ValueError # + + def g(): # __tracebackhide__ = True f() # + + 
def h(): # g() # -class TestTraceback_f_g_h: + +class TestTraceback_f_g_h(object): def setup_method(self, method): try: h() @@ -99,8 +116,8 @@ class TestTraceback_f_g_h: def test_traceback_entries(self): tb = self.excinfo.traceback entries = list(tb) - assert len(tb) == 4 # maybe fragile test - assert len(entries) == 4 # maybe fragile test + assert len(tb) == 4 # maybe fragile test + assert len(entries) == 4 # maybe fragile test names = ['f', 'g', 'h'] for entry in entries: try: @@ -111,7 +128,7 @@ class TestTraceback_f_g_h: def test_traceback_entry_getsource(self): tb = self.excinfo.traceback - s = str(tb[-1].getsource() ) + s = str(tb[-1].getsource()) assert s.startswith("def f():") assert s.endswith("raise ValueError") @@ -127,10 +144,10 @@ class TestTraceback_f_g_h: xyz() """) try: - exec (source.compile()) + exec(source.compile()) except NameError: tb = _pytest._code.ExceptionInfo().traceback - print (tb[-1].getsource()) + print(tb[-1].getsource()) s = str(tb[-1].getsource()) assert s.startswith("def xyz():\n try:") assert s.strip().endswith("except somenoname:") @@ -141,7 +158,7 @@ class TestTraceback_f_g_h: traceback = self.excinfo.traceback newtraceback = traceback.cut(path=path, firstlineno=firstlineno) assert len(newtraceback) == 1 - newtraceback = traceback.cut(path=path, lineno=firstlineno+2) + newtraceback = traceback.cut(path=path, lineno=firstlineno + 2) assert len(newtraceback) == 1 def test_traceback_cut_excludepath(self, testdir): @@ -208,7 +225,7 @@ class TestTraceback_f_g_h: def f(n): if n == 0: raise RuntimeError("hello") - f(n-1) + f(n - 1) excinfo = pytest.raises(RuntimeError, f, 100) monkeypatch.delattr(excinfo.traceback.__class__, "recursionindex") @@ -227,7 +244,7 @@ class TestTraceback_f_g_h: def f(n): try: do_stuff() - except: + except: # noqa reraise_me() excinfo = pytest.raises(RuntimeError, f, 8) @@ -236,7 +253,7 @@ class TestTraceback_f_g_h: assert recindex is None def test_traceback_messy_recursion(self): - #XXX: simplified locally testable version + # XXX: simplified locally testable version decorator = pytest.importorskip('decorator').decorator def log(f, *k, **kw): @@ -292,44 +309,50 @@ class TestTraceback_f_g_h: assert entry.lineno == co.firstlineno + 2 assert entry.frame.code.name == 'g' + def test_excinfo_exconly(): excinfo = pytest.raises(ValueError, h) assert excinfo.exconly().startswith('ValueError') excinfo = pytest.raises(ValueError, - "raise ValueError('hello\\nworld')") + "raise ValueError('hello\\nworld')") msg = excinfo.exconly(tryshort=True) assert msg.startswith('ValueError') assert msg.endswith("world") + def test_excinfo_repr(): excinfo = pytest.raises(ValueError, h) s = repr(excinfo) assert s == "" + def test_excinfo_str(): excinfo = pytest.raises(ValueError, h) s = str(excinfo) - assert s.startswith(__file__[:-9]) # pyc file and $py.class + assert s.startswith(__file__[:-9]) # pyc file and $py.class assert s.endswith("ValueError") - assert len(s.split(":")) >= 3 # on windows it's 4 + assert len(s.split(":")) >= 3 # on windows it's 4 + def test_excinfo_errisinstance(): excinfo = pytest.raises(ValueError, h) assert excinfo.errisinstance(ValueError) + def test_excinfo_no_sourcecode(): try: - exec ("raise ValueError()") + exec("raise ValueError()") except ValueError: excinfo = _pytest._code.ExceptionInfo() s = str(excinfo.traceback[-1]) - if py.std.sys.version_info < (2,5): + if py.std.sys.version_info < (2, 5): assert s == " File '':1 in ?\n ???\n" else: assert s == " File '':1 in \n ???\n" + def test_excinfo_no_python_sourcecode(tmpdir): 
- #XXX: simplified locally testable version + # XXX: simplified locally testable version tmpdir.join('test.txt').write("{{ h()}}:") jinja2 = pytest.importorskip('jinja2') @@ -337,10 +360,10 @@ def test_excinfo_no_python_sourcecode(tmpdir): env = jinja2.Environment(loader=loader) template = env.get_template('test.txt') excinfo = pytest.raises(ValueError, - template.render, h=h) + template.render, h=h) for item in excinfo.traceback: - print(item) #XXX: for some reason jinja.Template.render is printed in full - item.source # shouldnt fail + print(item) # XXX: for some reason jinja.Template.render is printed in full + item.source # shouldnt fail if item.path.basename == 'test.txt': assert str(item.source) == '{{ h()}}:' @@ -356,6 +379,7 @@ def test_entrysource_Queue_example(): s = str(source).strip() assert s.startswith("def get") + def test_codepath_Queue_example(): try: queue.Queue().get(timeout=0.001) @@ -367,11 +391,13 @@ def test_codepath_Queue_example(): assert path.basename.lower() == "queue.py" assert path.check() + def test_match_succeeds(): with pytest.raises(ZeroDivisionError) as excinfo: - 0 / 0 + 0 // 0 excinfo.match(r'.*zero.*') + def test_match_raises_error(testdir): testdir.makepyfile(""" import pytest @@ -386,7 +412,8 @@ def test_match_raises_error(testdir): "*AssertionError*Pattern*[123]*not found*", ]) -class TestFormattedExcinfo: + +class TestFormattedExcinfo(object): @pytest.fixture def importasmod(self, request): @@ -404,10 +431,10 @@ class TestFormattedExcinfo: def excinfo_from_exec(self, source): source = _pytest._code.Source(source).strip() try: - exec (source.compile()) + exec(source.compile()) except KeyboardInterrupt: raise - except: + except: # noqa return _pytest._code.ExceptionInfo() assert 0, "did not raise" @@ -440,12 +467,11 @@ class TestFormattedExcinfo: 'E AssertionError' ] - def test_repr_source_not_existing(self): pr = FormattedExcinfo() co = compile("raise ValueError()", "", "exec") try: - exec (co) + exec(co) except ValueError: excinfo = _pytest._code.ExceptionInfo() repr = pr.repr_excinfo(excinfo) @@ -460,7 +486,7 @@ a = 1 raise ValueError() """, "", "exec") try: - exec (co) + exec(co) except ValueError: excinfo = _pytest._code.ExceptionInfo() repr = pr.repr_excinfo(excinfo) @@ -472,7 +498,7 @@ raise ValueError() pr = FormattedExcinfo() class FakeCode(object): - class raw: + class raw(object): co_filename = '?' path = '?' @@ -490,7 +516,7 @@ raise ValueError() class FakeTracebackEntry(_pytest._code.Traceback.Entry): def __init__(self, tb, excinfo=None): - self.lineno = 5+3 + self.lineno = 5 + 3 @property def frame(self): @@ -526,14 +552,12 @@ raise ValueError() if py.std.sys.version_info[0] >= 3: assert repr.chain[0][0].reprentries[0].lines[0] == "> ???" - fail = py.error.ENOENT # noqa repr = pr.repr_excinfo(excinfo) assert repr.reprtraceback.reprentries[0].lines[0] == "> ???" if py.std.sys.version_info[0] >= 3: assert repr.chain[0][0].reprentries[0].lines[0] == "> ???" 
- def test_repr_local(self): p = FormattedExcinfo(showlocals=True) loc = {'y': 5, 'z': 7, 'x': 3, '@x': 2, '__builtins__': {}} @@ -573,19 +597,19 @@ raise ValueError() loc = repr_entry.reprfileloc assert loc.path == mod.__file__ assert loc.lineno == 3 - #assert loc.message == "ValueError: hello" + # assert loc.message == "ValueError: hello" def test_repr_tracebackentry_lines2(self, importasmod): mod = importasmod(""" def func1(m, x, y, z): raise ValueError("hello\\nworld") """) - excinfo = pytest.raises(ValueError, mod.func1, "m"*90, 5, 13, "z"*120) + excinfo = pytest.raises(ValueError, mod.func1, "m" * 90, 5, 13, "z" * 120) excinfo.traceback = excinfo.traceback.filter() entry = excinfo.traceback[-1] p = FormattedExcinfo(funcargs=True) reprfuncargs = p.repr_args(entry) - assert reprfuncargs.args[0] == ('m', repr("m"*90)) + assert reprfuncargs.args[0] == ('m', repr("m" * 90)) assert reprfuncargs.args[1] == ('x', '5') assert reprfuncargs.args[2] == ('y', '13') assert reprfuncargs.args[3] == ('z', repr("z" * 120)) @@ -932,10 +956,10 @@ raise ValueError() @pytest.mark.parametrize('reproptions', [ {'style': style, 'showlocals': showlocals, 'funcargs': funcargs, 'tbfilter': tbfilter - } for style in ("long", "short", "no") - for showlocals in (True, False) - for tbfilter in (True, False) - for funcargs in (True, False)]) + } for style in ("long", "short", "no") + for showlocals in (True, False) + for tbfilter in (True, False) + for funcargs in (True, False)]) def test_format_excinfo(self, importasmod, reproptions): mod = importasmod(""" def g(x): @@ -967,7 +991,8 @@ raise ValueError() r = excinfo.getrepr(style="long") tw = TWMock() r.toterminal(tw) - for line in tw.lines: print (line) + for line in tw.lines: + print(line) assert tw.lines[0] == "" assert tw.lines[1] == " def f():" assert tw.lines[2] == "> g()" @@ -1014,19 +1039,20 @@ raise ValueError() r = excinfo.getrepr(style="long") tw = TWMock() r.toterminal(tw) - for line in tw.lines: print (line) - assert tw.lines[0] == "" - assert tw.lines[1] == " def f():" - assert tw.lines[2] == " try:" - assert tw.lines[3] == "> g()" - assert tw.lines[4] == "" + for line in tw.lines: + print(line) + assert tw.lines[0] == "" + assert tw.lines[1] == " def f():" + assert tw.lines[2] == " try:" + assert tw.lines[3] == "> g()" + assert tw.lines[4] == "" line = tw.get_write_msg(5) assert line.endswith('mod.py') - assert tw.lines[6] == ':6: ' - assert tw.lines[7] == ("_ ", None) - assert tw.lines[8] == "" - assert tw.lines[9] == " def g():" - assert tw.lines[10] == "> raise ValueError()" + assert tw.lines[6] == ':6: ' + assert tw.lines[7] == ("_ ", None) + assert tw.lines[8] == "" + assert tw.lines[9] == " def g():" + assert tw.lines[10] == "> raise ValueError()" assert tw.lines[11] == "E ValueError" assert tw.lines[12] == "" line = tw.get_write_msg(13) @@ -1069,6 +1095,36 @@ raise ValueError() assert line.endswith('mod.py') assert tw.lines[47] == ":15: AttributeError" + @pytest.mark.skipif("sys.version_info[0] < 3") + def test_exc_repr_with_raise_from_none_chain_suppression(self, importasmod): + mod = importasmod(""" + def f(): + try: + g() + except Exception: + raise AttributeError() from None + def g(): + raise ValueError() + """) + excinfo = pytest.raises(AttributeError, mod.f) + r = excinfo.getrepr(style="long") + tw = TWMock() + r.toterminal(tw) + for line in tw.lines: + print(line) + assert tw.lines[0] == "" + assert tw.lines[1] == " def f():" + assert tw.lines[2] == " try:" + assert tw.lines[3] == " g()" + assert tw.lines[4] == " except Exception:" + 
assert tw.lines[5] == "> raise AttributeError() from None" + assert tw.lines[6] == "E AttributeError" + assert tw.lines[7] == "" + line = tw.get_write_msg(8) + assert line.endswith('mod.py') + assert tw.lines[9] == ":6: AttributeError" + assert len(tw.lines) == 10 + @pytest.mark.skipif("sys.version_info[0] < 3") @pytest.mark.parametrize('reason, description', [ ('cause', 'The above exception was the direct cause of the following exception:'), @@ -1139,3 +1195,58 @@ def test_cwd_deleted(testdir): result = testdir.runpytest() result.stdout.fnmatch_lines(['* 1 failed in *']) assert 'INTERNALERROR' not in result.stdout.str() + result.stderr.str() + + +def test_exception_repr_extraction_error_on_recursion(): + """ + Ensure we can properly detect a recursion error even + if some locals raise an error on comparison (#2459). + """ + class numpy_like(object): + + def __eq__(self, other): + if type(other) is numpy_like: + raise ValueError('The truth value of an array ' + 'with more than one element is ambiguous.') + + def a(x): + return b(numpy_like()) + + def b(x): + return a(numpy_like()) + + try: + a(numpy_like()) + except: # noqa + from _pytest._code.code import ExceptionInfo + from _pytest.pytester import LineMatcher + exc_info = ExceptionInfo() + + matcher = LineMatcher(str(exc_info.getrepr()).splitlines()) + matcher.fnmatch_lines([ + '!!! Recursion error detected, but an error occurred locating the origin of recursion.', + '*The following exception happened*', + '*ValueError: The truth value of an array*', + ]) + + +def test_no_recursion_index_on_recursion_error(): + """ + Ensure that we don't break in case we can't find the recursion index + during a recursion error (#2486). + """ + try: + class RecursionDepthError(object): + def __getattr__(self, attr): + return getattr(self, '_' + attr) + + RecursionDepthError().trigger + except: # noqa + from _pytest._code.code import ExceptionInfo + exc_info = ExceptionInfo() + if sys.version_info[:2] == (2, 6): + assert "'RecursionDepthError' object has no attribute '___" in str(exc_info.getrepr()) + else: + assert 'maximum recursion' in str(exc_info.getrepr()) + else: + assert 0 diff --git a/testing/code/test_source.py b/testing/code/test_source.py index 13bfccd54..4f3796cb4 100644 --- a/testing/code/test_source.py +++ b/testing/code/test_source.py @@ -1,6 +1,7 @@ # flake8: noqa # disable flake check on this file because some constructs are strange # or redundant on purpose and can't be disable on a line-by-line basis +from __future__ import absolute_import, division, print_function import sys import _pytest._code @@ -16,6 +17,7 @@ else: failsonjython = pytest.mark.xfail("sys.platform.startswith('java')") + def test_source_str_function(): x = Source("3") assert str(x) == "3" @@ -33,6 +35,7 @@ def test_source_str_function(): """, rstrip=True) assert str(x) == "\n3" + def test_unicode(): try: unicode @@ -44,23 +47,27 @@ def test_unicode(): val = eval(co) assert isinstance(val, unicode) + def test_source_from_function(): source = _pytest._code.Source(test_source_str_function) assert str(source).startswith('def test_source_str_function():') + def test_source_from_method(): - class TestClass: + class TestClass(object): def test_method(self): pass source = _pytest._code.Source(TestClass().test_method) assert source.lines == ["def test_method(self):", " pass"] + def test_source_from_lines(): lines = ["a \n", "b\n", "c"] source = _pytest._code.Source(lines) assert source.lines == ['a ', 'b', 'c'] + def test_source_from_inner_function(): def f(): pass @@ -69,6
+76,7 @@ def test_source_from_inner_function(): source = _pytest._code.Source(f) assert str(source).startswith('def f():') + def test_source_putaround_simple(): source = Source("raise ValueError") source = source.putaround( @@ -77,7 +85,7 @@ def test_source_putaround_simple(): x = 42 else: x = 23""") - assert str(source)=="""\ + assert str(source) == """\ try: raise ValueError except ValueError: @@ -85,6 +93,7 @@ except ValueError: else: x = 23""" + def test_source_putaround(): source = Source() source = source.putaround(""" @@ -93,24 +102,28 @@ def test_source_putaround(): """) assert str(source).strip() == "if 1:\n x=1" + def test_source_strips(): source = Source("") assert source == Source() assert str(source) == '' assert source.strip() == source + def test_source_strip_multiline(): source = Source() source.lines = ["", " hello", " "] source2 = source.strip() assert source2.lines == [" hello"] + def test_syntaxerror_rerepresentation(): ex = pytest.raises(SyntaxError, _pytest._code.compile, 'xyz xyz') assert ex.value.lineno == 1 - assert ex.value.offset in (4,7) # XXX pypy/jython versus cpython? + assert ex.value.offset in (4, 7) # XXX pypy/jython versus cpython? assert ex.value.text.strip(), 'x x' + def test_isparseable(): assert Source("hello").isparseable() assert Source("if 1:\n pass").isparseable() @@ -119,13 +132,15 @@ def test_isparseable(): assert not Source(" \nif 1:\npass").isparseable() assert not Source(chr(0)).isparseable() -class TestAccesses: + +class TestAccesses(object): source = Source("""\ def f(x): pass def g(x): pass """) + def test_getrange(self): x = self.source[0:2] assert x.isparseable() @@ -140,10 +155,11 @@ class TestAccesses: assert len(self.source) == 4 def test_iter(self): - l = [x for x in self.source] - assert len(l) == 4 + values = [x for x in self.source] + assert len(values) == 4 -class TestSourceParsingAndCompiling: + +class TestSourceParsingAndCompiling(object): source = Source("""\ def f(x): assert (x == @@ -154,12 +170,12 @@ class TestSourceParsingAndCompiling: def test_compile(self): co = _pytest._code.compile("x=3") d = {} - exec (co, d) + exec(co, d) assert d['x'] == 3 def test_compile_and_getsource_simple(self): co = _pytest._code.compile("x=3") - exec (co) + exec(co) source = _pytest._code.Source(co) assert str(source) == "x=3" @@ -180,16 +196,16 @@ class TestSourceParsingAndCompiling: assert 'ValueError' in source2 def test_getstatement(self): - #print str(self.source) + # print str(self.source) ass = str(self.source[1:]) for i in range(1, 4): - #print "trying start in line %r" % self.source[i] + # print "trying start in line %r" % self.source[i] s = self.source.getstatement(i) #x = s.deindent() assert str(s) == ass def test_getstatementrange_triple_quoted(self): - #print str(self.source) + # print str(self.source) source = Source("""hello(''' ''')""") s = source.getstatement(0) @@ -210,12 +226,12 @@ class TestSourceParsingAndCompiling: """) assert len(source) == 7 # check all lineno's that could occur in a traceback - #assert source.getstatementrange(0) == (0, 7) - #assert source.getstatementrange(1) == (1, 5) + # assert source.getstatementrange(0) == (0, 7) + # assert source.getstatementrange(1) == (1, 5) assert source.getstatementrange(2) == (2, 3) assert source.getstatementrange(3) == (3, 4) assert source.getstatementrange(4) == (4, 5) - #assert source.getstatementrange(5) == (0, 7) + # assert source.getstatementrange(5) == (0, 7) assert source.getstatementrange(6) == (6, 7) def test_getstatementrange_bug(self): @@ -261,7 +277,7 @@ class 
TestSourceParsingAndCompiling: def test_getstatementrange_out_of_bounds_py3(self): source = Source("if xxx:\n from .collections import something") r = source.getstatementrange(1) - assert r == (1,2) + assert r == (1, 2) def test_getstatementrange_with_syntaxerror_issue7(self): source = Source(":") @@ -282,7 +298,7 @@ class TestSourceParsingAndCompiling: excinfo = pytest.raises(AssertionError, "f(6)") frame = excinfo.traceback[-1].frame stmt = frame.code.fullsource.getstatement(frame.lineno) - #print "block", str(block) + # print "block", str(block) assert str(stmt).strip().startswith('assert') @pytest.mark.parametrize('name', ['', None, 'my']) @@ -290,9 +306,9 @@ class TestSourceParsingAndCompiling: def check(comp, name): co = comp(self.source, name) if not name: - expected = "codegen %s:%d>" %(mypath, mylineno+2+2) + expected = "codegen %s:%d>" % (mypath, mylineno + 2 + 2) else: - expected = "codegen %r %s:%d>" % (name, mypath, mylineno+2+2) + expected = "codegen %r %s:%d>" % (name, mypath, mylineno + 2 + 2) fn = co.co_filename assert fn.endswith(expected) @@ -306,30 +322,18 @@ class TestSourceParsingAndCompiling: def test_offsetless_synerr(self): pytest.raises(SyntaxError, _pytest._code.compile, "lambda a,a: 0", mode='eval') + def test_getstartingblock_singleline(): - class A: + class A(object): def __init__(self, *args): frame = sys._getframe(1) self.source = _pytest._code.Frame(frame).statement x = A('x', 'y') - l = [i for i in x.source.lines if i.strip()] - assert len(l) == 1 + values = [i for i in x.source.lines if i.strip()] + assert len(values) == 1 -def test_getstartingblock_multiline(): - class A: - def __init__(self, *args): - frame = sys._getframe(1) - self.source = _pytest._code.Frame(frame).statement - - x = A('x', - 'y' \ - , - 'z') - - l = [i for i in x.source.lines if i.strip()] - assert len(l) == 4 def test_getline_finally(): def c(): pass @@ -344,6 +348,7 @@ def test_getline_finally(): source = excinfo.traceback[-1].statement assert str(source).strip() == 'c(1)' + def test_getfuncsource_dynamic(): source = """ def f(): @@ -385,7 +390,7 @@ def test_deindent(): lines = deindent(source.splitlines()) assert lines == ['', 'def f():', ' def g():', ' pass', ' '] -@pytest.mark.xfail("sys.version_info[:3] < (2,7,0)") + def test_source_of_class_at_eof_without_newline(tmpdir): # this test fails because the implicit inspect.getsource(A) below # does not return the "x = 1" last line. 
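The ``test_source.py`` hunks above repeatedly exercise ``Source.getstatement(lineno)``, which widens a single line number into the full statement containing it. A minimal sketch of that behaviour, assuming a pytest checkout where ``_pytest._code`` and its ``Source`` class are importable, as these tests do:

.. code-block:: python

    from _pytest._code import Source

    # build a Source from a list of lines, as test_source_from_lines does
    source = Source([
        "def f(x):",
        "    assert (x ==",
        "            3 +",
        "            4)",
    ])

    # line index 2 ("3 +") is widened to the whole multi-line assert
    statement = source.getstatement(2)
    assert str(statement).strip().startswith("assert")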
@@ -399,10 +404,12 @@ def test_source_of_class_at_eof_without_newline(tmpdir): s2 = _pytest._code.Source(tmpdir.join("a.py").pyimport().A) assert str(source).strip() == str(s2).strip() + if True: def x(): pass + def test_getsource_fallback(): from _pytest._code.source import getsource expected = """def x(): @@ -410,6 +417,7 @@ def test_getsource_fallback(): src = getsource(x) assert src == expected + def test_idem_compile_and_getsource(): from _pytest._code.source import getsource expected = "def x(): pass" @@ -417,12 +425,14 @@ def test_idem_compile_and_getsource(): src = getsource(co) assert src == expected + def test_findsource_fallback(): from _pytest._code.source import findsource src, lineno = findsource(x) assert 'test_findsource_simple' in str(src) assert src[lineno] == ' def x():' + def test_findsource(): from _pytest._code.source import findsource co = _pytest._code.compile("""if 1: @@ -449,7 +459,7 @@ def test_getfslineno(): fspath, lineno = getfslineno(f) assert fspath.basename == "test_source.py" - assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource + assert lineno == _pytest._code.getrawcode(f).co_firstlineno - 1 # see findsource class A(object): pass @@ -461,16 +471,19 @@ def test_getfslineno(): assert lineno == A_lineno assert getfslineno(3) == ("", -1) - class B: + + class B(object): pass B.__name__ = "B2" assert getfslineno(B)[1] == -1 + def test_code_of_object_instance_with_call(): - class A: + class A(object): pass pytest.raises(TypeError, lambda: _pytest._code.Source(A())) - class WithCall: + + class WithCall(object): def __call__(self): pass @@ -489,10 +502,12 @@ def getstatement(lineno, source): ast, start, end = getstatementrange_ast(lineno, source) return source[start:end] + def test_oneline(): source = getstatement(0, "raise ValueError") assert str(source) == "raise ValueError" + def test_comment_and_no_newline_at_end(): from _pytest._code.source import getstatementrange_ast source = Source(['def test_basic_complex():', @@ -501,10 +516,12 @@ def test_comment_and_no_newline_at_end(): ast, start, end = getstatementrange_ast(1, source) assert end == 2 + def test_oneline_and_comment(): source = getstatement(0, "raise ValueError\n#hello") assert str(source) == "raise ValueError" + @pytest.mark.xfail(hasattr(sys, "pypy_version_info"), reason='does not work on pypy') def test_comments(): @@ -520,29 +537,33 @@ def test_comments(): comment 4 """ ''' - for line in range(2,6): + for line in range(2, 6): assert str(getstatement(line, source)) == ' x = 1' - for line in range(6,10): + for line in range(6, 10): assert str(getstatement(line, source)) == ' assert False' assert str(getstatement(10, source)) == '"""' + def test_comment_in_statement(): source = '''test(foo=1, # comment 1 bar=2) ''' - for line in range(1,3): + for line in range(1, 3): assert str(getstatement(line, source)) == \ - 'test(foo=1,\n # comment 1\n bar=2)' + 'test(foo=1,\n # comment 1\n bar=2)' + def test_single_line_else(): source = getstatement(1, "if False: 2\nelse: 3") assert str(source) == "else: 3" + def test_single_line_finally(): source = getstatement(1, "try: 1\nfinally: 3") assert str(source) == "finally: 3" + def test_issue55(): source = ('def round_trip(dinp):\n assert 1 == dinp\n' 'def test_rt():\n round_trip("""\n""")\n') @@ -559,7 +580,8 @@ x = 3 """) assert str(source) == "raise ValueError(\n 23\n)" -class TestTry: + +class TestTry(object): pytestmark = astonly source = """\ try: @@ -586,7 +608,8 @@ else: source = getstatement(5, self.source) assert str(source) == 
" raise KeyError()" -class TestTryFinally: + +class TestTryFinally(object): source = """\ try: raise ValueError @@ -603,8 +626,7 @@ finally: assert str(source) == " raise IndexError(1)" - -class TestIf: +class TestIf(object): pytestmark = astonly source = """\ if 1: @@ -631,6 +653,7 @@ else: source = getstatement(5, self.source) assert str(source) == " y = 7" + def test_semicolon(): s = """\ hello ; pytest.skip() @@ -638,6 +661,7 @@ hello ; pytest.skip() source = getstatement(0, s) assert str(source) == s.strip() + def test_def_online(): s = """\ def func(): raise ValueError(42) @@ -648,6 +672,7 @@ def something(): source = getstatement(0, s) assert str(source) == "def func(): raise ValueError(42)" + def XXX_test_expression_multiline(): source = """\ something diff --git a/testing/code/test_source_multiline_block.py b/testing/code/test_source_multiline_block.py new file mode 100644 index 000000000..b356d191f --- /dev/null +++ b/testing/code/test_source_multiline_block.py @@ -0,0 +1,26 @@ +# flake8: noqa +import sys + +import _pytest._code + + +def test_getstartingblock_multiline(): + """ + This test was originally found in test_source.py, but it depends on the weird + formatting of the ``x = A`` construct seen here and our autopep8 tool can only exclude entire + files (it does not support excluding lines/blocks using the traditional #noqa comment yet, + see hhatto/autopep8#307). It was considered better to just move this single test to its own + file and exclude it from autopep8 than try to complicate things. + """ + class A(object): + def __init__(self, *args): + frame = sys._getframe(1) + self.source = _pytest._code.Frame(frame).statement + + x = A('x', + 'y' + , + 'z') + + values = [i for i in x.source.lines if i.strip()] + assert len(values) == 4 diff --git a/testing/deprecated_test.py b/testing/deprecated_test.py index e610458e0..3f244a53c 100644 --- a/testing/deprecated_test.py +++ b/testing/deprecated_test.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pytest @@ -8,12 +9,16 @@ def test_yield_tests_deprecation(testdir): def test_gen(): yield "m1", func1, 15, 3*5 yield "m2", func1, 42, 6*7 + def test_gen2(): + for k in range(10): + yield func1, 1, 1 """) result = testdir.runpytest('-ra') result.stdout.fnmatch_lines([ '*yield tests are deprecated, and scheduled to be removed in pytest 4.0*', '*2 passed*', ]) + assert result.stdout.str().count('yield tests are deprecated') == 2 def test_funcarg_prefix_deprecation(testdir): @@ -26,7 +31,7 @@ def test_funcarg_prefix_deprecation(testdir): """) result = testdir.runpytest('-ra') result.stdout.fnmatch_lines([ - ('WC1 None pytest_funcarg__value: ' + ('*pytest_funcarg__value: ' 'declaring fixtures using "pytest_funcarg__" prefix is deprecated ' 'and scheduled to be removed in pytest 4.0. 
' 'Please remove the prefix and use the @pytest.fixture decorator instead.'), @@ -48,7 +53,7 @@ def test_str_args_deprecated(tmpdir, testdir): from _pytest.main import EXIT_NOTESTSCOLLECTED warnings = [] - class Collect: + class Collect(object): def pytest_logwarning(self, message): warnings.append(message) @@ -73,4 +78,7 @@ def test_resultlog_is_deprecated(testdir): pass ''') result = testdir.runpytest('--result-log=%s' % testdir.tmpdir.join('result.log')) - result.stdout.fnmatch_lines(['*--result-log is deprecated and scheduled for removal in pytest 4.0*']) + result.stdout.fnmatch_lines([ + '*--result-log is deprecated and scheduled for removal in pytest 4.0*', + '*See https://docs.pytest.org/*/usage.html#creating-resultlog-format-files for more information*', + ]) diff --git a/testing/freeze/create_executable.py b/testing/freeze/create_executable.py index 8cf259c40..f4f6088ef 100644 --- a/testing/freeze/create_executable.py +++ b/testing/freeze/create_executable.py @@ -10,4 +10,3 @@ if __name__ == '__main__': hidden.extend(['--hidden-import', x]) args = ['pyinstaller', '--noconfirm'] + hidden + ['runtests_script.py'] subprocess.check_call(' '.join(args), shell=True) - diff --git a/testing/freeze/runtests_script.py b/testing/freeze/runtests_script.py index cb961fc6c..d281601c0 100644 --- a/testing/freeze/runtests_script.py +++ b/testing/freeze/runtests_script.py @@ -6,4 +6,4 @@ py.test main(). if __name__ == '__main__': import sys import pytest - sys.exit(pytest.main()) \ No newline at end of file + sys.exit(pytest.main()) diff --git a/testing/freeze/tests/test_trivial.py b/testing/freeze/tests/test_trivial.py index 6cf6b05ad..45622b850 100644 --- a/testing/freeze/tests/test_trivial.py +++ b/testing/freeze/tests/test_trivial.py @@ -2,5 +2,6 @@ def test_upper(): assert 'foo'.upper() == 'FOO' + def test_lower(): - assert 'FOO'.lower() == 'foo' \ No newline at end of file + assert 'FOO'.lower() == 'foo' diff --git a/testing/freeze/tox_run.py b/testing/freeze/tox_run.py index 5310ac1b7..3fc388040 100644 --- a/testing/freeze/tox_run.py +++ b/testing/freeze/tox_run.py @@ -9,4 +9,4 @@ if __name__ == '__main__': executable = os.path.join(os.getcwd(), 'dist', 'runtests_script', 'runtests_script') if sys.platform.startswith('win'): executable += '.exe' - sys.exit(os.system('%s tests' % executable)) \ No newline at end of file + sys.exit(os.system('%s tests' % executable)) diff --git a/testing/python/approx.py b/testing/python/approx.py index fc1cbf9ab..d591b8ba5 100644 --- a/testing/python/approx.py +++ b/testing/python/approx.py @@ -1,4 +1,5 @@ # encoding: utf-8 +import operator import sys import pytest import doctest @@ -20,7 +21,7 @@ class MyDocTestRunner(doctest.DocTestRunner): example.source.strip(), got.strip(), example.want.strip())) -class TestApprox: +class TestApprox(object): def test_repr_string(self): # for some reason in Python 2.6 it is not displaying the tolerance representation correctly @@ -29,12 +30,21 @@ class TestApprox: if sys.version_info[:2] == (2, 6): tol1, tol2, infr = '???', '???', '???' 
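# A standalone sketch (not part of this patch) of the behaviour that
# test_repr_string and its neighbours exercise: approx compares within the
# larger of a relative and an absolute tolerance, with rel defaulting to
# 1e-6 and abs to 1e-12.
from pytest import approx

assert 0.1 + 0.2 == approx(0.3)              # default relative tolerance
assert 1.0 + 1e-8 == approx(1.0, rel=5e-8)   # explicit relative tolerance
assert 1e-12 == approx(0.0, abs=1e-9)        # absolute tolerance near zero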
assert repr(approx(1.0)) == '1.0 {pm} {tol1}'.format(pm=plus_minus, tol1=tol1) - assert repr(approx([1.0, 2.0])) == '1.0 {pm} {tol1}, 2.0 {pm} {tol2}'.format(pm=plus_minus, tol1=tol1, tol2=tol2) + assert repr(approx([1.0, 2.0])) == 'approx([1.0 {pm} {tol1}, 2.0 {pm} {tol2}])'.format( + pm=plus_minus, tol1=tol1, tol2=tol2) + assert repr(approx((1.0, 2.0))) == 'approx((1.0 {pm} {tol1}, 2.0 {pm} {tol2}))'.format( + pm=plus_minus, tol1=tol1, tol2=tol2) assert repr(approx(inf)) == 'inf' assert repr(approx(1.0, rel=nan)) == '1.0 {pm} ???'.format(pm=plus_minus) assert repr(approx(1.0, rel=inf)) == '1.0 {pm} {infr}'.format(pm=plus_minus, infr=infr) assert repr(approx(1.0j, rel=inf)) == '1j' + # Dictionaries aren't ordered, so we need to check both orders. + assert repr(approx({'a': 1.0, 'b': 2.0})) in ( + "approx({{'a': 1.0 {pm} {tol1}, 'b': 2.0 {pm} {tol2}}})".format(pm=plus_minus, tol1=tol1, tol2=tol2), + "approx({{'b': 2.0 {pm} {tol2}, 'a': 1.0 {pm} {tol1}}})".format(pm=plus_minus, tol1=tol1, tol2=tol2), + ) + def test_operator_overloading(self): assert 1 == approx(1, rel=1e-6, abs=1e-12) assert not (1 != approx(1, rel=1e-6, abs=1e-12)) @@ -43,30 +53,30 @@ class TestApprox: def test_exactly_equal(self): examples = [ - (2.0, 2.0), - (0.1e200, 0.1e200), - (1.123e-300, 1.123e-300), - (12345, 12345.0), - (0.0, -0.0), - (345678, 345678), - (Decimal('1.0001'), Decimal('1.0001')), - (Fraction(1, 3), Fraction(-1, -3)), + (2.0, 2.0), + (0.1e200, 0.1e200), + (1.123e-300, 1.123e-300), + (12345, 12345.0), + (0.0, -0.0), + (345678, 345678), + (Decimal('1.0001'), Decimal('1.0001')), + (Fraction(1, 3), Fraction(-1, -3)), ] for a, x in examples: assert a == approx(x) def test_opposite_sign(self): examples = [ - (eq, 1e-100, -1e-100), - (ne, 1e100, -1e100), + (eq, 1e-100, -1e-100), + (ne, 1e100, -1e100), ] for op, a, x in examples: assert op(a, approx(x)) def test_zero_tolerance(self): within_1e10 = [ - (1.1e-100, 1e-100), - (-1.1e-100, -1e-100), + (1.1e-100, 1e-100), + (-1.1e-100, -1e-100), ] for a, x in within_1e10: assert x == approx(x, rel=0.0, abs=0.0) @@ -79,11 +89,11 @@ class TestApprox: def test_negative_tolerance(self): # Negative tolerances are not allowed. illegal_kwargs = [ - dict(rel=-1e100), - dict(abs=-1e100), - dict(rel=1e100, abs=-1e100), - dict(rel=-1e100, abs=1e100), - dict(rel=-1e100, abs=-1e100), + dict(rel=-1e100), + dict(abs=-1e100), + dict(rel=1e100, abs=-1e100), + dict(rel=-1e100, abs=1e100), + dict(rel=-1e100, abs=-1e100), ] for kwargs in illegal_kwargs: with pytest.raises(ValueError): @@ -92,10 +102,10 @@ class TestApprox: def test_inf_tolerance(self): # Everything should be equal if the tolerance is infinite. large_diffs = [ - (1, 1000), - (1e-50, 1e50), - (-1.0, -1e300), - (0.0, 10), + (1, 1000), + (1e-50, 1e50), + (-1.0, -1e300), + (0.0, 10), ] for a, x in large_diffs: assert a != approx(x, rel=0.0, abs=0.0) @@ -107,8 +117,8 @@ class TestApprox: # If the relative tolerance is zero but the expected value is infinite, # the actual tolerance is a NaN, which should be an error. 
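# Sketch of the validation the surrounding tests pin down: the effective
# tolerance is max(rel * abs(expected), abs), so a negative or NaN
# tolerance (or an infinite rel against an expected value of zero, whose
# product is NaN) is rejected with ValueError when the comparison runs.
import pytest
from pytest import approx

assert 1e8 + 1.0 == approx(1e8, rel=5e-8, abs=0.0)   # relative term dominates
assert 1.0 + 4e-8 != approx(1.0, rel=1e-8, abs=0.0)  # outside the tolerance
with pytest.raises(ValueError):
    1.0 == approx(1.0, rel=-1e100)                   # illegal tolerance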
illegal_kwargs = [ - dict(rel=inf, abs=0.0), - dict(rel=inf, abs=inf), + dict(rel=inf, abs=0.0), + dict(rel=inf, abs=inf), ] for kwargs in illegal_kwargs: with pytest.raises(ValueError): @@ -116,9 +126,9 @@ class TestApprox: def test_nan_tolerance(self): illegal_kwargs = [ - dict(rel=nan), - dict(abs=nan), - dict(rel=nan, abs=nan), + dict(rel=nan), + dict(abs=nan), + dict(rel=nan, abs=nan), ] for kwargs in illegal_kwargs: with pytest.raises(ValueError): @@ -135,15 +145,15 @@ class TestApprox: # None of the other tests (except the doctests) should be affected by # the choice of defaults. examples = [ - # Relative tolerance used. - (eq, 1e100 + 1e94, 1e100), - (ne, 1e100 + 2e94, 1e100), - (eq, 1e0 + 1e-6, 1e0), - (ne, 1e0 + 2e-6, 1e0), - # Absolute tolerance used. - (eq, 1e-100, + 1e-106), - (eq, 1e-100, + 2e-106), - (eq, 1e-100, 0), + # Relative tolerance used. + (eq, 1e100 + 1e94, 1e100), + (ne, 1e100 + 2e94, 1e100), + (eq, 1e0 + 1e-6, 1e0), + (ne, 1e0 + 2e-6, 1e0), + # Absolute tolerance used. + (eq, 1e-100, + 1e-106), + (eq, 1e-100, + 2e-106), + (eq, 1e-100, 0), ] for op, a, x in examples: assert op(a, approx(x)) @@ -166,9 +176,9 @@ class TestApprox: def test_relative_tolerance(self): within_1e8_rel = [ - (1e8 + 1e0, 1e8), - (1e0 + 1e-8, 1e0), - (1e-8 + 1e-16, 1e-8), + (1e8 + 1e0, 1e8), + (1e0 + 1e-8, 1e0), + (1e-8 + 1e-16, 1e-8), ] for a, x in within_1e8_rel: assert a == approx(x, rel=5e-8, abs=0.0) @@ -176,9 +186,9 @@ class TestApprox: def test_absolute_tolerance(self): within_1e8_abs = [ - (1e8 + 9e-9, 1e8), - (1e0 + 9e-9, 1e0), - (1e-8 + 9e-9, 1e-8), + (1e8 + 9e-9, 1e8), + (1e0 + 9e-9, 1e0), + (1e-8 + 9e-9, 1e-8), ] for a, x in within_1e8_abs: assert a == approx(x, rel=0, abs=5e-8) @@ -186,106 +196,170 @@ class TestApprox: def test_expecting_zero(self): examples = [ - (ne, 1e-6, 0.0), - (ne, -1e-6, 0.0), - (eq, 1e-12, 0.0), - (eq, -1e-12, 0.0), - (ne, 2e-12, 0.0), - (ne, -2e-12, 0.0), - (ne, inf, 0.0), - (ne, nan, 0.0), - ] + (ne, 1e-6, 0.0), + (ne, -1e-6, 0.0), + (eq, 1e-12, 0.0), + (eq, -1e-12, 0.0), + (ne, 2e-12, 0.0), + (ne, -2e-12, 0.0), + (ne, inf, 0.0), + (ne, nan, 0.0), + ] for op, a, x in examples: assert op(a, approx(x, rel=0.0, abs=1e-12)) assert op(a, approx(x, rel=1e-6, abs=1e-12)) def test_expecting_inf(self): examples = [ - (eq, inf, inf), - (eq, -inf, -inf), - (ne, inf, -inf), - (ne, 0.0, inf), - (ne, nan, inf), + (eq, inf, inf), + (eq, -inf, -inf), + (ne, inf, -inf), + (ne, 0.0, inf), + (ne, nan, inf), ] for op, a, x in examples: assert op(a, approx(x)) def test_expecting_nan(self): examples = [ - (nan, nan), - (-nan, -nan), - (nan, -nan), - (0.0, nan), - (inf, nan), + (eq, nan, nan), + (eq, -nan, -nan), + (eq, nan, -nan), + (ne, 0.0, nan), + (ne, inf, nan), ] - for a, x in examples: - # If there is a relative tolerance and the expected value is NaN, - # the actual tolerance is a NaN, which should be an error. - with pytest.raises(ValueError): - a != approx(x, rel=inf) + for op, a, x in examples: + # Nothing is equal to NaN by default. + assert a != approx(x) - # You can make comparisons against NaN by not specifying a relative - # tolerance, so only an absolute tolerance is calculated. 
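# The new NaN semantics in brief (the lines removed just below implemented
# the old opt-out via infinite tolerances): nothing is equal to NaN unless
# the caller passes nan_ok=True.
from pytest import approx

inf, nan = float('inf'), float('nan')

assert nan != approx(nan)                # unequal by default
assert nan == approx(nan, nan_ok=True)   # explicit opt-in
assert inf == approx(inf)                # infinities still compare exactly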
- assert a != approx(x, abs=inf) - - def test_expecting_sequence(self): - within_1e8 = [ - (1e8 + 1e0, 1e8), - (1e0 + 1e-8, 1e0), - (1e-8 + 1e-16, 1e-8), - ] - actual, expected = zip(*within_1e8) - assert actual == approx(expected, rel=5e-8, abs=0.0) - - def test_expecting_sequence_wrong_len(self): - assert [1, 2] != approx([1]) - assert [1, 2] != approx([1,2,3]) - - def test_complex(self): - within_1e6 = [ - ( 1.000001 + 1.0j, 1.0 + 1.0j), - (1.0 + 1.000001j, 1.0 + 1.0j), - (-1.000001 + 1.0j, -1.0 + 1.0j), - (1.0 - 1.000001j, 1.0 - 1.0j), - ] - for a, x in within_1e6: - assert a == approx(x, rel=5e-6, abs=0) - assert a != approx(x, rel=5e-7, abs=0) + # If ``nan_ok=True``, then NaN is equal to NaN. + assert op(a, approx(x, nan_ok=True)) def test_int(self): within_1e6 = [ - (1000001, 1000000), - (-1000001, -1000000), + (1000001, 1000000), + (-1000001, -1000000), ] for a, x in within_1e6: assert a == approx(x, rel=5e-6, abs=0) assert a != approx(x, rel=5e-7, abs=0) + assert approx(x, rel=5e-6, abs=0) == a + assert approx(x, rel=5e-7, abs=0) != a def test_decimal(self): within_1e6 = [ - (Decimal('1.000001'), Decimal('1.0')), - (Decimal('-1.000001'), Decimal('-1.0')), + (Decimal('1.000001'), Decimal('1.0')), + (Decimal('-1.000001'), Decimal('-1.0')), ] for a, x in within_1e6: assert a == approx(x, rel=Decimal('5e-6'), abs=0) assert a != approx(x, rel=Decimal('5e-7'), abs=0) + assert approx(x, rel=Decimal('5e-6'), abs=0) == a + assert approx(x, rel=Decimal('5e-7'), abs=0) != a def test_fraction(self): within_1e6 = [ - (1 + Fraction(1, 1000000), Fraction(1)), - (-1 - Fraction(-1, 1000000), Fraction(-1)), + (1 + Fraction(1, 1000000), Fraction(1)), + (-1 - Fraction(-1, 1000000), Fraction(-1)), ] for a, x in within_1e6: assert a == approx(x, rel=5e-6, abs=0) assert a != approx(x, rel=5e-7, abs=0) + assert approx(x, rel=5e-6, abs=0) == a + assert approx(x, rel=5e-7, abs=0) != a + + def test_complex(self): + within_1e6 = [ + (1.000001 + 1.0j, 1.0 + 1.0j), + (1.0 + 1.000001j, 1.0 + 1.0j), + (-1.000001 + 1.0j, -1.0 + 1.0j), + (1.0 - 1.000001j, 1.0 - 1.0j), + ] + for a, x in within_1e6: + assert a == approx(x, rel=5e-6, abs=0) + assert a != approx(x, rel=5e-7, abs=0) + assert approx(x, rel=5e-6, abs=0) == a + assert approx(x, rel=5e-7, abs=0) != a + + def test_list(self): + actual = [1 + 1e-7, 2 + 1e-8] + expected = [1, 2] + + # Return false if any element is outside the tolerance. + assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == actual + assert approx(expected, rel=5e-8, abs=0) != actual + + def test_list_wrong_len(self): + assert [1, 2] != approx([1]) + assert [1, 2] != approx([1, 2, 3]) + + def test_tuple(self): + actual = (1 + 1e-7, 2 + 1e-8) + expected = (1, 2) + + # Return false if any element is outside the tolerance. + assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == actual + assert approx(expected, rel=5e-8, abs=0) != actual + + def test_tuple_wrong_len(self): + assert (1, 2) != approx((1,)) + assert (1, 2) != approx((1, 2, 3)) + + def test_dict(self): + actual = {'a': 1 + 1e-7, 'b': 2 + 1e-8} + # Dictionaries became ordered in python3.6, so switch up the order here + # to make sure it doesn't matter. + expected = {'b': 2, 'a': 1} + + # Return false if any element is outside the tolerance. 
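# The container support being added here, restated compactly: approx maps
# element-wise over lists, tuples and dict values, and (per the new
# reversed assertions) the comparison works from either side.
from pytest import approx

assert [0.1 + 0.2, 0.2 + 0.4] == approx([0.3, 0.6])
assert (1 + 1e-7, 2.0) == approx((1, 2), rel=5e-7)
assert {'a': 0.1 + 0.2} == approx({'a': 0.3})
assert approx([0.3]) == [0.1 + 0.2]      # symmetric comparison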
+ assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == actual + assert approx(expected, rel=5e-8, abs=0) != actual + + def test_dict_wrong_len(self): + assert {'a': 1, 'b': 2} != approx({'a': 1}) + assert {'a': 1, 'b': 2} != approx({'a': 1, 'c': 2}) + assert {'a': 1, 'b': 2} != approx({'a': 1, 'b': 2, 'c': 3}) + + def test_numpy_array(self): + np = pytest.importorskip('numpy') + + actual = np.array([1 + 1e-7, 2 + 1e-8]) + expected = np.array([1, 2]) + + # Return false if any element is outside the tolerance. + assert actual == approx(expected, rel=5e-7, abs=0) + assert actual != approx(expected, rel=5e-8, abs=0) + assert approx(expected, rel=5e-7, abs=0) == expected + assert approx(expected, rel=5e-8, abs=0) != actual + + # Should be able to compare lists with numpy arrays. + assert list(actual) == approx(expected, rel=5e-7, abs=0) + assert list(actual) != approx(expected, rel=5e-8, abs=0) + assert actual == approx(list(expected), rel=5e-7, abs=0) + assert actual != approx(list(expected), rel=5e-8, abs=0) + + def test_numpy_array_wrong_shape(self): + np = pytest.importorskip('numpy') + + a12 = np.array([[1, 2]]) + a21 = np.array([[1], [2]]) + + assert a12 != approx(a21) + assert a21 != approx(a12) def test_doctests(self): parser = doctest.DocTestParser() test = parser.get_doctest( - approx.__doc__, - {'approx': approx}, - approx.__name__, - None, None, + approx.__doc__, + {'approx': approx}, + approx.__name__, + None, None, ) runner = MyDocTestRunner() runner.run(test) @@ -310,3 +384,15 @@ class TestApprox: '=* 1 failed in *=', ]) + @pytest.mark.parametrize('op', [ + pytest.param(operator.le, id='<='), + pytest.param(operator.lt, id='<'), + pytest.param(operator.ge, id='>='), + pytest.param(operator.gt, id='>'), + ]) + def test_comparison_operator_type_error(self, op): + """ + pytest.approx should raise TypeError for operators other than == and != (#2003). + """ + with pytest.raises(TypeError): + op(1, approx(1, rel=1e-6, abs=1e-12)) diff --git a/testing/python/collect.py b/testing/python/collect.py index 1e69f2da9..7b361a89e 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -12,7 +12,10 @@ from _pytest.main import ( ) -class TestModule: +ignore_parametrized_marks = pytest.mark.filterwarnings('ignore:Applying marks directly to parameters') + + +class TestModule(object): def test_failing_import(self, testdir): modcol = testdir.getmodulecol("import alksdjalskdjalkjals") pytest.raises(Collector.CollectError, modcol.collect) @@ -104,18 +107,34 @@ class TestModule: else: assert name not in stdout + def test_show_traceback_import_error_unicode(self, testdir): + """Check test modules collected which raise ImportError with unicode messages + are handled properly (#2336). 
+ """ + testdir.makepyfile(u""" + # -*- coding: utf-8 -*- + raise ImportError(u'Something bad happened ☺') + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "ImportError while importing test module*", + "Traceback:", + "*raise ImportError*Something bad happened*", + ]) + assert result.ret == 2 -class TestClass: + +class TestClass(object): def test_class_with_init_warning(self, testdir): testdir.makepyfile(""" - class TestClass1: + class TestClass1(object): def __init__(self): pass """) result = testdir.runpytest("-rw") - result.stdout.fnmatch_lines_random(""" - WC1*test_class_with_init_warning.py*__init__* - """) + result.stdout.fnmatch_lines([ + "*cannot collect test class 'TestClass1' because it has a __init__ constructor", + ]) def test_class_subclassobject(self, testdir): testdir.getmodulecol(""" @@ -127,9 +146,39 @@ class TestClass: "*collected 0*", ]) + def test_static_method(self, testdir): + """Support for collecting staticmethod tests (#2528, #2699)""" + testdir.getmodulecol(""" + import pytest + class Test(object): + @staticmethod + def test_something(): + pass + + @pytest.fixture + def fix(self): + return 1 + + @staticmethod + def test_fix(fix): + assert fix == 1 + """) + result = testdir.runpytest() + if sys.version_info < (2, 7): + # in 2.6, the code to handle static methods doesn't work + result.stdout.fnmatch_lines([ + "*collected 0 items*", + "*cannot collect static method*", + ]) + else: + result.stdout.fnmatch_lines([ + "*collected 2 items*", + "*2 passed in*", + ]) + def test_setup_teardown_class_as_classmethod(self, testdir): testdir.makepyfile(test_mod1=""" - class TestClassMethod: + class TestClassMethod(object): @classmethod def setup_class(cls): pass @@ -166,8 +215,18 @@ class TestClass: "because it has a __new__ constructor*" ) + def test_issue2234_property(self, testdir): + testdir.makepyfile(""" + class TestCase(object): + @property + def prop(self): + raise NotImplementedError() + """) + result = testdir.runpytest() + assert result.ret == EXIT_NOTESTSCOLLECTED -class TestGenerator: + +class TestGenerator(object): def test_generative_functions(self, testdir): modcol = testdir.getmodulecol(""" def func1(arg, arg2): @@ -192,7 +251,7 @@ class TestGenerator: modcol = testdir.getmodulecol(""" def func1(arg, arg2): assert arg == arg2 - class TestGenMethods: + class TestGenMethods(object): def test_gen(self): yield func1, 17, 3*5 yield func1, 42, 6*7 @@ -246,7 +305,7 @@ class TestGenerator: modcol = testdir.getmodulecol(""" def func1(arg, arg2): assert arg == arg2 - class TestGenMethods: + class TestGenMethods(object): def test_gen(self): yield "m1", func1, 17, 3*5 yield "m2", func1, 42, 6*7 @@ -264,6 +323,7 @@ class TestGenerator: def test_order_of_execution_generator_same_codeline(self, testdir, tmpdir): o = testdir.makepyfile(""" + from __future__ import print_function def test_generative_order_of_execution(): import py, pytest test_list = [] @@ -273,8 +333,8 @@ class TestGenerator: test_list.append(item) def assert_order_of_execution(): - py.builtin.print_('expected order', expected_list) - py.builtin.print_('but got ', test_list) + print('expected order', expected_list) + print('but got ', test_list) assert test_list == expected_list for i in expected_list: @@ -288,6 +348,7 @@ class TestGenerator: def test_order_of_execution_generator_different_codeline(self, testdir): o = testdir.makepyfile(""" + from __future__ import print_function def test_generative_tests_different_codeline(): import py, pytest test_list = [] @@ -303,8 +364,8 @@ class 
TestGenerator: test_list.append(0) def assert_order_of_execution(): - py.builtin.print_('expected order', expected_list) - py.builtin.print_('but got ', test_list) + print('expected order', expected_list) + print('but got ', test_list) assert test_list == expected_list yield list_append_0 @@ -326,7 +387,7 @@ class TestGenerator: # has been used during collection. o = testdir.makepyfile(""" setuplist = [] - class TestClass: + class TestClass(object): def setup_method(self, func): #print "setup_method", self, func setuplist.append(self) @@ -360,7 +421,7 @@ class TestGenerator: assert not skipped and not failed -class TestFunction: +class TestFunction(object): def test_getmodulecollector(self, testdir): item = testdir.getitem("def test_func(): pass") modcol = item.getparent(pytest.Module) @@ -369,7 +430,7 @@ class TestFunction: def test_function_as_object_instance_ignored(self, testdir): testdir.makepyfile(""" - class A: + class A(object): def __call__(self, tmpdir): 0/0 @@ -391,10 +452,10 @@ class TestFunction: pass f1 = pytest.Function(name="name", parent=session, config=config, - args=(1,), callobj=func1) + args=(1,), callobj=func1) assert f1 == f1 - f2 = pytest.Function(name="name",config=config, - callobj=func2, parent=session) + f2 = pytest.Function(name="name", config=config, + callobj=func2, parent=session) assert f1 != f2 def test_issue197_parametrize_emptyset(self, testdir): @@ -420,7 +481,7 @@ class TestFunction: def test_issue213_parametrize_value_no_equal(self, testdir): testdir.makepyfile(""" import pytest - class A: + class A(object): def __eq__(self, other): raise ValueError("not possible") @pytest.mark.parametrize('arg', [A()]) @@ -448,7 +509,6 @@ class TestFunction: rec = testdir.inline_run() rec.assertoutcome(passed=2) - def test_parametrize_with_non_hashable_values_indirect(self, testdir): """Test parametrization with non-hashable values with indirect parametrization.""" testdir.makepyfile(""" @@ -476,7 +536,6 @@ class TestFunction: rec = testdir.inline_run() rec.assertoutcome(passed=2) - def test_parametrize_overrides_fixture(self, testdir): """Test parametrization when parameter overrides existing fixture with same name.""" testdir.makepyfile(""" @@ -504,7 +563,6 @@ class TestFunction: rec = testdir.inline_run() rec.assertoutcome(passed=3) - def test_parametrize_overrides_parametrized_fixture(self, testdir): """Test parametrization when parameter overrides existing parametrized fixture with same name.""" testdir.makepyfile(""" @@ -522,7 +580,8 @@ class TestFunction: rec = testdir.inline_run() rec.assertoutcome(passed=1) - def test_parametrize_with_mark(selfself, testdir): + @ignore_parametrized_marks + def test_parametrize_with_mark(self, testdir): items = testdir.getitems(""" import pytest @pytest.mark.foo @@ -551,11 +610,11 @@ class TestFunction: item = testdir.getitem("def test_func(): raise ValueError") config = item.config - class MyPlugin1: + class MyPlugin1(object): def pytest_pyfunc_call(self, pyfuncitem): raise ValueError - class MyPlugin2: + class MyPlugin2(object): def pytest_pyfunc_call(self, pyfuncitem): return True @@ -595,6 +654,7 @@ class TestFunction: assert colitems[2].name == 'test2[a-c]' assert colitems[3].name == 'test2[b-c]' + @ignore_parametrized_marks def test_parametrize_skipif(self, testdir): testdir.makepyfile(""" import pytest @@ -608,6 +668,7 @@ class TestFunction: result = testdir.runpytest() result.stdout.fnmatch_lines('* 2 passed, 1 skipped in *') + @ignore_parametrized_marks def test_parametrize_skip(self, testdir): testdir.makepyfile(""" 
import pytest @@ -621,6 +682,7 @@ class TestFunction: result = testdir.runpytest() result.stdout.fnmatch_lines('* 2 passed, 1 skipped in *') + @ignore_parametrized_marks def test_parametrize_skipif_no_skip(self, testdir): testdir.makepyfile(""" import pytest @@ -634,6 +696,7 @@ class TestFunction: result = testdir.runpytest() result.stdout.fnmatch_lines('* 1 failed, 2 passed in *') + @ignore_parametrized_marks def test_parametrize_xfail(self, testdir): testdir.makepyfile(""" import pytest @@ -647,6 +710,7 @@ class TestFunction: result = testdir.runpytest() result.stdout.fnmatch_lines('* 2 passed, 1 xfailed in *') + @ignore_parametrized_marks def test_parametrize_passed(self, testdir): testdir.makepyfile(""" import pytest @@ -660,6 +724,7 @@ class TestFunction: result = testdir.runpytest() result.stdout.fnmatch_lines('* 2 passed, 1 xpassed in *') + @ignore_parametrized_marks def test_parametrize_xfail_passed(self, testdir): testdir.makepyfile(""" import pytest @@ -683,7 +748,7 @@ class TestFunction: assert [x.originalname for x in items] == ['test_func', 'test_func'] -class TestSorting: +class TestSorting(object): def test_check_equality(self, testdir): modcol = testdir.getmodulecol(""" def test_pass(): pass @@ -705,11 +770,11 @@ class TestSorting: assert not (fn1 == fn3) assert fn1 != fn3 - for fn in fn1,fn2,fn3: + for fn in fn1, fn2, fn3: assert fn != 3 assert fn != modcol - assert fn != [1,2,3] - assert [1,2,3] != fn + assert fn != [1, 2, 3] + assert [1, 2, 3] != fn assert modcol != fn def test_allow_sane_sorting_for_decorators(self, testdir): @@ -733,7 +798,7 @@ class TestSorting: assert [item.name for item in colitems] == ['test_b', 'test_a'] -class TestConftestCustomization: +class TestConftestCustomization(object): def test_pytest_pycollect_module(self, testdir): testdir.makeconftest(""" import pytest @@ -808,11 +873,40 @@ class TestConftestCustomization: def test_makeitem_non_underscore(self, testdir, monkeypatch): modcol = testdir.getmodulecol("def _hello(): pass") - l = [] + values = [] monkeypatch.setattr(pytest.Module, 'makeitem', - lambda self, name, obj: l.append(name)) - l = modcol.collect() - assert '_hello' not in l + lambda self, name, obj: values.append(name)) + values = modcol.collect() + assert '_hello' not in values + + def test_issue2369_collect_module_fileext(self, testdir): + """Ensure we can collect files with weird file extensions as Python + modules (#2369)""" + # We'll implement a little finder and loader to import files containing + # Python source code whose file extension is ".narf". + testdir.makeconftest(""" + import sys, os, imp + from _pytest.python import Module + + class Loader: + def load_module(self, name): + return imp.load_source(name, name + ".narf") + class Finder: + def find_module(self, name, path=None): + if os.path.exists(name + ".narf"): + return Loader() + sys.meta_path.append(Finder()) + + def pytest_collect_file(path, parent): + if path.ext == ".narf": + return Module(path, parent)""") + testdir.makefile(".narf", """ + def test_something(): + assert 1 + 1 == 2""") + # Use runpytest_subprocess, since we're futzing with sys.meta_path. 
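# The import hook from the conftest above, as a standalone sketch of the
# PEP 302 protocol it relies on (imp/find_module being the Python 2/3 API
# of this era): a finder on sys.meta_path routes names with a matching
# ".narf" file to a loader that compiles them as plain Python source.
import imp
import os
import sys

class NarfLoader(object):
    def load_module(self, name):
        return imp.load_source(name, name + '.narf')

class NarfFinder(object):
    def find_module(self, name, path=None):
        if os.path.exists(name + '.narf'):
            return NarfLoader()
        return None

sys.meta_path.append(NarfFinder())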
+ result = testdir.runpytest_subprocess() + result.stdout.fnmatch_lines('*1 passed*') + def test_setup_only_available_in_subdir(testdir): sub1 = testdir.mkpydir("sub1") @@ -840,6 +934,7 @@ def test_setup_only_available_in_subdir(testdir): result = testdir.runpytest("-v", "-s") result.assert_outcomes(passed=2) + def test_modulecol_roundtrip(testdir): modcol = testdir.getmodulecol("pass", withinit=True) trail = modcol.nodeid @@ -847,7 +942,7 @@ def test_modulecol_roundtrip(testdir): assert modcol.name == newcol.name -class TestTracebackCutting: +class TestTracebackCutting(object): def test_skip_simple(self): excinfo = pytest.raises(pytest.skip.Exception, 'pytest.skip("xxx")') assert excinfo.traceback[-1].frame.code.name == "skip" @@ -867,13 +962,13 @@ class TestTracebackCutting: out = result.stdout.str() assert "xyz" in out assert "conftest.py:5: ValueError" in out - numentries = out.count("_ _ _") # separator for traceback entries + numentries = out.count("_ _ _") # separator for traceback entries assert numentries == 0 result = testdir.runpytest("--fulltrace", p) out = result.stdout.str() assert "conftest.py:5: ValueError" in out - numentries = out.count("_ _ _ _") # separator for traceback entries + numentries = out.count("_ _ _ _") # separator for traceback entries assert numentries > 3 def test_traceback_error_during_import(self, testdir): @@ -973,7 +1068,7 @@ class TestTracebackCutting: assert filter_traceback(tb[-1]) -class TestReportInfo: +class TestReportInfo(object): def test_itemreport_reportinfo(self, testdir, linecomp): testdir.makeconftest(""" import pytest @@ -998,7 +1093,7 @@ class TestReportInfo: def test_class_reportinfo(self, testdir): modcol = testdir.getmodulecol(""" # lineno 0 - class TestClass: + class TestClass(object): def test_hello(self): pass """) classcol = testdir.collect_by_name(modcol, "TestClass") @@ -1033,7 +1128,7 @@ class TestReportInfo: def check(x): pass yield check, 3 - class TestClass: + class TestClass(object): def test_method(self): pass """ @@ -1042,7 +1137,7 @@ class TestReportInfo: # https://github.com/pytest-dev/pytest/issues/1204 modcol = testdir.getmodulecol(""" # lineno 0 - class TestClass: + class TestClass(object): def __getattr__(self, name): return "this is not an int" @@ -1064,7 +1159,7 @@ def test_customized_python_discovery(testdir): p = testdir.makepyfile(""" def check_simple(): pass - class CheckMyApp: + class CheckMyApp(object): def check_meth(self): pass """) @@ -1124,6 +1219,7 @@ def test_collector_attributes(testdir): "*1 passed*", ]) + def test_customize_through_attributes(testdir): testdir.makeconftest(""" import pytest @@ -1139,7 +1235,7 @@ def test_customize_through_attributes(testdir): return MyClass(name, parent=collector) """) testdir.makepyfile(""" - class MyTestClass: + class MyTestClass(object): def test_hello(self): pass """) @@ -1153,11 +1249,11 @@ def test_customize_through_attributes(testdir): def test_unorderable_types(testdir): testdir.makepyfile(""" - class TestJoinEmpty: + class TestJoinEmpty(object): pass def make_test(): - class Test: + class Test(object): pass Test.__name__ = "TestFoo" return Test @@ -1231,8 +1327,8 @@ def test_dont_collect_non_function_callable(testdir): result = testdir.runpytest('-rw') result.stdout.fnmatch_lines([ '*collected 1 item*', - 'WC2 *', - '*1 passed, 1 pytest-warnings in *', + "*cannot collect 'test_a' because it is not a function*", + '*1 passed, 1 warnings in *', ]) @@ -1293,7 +1389,6 @@ def test_skip_duplicates_by_default(testdir): ]) - def test_keep_duplicates(testdir): 
"""Test for issue https://github.com/pytest-dev/pytest/issues/1609 (#1609) diff --git a/testing/python/fixture.py b/testing/python/fixture.py index 3e84be138..b351eeeca 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -7,28 +7,40 @@ from _pytest.pytester import get_public_names from _pytest.fixtures import FixtureLookupError from _pytest import fixtures + def test_getfuncargnames(): - def f(): pass + def f(): + pass assert not fixtures.getfuncargnames(f) - def g(arg): pass + def g(arg): + pass assert fixtures.getfuncargnames(g) == ('arg',) - def h(arg1, arg2="hello"): pass + def h(arg1, arg2="hello"): + pass assert fixtures.getfuncargnames(h) == ('arg1',) - def h(arg1, arg2, arg3="hello"): pass + def h(arg1, arg2, arg3="hello"): + pass assert fixtures.getfuncargnames(h) == ('arg1', 'arg2') - class A: + class A(object): def f(self, arg1, arg2="hello"): pass + @staticmethod + def static(arg1, arg2): + pass + assert fixtures.getfuncargnames(A().f) == ('arg1',) - if sys.version_info < (3,0): + if sys.version_info < (3, 0): assert fixtures.getfuncargnames(A.f) == ('arg1',) -class TestFillFixtures: + assert fixtures.getfuncargnames(A.static, cls=A) == ('arg1', 'arg2') + + +class TestFillFixtures(object): def test_fillfuncargs_exposed(self): # used by oejskit, kept for compatibility assert pytest._fillfuncargs == fixtures.fillfixtures @@ -44,7 +56,7 @@ class TestFillFixtures: def test_func(some): pass """) - result = testdir.runpytest() # "--collect-only") + result = testdir.runpytest() # "--collect-only") assert result.ret != 0 result.stdout.fnmatch_lines([ "*def test_func(some)*", @@ -79,7 +91,7 @@ class TestFillFixtures: def something(request): return request.function.__name__ - class TestClass: + class TestClass(object): def test_method(self, something): assert something == "test_method" def test_func(something): @@ -91,7 +103,7 @@ class TestFillFixtures: def test_funcarg_lookup_classlevel(self, testdir): p = testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): @pytest.fixture def something(self, request): @@ -134,7 +146,7 @@ class TestFillFixtures: def spam(): return 'spam' - class TestSpam: + class TestSpam(object): @pytest.fixture def spam(self, spam): @@ -439,7 +451,6 @@ class TestFillFixtures: ]) assert "INTERNAL" not in result.stdout.str() - def test_fixture_excinfo_leak(self, testdir): # on python2 sys.excinfo would leak into fixture executions testdir.makepyfile(""" @@ -463,7 +474,7 @@ class TestFillFixtures: assert result.ret == 0 -class TestRequestBasic: +class TestRequestBasic(object): def test_request_attributes(self, testdir): item = testdir.getitem(""" import pytest @@ -484,7 +495,7 @@ class TestRequestBasic: def test_request_attributes_method(self, testdir): item, = testdir.getitems(""" import pytest - class TestB: + class TestB(object): @pytest.fixture def something(self, request): @@ -502,7 +513,7 @@ class TestRequestBasic: @pytest.fixture def something(request): pass - class TestClass: + class TestClass(object): def test_method(self, something): pass """) @@ -537,30 +548,42 @@ class TestRequestBasic: def test_getfixturevalue(self, testdir, getfixmethod): item = testdir.getitem(""" import pytest - l = [2] + values = [2] @pytest.fixture def something(request): return 1 @pytest.fixture def other(request): - return l.pop() + return values.pop() def test_func(something): pass """) + import contextlib + if getfixmethod == 'getfuncargvalue': + warning_expectation = pytest.warns(DeprecationWarning) + else: + # see #1830 for a 
cleaner way to accomplish this + @contextlib.contextmanager + def expecting_no_warning(): + yield + + warning_expectation = expecting_no_warning() + req = item._request - fixture_fetcher = getattr(req, getfixmethod) - pytest.raises(FixtureLookupError, fixture_fetcher, "notexists") - val = fixture_fetcher("something") - assert val == 1 - val = fixture_fetcher("something") - assert val == 1 - val2 = fixture_fetcher("other") - assert val2 == 2 - val2 = fixture_fetcher("other") # see about caching - assert val2 == 2 - pytest._fillfuncargs(item) - assert item.funcargs["something"] == 1 - assert len(get_public_names(item.funcargs)) == 2 - assert "request" in item.funcargs - #assert item.funcargs == {'something': 1, "other": 2} + with warning_expectation: + fixture_fetcher = getattr(req, getfixmethod) + with pytest.raises(FixtureLookupError): + fixture_fetcher("notexists") + val = fixture_fetcher("something") + assert val == 1 + val = fixture_fetcher("something") + assert val == 1 + val2 = fixture_fetcher("other") + assert val2 == 2 + val2 = fixture_fetcher("other") # see about caching + assert val2 == 2 + pytest._fillfuncargs(item) + assert item.funcargs["something"] == 1 + assert len(get_public_names(item.funcargs)) == 2 + assert "request" in item.funcargs def test_request_addfinalizer(self, testdir): item = testdir.getitem(""" @@ -599,15 +622,15 @@ class TestRequestBasic: def test_request_addfinalizer_failing_setup(self, testdir): testdir.makepyfile(""" import pytest - l = [1] + values = [1] @pytest.fixture def myfix(request): - request.addfinalizer(l.pop) + request.addfinalizer(values.pop) assert 0 def test_fix(myfix): pass def test_finalizer_ran(): - assert not l + assert not values """) reprec = testdir.inline_run("-s") reprec.assertoutcome(failed=1, passed=1) @@ -615,36 +638,68 @@ class TestRequestBasic: def test_request_addfinalizer_failing_setup_module(self, testdir): testdir.makepyfile(""" import pytest - l = [1, 2] + values = [1, 2] @pytest.fixture(scope="module") def myfix(request): - request.addfinalizer(l.pop) - request.addfinalizer(l.pop) + request.addfinalizer(values.pop) + request.addfinalizer(values.pop) assert 0 def test_fix(myfix): pass """) reprec = testdir.inline_run("-s") mod = reprec.getcalls("pytest_runtest_setup")[0].item.module - assert not mod.l - + assert not mod.values def test_request_addfinalizer_partial_setup_failure(self, testdir): p = testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture def something(request): - request.addfinalizer(lambda: l.append(None)) + request.addfinalizer(lambda: values.append(None)) def test_func(something, missingarg): pass def test_second(): - assert len(l) == 1 + assert len(values) == 1 """) result = testdir.runpytest(p) result.stdout.fnmatch_lines([ "*1 error*" # XXX the whole module collection fails - ]) + ]) + + def test_request_subrequest_addfinalizer_exceptions(self, testdir): + """ + Ensure exceptions raised during teardown by a finalizer are suppressed + until all finalizers are called, re-raising the first exception (#2440) + """ + testdir.makepyfile(""" + import pytest + values = [] + def _excepts(where): + raise Exception('Error in %s fixture' % where) + @pytest.fixture + def subrequest(request): + return request + @pytest.fixture + def something(subrequest): + subrequest.addfinalizer(lambda: values.append(1)) + subrequest.addfinalizer(lambda: values.append(2)) + subrequest.addfinalizer(lambda: _excepts('something')) + @pytest.fixture + def excepts(subrequest): + subrequest.addfinalizer(lambda: 
_excepts('excepts')) + subrequest.addfinalizer(lambda: values.append(3)) + def test_first(something, excepts): + pass + def test_second(): + assert values == [3, 2, 1] + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*Exception: Error in excepts fixture', + '* 2 passed, 1 error in *', + ]) def test_request_getmodulepath(self, testdir): modcol = testdir.getmodulecol("def test_somefunc(): pass") @@ -693,28 +748,28 @@ class TestRequestBasic: def test_setupdecorator_and_xunit(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope='module', autouse=True) def setup_module(): - l.append("module") + values.append("module") @pytest.fixture(autouse=True) def setup_function(): - l.append("function") + values.append("function") def test_func(): pass - class TestClass: + class TestClass(object): @pytest.fixture(scope="class", autouse=True) def setup_class(self): - l.append("class") + values.append("class") @pytest.fixture(autouse=True) def setup_method(self): - l.append("method") + values.append("method") def test_method(self): pass def test_all(): - assert l == ["module", "function", "class", + assert values == ["module", "function", "class", "function", "method", "function"] """) reprec = testdir.inline_run("-v") @@ -771,15 +826,16 @@ class TestRequestBasic: reprec = testdir.inline_run() reprec.assertoutcome(passed=2) -class TestRequestMarking: + +class TestRequestMarking(object): def test_applymarker(self, testdir): - item1,item2 = testdir.getitems(""" + item1, item2 = testdir.getitems(""" import pytest @pytest.fixture def something(request): pass - class TestClass: + class TestClass(object): def test_func1(self, something): pass def test_func2(self, something): @@ -831,7 +887,8 @@ class TestRequestMarking: reprec = testdir.inline_run() reprec.assertoutcome(passed=2) -class TestRequestCachedSetup: + +class TestRequestCachedSetup(object): def test_request_cachedsetup_defaultmodule(self, testdir): reprec = testdir.inline_runsource(""" mysetup = ["hello",].pop @@ -844,7 +901,7 @@ class TestRequestCachedSetup: def test_func1(something): assert something == "hello" - class TestClass: + class TestClass(object): def test_func1a(self, something): assert something == "hello" """) @@ -862,7 +919,7 @@ class TestRequestCachedSetup: assert something == "hello3" def test_func2(something): assert something == "hello2" - class TestClass: + class TestClass(object): def test_func1a(self, something): assert something == "hello" def test_func2b(self, something): @@ -873,10 +930,10 @@ class TestRequestCachedSetup: def test_request_cachedsetup_extrakey(self, testdir): item1 = testdir.getitem("def test_func(): pass") req1 = fixtures.FixtureRequest(item1) - l = ["hello", "world"] + values = ["hello", "world"] def setup(): - return l.pop() + return values.pop() ret1 = req1.cached_setup(setup, extrakey=1) ret2 = req1.cached_setup(setup, extrakey=2) @@ -890,24 +947,24 @@ class TestRequestCachedSetup: def test_request_cachedsetup_cache_deletion(self, testdir): item1 = testdir.getitem("def test_func(): pass") req1 = fixtures.FixtureRequest(item1) - l = [] + values = [] def setup(): - l.append("setup") + values.append("setup") def teardown(val): - l.append("teardown") + values.append("teardown") req1.cached_setup(setup, teardown, scope="function") - assert l == ['setup'] + assert values == ['setup'] # artificial call of finalizer setupstate = req1._pyfuncitem.session._setupstate setupstate._callfinalizers(item1) - assert l == ["setup", "teardown"] + assert values 
== ["setup", "teardown"] req1.cached_setup(setup, teardown, scope="function") - assert l == ["setup", "teardown", "setup"] + assert values == ["setup", "teardown", "setup"] setupstate._callfinalizers(item1) - assert l == ["setup", "teardown", "setup", "teardown"] + assert values == ["setup", "teardown", "setup", "teardown"] def test_request_cached_setup_two_args(self, testdir): testdir.makepyfile(""" @@ -949,17 +1006,17 @@ class TestRequestCachedSetup: def test_request_cached_setup_functional(self, testdir): testdir.makepyfile(test_0=""" import pytest - l = [] + values = [] @pytest.fixture def something(request): val = request.cached_setup(fsetup, fteardown) return val def fsetup(mycache=[1]): - l.append(mycache.pop()) - return l + values.append(mycache.pop()) + return values def fteardown(something): - l.remove(something[0]) - l.append(2) + values.remove(something[0]) + values.append(2) def test_list_once(something): assert something == [1] def test_list_twice(something): @@ -968,7 +1025,7 @@ class TestRequestCachedSetup: testdir.makepyfile(test_1=""" import test_0 # should have run already def test_check_test0_has_teardown_correct(): - assert test_0.l == [2] + assert test_0.values == [2] """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines([ @@ -996,7 +1053,8 @@ class TestRequestCachedSetup: "*ZeroDivisionError*", ]) -class TestFixtureUsages: + +class TestFixtureUsages(object): def test_noargfixturedec(self, testdir): testdir.makepyfile(""" import pytest @@ -1092,10 +1150,10 @@ class TestFixtureUsages: def test_funcarg_parametrized_and_used_twice(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(params=[1,2]) def arg1(request): - l.append(1) + values.append(1) return request.param @pytest.fixture() @@ -1104,7 +1162,7 @@ class TestFixtureUsages: def test_add(arg1, arg2): assert arg2 == arg1 + 1 - assert len(l) == arg1 + assert len(values) == arg1 """) result = testdir.runpytest() result.stdout.fnmatch_lines([ @@ -1138,15 +1196,15 @@ class TestFixtureUsages: def test_factory_setup_as_classes_fails(self, testdir): testdir.makepyfile(""" import pytest - class arg1: + class arg1(object): def __init__(self, request): self.x = 1 arg1 = pytest.fixture()(arg1) """) reprec = testdir.inline_run() - l = reprec.getfailedcollections() - assert len(l) == 1 + values = reprec.getfailedcollections() + assert len(values) == 1 def test_request_can_be_overridden(self, testdir): testdir.makepyfile(""" @@ -1165,20 +1223,20 @@ class TestFixtureUsages: testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="class") def myfix(request): request.cls.hello = "world" - l.append(1) + values.append(1) - class TestClass: + class TestClass(object): def test_one(self): assert self.hello == "world" - assert len(l) == 1 + assert len(values) == 1 def test_two(self): assert self.hello == "world" - assert len(l) == 1 + assert len(values) == 1 pytest.mark.usefixtures("myfix")(TestClass) """) reprec = testdir.inline_run() @@ -1198,7 +1256,7 @@ class TestFixtureUsages: """) testdir.makepyfile(""" - class TestClass: + class TestClass(object): def test_one(self): assert self.hello == "world" def test_two(self): @@ -1217,7 +1275,7 @@ class TestFixtureUsages: testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): @pytest.fixture def setup1(self, request): assert self == request.instance @@ -1232,7 +1290,7 @@ class TestFixtureUsages: testdir.makepyfile(""" import pytest - l = [] + values = [] def f(): yield 1 yield 2 @@ 
-1246,17 +1304,17 @@ class TestFixtureUsages: return request.param def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg2): - l.append(arg2*10) + values.append(arg2*10) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert l == [1,2, 10,20] + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert values == [1, 2, 10, 20] -class TestFixtureManagerParseFactories: +class TestFixtureManagerParseFactories(object): @pytest.fixture def testdir(self, request): @@ -1280,7 +1338,7 @@ class TestFixtureManagerParseFactories: def test_parsefactories_evil_objects_issue214(self, testdir): testdir.makepyfile(""" - class A: + class A(object): def __call__(self): pass def __getattr__(self, name): @@ -1311,7 +1369,7 @@ class TestFixtureManagerParseFactories: @pytest.fixture def hello(request): return "module" - class TestClass: + class TestClass(object): @pytest.fixture def hello(self, request): return "class" @@ -1360,7 +1418,7 @@ class TestFixtureManagerParseFactories: reprec.assertoutcome(passed=2) -class TestAutouseDiscovery: +class TestAutouseDiscovery(object): @pytest.fixture def testdir(self, testdir): @@ -1402,20 +1460,20 @@ class TestAutouseDiscovery: def test_two_classes_separated_autouse(self, testdir): testdir.makepyfile(""" import pytest - class TestA: - l = [] + class TestA(object): + values = [] @pytest.fixture(autouse=True) def setup1(self): - self.l.append(1) + self.values.append(1) def test_setup1(self): - assert self.l == [1] - class TestB: - l = [] + assert self.values == [1] + class TestB(object): + values = [] @pytest.fixture(autouse=True) def setup2(self): - self.l.append(1) + self.values.append(1) def test_setup2(self): - assert self.l == [1] + assert self.values == [1] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=2) @@ -1423,7 +1481,7 @@ class TestAutouseDiscovery: def test_setup_at_classlevel(self, testdir): testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): @pytest.fixture(autouse=True) def permethod(self, request): request.instance.funcname = request.function.__name__ @@ -1498,28 +1556,28 @@ class TestAutouseDiscovery: def test_autouse_in_module_and_two_classes(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(autouse=True) def append1(): - l.append("module") + values.append("module") def test_x(): - assert l == ["module"] + assert values == ["module"] - class TestA: + class TestA(object): @pytest.fixture(autouse=True) def append2(self): - l.append("A") + values.append("A") def test_hello(self): - assert l == ["module", "module", "A"], l - class TestA2: + assert values == ["module", "module", "A"], values + class TestA2(object): def test_world(self): - assert l == ["module", "module", "A", "module"], l + assert values == ["module", "module", "A", "module"], values """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) -class TestAutouseManagement: +class TestAutouseManagement(object): def test_autouse_conftest_mid_directory(self, testdir): pkgdir = testdir.mkpydir("xyz123") pkgdir.join("conftest.py").write(_pytest._code.Source(""" @@ -1554,28 +1612,26 @@ class TestAutouseManagement: reprec = testdir.inline_run() reprec.assertoutcome(passed=2) - - def test_funcarg_and_setup(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="module") def arg(): - l.append(1) + values.append(1) return 0 
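# Autouse in isolation, for the setup/teardown ordering tests around here:
# an autouse fixture applies to every test in its scope without being named
# in the signature, which is why these embedded modules observe `values`
# growing even when a test requests nothing explicitly.
import pytest

values = []

@pytest.fixture(autouse=True)
def record():
    values.append('setup')
    yield
    values.append('teardown')

def test_records_without_requesting():
    assert values == ['setup']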
@pytest.fixture(scope="module", autouse=True) def something(arg): - l.append(2) + values.append(2) def test_hello(arg): - assert len(l) == 2 - assert l == [1,2] + assert len(values) == 2 + assert values == [1,2] assert arg == 0 def test_hello2(arg): - assert len(l) == 2 - assert l == [1,2] + assert len(values) == 2 + assert values == [1,2] assert arg == 0 """) reprec = testdir.inline_run() @@ -1584,20 +1640,20 @@ class TestAutouseManagement: def test_uses_parametrized_resource(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(params=[1,2]) def arg(request): return request.param @pytest.fixture(autouse=True) def something(arg): - l.append(arg) + values.append(arg) def test_hello(): - if len(l) == 1: - assert l == [1] - elif len(l) == 2: - assert l == [1, 2] + if len(values) == 1: + assert values == [1] + elif len(values) == 2: + assert values == [1, 2] else: 0/0 @@ -1609,7 +1665,7 @@ class TestAutouseManagement: testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="session", params=[1,2]) def arg(request): @@ -1618,14 +1674,14 @@ class TestAutouseManagement: @pytest.fixture(scope="function", autouse=True) def append(request, arg): if request.function.__name__ == "test_some": - l.append(arg) + values.append(arg) def test_some(): pass def test_result(arg): - assert len(l) == arg - assert l[:arg] == [1,2][:arg] + assert len(values) == arg + assert values[:arg] == [1,2][:arg] """) reprec = testdir.inline_run("-v", "-s") reprec.assertoutcome(passed=4) @@ -1635,7 +1691,7 @@ class TestAutouseManagement: import pytest import pprint - l = [] + values = [] @pytest.fixture(scope="function", params=[1,2]) def farg(request): @@ -1648,43 +1704,43 @@ class TestAutouseManagement: @pytest.fixture(scope="function", autouse=True) def append(request, farg, carg): def fin(): - l.append("fin_%s%s" % (carg, farg)) + values.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) """) testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): def test_1(self): pass - class TestClass2: + class TestClass2(object): def test_2(self): pass """) confcut = "--confcutdir={0}".format(testdir.tmpdir) - reprec = testdir.inline_run("-v","-s", confcut) + reprec = testdir.inline_run("-v", "-s", confcut) reprec.assertoutcome(passed=8) config = reprec.getcalls("pytest_unconfigure")[0].config - l = config.pluginmanager._getconftestmodules(p)[0].l - assert l == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2 + values = config.pluginmanager._getconftestmodules(p)[0].values + assert values == ["fin_a1", "fin_a2", "fin_b1", "fin_b2"] * 2 def test_scope_ordering(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="function", autouse=True) def fappend2(): - l.append(2) + values.append(2) @pytest.fixture(scope="class", autouse=True) def classappend3(): - l.append(3) + values.append(3) @pytest.fixture(scope="module", autouse=True) def mappend(): - l.append(1) + values.append(1) - class TestHallo: + class TestHallo(object): def test_method(self): - assert l == [1,3,2] + assert values == [1,3,2] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @@ -1692,23 +1748,23 @@ class TestAutouseManagement: def test_parametrization_setup_teardown_ordering(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] def pytest_generate_tests(metafunc): if metafunc.cls is not None: metafunc.parametrize("item", [1,2], scope="class") - class TestClass: + class TestClass(object): 
@pytest.fixture(scope="class", autouse=True) def addteardown(self, item, request): - l.append("setup-%d" % item) - request.addfinalizer(lambda: l.append("teardown-%d" % item)) + values.append("setup-%d" % item) + request.addfinalizer(lambda: values.append("teardown-%d" % item)) def test_step1(self, item): - l.append("step1-%d" % item) + values.append("step1-%d" % item) def test_step2(self, item): - l.append("step2-%d" % item) + values.append("step2-%d" % item) def test_finish(): - print (l) - assert l == ["setup-1", "step1-1", "step2-1", "teardown-1", + print (values) + assert values == ["setup-1", "step1-1", "step2-1", "teardown-1", "setup-2", "step1-2", "step2-2", "teardown-2",] """) reprec = testdir.inline_run() @@ -1718,56 +1774,56 @@ class TestAutouseManagement: testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(autouse=True) def fix1(): - l.append(1) + values.append(1) @pytest.fixture() def arg1(): - l.append(2) + values.append(2) def test_hello(arg1): - assert l == [1,2] + assert values == [1,2] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @pytest.mark.issue226 - @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00","p01"]) - @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10","p11"]) + @pytest.mark.parametrize("param1", ["", "params=[1]"], ids=["p00", "p01"]) + @pytest.mark.parametrize("param2", ["", "params=[1]"], ids=["p10", "p11"]) def test_ordering_dependencies_torndown_first(self, testdir, param1, param2): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(%(param1)s) def arg1(request): - request.addfinalizer(lambda: l.append("fin1")) - l.append("new1") + request.addfinalizer(lambda: values.append("fin1")) + values.append("new1") @pytest.fixture(%(param2)s) def arg2(request, arg1): - request.addfinalizer(lambda: l.append("fin2")) - l.append("new2") + request.addfinalizer(lambda: values.append("fin2")) + values.append("new2") def test_arg(arg2): pass def test_check(): - assert l == ["new1", "new2", "fin2", "fin1"] + assert values == ["new1", "new2", "fin2", "fin1"] """ % locals()) reprec = testdir.inline_run("-s") reprec.assertoutcome(passed=2) -class TestFixtureMarker: +class TestFixtureMarker(object): def test_parametrize(self, testdir): testdir.makepyfile(""" import pytest @pytest.fixture(params=["a", "b", "c"]) def arg(request): return request.param - l = [] + values = [] def test_param(arg): - l.append(arg) + values.append(arg) def test_result(): - assert l == list("abc") + assert values == list("abc") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=4) @@ -1811,21 +1867,21 @@ class TestFixtureMarker: def test_scope_session(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="module") def arg(): - l.append(1) + values.append(1) return 1 def test_1(arg): assert arg == 1 def test_2(arg): assert arg == 1 - assert len(l) == 1 - class TestClass: + assert len(values) == 1 + class TestClass(object): def test3(self, arg): assert arg == 1 - assert len(l) == 1 + assert len(values) == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) @@ -1833,10 +1889,10 @@ class TestFixtureMarker: def test_scope_session_exc(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="session") def fix(): - l.append(1) + values.append(1) pytest.skip('skipping') def test_1(fix): @@ -1844,7 +1900,7 @@ class TestFixtureMarker: def test_2(fix): pass def test_last(): - assert l == [1] + assert 
values == [1] """) reprec = testdir.inline_run() reprec.assertoutcome(skipped=2, passed=1) @@ -1852,11 +1908,11 @@ class TestFixtureMarker: def test_scope_session_exc_two_fix(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] m = [] @pytest.fixture(scope="session") def a(): - l.append(1) + values.append(1) pytest.skip('skipping') @pytest.fixture(scope="session") def b(a): @@ -1867,7 +1923,7 @@ class TestFixtureMarker: def test_2(b): pass def test_last(): - assert l == [1] + assert values == [1] assert m == [] """) reprec = testdir.inline_run() @@ -1905,21 +1961,21 @@ class TestFixtureMarker: def test_scope_module_uses_session(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="module") def arg(): - l.append(1) + values.append(1) return 1 def test_1(arg): assert arg == 1 def test_2(arg): assert arg == 1 - assert len(l) == 1 - class TestClass: + assert len(values) == 1 + class TestClass(object): def test3(self, arg): assert arg == 1 - assert len(l) == 1 + assert len(values) == 1 """) reprec = testdir.inline_run() reprec.assertoutcome(passed=3) @@ -2014,17 +2070,17 @@ class TestFixtureMarker: @pytest.fixture(scope="module", params=["a", "b", "c"]) def arg(request): return request.param - l = [] + values = [] def test_param(arg): - l.append(arg) + values.append(arg) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=3) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert len(l) == 3 - assert "a" in l - assert "b" in l - assert "c" in l + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert len(values) == 3 + assert "a" in values + assert "b" in values + assert "c" in values def test_scope_mismatch(self, testdir): testdir.makeconftest(""" @@ -2055,16 +2111,16 @@ class TestFixtureMarker: def arg(request): return request.param - l = [] + values = [] def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg): - l.append(arg) + values.append(arg) """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=4) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert l == [1,1,2,2] + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert values == [1, 1, 2, 2] def test_module_parametrized_ordering(self, testdir): testdir.makeconftest(""" @@ -2116,7 +2172,7 @@ class TestFixtureMarker: testdir.makeconftest(""" import pytest - l = [] + values = [] @pytest.fixture(scope="function", params=[1,2]) def farg(request): @@ -2129,18 +2185,18 @@ class TestFixtureMarker: @pytest.fixture(scope="function", autouse=True) def append(request, farg, carg): def fin(): - l.append("fin_%s%s" % (carg, farg)) + values.append("fin_%s%s" % (carg, farg)) request.addfinalizer(fin) """) testdir.makepyfile(""" import pytest - class TestClass2: + class TestClass2(object): def test_1(self): pass def test_2(self): pass - class TestClass: + class TestClass(object): def test_3(self): pass """) @@ -2167,30 +2223,30 @@ class TestFixtureMarker: @pytest.fixture(scope="function", params=[1, 2]) def arg(request): param = request.param - request.addfinalizer(lambda: l.append("fin:%s" % param)) - l.append("create:%s" % param) + request.addfinalizer(lambda: values.append("fin:%s" % param)) + values.append("create:%s" % param) return request.param @pytest.fixture(scope="module", params=["mod1", "mod2"]) def modarg(request): param = request.param - request.addfinalizer(lambda: l.append("fin:%s" % param)) - l.append("create:%s" % param) + 
request.addfinalizer(lambda: values.append("fin:%s" % param)) + values.append("create:%s" % param) return request.param - l = [] + values = [] def test_1(arg): - l.append("test1") + values.append("test1") def test_2(modarg): - l.append("test2") + values.append("test2") def test_3(arg, modarg): - l.append("test3") + values.append("test3") def test_4(modarg, arg): - l.append("test4") + values.append("test4") """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=12) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values expected = [ 'create:1', 'test1', 'fin:1', 'create:2', 'test1', 'fin:2', 'create:mod1', 'test2', 'create:1', 'test3', @@ -2199,10 +2255,10 @@ class TestFixtureMarker: 'fin:mod1', 'create:mod2', 'test2', 'create:1', 'test3', 'fin:1', 'create:2', 'test3', 'fin:2', 'create:1', 'test4', 'fin:1', 'create:2', 'test4', 'fin:2', - 'fin:mod2'] + 'fin:mod2'] import pprint - pprint.pprint(list(zip(l, expected))) - assert l == expected + pprint.pprint(list(zip(values, expected))) + assert values == expected def test_parametrized_fixture_teardown_order(self, testdir): testdir.makepyfile(""" @@ -2211,29 +2267,29 @@ class TestFixtureMarker: def param1(request): return request.param - l = [] + values = [] - class TestClass: + class TestClass(object): @classmethod @pytest.fixture(scope="class", autouse=True) def setup1(self, request, param1): - l.append(1) + values.append(1) request.addfinalizer(self.teardown1) @classmethod def teardown1(self): - assert l.pop() == 1 + assert values.pop() == 1 @pytest.fixture(scope="class", autouse=True) def setup2(self, request, param1): - l.append(2) + values.append(2) request.addfinalizer(self.teardown2) @classmethod def teardown2(self): - assert l.pop() == 2 + assert values.pop() == 2 def test(self): pass def test_finish(): - assert not l + assert not values """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(""" @@ -2273,7 +2329,7 @@ class TestFixtureMarker: testpath = testdir.makepyfile(""" import pytest - class Box: + class Box(object): value = 0 @pytest.fixture(scope='class') @@ -2284,11 +2340,11 @@ class TestFixtureMarker: def test_a(a): assert a == 1 - class Test1: + class Test1(object): def test_b(self, a): assert a == 2 - class Test2: + class Test2(object): def test_c(self, a): assert a == 3""") reprec = testdir.inline_run(testpath) @@ -2298,42 +2354,42 @@ class TestFixtureMarker: def test_request_is_clean(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(params=[1, 2]) def fix(request): - request.addfinalizer(lambda: l.append(request.param)) + request.addfinalizer(lambda: values.append(request.param)) def test_fix(fix): pass """) reprec = testdir.inline_run("-s") - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert l == [1,2] + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert values == [1, 2] def test_parametrize_separated_lifecycle(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope="module", params=[1, 2]) def arg(request): x = request.param - request.addfinalizer(lambda: l.append("fin%s" % x)) + request.addfinalizer(lambda: values.append("fin%s" % x)) return request.param def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg): - l.append(arg) + values.append(arg) """) reprec = testdir.inline_run("-vs") reprec.assertoutcome(passed=4) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l 
+ values = reprec.getcalls("pytest_runtest_call")[0].item.module.values import pprint - pprint.pprint(l) - #assert len(l) == 6 - assert l[0] == l[1] == 1 - assert l[2] == "fin1" - assert l[3] == l[4] == 2 - assert l[5] == "fin2" + pprint.pprint(values) + # assert len(values) == 6 + assert values[0] == values[1] == 1 + assert values[2] == "fin1" + assert values[3] == values[4] == 2 + assert values[5] == "fin2" def test_parametrize_function_scoped_finalizers_called(self, testdir): testdir.makepyfile(""" @@ -2342,28 +2398,27 @@ class TestFixtureMarker: @pytest.fixture(scope="function", params=[1, 2]) def arg(request): x = request.param - request.addfinalizer(lambda: l.append("fin%s" % x)) + request.addfinalizer(lambda: values.append("fin%s" % x)) return request.param - l = [] + values = [] def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg): - l.append(arg) + values.append(arg) def test_3(): - assert len(l) == 8 - assert l == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"] + assert len(values) == 8 + assert values == [1, "fin1", 2, "fin2", 1, "fin1", 2, "fin2"] """) reprec = testdir.inline_run("-v") reprec.assertoutcome(passed=5) - @pytest.mark.issue246 @pytest.mark.parametrize("scope", ["session", "function", "module"]) def test_finalizer_order_on_parametrization(self, scope, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(scope=%(scope)r, params=["1"]) def fix1(request): @@ -2372,13 +2427,13 @@ class TestFixtureMarker: @pytest.fixture(scope=%(scope)r) def fix2(request, base): def cleanup_fix2(): - assert not l, "base should not have been finalized" + assert not values, "base should not have been finalized" request.addfinalizer(cleanup_fix2) @pytest.fixture(scope=%(scope)r) def base(request, fix1): def cleanup_base(): - l.append("fin_base") + values.append("fin_base") print ("finalizing base") request.addfinalizer(cleanup_base) @@ -2396,29 +2451,29 @@ class TestFixtureMarker: def test_class_scope_parametrization_ordering(self, testdir): testdir.makepyfile(""" import pytest - l = [] + values = [] @pytest.fixture(params=["John", "Doe"], scope="class") def human(request): - request.addfinalizer(lambda: l.append("fin %s" % request.param)) + request.addfinalizer(lambda: values.append("fin %s" % request.param)) return request.param - class TestGreetings: + class TestGreetings(object): def test_hello(self, human): - l.append("test_hello") + values.append("test_hello") - class TestMetrics: + class TestMetrics(object): def test_name(self, human): - l.append("test_name") + values.append("test_name") def test_population(self, human): - l.append("test_population") + values.append("test_population") """) reprec = testdir.inline_run() reprec.assertoutcome(passed=6) - l = reprec.getcalls("pytest_runtest_call")[0].item.module.l - assert l == ["test_hello", "fin John", "test_hello", "fin Doe", - "test_name", "test_population", "fin John", - "test_name", "test_population", "fin Doe"] + values = reprec.getcalls("pytest_runtest_call")[0].item.module.values + assert values == ["test_hello", "fin John", "test_hello", "fin Doe", + "test_name", "test_population", "fin John", + "test_name", "test_population", "fin Doe"] def test_parametrize_setup_function(self, testdir): testdir.makepyfile(""" @@ -2430,21 +2485,21 @@ class TestFixtureMarker: @pytest.fixture(scope="module", autouse=True) def mysetup(request, arg): - request.addfinalizer(lambda: l.append("fin%s" % arg)) - l.append("setup%s" % arg) + request.addfinalizer(lambda: values.append("fin%s" % arg)) + 
values.append("setup%s" % arg) - l = [] + values = [] def test_1(arg): - l.append(arg) + values.append(arg) def test_2(arg): - l.append(arg) + values.append(arg) def test_3(): import pprint - pprint.pprint(l) + pprint.pprint(values) if arg == 1: - assert l == ["setup1", 1, 1, ] + assert values == ["setup1", 1, 1, ] elif arg == 2: - assert l == ["setup1", 1, 1, "fin1", + assert values == ["setup1", 1, 1, "fin1", "setup2", 2, 2, ] """) @@ -2498,9 +2553,42 @@ class TestFixtureMarker: '*test_foo*alpha*', '*test_foo*beta*']) + @pytest.mark.issue920 + def test_deterministic_fixture_collection(self, testdir, monkeypatch): + testdir.makepyfile(""" + import pytest -class TestRequestScopeAccess: - pytestmark = pytest.mark.parametrize(("scope", "ok", "error"),[ + @pytest.fixture(scope="module", + params=["A", + "B", + "C"]) + def A(request): + return request.param + + @pytest.fixture(scope="module", + params=["DDDDDDDDD", "EEEEEEEEEEEE", "FFFFFFFFFFF", "banansda"]) + def B(request, A): + return request.param + + def test_foo(B): + # Something funky is going on here. + # Despite specified seeds, on what is collected, + # sometimes we get unexpected passes. hashing B seems + # to help? + assert hash(B) or True + """) + monkeypatch.setenv("PYTHONHASHSEED", "1") + out1 = testdir.runpytest_subprocess("-v") + monkeypatch.setenv("PYTHONHASHSEED", "2") + out2 = testdir.runpytest_subprocess("-v") + out1 = [line for line in out1.outlines if line.startswith("test_deterministic_fixture_collection.py::test_foo")] + out2 = [line for line in out2.outlines if line.startswith("test_deterministic_fixture_collection.py::test_foo")] + assert len(out1) == 12 + assert out1 == out2 + + +class TestRequestScopeAccess(object): + pytestmark = pytest.mark.parametrize(("scope", "ok", "error"), [ ["session", "", "fspath class function module"], ["module", "module fspath", "cls function"], ["class", "module fspath cls", "function"], @@ -2521,7 +2609,7 @@ class TestRequestScopeAccess: assert request.config def test_func(): pass - """ %(scope, ok.split(), error.split())) + """ % (scope, ok.split(), error.split())) reprec = testdir.inline_run("-l") reprec.assertoutcome(passed=1) @@ -2539,11 +2627,12 @@ class TestRequestScopeAccess: assert request.config def test_func(arg): pass - """ %(scope, ok.split(), error.split())) + """ % (scope, ok.split(), error.split())) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) -class TestErrors: + +class TestErrors(object): def test_subfactory_missing_funcarg(self, testdir): testdir.makepyfile(""" import pytest @@ -2571,13 +2660,13 @@ class TestErrors: request.addfinalizer(f) return object() - l = [] + values = [] def test_1(fix1): - l.append(fix1) + values.append(fix1) def test_2(fix1): - l.append(fix1) + values.append(fix1) def test_3(): - assert l[0] != l[1] + assert values[0] != values[1] """) result = testdir.runpytest() result.stdout.fnmatch_lines(""" @@ -2588,8 +2677,6 @@ class TestErrors: *3 pass*2 error* """) - - def test_setupfunc_missing_funcarg(self, testdir): testdir.makepyfile(""" import pytest @@ -2607,7 +2694,8 @@ class TestErrors: "*1 error*", ]) -class TestShowFixtures: + +class TestShowFixtures(object): def test_funcarg_compat(self, testdir): config = testdir.parseconfigure("--funcargs") assert config.option.showfixtures @@ -2615,18 +2703,16 @@ class TestShowFixtures: def test_show_fixtures(self, testdir): result = testdir.runpytest("--fixtures") result.stdout.fnmatch_lines([ - "*tmpdir*", - "*temporary directory*", - ] - ) + "*tmpdir*", + "*temporary directory*", + ]) 
def test_show_fixtures_verbose(self, testdir): result = testdir.runpytest("--fixtures", "-v") result.stdout.fnmatch_lines([ - "*tmpdir*--*tmpdir.py*", - "*temporary directory*", - ] - ) + "*tmpdir*--*tmpdir.py*", + "*temporary directory*", + ]) def test_show_fixtures_testmodule(self, testdir): p = testdir.makepyfile(''' @@ -2669,7 +2755,7 @@ class TestShowFixtures: """) def test_show_fixtures_trimmed_doc(self, testdir): - p = testdir.makepyfile(''' + p = testdir.makepyfile(dedent(''' import pytest @pytest.fixture def arg1(): @@ -2685,9 +2771,9 @@ class TestShowFixtures: line2 """ - ''') + ''')) result = testdir.runpytest("--fixtures", p) - result.stdout.fnmatch_lines(""" + result.stdout.fnmatch_lines(dedent(""" * fixtures defined from test_show_fixtures_trimmed_doc * arg2 line1 @@ -2696,8 +2782,64 @@ class TestShowFixtures: line1 line2 - """) + """)) + def test_show_fixtures_indented_doc(self, testdir): + p = testdir.makepyfile(dedent(''' + import pytest + @pytest.fixture + def fixture1(): + """ + line1 + indented line + """ + ''')) + result = testdir.runpytest("--fixtures", p) + result.stdout.fnmatch_lines(dedent(""" + * fixtures defined from test_show_fixtures_indented_doc * + fixture1 + line1 + indented line + """)) + + def test_show_fixtures_indented_doc_first_line_unindented(self, testdir): + p = testdir.makepyfile(dedent(''' + import pytest + @pytest.fixture + def fixture1(): + """line1 + line2 + indented line + """ + ''')) + result = testdir.runpytest("--fixtures", p) + result.stdout.fnmatch_lines(dedent(""" + * fixtures defined from test_show_fixtures_indented_doc_first_line_unindented * + fixture1 + line1 + line2 + indented line + """)) + + def test_show_fixtures_indented_in_class(self, testdir): + p = testdir.makepyfile(dedent(''' + import pytest + class TestClass: + @pytest.fixture + def fixture1(): + """line1 + line2 + indented line + """ + ''')) + result = testdir.runpytest("--fixtures", p) + result.stdout.fnmatch_lines(dedent(""" + * fixtures defined from test_show_fixtures_indented_in_class * + fixture1 + line1 + line2 + indented line + """)) def test_show_fixtures_different_files(self, testdir): """ @@ -2770,7 +2912,7 @@ class TestShowFixtures: @pytest.mark.parametrize('flavor', ['fixture', 'yield_fixture']) -class TestContextManagerFixtureFuncs: +class TestContextManagerFixtureFuncs(object): def test_simple(self, testdir, flavor): testdir.makepyfile(""" @@ -2877,7 +3019,8 @@ class TestContextManagerFixtureFuncs: result = testdir.runpytest("-s") result.stdout.fnmatch_lines("*mew*") -class TestParameterizedSubRequest: + +class TestParameterizedSubRequest(object): def test_call_from_fixture(self, testdir): testfile = testdir.makepyfile(""" import pytest diff --git a/testing/python/integration.py b/testing/python/integration.py index 6697342ea..6ea29fa98 100644 --- a/testing/python/integration.py +++ b/testing/python/integration.py @@ -3,8 +3,8 @@ from _pytest import python from _pytest import runner -class TestOEJSKITSpecials: - def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage +class TestOEJSKITSpecials(object): + def test_funcarg_non_pycollectobj(self, testdir): # rough jstests usage testdir.makeconftest(""" import pytest def pytest_pycollect_makeitem(collector, name, obj): @@ -19,7 +19,7 @@ class TestOEJSKITSpecials: @pytest.fixture def arg1(request): return 42 - class MyClass: + class MyClass(object): pass """) # this hook finds funcarg factories @@ -30,7 +30,7 @@ class TestOEJSKITSpecials: pytest._fillfuncargs(clscol) assert clscol.funcargs['arg1'] 
== 42 - def test_autouse_fixture(self, testdir): # rough jstests usage + def test_autouse_fixture(self, testdir): # rough jstests usage testdir.makeconftest(""" import pytest def pytest_pycollect_makeitem(collector, name, obj): @@ -48,7 +48,7 @@ class TestOEJSKITSpecials: @pytest.fixture def arg1(request): return 42 - class MyClass: + class MyClass(object): pass """) # this hook finds funcarg factories @@ -76,7 +76,8 @@ def test_wrapped_getfslineno(): fs2, lineno2 = python.getfslineno(wrap) assert lineno > lineno2, "getfslineno does not unwrap correctly" -class TestMockDecoration: + +class TestMockDecoration(object): def test_wrapped_getfuncargnames(self): from _pytest.compat import getfuncargnames @@ -92,8 +93,8 @@ class TestMockDecoration: def f(x): pass - l = getfuncargnames(f) - assert l == ("x",) + values = getfuncargnames(f) + assert values == ("x",) def test_wrapped_getfuncargnames_patching(self): from _pytest.compat import getfuncargnames @@ -109,8 +110,8 @@ class TestMockDecoration: def f(x, y, z): pass - l = getfuncargnames(f) - assert l == ("y", "z") + values = getfuncargnames(f) + assert values == ("y", "z") def test_unittest_mock(self, testdir): pytest.importorskip("unittest.mock") @@ -173,7 +174,7 @@ class TestMockDecoration: reprec.assertoutcome(passed=2) calls = reprec.getcalls("pytest_runtest_logreport") funcnames = [call.report.location[2] for call in calls - if call.report.when == "call"] + if call.report.when == "call"] assert funcnames == ["T.test_hello", "test_someting"] def test_mock_sorting(self, testdir): @@ -207,7 +208,7 @@ class TestMockDecoration: @patch('os.getcwd') @patch('os.path') @mark.slow - class TestSimple: + class TestSimple(object): def test_simple_thing(self, mock_path, mock_getcwd): pass """) @@ -215,7 +216,7 @@ class TestMockDecoration: reprec.assertoutcome(passed=1) -class TestReRunTests: +class TestReRunTests(object): def test_rerun(self, testdir): testdir.makeconftest(""" from _pytest.runner import runtestprotocol @@ -246,12 +247,13 @@ class TestReRunTests: *2 passed* """) + def test_pytestconfig_is_session_scoped(): from _pytest.fixtures import pytestconfig assert pytestconfig._pytestfixturefunction.scope == "session" -class TestNoselikeTestAttribute: +class TestNoselikeTestAttribute(object): def test_module_with_global_test(self, testdir): testdir.makepyfile(""" __test__ = False @@ -270,7 +272,7 @@ class TestNoselikeTestAttribute: pass test_func.__test__ = False - class TestSome: + class TestSome(object): __test__ = False def test_method(self): pass @@ -328,7 +330,7 @@ class TestNoselikeTestAttribute: @pytest.mark.issue351 -class TestParameterize: +class TestParameterize(object): def test_idfn_marker(self, testdir): testdir.makepyfile(""" diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index a7e1d5699..2acdf669e 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -13,12 +13,12 @@ from hypothesis import strategies PY3 = sys.version_info >= (3, 0) -class TestMetafunc: +class TestMetafunc(object): def Metafunc(self, func): # the unit tests of this class check if things work correctly # on the funcarg level, so we don't need a full blown # initiliazation - class FixtureInfo: + class FixtureInfo(object): name2fixturedefs = None def __init__(self, names): @@ -29,13 +29,15 @@ class TestMetafunc: return python.Metafunc(func, fixtureinfo, None) def test_no_funcargs(self, testdir): - def function(): pass + def function(): + pass metafunc = self.Metafunc(function) assert not metafunc.fixturenames 
repr(metafunc._calls) def test_function_basic(self): - def func(arg1, arg2="qwe"): pass + def func(arg1, arg2="qwe"): + pass metafunc = self.Metafunc(func) assert len(metafunc.fixturenames) == 1 assert 'arg1' in metafunc.fixturenames @@ -43,7 +45,8 @@ class TestMetafunc: assert metafunc.cls is None def test_addcall_no_args(self): - def func(arg1): pass + def func(arg1): + pass metafunc = self.Metafunc(func) metafunc.addcall() assert len(metafunc._calls) == 1 @@ -52,7 +55,8 @@ class TestMetafunc: assert not hasattr(call, 'param') def test_addcall_id(self): - def func(arg1): pass + def func(arg1): + pass metafunc = self.Metafunc(func) pytest.raises(ValueError, "metafunc.addcall(id=None)") @@ -65,10 +69,12 @@ class TestMetafunc: assert metafunc._calls[1].id == "2" def test_addcall_param(self): - def func(arg1): pass + def func(arg1): + pass metafunc = self.Metafunc(func) - class obj: pass + class obj(object): + pass metafunc.addcall(param=obj) metafunc.addcall(param=obj) @@ -79,11 +85,13 @@ class TestMetafunc: assert metafunc._calls[2].getparam("arg1") == 1 def test_addcall_funcargs(self): - def func(x): pass + def func(x): + pass metafunc = self.Metafunc(func) - class obj: pass + class obj(object): + pass metafunc.addcall(funcargs={"x": 2}) metafunc.addcall(funcargs={"x": 3}) @@ -94,17 +102,19 @@ class TestMetafunc: assert not hasattr(metafunc._calls[1], 'param') def test_parametrize_error(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) - metafunc.parametrize("x", [1,2]) - pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6])) - pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5,6])) - metafunc.parametrize("y", [1,2]) - pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6])) - pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5,6])) + metafunc.parametrize("x", [1, 2]) + pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5, 6])) + pytest.raises(ValueError, lambda: metafunc.parametrize("x", [5, 6])) + metafunc.parametrize("y", [1, 2]) + pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5, 6])) + pytest.raises(ValueError, lambda: metafunc.parametrize("y", [5, 6])) def test_parametrize_bad_scope(self, testdir): - def func(x): pass + def func(x): + pass metafunc = self.Metafunc(func) try: metafunc.parametrize("x", [1], scope='doggy') @@ -112,45 +122,50 @@ class TestMetafunc: assert "has an unsupported scope value 'doggy'" in str(ve) def test_parametrize_and_id(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) - metafunc.parametrize("x", [1,2], ids=['basic', 'advanced']) + metafunc.parametrize("x", [1, 2], ids=['basic', 'advanced']) metafunc.parametrize("y", ["abc", "def"]) ids = [x.id for x in metafunc._calls] assert ids == ["basic-abc", "basic-def", "advanced-abc", "advanced-def"] def test_parametrize_and_id_unicode(self): """Allow unicode strings for "ids" parameter in Python 2 (##1905)""" - def func(x): pass + def func(x): + pass metafunc = self.Metafunc(func) metafunc.parametrize("x", [1, 2], ids=[u'basic', u'advanced']) ids = [x.id for x in metafunc._calls] assert ids == [u"basic", u"advanced"] def test_parametrize_with_wrong_number_of_ids(self, testdir): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) pytest.raises(ValueError, lambda: - metafunc.parametrize("x", [1,2], ids=['basic'])) + metafunc.parametrize("x", [1, 2], ids=['basic'])) pytest.raises(ValueError, lambda: - metafunc.parametrize(("x","y"), [("abc", "def"), 
- ("ghi", "jkl")], ids=["one"])) + metafunc.parametrize(("x", "y"), [("abc", "def"), + ("ghi", "jkl")], ids=["one"])) @pytest.mark.issue510 def test_parametrize_empty_list(self): - def func( y): pass + def func(y): + pass metafunc = self.Metafunc(func) metafunc.parametrize("y", []) assert 'skip' in metafunc._calls[0].keywords def test_parametrize_with_userobjects(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) - class A: + class A(object): pass metafunc.parametrize("x", [A(), A()]) @@ -178,11 +193,27 @@ class TestMetafunc: """ from _pytest.python import _idval values = [ - (u'', ''), - (u'ascii', 'ascii'), - (u'ação', 'a\\xe7\\xe3o'), - (u'josé@blah.com', 'jos\\xe9@blah.com'), - (u'δοκ.ιμή@παράδειγμα.δοκιμή', '\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae'), + ( + u'', + '' + ), + ( + u'ascii', + 'ascii' + ), + ( + u'ação', + 'a\\xe7\\xe3o' + ), + ( + u'josé@blah.com', + 'jos\\xe9@blah.com' + ), + ( + u'δοκ.ιμή@παράδειγμα.δοκιμή', + '\\u03b4\\u03bf\\u03ba.\\u03b9\\u03bc\\u03ae@\\u03c0\\u03b1\\u03c1\\u03ac\\u03b4\\u03b5\\u03b9\\u03b3' + '\\u03bc\\u03b1.\\u03b4\\u03bf\\u03ba\\u03b9\\u03bc\\u03ae' + ), ] for val, expected in values: assert _idval(val, 'a', 6, None) == expected @@ -207,37 +238,40 @@ class TestMetafunc: @pytest.mark.issue250 def test_idmaker_autoname(self): from _pytest.python import idmaker - result = idmaker(("a", "b"), [("string", 1.0), - ("st-ring", 2.0)]) + result = idmaker(("a", "b"), [pytest.param("string", 1.0), + pytest.param("st-ring", 2.0)]) assert result == ["string-1.0", "st-ring-2.0"] - result = idmaker(("a", "b"), [(object(), 1.0), - (object(), object())]) + result = idmaker(("a", "b"), [pytest.param(object(), 1.0), + pytest.param(object(), object())]) assert result == ["a0-1.0", "a1-b1"] # unicode mixing, issue250 - result = idmaker((py.builtin._totext("a"), "b"), [({}, b'\xc3\xb4')]) + result = idmaker( + (py.builtin._totext("a"), "b"), + [pytest.param({}, b'\xc3\xb4')]) assert result == ['a0-\\xc3\\xb4'] def test_idmaker_with_bytes_regex(self): from _pytest.python import idmaker - result = idmaker(("a"), [(re.compile(b'foo'), 1.0)]) + result = idmaker(("a"), [pytest.param(re.compile(b'foo'), 1.0)]) assert result == ["foo"] def test_idmaker_native_strings(self): from _pytest.python import idmaker totext = py.builtin._totext - result = idmaker(("a", "b"), [(1.0, -1.1), - (2, -202), - ("three", "three hundred"), - (True, False), - (None, None), - (re.compile('foo'), re.compile('bar')), - (str, int), - (list("six"), [66, 66]), - (set([7]), set("seven")), - (tuple("eight"), (8, -8, 8)), - (b'\xc3\xb4', b"name"), - (b'\xc3\xb4', totext("other")), + result = idmaker(("a", "b"), [ + pytest.param(1.0, -1.1), + pytest.param(2, -202), + pytest.param("three", "three hundred"), + pytest.param(True, False), + pytest.param(None, None), + pytest.param(re.compile('foo'), re.compile('bar')), + pytest.param(str, int), + pytest.param(list("six"), [66, 66]), + pytest.param(set([7]), set("seven")), + pytest.param(tuple("eight"), (8, -8, 8)), + pytest.param(b'\xc3\xb4', b"name"), + pytest.param(b'\xc3\xb4', totext("other")), ]) assert result == ["1.0--1.1", "2--202", @@ -257,7 +291,7 @@ class TestMetafunc: from _pytest.python import idmaker enum = pytest.importorskip("enum") e = enum.Enum("Foo", "one, two") - result = idmaker(("a", "b"), [(e.one, e.two)]) + result = idmaker(("a", "b"), [pytest.param(e.one, e.two)]) assert result == 
["Foo.one-Foo.two"] @pytest.mark.issue351 @@ -268,14 +302,15 @@ class TestMetafunc: if isinstance(val, Exception): return repr(val) - result = idmaker(("a", "b"), [(10.0, IndexError()), - (20, KeyError()), - ("three", [1, 2, 3]), + result = idmaker(("a", "b"), [ + pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), ], idfn=ids) assert result == ["10.0-IndexError()", "20-KeyError()", "three-b2", - ] + ] @pytest.mark.issue351 def test_idmaker_idfn_unique_names(self): @@ -284,49 +319,98 @@ class TestMetafunc: def ids(val): return 'a' - result = idmaker(("a", "b"), [(10.0, IndexError()), - (20, KeyError()), - ("three", [1, 2, 3]), - ], idfn=ids) + result = idmaker(("a", "b"), [pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), + ], idfn=ids) assert result == ["a-a0", "a-a1", "a-a2", - ] + ] @pytest.mark.issue351 def test_idmaker_idfn_exception(self): from _pytest.python import idmaker + from _pytest.recwarn import WarningsRecorder + + class BadIdsException(Exception): + pass def ids(val): - raise Exception("bad code") + raise BadIdsException("ids raised") - result = idmaker(("a", "b"), [(10.0, IndexError()), - (20, KeyError()), - ("three", [1, 2, 3]), - ], idfn=ids) - assert result == ["10.0-b0", - "20-b1", - "three-b2", - ] + rec = WarningsRecorder() + with rec: + idmaker(("a", "b"), [ + pytest.param(10.0, IndexError()), + pytest.param(20, KeyError()), + pytest.param("three", [1, 2, 3]), + ], idfn=ids) + + assert [str(i.message) for i in rec.list] == [ + "Raised while trying to determine id of parameter a at position 0." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 0." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter a at position 1." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 1." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter a at position 2." + "\nUpdate your code as this will raise an error in pytest-4.0.", + "Raised while trying to determine id of parameter b at position 2." + "\nUpdate your code as this will raise an error in pytest-4.0.", + ] + + def test_parametrize_ids_exception(self, testdir): + """ + :param testdir: the instance of Testdir class, a temporary + test directory. 
+ """ + testdir.makepyfile(""" + import pytest + + def ids(arg): + raise Exception("bad ids") + + @pytest.mark.parametrize("arg", ["a", "b"], ids=ids) + def test_foo(arg): + pass + """) + with pytest.warns(DeprecationWarning): + result = testdir.runpytest("--collect-only") + result.stdout.fnmatch_lines([ + "", + " ", + " ", + ]) def test_idmaker_with_ids(self): from _pytest.python import idmaker - result = idmaker(("a", "b"), [(1, 2), - (3, 4)], + result = idmaker(("a", "b"), [pytest.param(1, 2), + pytest.param(3, 4)], ids=["a", None]) assert result == ["a", "3-4"] + def test_idmaker_with_paramset_id(self): + from _pytest.python import idmaker + result = idmaker(("a", "b"), [pytest.param(1, 2, id="me"), + pytest.param(3, 4, id="you")], + ids=["a", None]) + assert result == ["me", "you"] + def test_idmaker_with_ids_unique_names(self): from _pytest.python import idmaker - result = idmaker(("a"), [1,2,3,4,5], + result = idmaker(("a"), map(pytest.param, [1, 2, 3, 4, 5]), ids=["a", "a", "b", "c", "b"]) assert result == ["a0", "a1", "b0", "c", "b1"] def test_addcall_and_parametrize(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.addcall({'x': 1}) - metafunc.parametrize('y', [2,3]) + metafunc.parametrize('y', [2, 3]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {'x': 1, 'y': 2} assert metafunc._calls[1].funcargs == {'x': 1, 'y': 3} @@ -335,19 +419,21 @@ class TestMetafunc: @pytest.mark.issue714 def test_parametrize_indirect(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.parametrize('x', [1], indirect=True) - metafunc.parametrize('y', [2,3], indirect=True) + metafunc.parametrize('y', [2, 3], indirect=True) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} - assert metafunc._calls[0].params == dict(x=1,y=2) - assert metafunc._calls[1].params == dict(x=1,y=3) + assert metafunc._calls[0].params == dict(x=1, y=2) + assert metafunc._calls[1].params == dict(x=1, y=3) @pytest.mark.issue714 def test_parametrize_indirect_list(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.parametrize('x, y', [('a', 'b')], indirect=['x']) assert metafunc._calls[0].funcargs == dict(y='b') @@ -355,7 +441,8 @@ class TestMetafunc: @pytest.mark.issue714 def test_parametrize_indirect_list_all(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'y']) assert metafunc._calls[0].funcargs == {} @@ -363,7 +450,8 @@ class TestMetafunc: @pytest.mark.issue714 def test_parametrize_indirect_list_empty(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.parametrize('x, y', [('a', 'b')], indirect=[]) assert metafunc._calls[0].funcargs == dict(x='a', y='b') @@ -401,7 +489,8 @@ class TestMetafunc: @pytest.mark.issue714 def test_parametrize_indirect_list_error(self, testdir): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) with pytest.raises(ValueError): metafunc.parametrize('x, y', [('a', 'b')], indirect=['x', 'z']) @@ -497,16 +586,17 @@ class TestMetafunc: ]) def test_addcalls_and_parametrize_indirect(self): - def func(x, y): pass + def func(x, y): + pass metafunc = self.Metafunc(func) metafunc.addcall(param="123") metafunc.parametrize('x', [1], indirect=True) - metafunc.parametrize('y', [2,3], indirect=True) + metafunc.parametrize('y', [2, 3], 
indirect=True) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == {} assert metafunc._calls[1].funcargs == {} - assert metafunc._calls[0].params == dict(x=1,y=2) - assert metafunc._calls[1].params == dict(x=1,y=3) + assert metafunc._calls[0].params == dict(x=1, y=2) + assert metafunc._calls[1].params == dict(x=1, y=3) def test_parametrize_functional(self, testdir): testdir.makepyfile(""" @@ -531,7 +621,7 @@ class TestMetafunc: def test_parametrize_onearg(self): metafunc = self.Metafunc(lambda x: None) - metafunc.parametrize("x", [1,2]) + metafunc.parametrize("x", [1, 2]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == dict(x=1) assert metafunc._calls[0].id == "1" @@ -540,15 +630,15 @@ class TestMetafunc: def test_parametrize_onearg_indirect(self): metafunc = self.Metafunc(lambda x: None) - metafunc.parametrize("x", [1,2], indirect=True) + metafunc.parametrize("x", [1, 2], indirect=True) assert metafunc._calls[0].params == dict(x=1) assert metafunc._calls[0].id == "1" assert metafunc._calls[1].params == dict(x=2) assert metafunc._calls[1].id == "2" def test_parametrize_twoargs(self): - metafunc = self.Metafunc(lambda x,y: None) - metafunc.parametrize(("x", "y"), [(1,2), (3,4)]) + metafunc = self.Metafunc(lambda x, y: None) + metafunc.parametrize(("x", "y"), [(1, 2), (3, 4)]) assert len(metafunc._calls) == 2 assert metafunc._calls[0].funcargs == dict(x=1, y=2) assert metafunc._calls[0].id == "1-2" @@ -561,7 +651,7 @@ class TestMetafunc: pytestmark = pytest.mark.parametrize("x", [1,2]) def test_func(x): assert 0, x - class TestClass: + class TestClass(object): pytestmark = pytest.mark.parametrize("y", [3,4]) def test_meth(self, x, y): assert 0, x @@ -619,20 +709,24 @@ class TestMetafunc: """) def test_format_args(self): - def function1(): pass + def function1(): + pass assert fixtures._format_args(function1) == '()' - def function2(arg1): pass + def function2(arg1): + pass assert fixtures._format_args(function2) == "(arg1)" - def function3(arg1, arg2="qwe"): pass + def function3(arg1, arg2="qwe"): + pass assert fixtures._format_args(function3) == "(arg1, arg2='qwe')" - def function4(arg1, *args, **kwargs): pass + def function4(arg1, *args, **kwargs): + pass assert fixtures._format_args(function4) == "(arg1, *args, **kwargs)" -class TestMetafuncFunctional: +class TestMetafuncFunctional(object): def test_attributes(self, testdir): p = testdir.makepyfile(""" # assumes that generate/provide runs in the same process @@ -651,7 +745,7 @@ class TestMetafuncFunctional: assert metafunc.function == test_function assert metafunc.cls is None - class TestClass: + class TestClass(object): def test_method(self, metafunc, pytestconfig): assert metafunc.config == pytestconfig assert metafunc.module.__name__ == __name__ @@ -676,7 +770,7 @@ class TestMetafuncFunctional: def pytest_generate_tests(metafunc): metafunc.addcall(funcargs=dict(arg1=1, arg2=1)) - class TestClass: + class TestClass(object): def test_myfunc(self, arg1, arg2): assert arg1 == arg2 """) @@ -716,14 +810,13 @@ class TestMetafuncFunctional: def pytest_generate_tests(metafunc): assert 'xyz' not in metafunc.fixturenames - class TestHello: + class TestHello(object): def test_hello(xyz): pass """) result = testdir.runpytest(p) result.assert_outcomes(passed=1) - def test_generate_plugin_and_module(self, testdir): testdir.makeconftest(""" def pytest_generate_tests(metafunc): @@ -742,7 +835,7 @@ class TestMetafuncFunctional: def arg2(request): return request.param[1] - class TestClass: + class 
TestClass(object): def test_myfunc(self, arg1, arg2): assert arg1 == arg2 """) @@ -755,7 +848,7 @@ class TestMetafuncFunctional: def test_generate_tests_in_class(self, testdir): p = testdir.makepyfile(""" - class TestClass: + class TestClass(object): def pytest_generate_tests(self, metafunc): metafunc.addcall(funcargs={'hello': 'world'}, id="hello") @@ -774,7 +867,7 @@ class TestMetafuncFunctional: metafunc.addcall({'arg1': 10}) metafunc.addcall({'arg1': 20}) - class TestClass: + class TestClass(object): def test_func(self, arg1): assert not hasattr(self, 'x') self.x = 1 @@ -791,7 +884,7 @@ class TestMetafuncFunctional: def pytest_generate_tests(metafunc): metafunc.addcall({'arg1': 1}) - class TestClass: + class TestClass(object): def test_method(self, arg1): assert arg1 == self.val def setup_method(self, func): @@ -978,21 +1071,21 @@ class TestMetafuncFunctional: def test_parametrize_scope_overrides(self, testdir, scope, length): testdir.makepyfile(""" import pytest - l = [] + values = [] def pytest_generate_tests(metafunc): if "arg" in metafunc.funcargnames: metafunc.parametrize("arg", [1,2], indirect=True, scope=%r) @pytest.fixture def arg(request): - l.append(request.param) + values.append(request.param) return request.param def test_hello(arg): assert arg in (1,2) def test_world(arg): assert arg in (1,2) def test_checklength(): - assert len(l) == %d + assert len(values) == %d """ % (scope, length)) reprec = testdir.inline_run() reprec.assertoutcome(passed=5) @@ -1061,7 +1154,7 @@ class TestMetafuncFunctional: @pytest.mark.issue463 @pytest.mark.parametrize('attr', ['parametrise', 'parameterize', - 'parameterise']) + 'parameterise']) def test_parametrize_misspelling(self, testdir, attr): testdir.makepyfile(""" import pytest @@ -1077,7 +1170,7 @@ class TestMetafuncFunctional: assert expectederror in failures[0].longrepr.reprcrash.message -class TestMetafuncFunctionalAuto: +class TestMetafuncFunctionalAuto(object): """ Tests related to automatically find out the correct scope for parametrized tests (#1832). 
""" @@ -1196,8 +1289,10 @@ class TestMetafuncFunctionalAuto: assert output.count('preparing foo-3') == 1 -class TestMarkersWithParametrization: - pytestmark = pytest.mark.issue308 +@pytest.mark.filterwarnings('ignore:Applying marks directly to parameters') +@pytest.mark.issue308 +class TestMarkersWithParametrization(object): + def test_simple_mark(self, testdir): s = """ import pytest @@ -1381,7 +1476,6 @@ class TestMarkersWithParametrization: reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=2) - @pytest.mark.issue290 def test_parametrize_ID_generation_string_int_works(self, testdir): testdir.makepyfile(""" @@ -1398,6 +1492,29 @@ class TestMarkersWithParametrization: reprec = testdir.inline_run() reprec.assertoutcome(passed=2) + @pytest.mark.parametrize('strict', [True, False]) + def test_parametrize_marked_value(self, testdir, strict): + s = """ + import pytest + + @pytest.mark.parametrize(("n", "expected"), [ + pytest.param( + 2,3, + marks=pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict}), + ), + pytest.param( + 2,3, + marks=[pytest.mark.xfail("sys.version_info > (0, 0, 0)", reason="some bug", strict={strict})], + ), + ]) + def test_increment(n, expected): + assert n + 1 == expected + """.format(strict=strict) + testdir.makepyfile(s) + reprec = testdir.inline_run() + passed, failed = (0, 2) if strict else (2, 0) + reprec.assertoutcome(passed=passed, failed=failed) + def test_pytest_make_parametrize_id(self, testdir): testdir.makeconftest(""" def pytest_make_parametrize_id(config, val): @@ -1415,3 +1532,26 @@ class TestMarkersWithParametrization: "*test_func*0*PASS*", "*test_func*2*PASS*", ]) + + def test_pytest_make_parametrize_id_with_argname(self, testdir): + testdir.makeconftest(""" + def pytest_make_parametrize_id(config, val, argname): + return str(val * 2 if argname == 'x' else val * 10) + """) + testdir.makepyfile(""" + import pytest + + @pytest.mark.parametrize("x", range(2)) + def test_func_a(x): + pass + + @pytest.mark.parametrize("y", [1]) + def test_func_b(y): + pass + """) + result = testdir.runpytest("-v") + result.stdout.fnmatch_lines([ + "*test_func_a*0*PASS*", + "*test_func_a*2*PASS*", + "*test_func_b*10*PASS*", + ]) diff --git a/testing/python/raises.py b/testing/python/raises.py index 8f141cfa1..321ee349e 100644 --- a/testing/python/raises.py +++ b/testing/python/raises.py @@ -2,7 +2,7 @@ import pytest import sys -class TestRaises: +class TestRaises(object): def test_raises(self): source = "int('qwe')" excinfo = pytest.raises(ValueError, source) @@ -20,7 +20,7 @@ class TestRaises: pytest.raises(ValueError, int, 'hello') def test_raises_callable_no_exception(self): - class A: + class A(object): def __call__(self): pass try: @@ -28,14 +28,6 @@ class TestRaises: except pytest.raises.Exception: pass - def test_raises_flip_builtin_AssertionError(self): - # we replace AssertionError on python level - # however c code might still raise the builtin one - from _pytest.assertion.util import BuiltinAssertionError # noqa - pytest.raises(AssertionError,""" - raise BuiltinAssertionError - """) - def test_raises_as_contextmanager(self, testdir): testdir.makepyfile(""" from __future__ import with_statement @@ -126,3 +118,17 @@ class TestRaises: for o in gc.get_objects(): assert type(o) is not T + def test_raises_match(self): + msg = r"with base \d+" + with pytest.raises(ValueError, match=msg): + int('asdf') + + msg = "with base 10" + with pytest.raises(ValueError, match=msg): + int('asdf') + + msg = "with base 16" + expr = 
r"Pattern '{0}' not found in 'invalid literal for int\(\) with base 10: 'asdf''".format(msg) + with pytest.raises(AssertionError, match=expr): + with pytest.raises(ValueError, match=msg): + int('asdf', base=10) diff --git a/testing/python/setup_only.py b/testing/python/setup_only.py index c780b197e..18af56477 100644 --- a/testing/python/setup_only.py +++ b/testing/python/setup_only.py @@ -187,7 +187,7 @@ def test_dynamic_fixture_request(testdir): pass @pytest.fixture() def dependent_fixture(request): - request.getfuncargvalue('dynamically_requested_fixture') + request.getfixturevalue('dynamically_requested_fixture') def test_dyn(dependent_fixture): pass ''') diff --git a/testing/python/show_fixtures_per_test.py b/testing/python/show_fixtures_per_test.py index 18563e818..741f33946 100644 --- a/testing/python/show_fixtures_per_test.py +++ b/testing/python/show_fixtures_per_test.py @@ -135,3 +135,24 @@ def test_verbose_include_private_fixtures_and_loc(testdir): 'arg3 -- test_verbose_include_private_fixtures_and_loc.py:3', ' arg3 from testmodule', ]) + + +def test_doctest_items(testdir): + testdir.makepyfile(''' + def foo(): + """ + >>> 1 + 1 + 2 + """ + ''') + testdir.maketxtfile(''' + >>> 1 + 1 + 2 + ''') + result = testdir.runpytest("--fixtures-per-test", "--doctest-modules", + "--doctest-glob=*.txt", "-v") + assert result.ret == 0 + + result.stdout.fnmatch_lines([ + '*collected 2 items*', + ]) diff --git a/testing/test_argcomplete.py b/testing/test_argcomplete.py index ace7d8ceb..c92612577 100644 --- a/testing/test_argcomplete.py +++ b/testing/test_argcomplete.py @@ -1,8 +1,10 @@ -from __future__ import with_statement -import py, pytest +from __future__ import absolute_import, division, print_function +import py +import pytest # test for _argcomplete but not specific for any application + def equal_with_bash(prefix, ffc, fc, out=None): res = ffc(prefix) res_bash = set(fc(prefix)) @@ -17,10 +19,12 @@ def equal_with_bash(prefix, ffc, fc, out=None): # copied from argcomplete.completers as import from there # also pulls in argcomplete.__init__ which opens filedescriptor 9 # this gives an IOError at the end of testrun + + def _wrapcall(*args, **kargs): try: - if py.std.sys.version_info > (2,7): - return py.std.subprocess.check_output(*args,**kargs).decode().splitlines() + if py.std.sys.version_info > (2, 7): + return py.std.subprocess.check_output(*args, **kargs).decode().splitlines() if 'stdout' in kargs: raise ValueError('stdout argument not allowed, it will be overridden.') process = py.std.subprocess.Popen( @@ -36,9 +40,11 @@ def _wrapcall(*args, **kargs): except py.std.subprocess.CalledProcessError: return [] + class FilesCompleter(object): 'File completer class, optionally takes a list of allowed extensions' - def __init__(self,allowednames=(),directories=True): + + def __init__(self, allowednames=(), directories=True): # Fix if someone passes in a string instead of a list if type(allowednames) is str: allowednames = [allowednames] @@ -50,32 +56,33 @@ class FilesCompleter(object): completion = [] if self.allowednames: if self.directories: - files = _wrapcall(['bash','-c', - "compgen -A directory -- '{p}'".format(p=prefix)]) - completion += [ f + '/' for f in files] + files = _wrapcall(['bash', '-c', + "compgen -A directory -- '{p}'".format(p=prefix)]) + completion += [f + '/' for f in files] for x in self.allowednames: completion += _wrapcall(['bash', '-c', - "compgen -A file -X '!*.{0}' -- '{p}'".format(x,p=prefix)]) + "compgen -A file -X '!*.{0}' -- '{p}'".format(x, p=prefix)]) else: 
completion += _wrapcall(['bash', '-c', - "compgen -A file -- '{p}'".format(p=prefix)]) + "compgen -A file -- '{p}'".format(p=prefix)]) anticomp = _wrapcall(['bash', '-c', - "compgen -A directory -- '{p}'".format(p=prefix)]) + "compgen -A directory -- '{p}'".format(p=prefix)]) - completion = list( set(completion) - set(anticomp)) + completion = list(set(completion) - set(anticomp)) if self.directories: completion += [f + '/' for f in anticomp] return completion -class TestArgComplete: + +class TestArgComplete(object): @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") def test_compare_with_compgen(self): from _pytest._argcomplete import FastFilesCompleter ffc = FastFilesCompleter() fc = FilesCompleter() - for x in '/ /d /data qqq'.split(): + for x in ['/', '/d', '/data', 'qqq', '']: assert equal_with_bash(x, ffc, fc, out=py.std.sys.stdout) @pytest.mark.skipif("sys.platform in ('win32', 'darwin')") diff --git a/testing/test_assertion.py b/testing/test_assertion.py index c63f26b9c..4cd050d8c 100644 --- a/testing/test_assertion.py +++ b/testing/test_assertion.py @@ -1,4 +1,5 @@ # -*- coding: utf-8 -*- +from __future__ import absolute_import, division, print_function import sys import textwrap @@ -6,6 +7,7 @@ import _pytest.assertion as plugin import py import pytest from _pytest.assertion import util +from _pytest.assertion import truncate PY3 = sys.version_info >= (3, 0) @@ -24,7 +26,7 @@ def mock_config(): return Config() -class TestImportHookInstallation: +class TestImportHookInstallation(object): @pytest.mark.parametrize('initial_conftest', [True, False]) @pytest.mark.parametrize('mode', ['plain', 'rewrite']) @@ -58,6 +60,23 @@ class TestImportHookInstallation: assert 0 result.stdout.fnmatch_lines([expected]) + def test_rewrite_assertions_pytester_plugin(self, testdir): + """ + Assertions in the pytester plugin must also benefit from assertion + rewriting (#1920). + """ + testdir.makepyfile(""" + pytest_plugins = ['pytester'] + def test_dummy_failure(testdir): # how meta! 
+ testdir.makepyfile('def test(): assert 0') + r = testdir.inline_run() + r.assertoutcome(passed=1) + """) + result = testdir.runpytest_subprocess() + result.stdout.fnmatch_lines([ + '*assert 1 == 0*', + ]) + @pytest.mark.parametrize('mode', ['plain', 'rewrite']) def test_pytest_plugins_rewrite(self, testdir, mode): contents = { @@ -141,7 +160,7 @@ class TestImportHookInstallation: plugin_state = "{plugin_state}" - class DummyDistInfo: + class DummyDistInfo(object): project_name = 'spam' version = '1.0' @@ -156,7 +175,7 @@ class TestImportHookInstallation: 'hampkg/__init__.py'] return [] - class DummyEntryPoint: + class DummyEntryPoint(object): name = 'spam' module_name = 'spam.py' attrs = () @@ -210,9 +229,9 @@ class TestImportHookInstallation: return pkg.helper.tool """, 'pkg/other.py': """ - l = [3, 2] + values = [3, 2] def tool(): - assert l.pop() == 3 + assert values.pop() == 3 """, 'conftest.py': """ pytest_plugins = ['pkg.plugin'] @@ -229,7 +248,7 @@ class TestImportHookInstallation: result = testdir.runpytest_subprocess('--assert=rewrite') result.stdout.fnmatch_lines(['>*assert a == b*', 'E*assert 2 == 3*', - '>*assert l.pop() == 3*', + '>*assert values.pop() == 3*', 'E*AssertionError']) def test_register_assert_rewrite_checks_types(self): @@ -239,18 +258,18 @@ class TestImportHookInstallation: 'pytest_tests_internal_non_existing2') -class TestBinReprIntegration: +class TestBinReprIntegration(object): def test_pytest_assertrepr_compare_called(self, testdir): testdir.makeconftest(""" import pytest - l = [] + values = [] def pytest_assertrepr_compare(op, left, right): - l.append((op, left, right)) + values.append((op, left, right)) @pytest.fixture def list(request): - return l + return values """) testdir.makepyfile(""" def test_hello(): @@ -264,13 +283,14 @@ class TestBinReprIntegration: "*test_check*PASS*", ]) + def callequal(left, right, verbose=False): config = mock_config() config.verbose = verbose return plugin.pytest_assertrepr_compare(config, '==', left, right) -class TestAssert_reprcompare: +class TestAssert_reprcompare(object): def test_different_types(self): assert callequal([0, 1], 'foo') is None @@ -284,15 +304,15 @@ class TestAssert_reprcompare: assert '+ eggs' in diff def test_text_skipping(self): - lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs') + lines = callequal('a' * 50 + 'spam', 'a' * 50 + 'eggs') assert 'Skipping' in lines[1] for line in lines: - assert 'a'*50 not in line + assert 'a' * 50 not in line def test_text_skipping_verbose(self): - lines = callequal('a'*50 + 'spam', 'a'*50 + 'eggs', verbose=True) - assert '- ' + 'a'*50 + 'spam' in lines - assert '+ ' + 'a'*50 + 'eggs' in lines + lines = callequal('a' * 50 + 'spam', 'a' * 50 + 'eggs', verbose=True) + assert '- ' + 'a' * 50 + 'spam' in lines + assert '+ ' + 'a' * 50 + 'eggs' in lines def test_multiline_text_diff(self): left = 'foo\nspam\nbar' @@ -347,7 +367,7 @@ class TestAssert_reprcompare: expl = '\n'.join(callequal(left, right, verbose=True)) assert expl.endswith(textwrap.dedent(expected).strip()) - def test_list_different_lenghts(self): + def test_list_different_lengths(self): expl = callequal([0, 1], [0, 1, 2]) assert len(expl) > 1 expl = callequal([0, 1, 2], [0, 1]) @@ -364,8 +384,16 @@ class TestAssert_reprcompare: for line in lines[1:]: assert 'b' not in line - def test_dict_omitting_verbose(self): - lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=True) + def test_dict_omitting_with_verbosity_1(self): + """ Ensure differing items are visible for verbosity=1 (#1512) """ + lines = 
callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=1) + assert lines[1].startswith('Omitting 1 identical item') + assert lines[2].startswith('Differing items') + assert lines[3] == "{'a': 0} != {'a': 1}" + assert 'Common items' not in lines + + def test_dict_omitting_with_verbosity_2(self): + lines = callequal({'a': 0, 'b': 1}, {'a': 1, 'b': 1}, verbose=2) assert lines[1].startswith('Common items:') assert 'Omitting' not in lines[1] assert lines[2] == "{'b': 1}" @@ -410,13 +438,13 @@ class TestAssert_reprcompare: assert len(expl) > 1 def test_list_tuples(self): - expl = callequal([], [(1,2)]) + expl = callequal([], [(1, 2)]) assert len(expl) > 1 - expl = callequal([(1,2)], []) + expl = callequal([(1, 2)], []) assert len(expl) > 1 def test_list_bad_repr(self): - class A: + class A(object): def __repr__(self): raise ValueError(42) expl = callequal([], [A()]) @@ -475,7 +503,7 @@ class TestAssert_reprcompare: assert msg -class TestFormatExplanation: +class TestFormatExplanation(object): def test_special_chars_full(self, testdir): # Issue 453, for the bug this would raise IndexError @@ -567,6 +595,111 @@ class TestFormatExplanation: assert util.format_explanation(expl) == res +class TestTruncateExplanation(object): + + """ Confirm assertion output is truncated as expected """ + + # The number of lines in the truncation explanation message. Used + # to calculate that results have the expected length. + LINES_IN_TRUNCATION_MSG = 2 + + def test_doesnt_truncate_when_input_is_empty_list(self): + expl = [] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100) + assert result == expl + + def test_doesnt_truncate_at_when_input_is_5_lines_and_LT_max_chars(self): + expl = ['a' * 100 for x in range(5)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) + assert result == expl + + def test_truncates_at_8_lines_when_given_list_of_empty_strings(self): + expl = ['' for x in range(50)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100) + assert result != expl + assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "43 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + assert last_line_before_trunc_msg.endswith("...") + + def test_truncates_at_8_lines_when_first_8_lines_are_LT_max_chars(self): + expl = ['a' for x in range(100)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) + assert result != expl + assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "93 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + assert last_line_before_trunc_msg.endswith("...") + + def test_truncates_at_8_lines_when_first_8_lines_are_EQ_max_chars(self): + expl = ['a' * 80 for x in range(16)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=8 * 80) + assert result != expl + assert len(result) == 8 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "9 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + assert last_line_before_trunc_msg.endswith("...") + + def test_truncates_at_4_lines_when_first_4_lines_are_GT_max_chars(self): + expl = ['a' * 250 for x in range(10)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=999) + assert result != expl + assert len(result) == 4 + 
self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "7 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + assert last_line_before_trunc_msg.endswith("...") + + def test_truncates_at_1_line_when_first_line_is_GT_max_chars(self): + expl = ['a' * 250 for x in range(1000)] + result = truncate._truncate_explanation(expl, max_lines=8, max_chars=100) + assert result != expl + assert len(result) == 1 + self.LINES_IN_TRUNCATION_MSG + assert "Full output truncated" in result[-1] + assert "1000 lines hidden" in result[-1] + last_line_before_trunc_msg = result[- self.LINES_IN_TRUNCATION_MSG - 1] + assert last_line_before_trunc_msg.endswith("...") + + def test_full_output_truncated(self, monkeypatch, testdir): + """ Test against full runpytest() output. """ + + line_count = 7 + line_len = 100 + expected_truncated_lines = 2 + testdir.makepyfile(r""" + def test_many_lines(): + a = list([str(i)[0] * %d for i in range(%d)]) + b = a[::2] + a = '\n'.join(map(str, a)) + b = '\n'.join(map(str, b)) + assert a == b + """ % (line_len, line_count)) + monkeypatch.delenv('CI', raising=False) + + result = testdir.runpytest() + # without -vv, truncate the message showing a few diff lines only + result.stdout.fnmatch_lines([ + "*- 1*", + "*- 3*", + "*- 5*", + "*truncated (%d lines hidden)*use*-vv*" % expected_truncated_lines, + ]) + + result = testdir.runpytest('-vv') + result.stdout.fnmatch_lines([ + "* 6*", + ]) + + monkeypatch.setenv('CI', '1') + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "* 6*", + ]) + + def test_python25_compile_issue257(testdir): testdir.makepyfile(""" def test_rewritten(): @@ -580,6 +713,7 @@ def test_python25_compile_issue257(testdir): *1 failed* """) + def test_rewritten(testdir): testdir.makepyfile(""" def test_rewritten(): @@ -587,11 +721,13 @@ def test_rewritten(testdir): """) assert testdir.runpytest().ret == 0 + def test_reprcompare_notin(mock_config): detail = plugin.pytest_assertrepr_compare( mock_config, 'not in', 'foo', 'aaafoobbb')[1:] assert detail == ["'foo' is contained here:", ' aaafoobbb', '? 
+++'] + def test_pytest_assertrepr_compare_integration(testdir): testdir.makepyfile(""" def test_hello(): @@ -608,6 +744,7 @@ def test_pytest_assertrepr_compare_integration(testdir): "*E*50*", ]) + def test_sequence_comparison_uses_repr(testdir): testdir.makepyfile(""" def test_hello(): @@ -626,40 +763,6 @@ def test_sequence_comparison_uses_repr(testdir): ]) -def test_assert_compare_truncate_longmessage(monkeypatch, testdir): - testdir.makepyfile(r""" - def test_long(): - a = list(range(200)) - b = a[::2] - a = '\n'.join(map(str, a)) - b = '\n'.join(map(str, b)) - assert a == b - """) - monkeypatch.delenv('CI', raising=False) - - result = testdir.runpytest() - # without -vv, truncate the message showing a few diff lines only - result.stdout.fnmatch_lines([ - "*- 1", - "*- 3", - "*- 5", - "*- 7", - "*truncated (193 more lines)*use*-vv*", - ]) - - - result = testdir.runpytest('-vv') - result.stdout.fnmatch_lines([ - "*- 197", - ]) - - monkeypatch.setenv('CI', '1') - result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*- 197", - ]) - - def test_assertrepr_loaded_per_dir(testdir): testdir.makepyfile(test_base=['def test_base(): assert 1 == 2']) a = testdir.mkdir('a') @@ -674,12 +777,12 @@ def test_assertrepr_loaded_per_dir(testdir): b_conftest.write('def pytest_assertrepr_compare(): return ["summary b"]') result = testdir.runpytest() result.stdout.fnmatch_lines([ - '*def test_base():*', - '*E*assert 1 == 2*', - '*def test_a():*', - '*E*assert summary a*', - '*def test_b():*', - '*E*assert summary b*']) + '*def test_base():*', + '*E*assert 1 == 2*', + '*def test_a():*', + '*E*assert summary a*', + '*def test_b():*', + '*E*assert summary b*']) def test_assertion_options(testdir): @@ -693,6 +796,7 @@ def test_assertion_options(testdir): result = testdir.runpytest_subprocess("--assert=plain") assert "3 == 4" not in result.stdout.str() + def test_triple_quoted_string_issue113(testdir): testdir.makepyfile(""" def test_hello(): @@ -704,6 +808,7 @@ def test_triple_quoted_string_issue113(testdir): ]) assert 'SyntaxError' not in result.stdout.str() + def test_traceback_failure(testdir): p1 = testdir.makepyfile(""" def g(): @@ -724,7 +829,7 @@ def test_traceback_failure(testdir): "", "*test_*.py:6: ", "_ _ _ *", - #"", + # "", " def f(x):", "> assert x == g()", "E assert 3 == 2", @@ -733,7 +838,7 @@ def test_traceback_failure(testdir): "*test_traceback_failure.py:4: AssertionError" ]) - result = testdir.runpytest(p1) # "auto" + result = testdir.runpytest(p1) # "auto" result.stdout.fnmatch_lines([ "*test_traceback_failure.py F", "====* FAILURES *====", @@ -783,7 +888,7 @@ def test_exception_handling_no_traceback(testdir): ]) -@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')" ) +@pytest.mark.skipif("'__pypy__' in sys.builtin_module_names or sys.platform.startswith('java')") def test_warn_missing(testdir): testdir.makepyfile("") result = testdir.run(sys.executable, "-OO", "-m", "pytest", "-h") @@ -795,6 +900,7 @@ def test_warn_missing(testdir): "*WARNING*assert statements are not executed*", ]) + def test_recursion_source_decode(testdir): testdir.makepyfile(""" def test_something(): @@ -809,6 +915,7 @@ def test_recursion_source_decode(testdir): """) + def test_AssertionError_message(testdir): testdir.makepyfile(""" def test_hello(): @@ -822,6 +929,7 @@ def test_AssertionError_message(testdir): *AssertionError: (1, 2)* """) + @pytest.mark.skipif(PY3, reason='This bug does not exist on PY3') def test_set_with_unsortable_elements(): # issue #718 @@ 
-858,6 +966,7 @@ def test_set_with_unsortable_elements(): """).strip() assert '\n'.join(expl) == dedent + def test_diff_newline_at_end(monkeypatch, testdir): testdir.makepyfile(r""" def test_diff(): @@ -872,13 +981,18 @@ def test_diff_newline_at_end(monkeypatch, testdir): * ? + """) + def test_assert_tuple_warning(testdir): testdir.makepyfile(""" def test_tuple(): assert(False, 'you shall not pass') """) result = testdir.runpytest('-rw') - result.stdout.fnmatch_lines('WR1*:2 assertion is always true*') + result.stdout.fnmatch_lines([ + '*test_assert_tuple_warning.py:2', + '*assertion is always true*', + ]) + def test_assert_indirect_tuple_no_warning(testdir): testdir.makepyfile(""" @@ -890,6 +1004,7 @@ def test_assert_indirect_tuple_no_warning(testdir): output = '\n'.join(result.stdout.lines) assert 'WR1' not in output + def test_assert_with_unicode(monkeypatch, testdir): testdir.makepyfile(u""" # -*- coding: utf-8 -*- @@ -899,6 +1014,28 @@ def test_assert_with_unicode(monkeypatch, testdir): result = testdir.runpytest() result.stdout.fnmatch_lines(['*AssertionError*']) + +def test_raise_unprintable_assertion_error(testdir): + testdir.makepyfile(r""" + def test_raise_assertion_error(): + raise AssertionError('\xff') + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([r"> raise AssertionError('\xff')", 'E AssertionError: *']) + + +def test_raise_assertion_error_raising_repr(testdir): + testdir.makepyfile(u""" + class RaisingRepr(object): + def __repr__(self): + raise Exception() + def test_raising_repr(): + raise AssertionError(RaisingRepr()) + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines(['E AssertionError: ']) + + def test_issue_1944(testdir): testdir.makepyfile(""" def f(): @@ -909,4 +1046,3 @@ def test_issue_1944(testdir): result = testdir.runpytest() result.stdout.fnmatch_lines(["*1 error*"]) assert "AttributeError: 'Module' object has no attribute '_obj'" not in result.stdout.str() - diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index 8e26cdb1b..31e996052 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -1,28 +1,30 @@ +from __future__ import absolute_import, division, print_function + import glob import os import py_compile import stat import sys import zipfile - import py import pytest -ast = pytest.importorskip("ast") -if sys.platform.startswith("java"): - # XXX should be xfail - pytest.skip("assert rewrite does currently not work on jython") - import _pytest._code from _pytest.assertion import util from _pytest.assertion.rewrite import rewrite_asserts, PYTEST_TAG, AssertionRewritingHook from _pytest.main import EXIT_NOTESTSCOLLECTED +ast = pytest.importorskip("ast") +if sys.platform.startswith("java"): + # XXX should be xfail + pytest.skip("assert rewrite does currently not work on jython") + def setup_module(mod): mod._old_reprcompare = util._reprcompare _pytest._code._reprcompare = None + def teardown_module(mod): util._reprcompare = mod._old_reprcompare del mod._old_reprcompare @@ -33,6 +35,7 @@ def rewrite(src): rewrite_asserts(tree) return tree + def getmsg(f, extra_ns=None, must_pass=False): """Rewrite the assertions in f, run it, and get the failure message.""" src = '\n'.join(_pytest._code.Code(f).source().lines) @@ -57,18 +60,23 @@ def getmsg(f, extra_ns=None, must_pass=False): pytest.fail("function didn't raise at all") -class TestAssertionRewrite: +class TestAssertionRewrite(object): def test_place_initial_imports(self): s = """'Doc string'\nother = stuff""" m = rewrite(s) - 
assert isinstance(m.body[0], ast.Expr) - assert isinstance(m.body[0].value, ast.Str) - for imp in m.body[1:3]: + # Module docstrings in 3.7 are part of the Module node rather than the body, + # so we remove it here so that the following body items have the same + # indexes on all Python versions + if sys.version_info < (3, 7): + assert isinstance(m.body[0], ast.Expr) + assert isinstance(m.body[0].value, ast.Str) + del m.body[0] + for imp in m.body[0:2]: assert isinstance(imp, ast.Import) assert imp.lineno == 2 assert imp.col_offset == 0 - assert isinstance(m.body[3], ast.Assign) + assert isinstance(m.body[2], ast.Assign) s = """from __future__ import with_statement\nother_stuff""" m = rewrite(s) assert isinstance(m.body[0], ast.ImportFrom) @@ -77,16 +85,29 @@ class TestAssertionRewrite: assert imp.lineno == 2 assert imp.col_offset == 0 assert isinstance(m.body[3], ast.Expr) + s = """'doc string'\nfrom __future__ import with_statement""" + m = rewrite(s) + if sys.version_info < (3, 7): + assert isinstance(m.body[0], ast.Expr) + assert isinstance(m.body[0].value, ast.Str) + del m.body[0] + assert isinstance(m.body[0], ast.ImportFrom) + for imp in m.body[1:3]: + assert isinstance(imp, ast.Import) + assert imp.lineno == 2 + assert imp.col_offset == 0 s = """'doc string'\nfrom __future__ import with_statement\nother""" m = rewrite(s) - assert isinstance(m.body[0], ast.Expr) - assert isinstance(m.body[0].value, ast.Str) - assert isinstance(m.body[1], ast.ImportFrom) - for imp in m.body[2:4]: + if sys.version_info < (3, 7): + assert isinstance(m.body[0], ast.Expr) + assert isinstance(m.body[0].value, ast.Str) + del m.body[0] + assert isinstance(m.body[0], ast.ImportFrom) + for imp in m.body[1:3]: assert isinstance(imp, ast.Import) assert imp.lineno == 3 assert imp.col_offset == 0 - assert isinstance(m.body[4], ast.Expr) + assert isinstance(m.body[3], ast.Expr) s = """from . 
import relative\nother_stuff""" m = rewrite(s) for imp in m.body[0:2]: @@ -98,10 +119,14 @@ class TestAssertionRewrite: def test_dont_rewrite(self): s = """'PYTEST_DONT_REWRITE'\nassert 14""" m = rewrite(s) - assert len(m.body) == 2 - assert isinstance(m.body[0].value, ast.Str) - assert isinstance(m.body[1], ast.Assert) - assert m.body[1].msg is None + if sys.version_info < (3, 7): + assert len(m.body) == 2 + assert isinstance(m.body[0], ast.Expr) + assert isinstance(m.body[0].value, ast.Str) + del m.body[0] + else: + assert len(m.body) == 1 + assert m.body[0].msg is None def test_name(self): def f(): @@ -117,12 +142,12 @@ class TestAssertionRewrite: def f(): assert a_global # noqa - assert getmsg(f, {"a_global" : False}) == "assert False" + assert getmsg(f, {"a_global": False}) == "assert False" def f(): assert sys == 42 - assert getmsg(f, {"sys" : sys}) == "assert sys == 42" + assert getmsg(f, {"sys": sys}) == "assert sys == 42" def f(): assert cls == 42 # noqa @@ -130,7 +155,7 @@ class TestAssertionRewrite: class X(object): pass - assert getmsg(f, {"cls" : X}) == "assert cls == 42" + assert getmsg(f, {"cls": X}) == "assert cls == 42" def test_assert_already_has_message(self): def f(): @@ -237,13 +262,13 @@ class TestAssertionRewrite: def f(): assert x() and x() - assert getmsg(f, {"x" : x}) == """assert (False) + assert getmsg(f, {"x": x}) == """assert (False) + where False = x()""" def f(): assert False or x() - assert getmsg(f, {"x" : x}) == """assert (False or False) + assert getmsg(f, {"x": x}) == """assert (False or False) + where False = x()""" def f(): @@ -254,7 +279,7 @@ class TestAssertionRewrite: def f(): x = 1 y = 2 - assert x in {1 : None} and y in {} + assert x in {1: None} and y in {} assert getmsg(f) == "assert (1 in {1: None} and 2 in {})" @@ -271,7 +296,7 @@ class TestAssertionRewrite: getmsg(f, must_pass=True) - def test_short_circut_evaluation(self): + def test_short_circuit_evaluation(self): def f(): assert True or explode # noqa @@ -333,7 +358,7 @@ class TestAssertionRewrite: @pytest.mark.skipif("sys.version_info < (3,5)") def test_at_operator_issue1290(self, testdir): testdir.makepyfile(""" - class Matrix: + class Matrix(object): def __init__(self, num): self.num = num def __matmul__(self, other): @@ -347,7 +372,7 @@ class TestAssertionRewrite: def g(a=42, *args, **kwargs): return False - ns = {"g" : g} + ns = {"g": g} def f(): assert g() @@ -388,7 +413,7 @@ class TestAssertionRewrite: def f(): x = "a" - assert g(**{x : 2}) + assert g(**{x: 2}) assert getmsg(f, ns) == """assert False + where False = g(**{'a': 2})""" @@ -397,10 +422,10 @@ class TestAssertionRewrite: class X(object): g = 3 - ns = {"x" : X} + ns = {"x": X} def f(): - assert not x.g # noqa + assert not x.g # noqa assert getmsg(f, ns) == """assert not 3 + where 3 = x.g""" @@ -448,8 +473,8 @@ class TestAssertionRewrite: def test_len(self): def f(): - l = list(range(10)) - assert len(l) == 11 + values = list(range(10)) + assert len(values) == 11 assert getmsg(f).startswith("""assert 10 == 11 + where 10 = len([""") @@ -515,7 +540,7 @@ class TestAssertionRewrite: assert r"where 1 = \n{ \n~ \n}.a" in util._format_lines([getmsg(f)])[0] -class TestRewriteOnImport: +class TestRewriteOnImport(object): def test_pycache_is_a_file(self, testdir): testdir.tmpdir.join("__pycache__").write("Hello") @@ -555,7 +580,7 @@ class TestRewriteOnImport: def test_readonly(self, testdir): sub = testdir.mkdir("testing") sub.join("test_readonly.py").write( - py.builtin._totext(""" + py.builtin._totext(""" def test_rewritten(): 
assert "@py_builtins" in globals() """).encode("utf-8"), "wb") @@ -608,7 +633,7 @@ def test_rewritten(): def test_optimized(): "hello" assert test_optimized.__doc__ is None""" - ) + ) p = py.path.local.make_numbered_dir(prefix="runpytest-", keep=None, rootdir=testdir.tmpdir) tmp = "--basetemp=%s" % p @@ -637,8 +662,8 @@ def test_rewritten(): testdir.tmpdir.join("test_newlines.py").write(b, "wb") assert testdir.runpytest().ret == 0 - @pytest.mark.skipif(sys.version_info < (3,3), - reason='packages without __init__.py not supported on python 2') + @pytest.mark.skipif(sys.version_info < (3, 3), + reason='packages without __init__.py not supported on python 2') def test_package_without__init__py(self, testdir): pkg = testdir.mkdir('a_package_without_init_py') pkg.join('module.py').ensure() @@ -682,7 +707,7 @@ def test_rewritten(): hook.mark_rewrite('test_remember_rewritten_modules') assert warnings == [] - def test_rewrite_warning_using_pytest_plugins(self, testdir, monkeypatch): + def test_rewrite_warning_using_pytest_plugins(self, testdir): testdir.makepyfile(**{ 'conftest.py': "pytest_plugins = ['core', 'gui', 'sci']", 'core.py': "", @@ -695,6 +720,40 @@ def test_rewritten(): result.stdout.fnmatch_lines(['*= 1 passed in *=*']) assert 'pytest-warning summary' not in result.stdout.str() + def test_rewrite_warning_using_pytest_plugins_env_var(self, testdir, monkeypatch): + monkeypatch.setenv('PYTEST_PLUGINS', 'plugin') + testdir.makepyfile(**{ + 'plugin.py': "", + 'test_rewrite_warning_using_pytest_plugins_env_var.py': """ + import plugin + pytest_plugins = ['plugin'] + def test(): + pass + """, + }) + testdir.chdir() + result = testdir.runpytest_subprocess() + result.stdout.fnmatch_lines(['*= 1 passed in *=*']) + assert 'pytest-warning summary' not in result.stdout.str() + + @pytest.mark.skipif(sys.version_info[0] > 2, reason='python 2 only') + def test_rewrite_future_imports(self, testdir): + """Test that rewritten modules don't inherit the __future__ flags + from the assertrewrite module. + + assertion.rewrite imports __future__.division (and others), so + ensure rewritten modules don't inherit those flags. 
+ + The test below will fail if __future__.division is enabled + """ + testdir.makepyfile(''' + def test(): + x = 1 / 2 + assert type(x) is int + ''') + result = testdir.runpytest() + assert result.ret == 0 + class TestAssertionRewriteHookDetails(object): def test_loader_is_package_false_for_module(self, testdir): @@ -868,7 +927,7 @@ class TestAssertionRewriteHookDetails(object): """ path = testdir.mkpydir("foo") path.join("test_foo.py").write(_pytest._code.Source(""" - class Test: + class Test(object): def test_foo(self): import pkgutil data = pkgutil.get_data('foo.test_foo', 'data.txt') @@ -896,7 +955,7 @@ def test_issue731(testdir): assert 'unbalanced braces' not in result.stdout.str() -class TestIssue925(): +class TestIssue925(object): def test_simple_case(self, testdir): testdir.makepyfile(""" def test_ternary_display(): @@ -921,3 +980,17 @@ class TestIssue925(): result = testdir.runpytest() result.stdout.fnmatch_lines('*E*assert True == ((False == True) == True)') + +class TestIssue2121(): + def test_simple(self, testdir): + testdir.tmpdir.join("tests/file.py").ensure().write(""" +def test_simple_failure(): + assert 1 + 1 == 3 +""") + testdir.tmpdir.join("pytest.ini").write(py.std.textwrap.dedent(""" + [pytest] + python_files = tests/**.py + """)) + + result = testdir.runpytest() + result.stdout.fnmatch_lines('*E*assert (1 + 1) == 3') diff --git a/testing/test_cache.py b/testing/test_cache.py index 98053f869..a37170cdd 100755 --- a/testing/test_cache.py +++ b/testing/test_cache.py @@ -1,5 +1,6 @@ +from __future__ import absolute_import, division, print_function import sys - +import py import _pytest import pytest import os @@ -7,7 +8,8 @@ import shutil pytest_plugins = "pytester", -class TestNewAPI: + +class TestNewAPI(object): def test_config_cache_makedir(self, testdir): testdir.makeini("[pytest]") config = testdir.parseconfigure() @@ -54,7 +56,7 @@ class TestNewAPI: assert result.ret == 1 result.stdout.fnmatch_lines([ "*could not create cache path*", - "*1 pytest-warnings*", + "*1 warnings*", ]) def test_config_cache(self, testdir): @@ -86,6 +88,36 @@ class TestNewAPI: assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) + def test_custom_rel_cache_dir(self, testdir): + rel_cache_dir = os.path.join('custom_cache_dir', 'subdir') + testdir.makeini(""" + [pytest] + cache_dir = {cache_dir} + """.format(cache_dir=rel_cache_dir)) + testdir.makepyfile(test_errored='def test_error():\n assert False') + testdir.runpytest() + assert testdir.tmpdir.join(rel_cache_dir).isdir() + + def test_custom_abs_cache_dir(self, testdir, tmpdir_factory): + tmp = str(tmpdir_factory.mktemp('tmp')) + abs_cache_dir = os.path.join(tmp, 'custom_cache_dir') + testdir.makeini(""" + [pytest] + cache_dir = {cache_dir} + """.format(cache_dir=abs_cache_dir)) + testdir.makepyfile(test_errored='def test_error():\n assert False') + testdir.runpytest() + assert py.path.local(abs_cache_dir).isdir() + + def test_custom_cache_dir_with_env_var(self, testdir, monkeypatch): + monkeypatch.setenv('env_var', 'custom_cache_dir') + testdir.makeini(""" + [pytest] + cache_dir = {cache_dir} + """.format(cache_dir='$env_var')) + testdir.makepyfile(test_errored='def test_error():\n assert False') + testdir.runpytest() + assert testdir.tmpdir.join('custom_cache_dir').isdir() def test_cache_reportheader(testdir): @@ -129,7 +161,7 @@ def test_cache_show(testdir): ]) -class TestLastFailed: +class TestLastFailed(object): def test_lastfailed_usecase(self, testdir, monkeypatch): monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) @@ 
-191,13 +223,37 @@ class TestLastFailed: "test_a.py*", "test_b.py*", ]) - result = testdir.runpytest("--lf", "--ff") + result = testdir.runpytest("--ff") # Test order will be failing tests first result.stdout.fnmatch_lines([ "test_b.py*", "test_a.py*", ]) + def test_lastfailed_failedfirst_order(self, testdir): + testdir.makepyfile(**{ + 'test_a.py': """ + def test_always_passes(): + assert 1 + """, + 'test_b.py': """ + def test_always_fails(): + assert 0 + """, + }) + result = testdir.runpytest() + # Test order will be collection order; alphabetical + result.stdout.fnmatch_lines([ + "test_a.py*", + "test_b.py*", + ]) + result = testdir.runpytest("--lf", "--ff") + # Test order will be failing tests first + result.stdout.fnmatch_lines([ + "test_b.py*", + ]) + assert 'test_a.py' not in result.stdout.str() + def test_lastfailed_difference_invocations(self, testdir, monkeypatch): monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1) testdir.makepyfile(test_a=""" @@ -284,6 +340,73 @@ class TestLastFailed: result = testdir.runpytest() result.stdout.fnmatch_lines('*1 failed in*') + def test_terminal_report_lastfailed(self, testdir): + test_a = testdir.makepyfile(test_a=""" + def test_a1(): + pass + def test_a2(): + pass + """) + test_b = testdir.makepyfile(test_b=""" + def test_b1(): + assert 0 + def test_b2(): + assert 0 + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + 'collected 4 items', + '*2 failed, 2 passed in*', + ]) + + result = testdir.runpytest('--lf') + result.stdout.fnmatch_lines([ + 'collected 4 items', + 'run-last-failure: rerun previous 2 failures', + '*2 failed, 2 deselected in*', + ]) + + result = testdir.runpytest(test_a, '--lf') + result.stdout.fnmatch_lines([ + 'collected 2 items', + 'run-last-failure: run all (no recorded failures)', + '*2 passed in*', + ]) + + result = testdir.runpytest(test_b, '--lf') + result.stdout.fnmatch_lines([ + 'collected 2 items', + 'run-last-failure: rerun previous 2 failures', + '*2 failed in*', + ]) + + result = testdir.runpytest('test_b.py::test_b1', '--lf') + result.stdout.fnmatch_lines([ + 'collected 1 item', + 'run-last-failure: rerun previous 1 failure', + '*1 failed in*', + ]) + + def test_terminal_report_failedfirst(self, testdir): + testdir.makepyfile(test_a=""" + def test_a1(): + assert 0 + def test_a2(): + pass + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + 'collected 2 items', + '*1 failed, 1 passed in*', + ]) + + result = testdir.runpytest('--ff') + result.stdout.fnmatch_lines([ + 'collected 2 items', + 'run-last-failure: rerun previous 1 failure first', + '*1 failed, 1 passed in*', + ]) + def test_lastfailed_collectfailure(self, testdir, monkeypatch): testdir.makepyfile(test_maybe=""" @@ -313,7 +436,6 @@ class TestLastFailed: lastfailed = rlf(fail_import=0, fail_run=1) assert list(lastfailed) == ['test_maybe.py::test_hello'] - def test_lastfailed_failure_subset(self, testdir, monkeypatch): testdir.makepyfile(test_maybe=""" @@ -355,12 +477,10 @@ class TestLastFailed: result, lastfailed = rlf(fail_import=1, fail_run=0) assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py'] - result, lastfailed = rlf(fail_import=0, fail_run=0, args=('test_maybe2.py',)) assert list(lastfailed) == ['test_maybe.py'] - # edge case of test selection - even if we remember failures # from other tests we still need to run all tests if no test # matches the failures @@ -384,3 +504,102 @@ class TestLastFailed: testdir.makepyfile(test_errored='def test_error():\n assert False') testdir.runpytest('-q', '--lf') 
assert os.path.exists('.cache') + + def test_xfail_not_considered_failure(self, testdir): + testdir.makepyfile(''' + import pytest + @pytest.mark.xfail + def test(): + assert 0 + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines('*1 xfailed*') + assert self.get_cached_last_failed(testdir) == [] + + def test_xfail_strict_considered_failure(self, testdir): + testdir.makepyfile(''' + import pytest + @pytest.mark.xfail(strict=True) + def test(): + pass + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines('*1 failed*') + assert self.get_cached_last_failed(testdir) == ['test_xfail_strict_considered_failure.py::test'] + + @pytest.mark.parametrize('mark', ['mark.xfail', 'mark.skip']) + def test_failed_changed_to_xfail_or_skip(self, testdir, mark): + testdir.makepyfile(''' + import pytest + def test(): + assert 0 + ''') + result = testdir.runpytest() + assert self.get_cached_last_failed(testdir) == ['test_failed_changed_to_xfail_or_skip.py::test'] + assert result.ret == 1 + + testdir.makepyfile(''' + import pytest + @pytest.{mark} + def test(): + assert 0 + '''.format(mark=mark)) + result = testdir.runpytest() + assert result.ret == 0 + assert self.get_cached_last_failed(testdir) == [] + + def get_cached_last_failed(self, testdir): + config = testdir.parseconfigure() + return sorted(config.cache.get("cache/lastfailed", {})) + + def test_cache_cumulative(self, testdir): + """ + Test workflow where the user fixes errors gradually file by file using --lf. + """ + # 1. initial run + test_bar = testdir.makepyfile(test_bar=""" + def test_bar_1(): + pass + def test_bar_2(): + assert 0 + """) + test_foo = testdir.makepyfile(test_foo=""" + def test_foo_3(): + pass + def test_foo_4(): + assert 0 + """) + testdir.runpytest() + assert self.get_cached_last_failed(testdir) == ['test_bar.py::test_bar_2', 'test_foo.py::test_foo_4'] + + # 2. fix test_bar_2, run only test_bar.py + testdir.makepyfile(test_bar=""" + def test_bar_1(): + pass + def test_bar_2(): + pass + """) + result = testdir.runpytest(test_bar) + result.stdout.fnmatch_lines('*2 passed*') + # ensure cache does not forget that test_foo_4 failed once before + assert self.get_cached_last_failed(testdir) == ['test_foo.py::test_foo_4'] + + result = testdir.runpytest('--last-failed') + result.stdout.fnmatch_lines('*1 failed, 3 deselected*') + assert self.get_cached_last_failed(testdir) == ['test_foo.py::test_foo_4'] + + # 3. 
fix test_foo_4, run only test_foo.py + test_foo = testdir.makepyfile(test_foo=""" + def test_foo_3(): + pass + def test_foo_4(): + pass + """) + result = testdir.runpytest(test_foo, '--last-failed') + result.stdout.fnmatch_lines('*1 passed, 1 deselected*') + assert self.get_cached_last_failed(testdir) == [] + + result = testdir.runpytest('--last-failed') + result.stdout.fnmatch_lines('*4 passed*') + assert self.get_cached_last_failed(testdir) == [] diff --git a/testing/test_capture.py b/testing/test_capture.py index cbb5fc81b..eb10f3c07 100644 --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -1,9 +1,11 @@ +from __future__ import absolute_import, division, print_function # note: py.io capture tests were copied from # pylib 1.4.20.dev2 (rev 13d9af95547e) from __future__ import with_statement import pickle import os import sys +from io import UnsupportedOperation import _pytest._code import py @@ -13,7 +15,7 @@ import contextlib from _pytest import capture from _pytest.capture import CaptureManager from _pytest.main import EXIT_NOTESTSCOLLECTED -from py.builtin import print_ + needsosdup = pytest.mark.xfail("not hasattr(os, 'dup')") @@ -47,15 +49,15 @@ def oswritebytes(fd, obj): os.write(fd, tobytes(obj)) - def StdCaptureFD(out=True, err=True, in_=True): return capture.MultiCapture(out, err, in_, Capture=capture.FDCapture) + def StdCapture(out=True, err=True, in_=True): return capture.MultiCapture(out, err, in_, Capture=capture.SysCapture) -class TestCaptureManager: +class TestCaptureManager(object): def test_getmethod_default_no_fd(self, monkeypatch): from _pytest.capture import pytest_addoption from _pytest.config import Parser @@ -70,7 +72,7 @@ class TestCaptureManager: @needsosdup @pytest.mark.parametrize("method", - ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')]) + ['no', 'sys', pytest.mark.skipif('not hasattr(os, "dup")', 'fd')]) def test_capturing_basic_api(self, method): capouter = StdCaptureFD() old = sys.stdout, sys.stderr, sys.stdin @@ -81,14 +83,14 @@ class TestCaptureManager: assert outerr == ("", "") outerr = capman.suspendcapture() assert outerr == ("", "") - print ("hello") + print("hello") out, err = capman.suspendcapture() if method == "no": assert old == (sys.stdout, sys.stderr, sys.stdin) else: assert not out capman.resumecapture() - print ("hello") + print("hello") out, err = capman.suspendcapture() if method != "no": assert out == "hello\n" @@ -110,7 +112,7 @@ class TestCaptureManager: @pytest.mark.parametrize("method", ['fd', 'sys']) def test_capturing_unicode(testdir, method): - if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2,2): + if hasattr(sys, "pypy_version_info") and sys.pypy_version_info < (2, 2): pytest.xfail("does not work on pypy < 2.2") if sys.version_info >= (3, 0): obj = "'b\u00f6y'" @@ -154,7 +156,7 @@ def test_collect_capturing(testdir): ]) -class TestPerTestCapturing: +class TestPerTestCapturing(object): def test_capture_and_fixtures(self, testdir): p = testdir.makepyfile(""" def setup_module(mod): @@ -232,7 +234,7 @@ class TestPerTestCapturing: "setup func1*", "in func1*", "teardown func1*", - #"*1 fixture failure*" + # "*1 fixture failure*" ]) def test_teardown_capturing_final(self, testdir): @@ -275,18 +277,18 @@ class TestPerTestCapturing: ]) -class TestLoggingInteraction: +class TestLoggingInteraction(object): def test_logging_stream_ownership(self, testdir): p = testdir.makepyfile(""" def test_logging(): import logging import pytest - stream = capture.TextIO() + stream = capture.CaptureIO() 
logging.basicConfig(stream=stream) stream.close() # to free memory/release resources """) result = testdir.runpytest_subprocess(p) - result.stderr.str().find("atexit") == -1 + assert result.stderr.str().find("atexit") == -1 def test_logging_and_immediate_setupteardown(self, testdir): p = testdir.makepyfile(""" @@ -303,7 +305,7 @@ class TestLoggingInteraction: assert 0 """) for optargs in (('--capture=sys',), ('--capture=fd',)): - print (optargs) + print(optargs) result = testdir.runpytest_subprocess(p, *optargs) s = result.stdout.str() result.stdout.fnmatch_lines([ @@ -329,7 +331,7 @@ class TestLoggingInteraction: assert 0 """) for optargs in (('--capture=sys',), ('--capture=fd',)): - print (optargs) + print(optargs) result = testdir.runpytest_subprocess(p, *optargs) s = result.stdout.str() result.stdout.fnmatch_lines([ @@ -353,7 +355,7 @@ class TestLoggingInteraction: """) result = testdir.runpytest_subprocess( p, "--traceconfig", - "-p", "no:capturelog") + "-p", "no:capturelog", "-p", "no:hypothesis", "-p", "no:hypothesispytest") assert result.ret != 0 result.stdout.fnmatch_lines([ "*hello432*", @@ -395,7 +397,7 @@ class TestLoggingInteraction: assert 'operation on closed file' not in result.stderr.str() -class TestCaptureFixture: +class TestCaptureFixture(object): @pytest.mark.parametrize("opt", [[], ["-s"]]) def test_std_functional(self, testdir, opt): reprec = testdir.inline_runsource(""" @@ -604,7 +606,7 @@ def test_capture_binary_output(testdir): def test_error_during_readouterr(testdir): - """Make sure we suspend capturing if errors occurr during readouterr""" + """Make sure we suspend capturing if errors occur during readouterr""" testdir.makepyfile(pytest_xyz=""" from _pytest.capture import FDCapture def bad_snap(self): @@ -622,16 +624,16 @@ def test_error_during_readouterr(testdir): ]) -class TestTextIO: +class TestCaptureIO(object): def test_text(self): - f = capture.TextIO() + f = capture.CaptureIO() f.write("hello") s = f.getvalue() assert s == "hello" f.close() def test_unicode_and_str_mixture(self): - f = capture.TextIO() + f = capture.CaptureIO() if sys.version_info >= (3, 0): f.write("\u00f6") pytest.raises(TypeError, "f.write(bytes('hello', 'UTF-8'))") @@ -642,6 +644,18 @@ class TestTextIO: f.close() assert isinstance(s, unicode) + @pytest.mark.skipif( + sys.version_info[0] == 2, + reason='python 3 only behaviour', + ) + def test_write_bytes_to_buffer(self): + """In python3, stdout / stderr are text io wrappers (exposing a buffer + property of the underlying bytestream). 
See issue #1407 + """ + f = capture.CaptureIO() + f.buffer.write(b'foo\r\n') + assert f.getvalue() == 'foo\r\n' + def test_bytes_io(): f = py.io.BytesIO() @@ -658,7 +672,7 @@ def test_dontreadfrominput(): pytest.raises(IOError, f.read) pytest.raises(IOError, f.readlines) pytest.raises(IOError, iter, f) - pytest.raises(ValueError, f.fileno) + pytest.raises(UnsupportedOperation, f.fileno) f.close() # just for completeness @@ -691,6 +705,7 @@ def tmpfile(testdir): if not f.closed: f.close() + @needsosdup def test_dupfile(tmpfile): flist = [] @@ -699,27 +714,39 @@ def test_dupfile(tmpfile): assert nf != tmpfile assert nf.fileno() != tmpfile.fileno() assert nf not in flist - print_(i, end="", file=nf) + print(i, end="", file=nf) flist.append(nf) + + fname_open = flist[0].name + assert fname_open == repr(flist[0].buffer) + for i in range(5): f = flist[i] f.close() + fname_closed = flist[0].name + assert fname_closed == repr(flist[0].buffer) + assert fname_closed != fname_open tmpfile.seek(0) s = tmpfile.read() assert "01234" in repr(s) tmpfile.close() + assert fname_closed == repr(flist[0].buffer) + def test_dupfile_on_bytesio(): io = py.io.BytesIO() f = capture.safe_text_dupfile(io, "wb") f.write("hello") assert io.getvalue() == b"hello" + assert 'BytesIO object' in f.name + def test_dupfile_on_textio(): io = py.io.TextIO() f = capture.safe_text_dupfile(io, "wb") f.write("hello") assert io.getvalue() == "hello" + assert not hasattr(f, 'name') @contextlib.contextmanager @@ -737,7 +764,7 @@ def lsof_check(): assert len2 < len1 + 3, out2 -class TestFDCapture: +class TestFDCapture(object): pytestmark = needsosdup def test_simple(self, tmpfile): @@ -773,7 +800,7 @@ class TestFDCapture: def test_stderr(self): cap = capture.FDCapture(2) cap.start() - print_("hello", file=sys.stderr) + print("hello", file=sys.stderr) s = cap.snap() cap.done() assert s == "hello\n" @@ -832,7 +859,7 @@ def saved_fd(fd): os.close(new_fd) -class TestStdCapture: +class TestStdCapture(object): captureclass = staticmethod(StdCapture) @contextlib.contextmanager @@ -862,7 +889,7 @@ class TestStdCapture: def test_capturing_readouterr(self): with self.getcapture() as cap: - print ("hello world") + print("hello world") sys.stderr.write("hello error\n") out, err = cap.readouterr() assert out == "hello world\n" @@ -873,7 +900,7 @@ class TestStdCapture: def test_capturing_readouterr_unicode(self): with self.getcapture() as cap: - print ("hx\xc4\x85\xc4\x87") + print("hx\xc4\x85\xc4\x87") out, err = cap.readouterr() assert out == py.builtin._totext("hx\xc4\x85\xc4\x87\n", "utf8") @@ -888,7 +915,7 @@ class TestStdCapture: def test_reset_twice_error(self): with self.getcapture() as cap: - print ("hello") + print("hello") out, err = cap.readouterr() pytest.raises(ValueError, cap.stop_capturing) assert out == "hello\n" @@ -900,9 +927,9 @@ class TestStdCapture: with self.getcapture() as cap: sys.stdout.write("hello") sys.stderr.write("world") - sys.stdout = capture.TextIO() - sys.stderr = capture.TextIO() - print ("not seen") + sys.stdout = capture.CaptureIO() + sys.stderr = capture.CaptureIO() + print("not seen") sys.stderr.write("not seen\n") out, err = cap.readouterr() assert out == "hello" @@ -912,9 +939,9 @@ class TestStdCapture: def test_capturing_error_recursive(self): with self.getcapture() as cap1: - print ("cap1") + print("cap1") with self.getcapture() as cap2: - print ("cap2") + print("cap2") out2, err2 = cap2.readouterr() out1, err1 = cap1.readouterr() assert out1 == "cap1\n" @@ -944,9 +971,9 @@ class TestStdCapture: assert 
sys.stdin is old def test_stdin_nulled_by_default(self): - print ("XXX this test may well hang instead of crashing") - print ("XXX which indicates an error in the underlying capturing") - print ("XXX mechanisms") + print("XXX this test may well hang instead of crashing") + print("XXX which indicates an error in the underlying capturing") + print("XXX mechanisms") with self.getcapture(): pytest.raises(IOError, "sys.stdin.read()") @@ -990,7 +1017,7 @@ class TestStdCaptureFD(TestStdCapture): cap.stop_capturing() -class TestStdCaptureFDinvalidFD: +class TestStdCaptureFDinvalidFD(object): pytestmark = needsosdup def test_stdcapture_fd_invalid_fd(self, testdir): @@ -1023,6 +1050,15 @@ def test_capture_not_started_but_reset(): capsys.stop_capturing() +def test_using_capsys_fixture_works_with_sys_stdout_encoding(capsys): + test_text = 'test text' + + print(test_text.encode(sys.stdout.encoding, 'replace')) + (out, err) = capsys.readouterr() + assert out + assert err == '' + + @needsosdup @pytest.mark.parametrize('use', [True, False]) def test_fdcapture_tmpfile_remains_the_same(tmpfile, use): @@ -1038,6 +1074,7 @@ def test_fdcapture_tmpfile_remains_the_same(tmpfile, use): capfile2 = cap.err.tmpfile assert capfile2 == capfile + @needsosdup def test_close_and_capture_again(testdir): testdir.makepyfile(""" @@ -1057,7 +1094,6 @@ def test_close_and_capture_again(testdir): """) - @pytest.mark.parametrize('method', ['SysCapture', 'FDCapture']) def test_capturing_and_logging_fundamentals(testdir, method): if method == "StdCaptureFD" and not hasattr(os, 'dup'): @@ -1104,6 +1140,23 @@ def test_error_attribute_issue555(testdir): reprec.assertoutcome(passed=1) +@pytest.mark.skipif(not sys.platform.startswith('win') and sys.version_info[:2] >= (3, 6), + reason='only py3.6+ on windows') +def test_py36_windowsconsoleio_workaround_non_standard_streams(): + """ + Ensure _py36_windowsconsoleio_workaround function works with objects that + do not implement the full ``io``-based stream protocol, for example execnet channels (#2666). 
+ """ + from _pytest.capture import _py36_windowsconsoleio_workaround + + class DummyStream: + def write(self, s): + pass + + stream = DummyStream() + _py36_windowsconsoleio_workaround(stream) + + def test_dontreadfrominput_has_encoding(testdir): testdir.makepyfile(""" import sys diff --git a/testing/test_collection.py b/testing/test_collection.py index 9cf4de895..cf13d4b00 100644 --- a/testing/test_collection.py +++ b/testing/test_collection.py @@ -1,8 +1,12 @@ -import pytest, py +from __future__ import absolute_import, division, print_function +import pytest +import py -from _pytest.main import Session, EXIT_NOTESTSCOLLECTED +import _pytest._code +from _pytest.main import Session, EXIT_NOTESTSCOLLECTED, _in_venv -class TestCollector: + +class TestCollector(object): def test_collect_versus_item(self): from pytest import Collector, Item assert not issubclass(Collector, Item) @@ -41,16 +45,16 @@ class TestCollector: assert not (fn1 == fn3) assert fn1 != fn3 - for fn in fn1,fn2,fn3: + for fn in fn1, fn2, fn3: assert fn != 3 assert fn != modcol - assert fn != [1,2,3] - assert [1,2,3] != fn + assert fn != [1, 2, 3] + assert [1, 2, 3] != fn assert modcol != fn def test_getparent(self, testdir): modcol = testdir.getmodulecol(""" - class TestClass: + class TestClass(object): def test_foo(): pass """) @@ -67,7 +71,6 @@ class TestCollector: parent = fn.getparent(pytest.Class) assert parent is cls - def test_getcustomfile_roundtrip(self, testdir): hello = testdir.makefile(".xxx", hello="world") testdir.makepyfile(conftest=""" @@ -85,7 +88,24 @@ class TestCollector: assert len(nodes) == 1 assert isinstance(nodes[0], pytest.File) -class TestCollectFS: + def test_can_skip_class_with_test_attr(self, testdir): + """Assure test class is skipped when using `__test__=False` (See #2007).""" + testdir.makepyfile(""" + class TestFoo(object): + __test__ = False + def __init__(self): + pass + def test_foo(): + assert True + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + 'collected 0 items', + '*no tests ran in*', + ]) + + +class TestCollectFS(object): def test_ignored_certain_directories(self, testdir): tmpdir = testdir.tmpdir tmpdir.ensure("build", 'test_notfound.py') @@ -104,6 +124,53 @@ class TestCollectFS: assert "test_notfound" not in s assert "test_found" in s + @pytest.mark.parametrize('fname', + ("activate", "activate.csh", "activate.fish", + "Activate", "Activate.bat", "Activate.ps1")) + def test_ignored_virtualenvs(self, testdir, fname): + bindir = "Scripts" if py.std.sys.platform.startswith("win") else "bin" + testdir.tmpdir.ensure("virtual", bindir, fname) + testfile = testdir.tmpdir.ensure("virtual", "test_invenv.py") + testfile.write("def test_hello(): pass") + + # by default, ignore tests inside a virtualenv + result = testdir.runpytest() + assert "test_invenv" not in result.stdout.str() + # allow test collection if user insists + result = testdir.runpytest("--collect-in-virtualenv") + assert "test_invenv" in result.stdout.str() + # allow test collection if user directly passes in the directory + result = testdir.runpytest("virtual") + assert "test_invenv" in result.stdout.str() + + @pytest.mark.parametrize('fname', + ("activate", "activate.csh", "activate.fish", + "Activate", "Activate.bat", "Activate.ps1")) + def test_ignored_virtualenvs_norecursedirs_precedence(self, testdir, fname): + bindir = "Scripts" if py.std.sys.platform.startswith("win") else "bin" + # norecursedirs takes priority + testdir.tmpdir.ensure(".virtual", bindir, fname) + testfile = 
testdir.tmpdir.ensure(".virtual", "test_invenv.py") + testfile.write("def test_hello(): pass") + result = testdir.runpytest("--collect-in-virtualenv") + assert "test_invenv" not in result.stdout.str() + # ...unless the virtualenv is explicitly given on the CLI + result = testdir.runpytest("--collect-in-virtualenv", ".virtual") + assert "test_invenv" in result.stdout.str() + + @pytest.mark.parametrize('fname', + ("activate", "activate.csh", "activate.fish", + "Activate", "Activate.bat", "Activate.ps1")) + def test__in_venv(self, testdir, fname): + """Directly test the virtual env detection function""" + bindir = "Scripts" if py.std.sys.platform.startswith("win") else "bin" + # no bin/activate, not a virtualenv + base_path = testdir.tmpdir.mkdir('venv') + assert _in_venv(base_path) is False + # with bin/activate, totally a virtualenv + base_path.ensure(bindir, fname) + assert _in_venv(base_path) is True + def test_custom_norecursedirs(self, testdir): testdir.makeini(""" [pytest] @@ -147,11 +214,11 @@ class TestCollectFS: assert [x.name for x in items] == ['test_%s' % dirname] -class TestCollectPluginHookRelay: +class TestCollectPluginHookRelay(object): def test_pytest_collect_file(self, testdir): wascalled = [] - class Plugin: + class Plugin(object): def pytest_collect_file(self, path, parent): if not path.basename.startswith("."): # Ignore hidden files, e.g. .testmondata. @@ -165,7 +232,7 @@ class TestCollectPluginHookRelay: def test_pytest_collect_directory(self, testdir): wascalled = [] - class Plugin: + class Plugin(object): def pytest_collect_directory(self, path, parent): wascalled.append(path.basename) @@ -176,7 +243,7 @@ class TestCollectPluginHookRelay: assert "world" in wascalled -class TestPrunetraceback: +class TestPrunetraceback(object): def test_custom_repr_failure(self, testdir): p = testdir.makepyfile(""" @@ -222,7 +289,7 @@ class TestPrunetraceback: ]) -class TestCustomConftests: +class TestCustomConftests(object): def test_ignore_collect_path(self, testdir): testdir.makeconftest(""" def pytest_ignore_collect(path, config): @@ -317,7 +384,8 @@ class TestCustomConftests: "*test_x*" ]) -class TestSession: + +class TestSession(object): def test_parsearg(self, testdir): p = testdir.makepyfile("def test_func(): pass") subdir = testdir.mkdir("sub") @@ -330,11 +398,11 @@ class TestSession: assert rcol.fspath == subdir parts = rcol._parsearg(p.basename) - assert parts[0] == target + assert parts[0] == target assert len(parts) == 1 parts = rcol._parsearg(p.basename + "::test_func") - assert parts[0] == target - assert parts[1] == "test_func" + assert parts[0] == target + assert parts[1] == "test_func" assert len(parts) == 2 def test_collect_topdir(self, testdir): @@ -345,13 +413,18 @@ class TestSession: topdir = testdir.tmpdir rcol = Session(config) assert topdir == rcol.fspath - #rootid = rcol.nodeid - #root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0] - #assert root2 == rcol, rootid + # rootid = rcol.nodeid + # root2 = rcol.perform_collect([rcol.nodeid], genitems=False)[0] + # assert root2 == rcol, rootid colitems = rcol.perform_collect([rcol.nodeid], genitems=False) assert len(colitems) == 1 assert colitems[0].fspath == p + def get_reported_items(self, hookrec): + """Return pytest.Item instances reported by the pytest_collectreport hook""" + calls = hookrec.getcalls('pytest_collectreport') + return [x for call in calls for x in call.report.result + if isinstance(x, pytest.Item)] def test_collect_protocol_single_function(self, testdir): p = testdir.makepyfile("def 
test_func(): pass") @@ -369,13 +442,14 @@ class TestSession: ("pytest_collectstart", "collector.fspath == p"), ("pytest_make_collect_report", "collector.fspath == p"), ("pytest_pycollect_makeitem", "name == 'test_func'"), - ("pytest_collectreport", "report.nodeid.startswith(p.basename)"), - ("pytest_collectreport", "report.nodeid == ''") + ("pytest_collectreport", "report.result[0].name == 'test_func'"), ]) + # ensure we are reporting the collection of the single test item (#2464) + assert [x.name for x in self.get_reported_items(hookrec)] == ['test_func'] def test_collect_protocol_method(self, testdir): p = testdir.makepyfile(""" - class TestClass: + class TestClass(object): def test_method(self): pass """) @@ -390,6 +464,8 @@ class TestSession: assert items[0].name == "test_method" newid = items[0].nodeid assert newid == normid + # ensure we are reporting the collection of the single test item (#2464) + assert [x.name for x in self.get_reported_items(hookrec)] == ['test_method'] def test_collect_custom_nodes_multi_id(self, testdir): p = testdir.makepyfile("def test_func(): pass") @@ -419,9 +495,8 @@ class TestSession: "collector.__class__.__name__ == 'Module'"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", "report.nodeid.startswith(p.basename)"), - #("pytest_collectreport", - # "report.fspath == %r" % str(rcol.fspath)), ]) + assert len(self.get_reported_items(hookrec)) == 2 def test_collect_subdir_event_ordering(self, testdir): p = testdir.makepyfile("def test_func(): pass") @@ -436,7 +511,7 @@ class TestSession: ("pytest_collectstart", "collector.fspath == test_aaa"), ("pytest_pycollect_makeitem", "name == 'test_func'"), ("pytest_collectreport", - "report.nodeid.startswith('aaa/test_aaa.py')"), + "report.nodeid.startswith('aaa/test_aaa.py')"), ]) def test_collect_two_commandline_args(self, testdir): @@ -474,17 +549,20 @@ class TestSession: def test_find_byid_without_instance_parents(self, testdir): p = testdir.makepyfile(""" - class TestClass: + class TestClass(object): def test_method(self): pass """) - arg = p.basename + ("::TestClass::test_method") + arg = p.basename + "::TestClass::test_method" items, hookrec = testdir.inline_genitems(arg) assert len(items) == 1 item, = items assert item.nodeid.endswith("TestClass::()::test_method") + # ensure we are reporting the collection of the single test item (#2464) + assert [x.name for x in self.get_reported_items(hookrec)] == ['test_method'] -class Test_getinitialnodes: + +class Test_getinitialnodes(object): def test_global_file(self, testdir, tmpdir): x = tmpdir.ensure("x.py") with tmpdir.as_cwd(): @@ -511,7 +589,8 @@ class Test_getinitialnodes: for col in col.listchain(): assert col.config is config -class Test_genitems: + +class Test_genitems(object): def test_check_collect_hashes(self, testdir): p = testdir.makepyfile(""" def test_1(): @@ -534,7 +613,7 @@ class Test_genitems: def testone(): pass - class TestX: + class TestX(object): def testmethod_one(self): pass @@ -567,11 +646,11 @@ class Test_genitems: python_functions = *_test test """) p = testdir.makepyfile(''' - class MyTestSuite: + class MyTestSuite(object): def x_test(self): pass - class TestCase: + class TestCase(object): def test_y(self): pass ''') @@ -586,7 +665,7 @@ def test_matchnodes_two_collections_same_file(testdir): def pytest_configure(config): config.pluginmanager.register(Plugin2()) - class Plugin2: + class Plugin2(object): def pytest_collect_file(self, path, parent): if path.ext == ".abc": return MyFile2(path, parent) @@ -618,15 
+697,15 @@ def test_matchnodes_two_collections_same_file(testdir): ]) -class TestNodekeywords: +class TestNodekeywords(object): def test_no_under(self, testdir): modcol = testdir.getmodulecol(""" def test_pass(): pass def test_fail(): assert 0 """) - l = list(modcol.keywords) - assert modcol.name in l - for x in l: + values = list(modcol.keywords) + assert modcol.name in values + for x in values: assert not x.startswith("_") assert modcol.name in repr(modcol.keywords) @@ -663,6 +742,7 @@ COLLECTION_ERROR_PY_FILES = dict( """, ) + def test_exit_on_collection_error(testdir): """Verify that all collection errors are collected and no tests executed""" testdir.makepyfile(**COLLECTION_ERROR_PY_FILES) @@ -751,3 +831,28 @@ def test_continue_on_collection_errors_maxfail(testdir): "*Interrupted: stopping after 3 failures*", "*1 failed, 2 error*", ]) + + +def test_fixture_scope_sibling_conftests(testdir): + """Regression test case for https://github.com/pytest-dev/pytest/issues/2836""" + foo_path = testdir.mkpydir("foo") + foo_path.join("conftest.py").write(_pytest._code.Source(""" + import pytest + @pytest.fixture + def fix(): + return 1 + """)) + foo_path.join("test_foo.py").write("def test_foo(fix): assert fix == 1") + + # Tests in `food/` should not see the conftest fixture from `foo/` + food_path = testdir.mkpydir("food") + food_path.join("test_food.py").write("def test_food(fix): assert fix == 1") + + res = testdir.runpytest() + assert res.ret == 1 + + res.stdout.fnmatch_lines([ + "*ERROR at setup of test_food*", + "E*fixture 'fix' not found", + "*1 passed, 1 error*", + ]) diff --git a/testing/test_compat.py b/testing/test_compat.py new file mode 100644 index 000000000..c74801c6c --- /dev/null +++ b/testing/test_compat.py @@ -0,0 +1,101 @@ +from __future__ import absolute_import, division, print_function +import sys + +import pytest +from _pytest.compat import is_generator, get_real_func, safe_getattr +from _pytest.outcomes import OutcomeException + + +def test_is_generator(): + def zap(): + yield + + def foo(): + pass + + assert is_generator(zap) + assert not is_generator(foo) + + +def test_real_func_loop_limit(): + + class Evil(object): + def __init__(self): + self.left = 1000 + + def __repr__(self): + return "<Evil left={left}>".format(left=self.left) + + def __getattr__(self, attr): + if not self.left: + raise RuntimeError("it's over") + self.left -= 1 + return self + + evil = Evil() + + with pytest.raises(ValueError): + res = get_real_func(evil) + print(res) + + +@pytest.mark.skipif(sys.version_info < (3, 4), + reason='asyncio available in Python 3.4+') +def test_is_generator_asyncio(testdir): + testdir.makepyfile(""" + from _pytest.compat import is_generator + import asyncio + @asyncio.coroutine + def baz(): + yield from [1,2,3] + + def test_is_generator_asyncio(): + assert not is_generator(baz) + """) + # avoid importing asyncio into pytest's own process, + # which in turn imports logging (#8) + result = testdir.runpytest_subprocess() + result.stdout.fnmatch_lines(['*1 passed*']) + + +@pytest.mark.skipif(sys.version_info < (3, 5), + reason='async syntax available in Python 3.5+') +def test_is_generator_async_syntax(testdir): + testdir.makepyfile(""" + from _pytest.compat import is_generator + def test_is_generator_py35(): + async def foo(): + await foo() + + async def bar(): + pass + + assert not is_generator(foo) + assert not is_generator(bar) + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines(['*1 passed*']) + + +class ErrorsHelper(object): + @property + def raise_exception(self): 
raise Exception('exception should be caught') + + @property + def raise_fail(self): + pytest.fail('fail should be caught') + + +def test_helper_failures(): + helper = ErrorsHelper() + with pytest.raises(Exception): + helper.raise_exception + with pytest.raises(OutcomeException): + helper.raise_fail + + +def test_safe_getattr(): + helper = ErrorsHelper() + assert safe_getattr(helper, 'raise_exception', 'default') == 'default' + assert safe_getattr(helper, 'raise_fail', 'default') == 'default' diff --git a/testing/test_config.py b/testing/test_config.py index 21ceab1e2..f21d1821e 100644 --- a/testing/test_config.py +++ b/testing/test_config.py @@ -1,10 +1,13 @@ -import py, pytest +from __future__ import absolute_import, division, print_function +import py +import pytest import _pytest._code -from _pytest.config import getcfg, get_common_ancestor, determine_setup +from _pytest.config import getcfg, get_common_ancestor, determine_setup, _iter_rewritable_modules from _pytest.main import EXIT_NOTESTSCOLLECTED -class TestParseIni: + +class TestParseIni(object): @pytest.mark.parametrize('section, filename', [('pytest', 'pytest.ini'), ('tool:pytest', 'setup.cfg')]) @@ -84,7 +87,8 @@ class TestParseIni: result = testdir.inline_run("--confcutdir=.") assert result.ret == 0 -class TestConfigCmdlineParsing: + +class TestConfigCmdlineParsing(object): def test_parsing_again_fails(self, testdir): config = testdir.parseconfig() pytest.raises(AssertionError, lambda: config.parse([])) @@ -98,7 +102,7 @@ class TestConfigCmdlineParsing: [pytest] custom = 0 """) - testdir.makefile(".cfg", custom = """ + testdir.makefile(".cfg", custom=""" [pytest] custom = 1 """) @@ -115,14 +119,15 @@ class TestConfigCmdlineParsing: ret = pytest.main("-c " + temp_cfg_file) assert ret == _pytest.main.EXIT_OK -class TestConfigAPI: + +class TestConfigAPI(object): def test_config_trace(self, testdir): config = testdir.parseconfig() - l = [] - config.trace.root.setwriter(l.append) + values = [] + config.trace.root.setwriter(values.append) config.trace("hello") - assert len(l) == 1 - assert l[0] == "hello [config]\n" + assert len(values) == 1 + assert values[0] == "hello [config]\n" def test_config_getoption(self, testdir): testdir.makeconftest(""" @@ -140,7 +145,7 @@ class TestConfigAPI: from __future__ import unicode_literals def pytest_addoption(parser): - parser.addoption('--hello', type='string') + parser.addoption('--hello', type=str) """) config = testdir.parseconfig('--hello=this') assert config.getoption('hello') == 'this' def test_config_getvalueorskip(self, testdir): config = testdir.parseconfig() pytest.raises(pytest.skip.Exception, - "config.getvalueorskip('hello')") + "config.getvalueorskip('hello')") verbose = config.getvalueorskip("verbose") assert verbose == config.option.verbose @@ -204,10 +209,10 @@ class TestConfigAPI: paths=hello world/sub.py """) config = testdir.parseconfig() - l = config.getini("paths") - assert len(l) == 2 - assert l[0] == p.dirpath('hello') - assert l[1] == p.dirpath('world/sub.py') + values = config.getini("paths") + assert len(values) == 2 + assert values[0] == p.dirpath('hello') + assert values[1] == p.dirpath('world/sub.py') pytest.raises(ValueError, config.getini, 'other') def test_addini_args(self, testdir): @@ -221,11 +226,11 @@ class TestConfigAPI: args=123 "123 hello" "this" """) config = testdir.parseconfig() - l = config.getini("args") - assert len(l) == 3 - assert l == ["123", "123 hello", "this"] - l = config.getini("a2") - assert l == 
list("123") + values = config.getini("args") + assert len(values) == 3 + assert values == ["123", "123 hello", "this"] + values = config.getini("a2") + assert values == list("123") def test_addini_linelist(self, testdir): testdir.makeconftest(""" @@ -239,11 +244,11 @@ class TestConfigAPI: second line """) config = testdir.parseconfig() - l = config.getini("xy") - assert len(l) == 2 - assert l == ["123 345", "second line"] - l = config.getini("a2") - assert l == [] + values = config.getini("xy") + assert len(values) == 2 + assert values == ["123 345", "second line"] + values = config.getini("a2") + assert values == [] @pytest.mark.parametrize('str_val, bool_val', [('True', True), ('no', False), ('no-ini', True)]) @@ -270,13 +275,13 @@ class TestConfigAPI: xy= 123 """) config = testdir.parseconfig() - l = config.getini("xy") - assert len(l) == 1 - assert l == ["123"] + values = config.getini("xy") + assert len(values) == 1 + assert values == ["123"] config.addinivalue_line("xy", "456") - l = config.getini("xy") - assert len(l) == 2 - assert l == ["123", "456"] + values = config.getini("xy") + assert len(values) == 2 + assert values == ["123", "456"] def test_addinivalue_line_new(self, testdir): testdir.makeconftest(""" @@ -286,13 +291,13 @@ class TestConfigAPI: config = testdir.parseconfig() assert not config.getini("xy") config.addinivalue_line("xy", "456") - l = config.getini("xy") - assert len(l) == 1 - assert l == ["456"] + values = config.getini("xy") + assert len(values) == 1 + assert values == ["456"] config.addinivalue_line("xy", "123") - l = config.getini("xy") - assert len(l) == 2 - assert l == ["456", "123"] + values = config.getini("xy") + assert len(values) == 2 + assert values == ["456", "123"] def test_confcutdir_check_isdir(self, testdir): """Give an error if --confcutdir is not a valid directory (#2078)""" @@ -303,8 +308,18 @@ class TestConfigAPI: config = testdir.parseconfig('--confcutdir', testdir.tmpdir.join('dir').ensure(dir=1)) assert config.getoption('confcutdir') == str(testdir.tmpdir.join('dir')) + @pytest.mark.parametrize('names, expected', [ + (['bar.py'], ['bar']), + (['foo', 'bar.py'], []), + (['foo', 'bar.pyc'], []), + (['foo', '__init__.py'], ['foo']), + (['foo', 'bar', '__init__.py'], []), + ]) + def test_iter_rewritable_modules(self, names, expected): + assert list(_iter_rewritable_modules(['/'.join(names)])) == expected -class TestConfigFromdictargs: + +class TestConfigFromdictargs(object): def test_basic_behavior(self): from _pytest.config import Config option_dict = { @@ -389,19 +404,19 @@ def test_preparse_ordering_with_setuptools(testdir, monkeypatch): def my_iter(name): assert name == "pytest11" - class Dist: + class Dist(object): project_name = 'spam' version = '1.0' def _get_metadata(self, name): return ['foo.txt,sha256=abc,123'] - class EntryPoint: + class EntryPoint(object): name = "mytestplugin" dist = Dist() def load(self): - class PseudoPlugin: + class PseudoPlugin(object): x = 42 return PseudoPlugin() @@ -423,14 +438,14 @@ def test_setuptools_importerror_issue1479(testdir, monkeypatch): def my_iter(name): assert name == "pytest11" - class Dist: + class Dist(object): project_name = 'spam' version = '1.0' def _get_metadata(self, name): return ['foo.txt,sha256=abc,123'] - class EntryPoint: + class EntryPoint(object): name = "mytestplugin" dist = Dist() @@ -450,14 +465,14 @@ def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch): def my_iter(name): assert name == "pytest11" - class Dist: + class Dist(object): project_name = 
'spam' version = '1.0' def _get_metadata(self, name): return ['foo.txt,sha256=abc,123'] - class EntryPoint: + class EntryPoint(object): name = "mytestplugin" dist = Dist() @@ -471,6 +486,7 @@ def test_plugin_preparse_prevents_setuptools_loading(testdir, monkeypatch): plugin = config.pluginmanager.getplugin("mytestplugin") assert plugin is None + def test_cmdline_processargs_simple(testdir): testdir.makeconftest(""" def pytest_cmdline_preparse(args): @@ -482,6 +498,7 @@ def test_cmdline_processargs_simple(testdir): "*-h*", ]) + def test_invalid_options_show_extra_information(testdir): """display extra information when pytest exits due to unrecognized options in the command-line""" @@ -519,7 +536,7 @@ def test_consider_args_after_options_for_rootdir_and_inifile(testdir, args): args[i] = d2 with root.as_cwd(): result = testdir.runpytest(*args) - result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile: ']) + result.stdout.fnmatch_lines(['*rootdir: *myroot, inifile:']) @pytest.mark.skipif("sys.platform == 'win32'") @@ -527,6 +544,30 @@ def test_toolongargs_issue224(testdir): result = testdir.runpytest("-m", "hello" * 500) assert result.ret == EXIT_NOTESTSCOLLECTED + +def test_config_in_subdirectory_colon_command_line_issue2148(testdir): + conftest_source = ''' + def pytest_addoption(parser): + parser.addini('foo', 'foo') + ''' + + testdir.makefile('.ini', **{ + 'pytest': '[pytest]\nfoo = root', + 'subdir/pytest': '[pytest]\nfoo = subdir', + }) + + testdir.makepyfile(**{ + 'conftest': conftest_source, + 'subdir/conftest': conftest_source, + 'subdir/test_foo': ''' + def test_foo(pytestconfig): + assert pytestconfig.getini('foo') == 'subdir' + '''}) + + result = testdir.runpytest('subdir/test_foo.py::test_foo') + assert result.ret == 0 + + def test_notify_exception(testdir, capfd): config = testdir.parseconfig() excinfo = pytest.raises(ValueError, "raise ValueError(1)") @@ -534,7 +575,7 @@ def test_notify_exception(testdir, capfd): out, err = capfd.readouterr() assert "ValueError" in err - class A: + class A(object): def pytest_internalerror(self, excrepr): return True @@ -545,39 +586,54 @@ def test_notify_exception(testdir, capfd): def test_load_initial_conftest_last_ordering(testdir): - from _pytest.config import get_config + from _pytest.config import get_config pm = get_config().pluginmanager - class My: + class My(object): def pytest_load_initial_conftests(self): pass m = My() pm.register(m) hc = pm.hook.pytest_load_initial_conftests - l = hc._nonwrappers + hc._wrappers + values = hc._nonwrappers + hc._wrappers expected = [ "_pytest.config", 'test_config', '_pytest.capture', ] - assert [x.function.__module__ for x in l] == expected + assert [x.function.__module__ for x in values] == expected -class TestWarning: +def test_get_plugin_specs_as_list(): + from _pytest.config import _get_plugin_specs_as_list + with pytest.raises(pytest.UsageError): + _get_plugin_specs_as_list(set(['foo'])) + with pytest.raises(pytest.UsageError): + _get_plugin_specs_as_list(dict()) + + assert _get_plugin_specs_as_list(None) == [] + assert _get_plugin_specs_as_list('') == [] + assert _get_plugin_specs_as_list('foo') == ['foo'] + assert _get_plugin_specs_as_list('foo,bar') == ['foo', 'bar'] + assert _get_plugin_specs_as_list(['foo', 'bar']) == ['foo', 'bar'] + assert _get_plugin_specs_as_list(('foo', 'bar')) == ['foo', 'bar'] + + +class TestWarning(object): def test_warn_config(self, testdir): testdir.makeconftest(""" - l = [] + values = [] def pytest_configure(config): config.warn("C1", "hello") def 
pytest_logwarning(code, message): if message == "hello" and code == "C1": - l.append(1) + values.append(1) """) testdir.makepyfile(""" def test_proper(pytestconfig): import conftest - assert conftest.l == [1] + assert conftest.values == [1] """) reprec = testdir.inline_run() reprec.assertoutcome(passed=1) @@ -594,16 +650,18 @@ class TestWarning: pass """) result = testdir.runpytest("--disable-pytest-warnings") - assert result.parseoutcomes()["pytest-warnings"] > 0 + assert result.parseoutcomes()["warnings"] > 0 assert "hello" not in result.stdout.str() result = testdir.runpytest() result.stdout.fnmatch_lines(""" - ===*pytest-warning summary*=== - *WT1*test_warn_on_test_item*:7 hello* + ===*warnings summary*=== + *test_warn_on_test_item_from_request.py::test_hello* + *hello* """) -class TestRootdir: + +class TestRootdir(object): def test_simple_noini(self, tmpdir): assert get_common_ancestor([tmpdir]) == tmpdir a = tmpdir.mkdir("a") @@ -626,7 +684,7 @@ class TestRootdir: rootdir, inifile, inicfg = determine_setup(None, args) assert rootdir == tmpdir assert inifile == inifile - rootdir, inifile, inicfg = determine_setup(None, [b,a]) + rootdir, inifile, inicfg = determine_setup(None, [b, a]) assert rootdir == tmpdir assert inifile == inifile @@ -661,7 +719,7 @@ class TestRootdir: assert rootdir == tmpdir -class TestOverrideIniArgs: +class TestOverrideIniArgs(object): @pytest.mark.parametrize("name", "setup.cfg tox.ini pytest.ini".split()) def test_override_ini_names(self, testdir, name): testdir.tmpdir.join(name).write(py.std.textwrap.dedent(""" @@ -684,7 +742,6 @@ class TestOverrideIniArgs: assert result.ret == 0 result.stdout.fnmatch_lines(["custom_option:3.0"]) - def test_override_ini_pathlist(self, testdir): testdir.makeconftest(""" def pytest_addoption(parser): @@ -740,6 +797,21 @@ class TestOverrideIniArgs: result = testdir.runpytest("--override-ini", 'xdist_strict True', "-s") result.stderr.fnmatch_lines(["*ERROR* *expects option=value*"]) + @pytest.mark.parametrize('with_ini', [True, False]) + def test_override_ini_handled_asap(self, testdir, with_ini): + """-o should be handled as soon as possible and always override what's in ini files (#2238)""" + if with_ini: + testdir.makeini(""" + [pytest] + python_files=test_*.py + """) + testdir.makepyfile(unittest_ini_handle=""" + def test(): + pass + """) + result = testdir.runpytest("--override-ini", 'python_files=unittest_*.py') + result.stdout.fnmatch_lines(["*1 passed in*"]) + def test_with_arg_outside_cwd_without_inifile(self, tmpdir, monkeypatch): monkeypatch.chdir(str(tmpdir)) a = tmpdir.mkdir("a") @@ -771,4 +843,3 @@ class TestOverrideIniArgs: rootdir, inifile, inicfg = determine_setup(None, ['a/exist']) assert rootdir == tmpdir assert inifile is None - diff --git a/testing/test_conftest.py b/testing/test_conftest.py index c0fa74701..c0411b723 100644 --- a/testing/test_conftest.py +++ b/testing/test_conftest.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function from textwrap import dedent import _pytest._code @@ -18,20 +19,23 @@ def basedir(request, tmpdir_factory): tmpdir.ensure("adir/b/__init__.py") return tmpdir + def ConftestWithSetinitial(path): conftest = PytestPluginManager() conftest_setinitial(conftest, [path]) return conftest + def conftest_setinitial(conftest, args, confcutdir=None): - class Namespace: + class Namespace(object): def __init__(self): self.file_or_dir = args self.confcutdir = str(confcutdir) self.noconftest = False conftest._set_initial_conftests(Namespace()) -class 
TestConftestValueAccessGlobal: + +class TestConftestValueAccessGlobal(object): def test_basic_init(self, basedir): conftest = PytestPluginManager() p = basedir.join("adir") @@ -42,7 +46,7 @@ class TestConftestValueAccessGlobal: len(conftest._path2confmods) conftest._getconftestmodules(basedir) snap1 = len(conftest._path2confmods) - #assert len(conftest._path2confmods) == snap1 + 1 + # assert len(conftest._path2confmods) == snap1 + 1 conftest._getconftestmodules(basedir.join('adir')) assert len(conftest._path2confmods) == snap1 + 1 conftest._getconftestmodules(basedir.join('b')) @@ -64,11 +68,12 @@ class TestConftestValueAccessGlobal: startdir.ensure("xx", dir=True) conftest = ConftestWithSetinitial(startdir) mod, value = conftest._rget_with_confmod("a", startdir) - assert value == 1.5 + assert value == 1.5 path = py.path.local(mod.__file__) assert path.dirpath() == basedir.join("adir", "b") assert path.purebasename.startswith("conftest") + def test_conftest_in_nonpkg_with_init(tmpdir): tmpdir.ensure("adir-1.0/conftest.py").write("a=1 ; Directory = 3") tmpdir.ensure("adir-1.0/b/conftest.py").write("b=2 ; a = 1.5") @@ -76,13 +81,15 @@ def test_conftest_in_nonpkg_with_init(tmpdir): tmpdir.ensure("adir-1.0/__init__.py") ConftestWithSetinitial(tmpdir.join("adir-1.0", "b")) + def test_doubledash_considered(testdir): conf = testdir.mkdir("--option") conf.join("conftest.py").ensure() conftest = PytestPluginManager() conftest_setinitial(conftest, [conf.basename, conf.basename]) - l = conftest._getconftestmodules(conf) - assert len(l) == 1 + values = conftest._getconftestmodules(conf) + assert len(values) == 1 + def test_issue151_load_all_conftests(testdir): names = "code proj src".split() @@ -95,6 +102,7 @@ def test_issue151_load_all_conftests(testdir): d = list(conftest._conftestpath2mod.values()) assert len(d) == len(names) + def test_conftest_global_import(testdir): testdir.makeconftest("x=3") p = testdir.makepyfile(""" @@ -116,32 +124,35 @@ def test_conftest_global_import(testdir): res = testdir.runpython(p) assert res.ret == 0 + def test_conftestcutdir(testdir): conf = testdir.makeconftest("") p = testdir.mkdir("x") conftest = PytestPluginManager() conftest_setinitial(conftest, [testdir.tmpdir], confcutdir=p) - l = conftest._getconftestmodules(p) - assert len(l) == 0 - l = conftest._getconftestmodules(conf.dirpath()) - assert len(l) == 0 + values = conftest._getconftestmodules(p) + assert len(values) == 0 + values = conftest._getconftestmodules(conf.dirpath()) + assert len(values) == 0 assert conf not in conftest._conftestpath2mod # but we can still import a conftest directly conftest._importconftest(conf) - l = conftest._getconftestmodules(conf.dirpath()) - assert l[0].__file__.startswith(str(conf)) + values = conftest._getconftestmodules(conf.dirpath()) + assert values[0].__file__.startswith(str(conf)) # and all sub paths get updated properly - l = conftest._getconftestmodules(p) - assert len(l) == 1 - assert l[0].__file__.startswith(str(conf)) + values = conftest._getconftestmodules(p) + assert len(values) == 1 + assert values[0].__file__.startswith(str(conf)) + def test_conftestcutdir_inplace_considered(testdir): conf = testdir.makeconftest("") conftest = PytestPluginManager() conftest_setinitial(conftest, [conf.dirpath()], confcutdir=conf.dirpath()) - l = conftest._getconftestmodules(conf.dirpath()) - assert len(l) == 1 - assert l[0].__file__.startswith(str(conf)) + values = conftest._getconftestmodules(conf.dirpath()) + assert len(values) == 1 + assert 
values[0].__file__.startswith(str(conf)) + @pytest.mark.parametrize("name", 'test tests whatever .dotdir'.split()) def test_setinitial_conftest_subdirs(testdir, name): @@ -150,12 +161,13 @@ def test_setinitial_conftest_subdirs(testdir, name): conftest = PytestPluginManager() conftest_setinitial(conftest, [sub.dirpath()], confcutdir=testdir.tmpdir) if name not in ('whatever', '.dotdir'): - assert subconftest in conftest._conftestpath2mod + assert subconftest in conftest._conftestpath2mod assert len(conftest._conftestpath2mod) == 1 else: - assert subconftest not in conftest._conftestpath2mod + assert subconftest not in conftest._conftestpath2mod assert len(conftest._conftestpath2mod) == 0 + def test_conftest_confcutdir(testdir): testdir.makeconftest("assert 0") x = testdir.mkdir("x") @@ -167,6 +179,7 @@ def test_conftest_confcutdir(testdir): result.stdout.fnmatch_lines(["*--xyz*"]) assert 'warning: could not load initial' not in result.stdout.str() + def test_no_conftest(testdir): testdir.makeconftest("assert 0") result = testdir.runpytest("--noconftest") @@ -175,6 +188,7 @@ def test_no_conftest(testdir): result = testdir.runpytest() assert result.ret == EXIT_USAGEERROR + def test_conftest_existing_resultlog(testdir): x = testdir.mkdir("tests") x.join("conftest.py").write(_pytest._code.Source(""" @@ -185,6 +199,7 @@ def test_conftest_existing_resultlog(testdir): result = testdir.runpytest("-h", "--resultlog", "result.log") result.stdout.fnmatch_lines(["*--xyz*"]) + def test_conftest_existing_junitxml(testdir): x = testdir.mkdir("tests") x.join("conftest.py").write(_pytest._code.Source(""" @@ -195,6 +210,7 @@ def test_conftest_existing_junitxml(testdir): result = testdir.runpytest("-h", "--junitxml", "junit.xml") result.stdout.fnmatch_lines(["*--xyz*"]) + def test_conftest_import_order(testdir, monkeypatch): ct1 = testdir.makeconftest("") sub = testdir.mkdir("sub") @@ -265,7 +281,7 @@ def test_conftest_found_with_double_dash(testdir): """) -class TestConftestVisibility: +class TestConftestVisibility(object): def _setup_tree(self, testdir): # for issue616 # example mostly taken from: # https://mail.python.org/pipermail/pytest-dev/2014-September/002617.html @@ -305,9 +321,9 @@ class TestConftestVisibility: # use value from parent dir's """)) - print ("created directory structure:") + print("created directory structure:") for x in testdir.tmpdir.visit(): - print (" " + x.relto(testdir.tmpdir)) + print(" " + x.relto(testdir.tmpdir)) return { "runner": runner, @@ -318,38 +334,38 @@ class TestConftestVisibility: # N.B.: "swc" stands for "subdir with conftest.py" # "snc" stands for "subdir no [i.e. without] conftest.py" @pytest.mark.parametrize("chdir,testarg,expect_ntests_passed", [ - # Effective target: package/.. - ("runner", "..", 3), - ("package", "..", 3), - ("swc", "../..", 3), - ("snc", "../..", 3), + # Effective target: package/.. 
+ ("runner", "..", 3), + ("package", "..", 3), + ("swc", "../..", 3), + ("snc", "../..", 3), - # Effective target: package - ("runner", "../package", 3), - ("package", ".", 3), - ("swc", "..", 3), - ("snc", "..", 3), + # Effective target: package + ("runner", "../package", 3), + ("package", ".", 3), + ("swc", "..", 3), + ("snc", "..", 3), - # Effective target: package/swc - ("runner", "../package/swc", 1), - ("package", "./swc", 1), - ("swc", ".", 1), - ("snc", "../swc", 1), + # Effective target: package/swc + ("runner", "../package/swc", 1), + ("package", "./swc", 1), + ("swc", ".", 1), + ("snc", "../swc", 1), - # Effective target: package/snc - ("runner", "../package/snc", 1), - ("package", "./snc", 1), - ("swc", "../snc", 1), - ("snc", ".", 1), + # Effective target: package/snc + ("runner", "../package/snc", 1), + ("package", "./snc", 1), + ("swc", "../snc", 1), + ("snc", ".", 1), ]) @pytest.mark.issue616 def test_parsefactories_relative_node_ids( - self, testdir, chdir,testarg, expect_ntests_passed): + self, testdir, chdir, testarg, expect_ntests_passed): dirs = self._setup_tree(testdir) - print("pytest run in cwd: %s" %( + print("pytest run in cwd: %s" % ( dirs[chdir].relto(testdir.tmpdir))) - print("pytestarg : %s" %(testarg)) - print("expected pass : %s" %(expect_ntests_passed)) + print("pytestarg : %s" % (testarg)) + print("expected pass : %s" % (expect_ntests_passed)) with dirs[chdir].as_cwd(): reprec = testdir.inline_run(testarg, "-q", "--traceconfig") reprec.assertoutcome(passed=expect_ntests_passed) @@ -398,7 +414,7 @@ def test_search_conftest_up_to_inifile(testdir, confcutdir, passed, error): def test_issue1073_conftest_special_objects(testdir): testdir.makeconftest(""" - class DontTouchMe: + class DontTouchMe(object): def __getattr__(self, x): raise Exception('cant touch me') @@ -448,3 +464,15 @@ def test_hook_proxy(testdir): '*test_foo4.py*', '*3 passed*', ]) + + +def test_required_option_help(testdir): + testdir.makeconftest("assert 0") + x = testdir.mkdir("x") + x.join("conftest.py").write(_pytest._code.Source(""" + def pytest_addoption(parser): + parser.addoption("--xyz", action="store_true", required=True) + """)) + result = testdir.runpytest("-h", x) + assert 'argument --xyz is required' not in result.stdout.str() + assert 'general:' in result.stdout.str() diff --git a/testing/test_doctest.py b/testing/test_doctest.py index 4ea2cc58e..6616d2eae 100644 --- a/testing/test_doctest.py +++ b/testing/test_doctest.py @@ -1,10 +1,13 @@ # encoding: utf-8 +from __future__ import absolute_import, division, print_function import sys import _pytest._code +from _pytest.compat import MODULE_NOT_FOUND_ERROR from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile import pytest -class TestDoctests: + +class TestDoctests(object): def test_collect_testtextfile(self, testdir): w = testdir.maketxtfile(whatever="") @@ -16,7 +19,7 @@ class TestDoctests: """) for x in (testdir.tmpdir, checkfile): - #print "checking that %s returns custom items" % (x,) + # print "checking that %s returns custom items" % (x,) items, reprec = testdir.inline_genitems(x) assert len(items) == 1 assert isinstance(items[0], DoctestItem) @@ -29,14 +32,14 @@ class TestDoctests: path = testdir.makepyfile(whatever="#") for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + '--doctest-modules') assert len(items) == 0 def test_collect_module_single_modulelevel_doctest(self, testdir): path = testdir.makepyfile(whatever='""">>> pass"""') for p in (path, 
testdir.tmpdir): items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + '--doctest-modules') assert len(items) == 1 assert isinstance(items[0], DoctestItem) assert isinstance(items[0].parent, DoctestModule) @@ -49,7 +52,7 @@ class TestDoctests: """) for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + '--doctest-modules') assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) @@ -74,7 +77,7 @@ class TestDoctests: """) for p in (path, testdir.tmpdir): items, reprec = testdir.inline_genitems(p, - '--doctest-modules') + '--doctest-modules') assert len(items) == 2 assert isinstance(items[0], DoctestItem) assert isinstance(items[1], DoctestItem) @@ -129,6 +132,33 @@ class TestDoctests: '*1 passed*', ]) + @pytest.mark.parametrize( + ' test_string, encoding', + [ + (u'foo', 'ascii'), + (u'öäü', 'latin1'), + (u'öäü', 'utf-8') + ] + ) + def test_encoding(self, testdir, test_string, encoding): + """Test support for doctest_encoding ini option. + """ + testdir.makeini(""" + [pytest] + doctest_encoding={0} + """.format(encoding)) + doctest = u""" + >>> u"{0}" + {1} + """.format(test_string, repr(test_string)) + testdir._makefile(".txt", [doctest], {}, encoding=encoding) + + result = testdir.runpytest() + + result.stdout.fnmatch_lines([ + '*1 passed*', + ]) + def test_doctest_unexpected_exception(self, testdir): testdir.maketxtfile(""" >>> i = 0 @@ -143,7 +173,7 @@ class TestDoctests: "*UNEXPECTED*ZeroDivision*", ]) - def test_docstring_context_around_error(self, testdir): + def test_docstring_partial_context_around_error(self, testdir): """Test that we show some context before the actual line of a failing doctest. """ @@ -169,7 +199,7 @@ class TestDoctests: ''') result = testdir.runpytest('--doctest-modules') result.stdout.fnmatch_lines([ - '*docstring_context_around_error*', + '*docstring_partial_context_around_error*', '005*text-line-3', '006*text-line-4', '013*text-line-11', @@ -183,6 +213,32 @@ class TestDoctests: assert 'text-line-2' not in result.stdout.str() assert 'text-line-after' not in result.stdout.str() + def test_docstring_full_context_around_error(self, testdir): + """Test that we show the whole context before the actual line of a failing + doctest, provided that the context is up to 10 lines long. 
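For longer docstrings only part of the context is shown, which test_docstring_partial_context_around_error above exercises separately.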
+ """ + testdir.makepyfile(''' + def foo(): + """ + text-line-1 + text-line-2 + + >>> 1 + 1 + 3 + """ + ''') + result = testdir.runpytest('--doctest-modules') + result.stdout.fnmatch_lines([ + '*docstring_full_context_around_error*', + '003*text-line-1', + '004*text-line-2', + '006*>>> 1 + 1', + 'Expected:', + ' 3', + 'Got:', + ' 2', + ]) + def test_doctest_linedata_missing(self, testdir): testdir.tmpdir.join('hello.py').write(_pytest._code.Source(""" class Fun(object): @@ -211,8 +267,8 @@ class TestDoctests: # doctest is never executed because of error during hello.py collection result.stdout.fnmatch_lines([ "*>>> import asdals*", - "*UNEXPECTED*ImportError*", - "ImportError: No module named *asdal*", + "*UNEXPECTED*{e}*".format(e=MODULE_NOT_FOUND_ERROR), + "{e}: No module named *asdal*".format(e=MODULE_NOT_FOUND_ERROR), ]) def test_doctest_unex_importerror_with_module(self, testdir): @@ -227,7 +283,7 @@ class TestDoctests: # doctest is never executed because of error during hello.py collection result.stdout.fnmatch_lines([ "*ERROR collecting hello.py*", - "*ImportError: No module named *asdals*", + "*{e}: No module named *asdals*".format(e=MODULE_NOT_FOUND_ERROR), "*Interrupted: 1 errors during collection*", ]) @@ -264,7 +320,6 @@ class TestDoctests: "*:5: DocTestFailure" ]) - def test_txtfile_failing(self, testdir): p = testdir.maketxtfile(""" >>> i = 0 @@ -349,7 +404,7 @@ class TestDoctests: def test_doctestmodule_two_tests_one_fail(self, testdir): p = testdir.makepyfile(""" - class MyClass: + class MyClass(object): def bad_meth(self): ''' >>> magic = 42 @@ -372,7 +427,7 @@ class TestDoctests: doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE """) p = testdir.makepyfile(""" - class MyClass: + class MyClass(object): ''' >>> a = "foo " >>> print(a) @@ -389,7 +444,7 @@ class TestDoctests: doctest_optionflags = ELLIPSIS """) p = testdir.makepyfile(""" - class MyClass: + class MyClass(object): ''' >>> a = "foo " >>> print(a) @@ -475,8 +530,65 @@ class TestDoctests: "--junit-xml=junit.xml") reprec.assertoutcome(failed=1) + def test_unicode_doctest(self, testdir): + """ + Test case for issue 2434: DecodeError on Python 2 when doctest contains non-ascii + characters. + """ + p = testdir.maketxtfile(test_unicode_doctest=""" + .. doctest:: -class TestLiterals: + >>> print( + ... "Hi\\n\\nByé") + Hi + ... + Byé + >>> 1/0 # Byé + 1 + """) + result = testdir.runpytest(p) + result.stdout.fnmatch_lines([ + '*UNEXPECTED EXCEPTION: ZeroDivisionError*', + '*1 failed*', + ]) + + def test_unicode_doctest_module(self, testdir): + """ + Test case for issue 2434: DecodeError on Python 2 when doctest docstring + contains non-ascii characters. + """ + p = testdir.makepyfile(test_unicode_doctest_module=""" + # -*- encoding: utf-8 -*- + from __future__ import unicode_literals + + def fix_bad_unicode(text): + ''' + >>> print(fix_bad_unicode('único')) + único + ''' + return "único" + """) + result = testdir.runpytest(p, '--doctest-modules') + result.stdout.fnmatch_lines(['* 1 passed *']) + + def test_reportinfo(self, testdir): + ''' + Test case to make sure that DoctestItem.reportinfo() returns lineno. 
+ ''' + p = testdir.makepyfile(test_reportinfo=""" + def foo(x): + ''' + >>> foo('a') + 'b' + ''' + return 'c' + """) + items, reprec = testdir.inline_genitems(p, '--doctest-modules') + reportinfo = items[0].reportinfo() + assert reportinfo[1] == 1 + + +class TestLiterals(object): @pytest.mark.parametrize('config_mode', ['ini', 'comment']) def test_allow_unicode(self, testdir, config_mode): @@ -563,7 +675,7 @@ class TestLiterals: reprec.assertoutcome(passed=passed, failed=int(not passed)) -class TestDoctestSkips: +class TestDoctestSkips(object): """ If all examples in a doctest are skipped due to the SKIP option, then the tests should be SKIPPED rather than PASSED. (#957) @@ -617,7 +729,7 @@ class TestDoctestSkips: reprec.assertoutcome(passed=0, skipped=0) -class TestDoctestAutoUseFixtures: +class TestDoctestAutoUseFixtures(object): SCOPES = ['module', 'session', 'class', 'function'] @@ -736,7 +848,7 @@ class TestDoctestAutoUseFixtures: result.stdout.fnmatch_lines(['*=== 1 passed in *']) -class TestDoctestNamespaceFixture: +class TestDoctestNamespaceFixture(object): SCOPES = ['module', 'session', 'class', 'function'] @@ -786,7 +898,7 @@ class TestDoctestNamespaceFixture: reprec.assertoutcome(passed=1) -class TestDoctestReportingOption: +class TestDoctestReportingOption(object): def _run_doctest_report(self, testdir, format): testdir.makepyfile(""" def foo(): @@ -861,4 +973,3 @@ class TestDoctestReportingOption: result.stderr.fnmatch_lines([ "*error: argument --doctest-report: invalid choice: 'obviously_invalid_format' (choose from*" ]) - diff --git a/testing/test_entry_points.py b/testing/test_entry_points.py index 370b93129..6ca68b481 100644 --- a/testing/test_entry_points.py +++ b/testing/test_entry_points.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pkg_resources import pytest diff --git a/testing/test_helpconfig.py b/testing/test_helpconfig.py index fc3c8fdf6..845005a05 100644 --- a/testing/test_helpconfig.py +++ b/testing/test_helpconfig.py @@ -1,10 +1,12 @@ +from __future__ import absolute_import, division, print_function from _pytest.main import EXIT_NOTESTSCOLLECTED import pytest + def test_version(testdir, pytestconfig): result = testdir.runpytest("--version") assert result.ret == 0 - #p = py.path.local(py.__file__).dirpath() + # p = py.path.local(py.__file__).dirpath() result.stderr.fnmatch_lines([ '*pytest*%s*imported from*' % (pytest.__version__, ) ]) @@ -14,6 +16,7 @@ def test_version(testdir, pytestconfig): "*at*", ]) + def test_help(testdir): result = testdir.runpytest("--help") assert result.ret == 0 @@ -25,6 +28,7 @@ def test_help(testdir): *to see*fixtures*pytest --fixtures* """) + def test_hookvalidation_unknown(testdir): testdir.makeconftest(""" def pytest_hello(xyz): @@ -32,10 +36,11 @@ def test_hookvalidation_unknown(testdir): """) result = testdir.runpytest() assert result.ret != 0 - result.stderr.fnmatch_lines([ + result.stdout.fnmatch_lines([ '*unknown hook*pytest_hello*' ]) + def test_hookvalidation_optional(testdir): testdir.makeconftest(""" import pytest @@ -46,6 +51,7 @@ def test_hookvalidation_optional(testdir): result = testdir.runpytest() assert result.ret == EXIT_NOTESTSCOLLECTED + def test_traceconfig(testdir): result = testdir.runpytest("--traceconfig") result.stdout.fnmatch_lines([ @@ -53,12 +59,14 @@ def test_traceconfig(testdir): "*active plugins*", ]) + def test_debug(testdir, monkeypatch): result = testdir.runpytest_subprocess("--debug") assert result.ret == EXIT_NOTESTSCOLLECTED p = 
testdir.tmpdir.join("pytestdebug.log") assert "pytest_sessionstart" in p.read() + def test_PYTEST_DEBUG(testdir, monkeypatch): monkeypatch.setenv("PYTEST_DEBUG", "1") result = testdir.runpytest_subprocess() diff --git a/testing/test_junitxml.py b/testing/test_junitxml.py index abbc9cd33..b604c02a3 100644 --- a/testing/test_junitxml.py +++ b/testing/test_junitxml.py @@ -1,5 +1,5 @@ # -*- coding: utf-8 -*- - +from __future__ import absolute_import, division, print_function from xml.dom import minidom import py import sys @@ -79,7 +79,7 @@ class DomNode(object): return type(self)(self.__node.nextSibling) -class TestPython: +class TestPython(object): def test_summing_simple(self, testdir): testdir.makepyfile(""" import pytest @@ -189,6 +189,29 @@ class TestPython: fnode.assert_attr(message="test teardown failure") assert "ValueError" in fnode.toxml() + def test_call_failure_teardown_error(self, testdir): + testdir.makepyfile(""" + import pytest + + @pytest.fixture + def arg(): + yield + raise Exception("Teardown Exception") + def test_function(arg): + raise Exception("Call Exception") + """) + result, dom = runandparse(testdir) + assert result.ret + node = dom.find_first_by_tag("testsuite") + node.assert_attr(errors=1, failures=1, tests=1) + first, second = dom.find_by_tag("testcase") + if not first or not second or first == second: + assert 0 + fnode = first.find_first_by_tag("failure") + fnode.assert_attr(message="Exception: Call Exception") + snode = second.find_first_by_tag("error") + snode.assert_attr(message="test teardown failure") + def test_skip_contains_name_reason(self, testdir): testdir.makepyfile(""" import pytest @@ -263,7 +286,7 @@ class TestPython: def test_classname_instance(self, testdir): testdir.makepyfile(""" - class TestClass: + class TestClass(object): def test_method(self): assert 0 """) @@ -376,7 +399,7 @@ class TestPython: testdir.makepyfile(""" def test_func(): assert 0 - class TestHello: + class TestHello(object): def test_hello(self): pass """) @@ -557,6 +580,26 @@ class TestPython: systemout = pnode.find_first_by_tag("system-err") assert "hello-stderr" in systemout.toxml() + def test_avoid_double_stdout(self, testdir): + testdir.makepyfile(""" + import sys + import pytest + + @pytest.fixture + def arg(request): + yield + sys.stdout.write('hello-stdout teardown') + raise ValueError() + def test_function(arg): + sys.stdout.write('hello-stdout call') + """) + result, dom = runandparse(testdir) + node = dom.find_first_by_tag("testsuite") + pnode = node.find_first_by_tag("testcase") + systemout = pnode.find_first_by_tag("system-out") + assert "hello-stdout call" in systemout.toxml() + assert "hello-stdout teardown" in systemout.toxml() + def test_mangle_test_address(): from _pytest.junitxml import mangle_test_address @@ -569,11 +612,14 @@ def test_mangle_test_address(): def test_dont_configure_on_slaves(tmpdir): gotten = [] - class FakeConfig: + class FakeConfig(object): def __init__(self): self.pluginmanager = self self.option = self + def getini(self, name): + return "pytest" + junitprefix = None # XXX: shouldnt need tmpdir ? 
xmlpath = str(tmpdir.join('junix.xml')) @@ -588,7 +634,7 @@ def test_dont_configure_on_slaves(tmpdir): assert len(gotten) == 1 -class TestNonPython: +class TestNonPython(object): def test_summing_simple(self, testdir): testdir.makeconftest(""" import pytest @@ -715,11 +761,13 @@ def test_logxml_makedir(testdir): assert result.ret == 0 assert testdir.tmpdir.join("path/to/results.xml").check() + def test_logxml_check_isdir(testdir): """Give an error if --junit-xml is a directory (#2089)""" result = testdir.runpytest("--junit-xml=.") result.stderr.fnmatch_lines(["*--junitxml must be a filename*"]) + def test_escaped_parametrized_names_xml(testdir): testdir.makepyfile(""" import pytest @@ -750,7 +798,7 @@ def test_double_colon_split_function_issue469(testdir): def test_double_colon_split_method_issue469(testdir): testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): @pytest.mark.parametrize('param', ["double::colon"]) def test_func(self, param): pass @@ -810,7 +858,10 @@ def test_record_property(testdir): pnodes = psnode.find_by_tag('property') pnodes[0].assert_attr(name="bar", value="1") pnodes[1].assert_attr(name="foo", value="<1") - result.stdout.fnmatch_lines('*C3*test_record_property.py*experimental*') + result.stdout.fnmatch_lines([ + 'test_record_property.py::test_record', + '*record_xml_property*experimental*', + ]) def test_record_property_same_name(testdir): @@ -962,3 +1013,50 @@ def test_global_properties(testdir): actual[k] = v assert actual == expected + + +def test_url_property(testdir): + test_url = "http://www.github.com/pytest-dev" + path = testdir.tmpdir.join("test_url_property.xml") + log = LogXML(str(path), None) + from _pytest.runner import BaseReport + + class Report(BaseReport): + longrepr = "FooBarBaz" + sections = [] + nodeid = "something" + location = 'tests/filename.py', 42, 'TestClass.method' + url = test_url + + test_report = Report() + + log.pytest_sessionstart() + node_reporter = log._opentestcase(test_report) + node_reporter.append_failure(test_report) + log.pytest_sessionfinish() + + test_case = minidom.parse(str(path)).getElementsByTagName('testcase')[0] + + assert (test_case.getAttribute('url') == test_url), "The URL did not get written to the xml" + + +@pytest.mark.parametrize('suite_name', ['my_suite', '']) +def test_set_suite_name(testdir, suite_name): + if suite_name: + testdir.makeini(""" + [pytest] + junit_suite_name={0} + """.format(suite_name)) + expected = suite_name + else: + expected = 'pytest' + testdir.makepyfile(""" + import pytest + + def test_func(): + pass + """) + result, dom = runandparse(testdir) + assert result.ret == 0 + node = dom.find_first_by_tag("testsuite") + node.assert_attr(name=expected) diff --git a/testing/test_mark.py b/testing/test_mark.py index a4430b4c8..3ac42daee 100644 --- a/testing/test_mark.py +++ b/testing/test_mark.py @@ -1,22 +1,40 @@ +from __future__ import absolute_import, division, print_function import os +import sys -import py, pytest -from _pytest.mark import MarkGenerator as Mark +import pytest +from _pytest.mark import MarkGenerator as Mark, ParameterSet, transfer_markers -class TestMark: + +class TestMark(object): def test_markinfo_repr(self): - from _pytest.mark import MarkInfo - m = MarkInfo("hello", (1,2), {}) + from _pytest.mark import MarkInfo, Mark + m = MarkInfo(Mark("hello", (1, 2), {})) repr(m) - def test_pytest_exists_in_namespace_all(self): - assert 'mark' in py.test.__all__ - assert 'mark' in pytest.__all__ + @pytest.mark.parametrize('attr', ['mark', 'param']) + 
@pytest.mark.parametrize('modulename', ['py.test', 'pytest']) + def test_pytest_exists_in_namespace_all(self, attr, modulename): + module = sys.modules[modulename] + assert attr in module.__all__ def test_pytest_mark_notcallable(self): mark = Mark() pytest.raises((AttributeError, TypeError), mark) + def test_mark_with_param(self): + def some_function(abc): + pass + + class SomeClass(object): + pass + + assert pytest.mark.fun(some_function) is some_function + assert pytest.mark.fun.with_args(some_function) is not some_function + + assert pytest.mark.fun(SomeClass) is SomeClass + assert pytest.mark.fun.with_args(SomeClass) is not SomeClass + def test_pytest_mark_name_starts_with_underscore(self): mark = Mark() pytest.raises(AttributeError, getattr, mark, '_some_name') @@ -136,6 +154,7 @@ def test_ini_markers(testdir): rec = testdir.inline_run() rec.assertoutcome(passed=1) + def test_markers_option(testdir): testdir.makeini(""" [pytest] @@ -149,6 +168,24 @@ def test_markers_option(testdir): "*a1some*another marker", ]) + +def test_ini_markers_whitespace(testdir): + testdir.makeini(""" + [pytest] + markers = + a1 : this is a whitespace marker + """) + testdir.makepyfile(""" + import pytest + + @pytest.mark.a1 + def test_markers(): + assert True + """) + rec = testdir.inline_run("--strict", "-m", "a1") + rec.assertoutcome(passed=1) + + def test_markers_option_with_plugin_in_current_dir(testdir): testdir.makeconftest('pytest_plugins = "flip_flop"') testdir.makepyfile(flip_flop="""\ @@ -182,6 +219,7 @@ def test_mark_on_pseudo_function(testdir): reprec = testdir.inline_run() reprec.assertoutcome(passed=1) + def test_strict_prohibits_unregistered_markers(testdir): testdir.makepyfile(""" import pytest @@ -195,11 +233,12 @@ def test_strict_prohibits_unregistered_markers(testdir): "*unregisteredmark*not*registered*", ]) + @pytest.mark.parametrize("spec", [ - ("xyz", ("test_one",)), - ("xyz and xyz2", ()), - ("xyz2", ("test_two",)), - ("xyz or xyz2", ("test_one", "test_two"),) + ("xyz", ("test_one",)), + ("xyz and xyz2", ()), + ("xyz2", ("test_two",)), + ("xyz or xyz2", ("test_one", "test_two"),) ]) def test_mark_option(spec, testdir): testdir.makepyfile(""" @@ -218,9 +257,10 @@ def test_mark_option(spec, testdir): assert len(passed) == len(passed_result) assert list(passed) == list(passed_result) + @pytest.mark.parametrize("spec", [ - ("interface", ("test_interface",)), - ("not interface", ("test_nointer",)), + ("interface", ("test_interface",)), + ("not interface", ("test_nointer",)), ]) def test_mark_option_custom(spec, testdir): testdir.makeconftest(""" @@ -243,11 +283,12 @@ def test_mark_option_custom(spec, testdir): assert len(passed) == len(passed_result) assert list(passed) == list(passed_result) + @pytest.mark.parametrize("spec", [ - ("interface", ("test_interface",)), - ("not interface", ("test_nointer", "test_pass")), - ("pass", ("test_pass",)), - ("not pass", ("test_interface", "test_nointer")), + ("interface", ("test_interface",)), + ("not interface", ("test_nointer", "test_pass")), + ("pass", ("test_pass",)), + ("not pass", ("test_interface", "test_nointer")), ]) def test_keyword_option_custom(spec, testdir): testdir.makepyfile(""" @@ -267,9 +308,9 @@ def test_keyword_option_custom(spec, testdir): @pytest.mark.parametrize("spec", [ - ("None", ("test_func[None]",)), - ("1.3", ("test_func[1.3]",)), - ("2-3", ("test_func[2-3]",)) + ("None", ("test_func[None]",)), + ("1.3", ("test_func[1.3]",)), + ("2-3", ("test_func[2-3]",)) ]) def test_keyword_option_parametrize(spec, testdir): 
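# Each spec above pairs a -k keyword expression with the parametrized test
# ids it is expected to select, e.g. "-k 1.3" should pick test_func[1.3].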
testdir.makepyfile(""" @@ -301,7 +342,42 @@ def test_parametrized_collected_from_command_line(testdir): rec.assertoutcome(passed=3) -class TestFunctional: +def test_parametrized_collect_with_wrong_args(testdir): + """Test collect parametrized func with wrong number of args.""" + py_file = testdir.makepyfile(""" + import pytest + + @pytest.mark.parametrize('foo, bar', [(1, 2, 3)]) + def test_func(foo, bar): + pass + """) + + result = testdir.runpytest(py_file) + result.stdout.fnmatch_lines([ + 'E ValueError: In "parametrize" the number of values ((1, 2, 3)) ' + 'must be equal to the number of names ([\'foo\', \'bar\'])' + ]) + + +def test_parametrized_with_kwargs(testdir): + """Test collect parametrized func with wrong number of args.""" + py_file = testdir.makepyfile(""" + import pytest + + @pytest.fixture(params=[1,2]) + def a(request): + return request.param + + @pytest.mark.parametrize(argnames='b', argvalues=[1, 2]) + def test_func(a, b): + pass + """) + + result = testdir.runpytest(py_file) + assert(result.ret == 0) + + +class TestFunctional(object): def test_mark_per_function(self, testdir): p = testdir.makepyfile(""" @@ -326,7 +402,7 @@ class TestFunctional: def test_marklist_per_class(self, testdir): item = testdir.getitem(""" import pytest - class TestClass: + class TestClass(object): pytestmark = [pytest.mark.hello, pytest.mark.world] def test_func(self): assert TestClass.test_func.hello @@ -339,7 +415,7 @@ class TestFunctional: item = testdir.getitem(""" import pytest pytestmark = [pytest.mark.hello, pytest.mark.world] - class TestClass: + class TestClass(object): def test_func(self): assert TestClass.test_func.hello assert TestClass.test_func.world @@ -352,7 +428,7 @@ class TestFunctional: item = testdir.getitem(""" import pytest @pytest.mark.hello - class TestClass: + class TestClass(object): def test_func(self): assert TestClass.test_func.hello """) @@ -363,7 +439,7 @@ class TestFunctional: item = testdir.getitem(""" import pytest @pytest.mark.hello - class TestClass: + class TestClass(object): pytestmark = pytest.mark.world def test_func(self): assert TestClass.test_func.hello @@ -377,7 +453,7 @@ class TestFunctional: p = testdir.makepyfile(""" import pytest pytestmark = pytest.mark.hello("pos1", x=1, y=2) - class TestClass: + class TestClass(object): # classlevel overrides module level pytestmark = pytest.mark.hello(x=3) @pytest.mark.hello("pos0", z=4) @@ -392,29 +468,29 @@ class TestFunctional: assert marker.kwargs == {'x': 1, 'y': 2, 'z': 4} # test the new __iter__ interface - l = list(marker) - assert len(l) == 3 - assert l[0].args == ("pos0",) - assert l[1].args == () - assert l[2].args == ("pos1", ) + values = list(marker) + assert len(values) == 3 + assert values[0].args == ("pos0",) + assert values[1].args == () + assert values[2].args == ("pos1", ) @pytest.mark.xfail(reason='unfixed') def test_merging_markers_deep(self, testdir): # issue 199 - propagate markers into nested classes p = testdir.makepyfile(""" import pytest - class TestA: + class TestA(object): pytestmark = pytest.mark.a def test_b(self): assert True - class TestC: + class TestC(object): # this one didnt get marked def test_d(self): assert True """) items, rec = testdir.inline_genitems(p) for item in items: - print (item, item.keywords) + print(item, item.keywords) assert 'a' in item.keywords def test_mark_decorator_subclass_does_not_propagate_to_base(self, testdir): @@ -422,7 +498,7 @@ class TestFunctional: import pytest @pytest.mark.a - class Base: pass + class Base(object): pass @pytest.mark.b class 
Test1(Base): @@ -434,14 +510,13 @@ class TestFunctional: items, rec = testdir.inline_genitems(p) self.assert_markers(items, test_foo=('a', 'b'), test_bar=('a',)) - @pytest.mark.issue568 @pytest.mark.xfail(reason="markers smear on methods of base classes") def test_mark_should_not_pass_to_siebling_class(self, testdir): p = testdir.makepyfile(""" import pytest - class TestBase: + class TestBase(object): def test_foo(self): pass @@ -459,13 +534,12 @@ class TestFunctional: assert not hasattr(base_item.obj, 'b') assert not hasattr(sub_item_other.obj, 'b') - def test_mark_decorator_baseclasses_merged(self, testdir): p = testdir.makepyfile(""" import pytest @pytest.mark.a - class Base: pass + class Base(object): pass @pytest.mark.b class Base2(Base): pass @@ -485,14 +559,14 @@ class TestFunctional: def test_mark_with_wrong_marker(self, testdir): reprec = testdir.inline_runsource(""" import pytest - class pytestmark: + class pytestmark(object): pass def test_func(): pass """) - l = reprec.getfailedcollections() - assert len(l) == 1 - assert "TypeError" in str(l[0].longrepr) + values = reprec.getfailedcollections() + assert len(values) == 1 + assert "TypeError" in str(values[0].longrepr) def test_mark_dynamically_in_funcarg(self, testdir): testdir.makeconftest(""" @@ -501,8 +575,8 @@ class TestFunctional: def arg(request): request.applymarker(pytest.mark.hello) def pytest_terminal_summary(terminalreporter): - l = terminalreporter.stats['passed'] - terminalreporter.writer.line("keyword: %s" % l[0].keywords) + values = terminalreporter.stats['passed'] + terminalreporter.writer.line("keyword: %s" % values[0].keywords) """) testdir.makepyfile(""" def test_func(arg): @@ -525,10 +599,10 @@ class TestFunctional: item, = items keywords = item.keywords marker = keywords['hello'] - l = list(marker) - assert len(l) == 2 - assert l[0].args == ("pos0",) - assert l[1].args == ("pos1",) + values = list(marker) + assert len(values) == 2 + assert values[0].args == ("pos0",) + assert values[1].args == ("pos1",) def test_no_marker_match_on_unmarked_names(self, testdir): p = testdir.makepyfile(""" @@ -630,7 +704,7 @@ class TestFunctional: reprec.assertoutcome(skipped=1) -class TestKeywordSelection: +class TestKeywordSelection(object): def test_select_simple(self, testdir): file_test = testdir.makepyfile(""" @@ -659,7 +733,7 @@ class TestKeywordSelection: p = testdir.makepyfile(test_select=""" def test_1(): pass - class TestClass: + class TestClass(object): def test_2(self): pass """) @@ -673,7 +747,7 @@ class TestKeywordSelection: item.extra_keyword_matches.add("xxx") """) reprec = testdir.inline_run(p.dirpath(), '-s', '-k', keyword) - py.builtin.print_("keyword", repr(keyword)) + print("keyword", repr(keyword)) passed, skipped, failed = reprec.listoutcomes() assert len(passed) == 1 assert passed[0].nodeid.endswith("test_2") @@ -738,3 +812,50 @@ class TestKeywordSelection: assert_test_is_not_selected("__") assert_test_is_not_selected("()") + + +@pytest.mark.parametrize('argval, expected', [ + (pytest.mark.skip()((1, 2)), + ParameterSet(values=(1, 2), marks=[pytest.mark.skip], id=None)), + (pytest.mark.xfail(pytest.mark.skip()((1, 2))), + ParameterSet(values=(1, 2), + marks=[pytest.mark.xfail, pytest.mark.skip], id=None)), + +]) +@pytest.mark.filterwarnings('ignore') +def test_parameterset_extractfrom(argval, expected): + extracted = ParameterSet.extract_from(argval) + assert extracted == expected + + +def test_legacy_transfer(): + + class FakeModule(object): + pytestmark = [] + + class FakeClass(object): + pytestmark 
= pytest.mark.nofun + + @pytest.mark.fun + def fake_method(self): + pass + + transfer_markers(fake_method, FakeClass, FakeModule) + + # legacy marks transfer smeared + assert fake_method.nofun + assert fake_method.fun + # pristine marks dont transfer + assert fake_method.pytestmark == [pytest.mark.fun.mark] + + +class TestMarkDecorator(object): + + @pytest.mark.parametrize('lhs, rhs, expected', [ + (pytest.mark.foo(), pytest.mark.foo(), True), + (pytest.mark.foo(), pytest.mark.bar(), False), + (pytest.mark.foo(), 'bar', False), + ('foo', pytest.mark.bar(), False) + ]) + def test__eq__(self, lhs, rhs, expected): + assert (lhs == rhs) == expected diff --git a/testing/test_modimport.py b/testing/test_modimport.py new file mode 100644 index 000000000..2ab86bf7a --- /dev/null +++ b/testing/test_modimport.py @@ -0,0 +1,25 @@ +import py +import subprocess +import sys +import pytest +import _pytest + +MODSET = [ + x for x in py.path.local(_pytest.__file__).dirpath().visit('*.py') + if x.purebasename != '__init__' +] + + +@pytest.mark.parametrize('modfile', MODSET, ids=lambda x: x.purebasename) +def test_fileimport(modfile): + # this test ensures all internal packages can import + # without needing the pytest namespace being set + # this is critical for the initialization of xdist + + res = subprocess.call([ + sys.executable, + '-c', 'import sys, py; py.path.local(sys.argv[1]).pyimport()', + modfile.strpath, + ]) + if res: + pytest.fail("command result %s" % res) diff --git a/testing/test_monkeypatch.py b/testing/test_monkeypatch.py index 3fcd20f32..4427908ab 100644 --- a/testing/test_monkeypatch.py +++ b/testing/test_monkeypatch.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import os import sys import textwrap @@ -7,20 +8,16 @@ from _pytest.monkeypatch import MonkeyPatch @pytest.fixture -def mp(request): +def mp(): cwd = os.getcwd() sys_path = list(sys.path) - - def cleanup(): - sys.path[:] = sys_path - os.chdir(cwd) - - request.addfinalizer(cleanup) - return MonkeyPatch() + yield MonkeyPatch() + sys.path[:] = sys_path + os.chdir(cwd) def test_setattr(): - class A: + class A(object): x = 1 monkeypatch = MonkeyPatch() @@ -43,7 +40,7 @@ def test_setattr(): assert A.x == 5 -class TestSetattrWithImportPath: +class TestSetattrWithImportPath(object): def test_string_expression(self, monkeypatch): monkeypatch.setattr("os.path.abspath", lambda x: "hello2") assert os.path.abspath("123") == "hello2" @@ -83,7 +80,7 @@ class TestSetattrWithImportPath: def test_delattr(): - class A: + class A(object): x = 1 monkeypatch = MonkeyPatch() @@ -298,7 +295,7 @@ class SampleNewInherit(SampleNew): pass -class SampleOld: +class SampleOld(object): # oldstyle on python2 @staticmethod def hello(): @@ -322,12 +319,11 @@ def test_issue156_undo_staticmethod(Sample): monkeypatch.undo() assert Sample.hello() + def test_issue1338_name_resolving(): pytest.importorskip('requests') monkeypatch = MonkeyPatch() try: - monkeypatch.delattr('requests.sessions.Session.request') + monkeypatch.delattr('requests.sessions.Session.request') finally: monkeypatch.undo() - - diff --git a/testing/test_nodes.py b/testing/test_nodes.py new file mode 100644 index 000000000..6f4540f99 --- /dev/null +++ b/testing/test_nodes.py @@ -0,0 +1,18 @@ +import pytest + +from _pytest import nodes + + +@pytest.mark.parametrize("baseid, nodeid, expected", ( + ('', '', True), + ('', 'foo', True), + ('', 'foo/bar', True), + ('', 'foo/bar::TestBaz::()', True), + ('foo', 'food', False), + ('foo/bar::TestBaz::()', 'foo/bar', 
False), + ('foo/bar::TestBaz::()', 'foo/bar::TestBop::()', False), + ('foo/bar', 'foo/bar::TestBop::()', True), +)) +def test_ischildnode(baseid, nodeid, expected): + result = nodes.ischildnode(baseid, nodeid) + assert result is expected diff --git a/testing/test_nose.py b/testing/test_nose.py index f54246111..df3e1a94b 100644 --- a/testing/test_nose.py +++ b/testing/test_nose.py @@ -1,22 +1,25 @@ +from __future__ import absolute_import, division, print_function import pytest + def setup_module(mod): mod.nose = pytest.importorskip("nose") + def test_nose_setup(testdir): p = testdir.makepyfile(""" - l = [] + values = [] from nose.tools import with_setup - @with_setup(lambda: l.append(1), lambda: l.append(2)) + @with_setup(lambda: values.append(1), lambda: values.append(2)) def test_hello(): - assert l == [1] + assert values == [1] def test_world(): - assert l == [1,2] + assert values == [1,2] - test_hello.setup = lambda: l.append(1) - test_hello.teardown = lambda: l.append(2) + test_hello.setup = lambda: values.append(1) + test_hello.teardown = lambda: values.append(2) """) result = testdir.runpytest(p, '-p', 'nose') result.assert_outcomes(passed=2) @@ -24,47 +27,48 @@ def test_nose_setup(testdir): def test_setup_func_with_setup_decorator(): from _pytest.nose import call_optional - l = [] + values = [] - class A: + class A(object): @pytest.fixture(autouse=True) def f(self): - l.append(1) + values.append(1) call_optional(A(), "f") - assert not l + assert not values def test_setup_func_not_callable(): from _pytest.nose import call_optional - class A: + class A(object): f = 1 call_optional(A(), "f") + def test_nose_setup_func(testdir): p = testdir.makepyfile(""" from nose.tools import with_setup - l = [] + values = [] def my_setup(): a = 1 - l.append(a) + values.append(a) def my_teardown(): b = 2 - l.append(b) + values.append(b) @with_setup(my_setup, my_teardown) def test_hello(): - print (l) - assert l == [1] + print (values) + assert values == [1] def test_world(): - print (l) - assert l == [1,2] + print (values) + assert values == [1,2] """) result = testdir.runpytest(p, '-p', 'nose') @@ -75,18 +79,18 @@ def test_nose_setup_func_failure(testdir): p = testdir.makepyfile(""" from nose.tools import with_setup - l = [] + values = [] my_setup = lambda x: 1 my_teardown = lambda x: 2 @with_setup(my_setup, my_teardown) def test_hello(): - print (l) - assert l == [1] + print (values) + assert values == [1] def test_world(): - print (l) - assert l == [1,2] + print (values) + assert values == [1,2] """) result = testdir.runpytest(p, '-p', 'nose') @@ -97,13 +101,13 @@ def test_nose_setup_func_failure(testdir): def test_nose_setup_func_failure_2(testdir): testdir.makepyfile(""" - l = [] + values = [] my_setup = 1 my_teardown = 2 def test_hello(): - assert l == [] + assert values == [] test_hello.setup = my_setup test_hello.teardown = my_teardown @@ -111,31 +115,32 @@ def test_nose_setup_func_failure_2(testdir): reprec = testdir.inline_run() reprec.assertoutcome(passed=1) + def test_nose_setup_partial(testdir): pytest.importorskip("functools") p = testdir.makepyfile(""" from functools import partial - l = [] + values = [] def my_setup(x): a = x - l.append(a) + values.append(a) def my_teardown(x): b = x - l.append(b) + values.append(b) my_setup_partial = partial(my_setup, 1) my_teardown_partial = partial(my_teardown, 2) def test_hello(): - print (l) - assert l == [1] + print (values) + assert values == [1] def test_world(): - print (l) - assert l == [1,2] + print (values) + assert values == [1,2] 
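# nose calls these setup/teardown attributes with no arguments, so
# partial() pre-binds x above; the hooks then append 1 and 2 to values,
# mirroring the plain functions in test_nose_setup_func.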
test_hello.setup = my_setup_partial test_hello.teardown = my_teardown_partial @@ -246,31 +251,32 @@ def test_module_level_setup(testdir): def test_nose_style_setup_teardown(testdir): testdir.makepyfile(""" - l = [] + values = [] def setup_module(): - l.append(1) + values.append(1) def teardown_module(): - del l[0] + del values[0] def test_hello(): - assert l == [1] + assert values == [1] def test_world(): - assert l == [1] + assert values == [1] """) result = testdir.runpytest('-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*", ]) + def test_nose_setup_ordering(testdir): testdir.makepyfile(""" def setup_module(mod): mod.visited = True - class TestClass: + class TestClass(object): def setup(self): assert visited def test_first(self): @@ -304,6 +310,7 @@ def test_apiwrapper_problem_issue260(testdir): result = testdir.runpytest() result.assert_outcomes(passed=1) + def test_setup_teardown_linking_issue265(testdir): # we accidentally didnt integrate nose setupstate with normal setupstate # this test ensures that won't happen again @@ -351,6 +358,7 @@ def test_SkipTest_in_test(testdir): reprec = testdir.inline_run() reprec.assertoutcome(skipped=1) + def test_istest_function_decorator(testdir): p = testdir.makepyfile(""" import nose.tools @@ -361,6 +369,7 @@ def test_istest_function_decorator(testdir): result = testdir.runpytest(p) result.assert_outcomes(passed=1) + def test_nottest_function_decorator(testdir): testdir.makepyfile(""" import nose.tools @@ -373,22 +382,24 @@ def test_nottest_function_decorator(testdir): calls = reprec.getreports("pytest_runtest_logreport") assert not calls + def test_istest_class_decorator(testdir): p = testdir.makepyfile(""" import nose.tools @nose.tools.istest - class NotTestPrefix: + class NotTestPrefix(object): def test_method(self): pass """) result = testdir.runpytest(p) result.assert_outcomes(passed=1) + def test_nottest_class_decorator(testdir): testdir.makepyfile(""" import nose.tools @nose.tools.nottest - class TestPrefix: + class TestPrefix(object): def test_method(self): pass """) diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index e933dbb8d..02fdf0ada 100644 --- a/testing/test_parseopt.py +++ b/testing/test_parseopt.py @@ -1,14 +1,17 @@ -from __future__ import with_statement +from __future__ import absolute_import, division, print_function import sys import os -import py, pytest +import py +import pytest from _pytest import config as parseopt + @pytest.fixture def parser(): return parseopt.Parser() -class TestParser: + +class TestParser(object): def test_no_help_by_default(self, capsys): parser = parseopt.Parser(usage="xyz") pytest.raises(SystemExit, lambda: parser.parse(["-h"])) @@ -34,15 +37,16 @@ class TestParser: ) def test_argument_type(self): - argument = parseopt.Argument('-t', dest='abc', type='int') + argument = parseopt.Argument('-t', dest='abc', type=int) assert argument.type is int - argument = parseopt.Argument('-t', dest='abc', type='string') + argument = parseopt.Argument('-t', dest='abc', type=str) assert argument.type is str argument = parseopt.Argument('-t', dest='abc', type=float) assert argument.type is float - with pytest.raises(KeyError): - argument = parseopt.Argument('-t', dest='abc', type='choice') - argument = parseopt.Argument('-t', dest='abc', type='choice', + with pytest.warns(DeprecationWarning): + with pytest.raises(KeyError): + argument = parseopt.Argument('-t', dest='abc', type='choice') + argument = parseopt.Argument('-t', dest='abc', type=str, choices=['red', 'blue']) assert argument.type is 
str @@ -139,7 +143,7 @@ class TestParser: parser.addoption("--hello", dest="hello", action="store") parser.addoption("--world", dest="world", default=42) - class A: + class A(object): pass option = A() @@ -160,12 +164,12 @@ class TestParser: assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] args = parser.parse(['-R', '-S', '4', '2', '-R']) assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] - assert args.R == True - assert args.S == False + assert args.R is True + assert args.S is False args = parser.parse(['-R', '4', '-S', '2']) assert getattr(args, parseopt.FILE_OR_DIR) == ['4', '2'] - assert args.R == True - assert args.S == False + assert args.R is True + assert args.S is False def test_parse_defaultgetter(self): def defaultget(option): @@ -176,8 +180,8 @@ class TestParser: elif option.type is str: option.default = "world" parser = parseopt.Parser(processopt=defaultget) - parser.addoption("--this", dest="this", type="int", action="store") - parser.addoption("--hello", dest="hello", type="string", action="store") + parser.addoption("--this", dest="this", type=int, action="store") + parser.addoption("--hello", dest="hello", type=str, action="store") parser.addoption("--no", dest="no", action="store_true") option = parser.parse([]) assert option.hello == "world" @@ -187,7 +191,7 @@ class TestParser: def test_drop_short_helper(self): parser = py.std.argparse.ArgumentParser(formatter_class=parseopt.DropShorterLongHelpFormatter) parser.add_argument('-t', '--twoword', '--duo', '--two-word', '--two', - help='foo').map_long_option = {'two': 'two-word'} + help='foo').map_long_option = {'two': 'two-word'} # throws error on --deux only! parser.add_argument('-d', '--deuxmots', '--deux-mots', action='store_true', help='foo').map_long_option = {'deux': 'deux-mots'} @@ -237,18 +241,18 @@ class TestParser: assert args.file_or_dir == ['abcd'] def test_drop_short_help0(self, parser, capsys): - parser.addoption('--func-args', '--doit', help = 'foo', + parser.addoption('--func-args', '--doit', help='foo', action='store_true') parser.parse([]) help = parser.optparser.format_help() - assert '--func-args, --doit foo' in help + assert '--func-args, --doit foo' in help # testing would be more helpful with all help generated def test_drop_short_help1(self, parser, capsys): group = parser.getgroup("general") group.addoption('--doit', '--func-args', action='store_true', help='foo') group._addoption("-h", "--help", action="store_true", dest="help", - help="show help message and configuration info") + help="show help message and configuration info") parser.parse(['-h']) help = parser.optparser.format_help() assert '-doit, --func-args foo' in help @@ -272,7 +276,7 @@ def test_argcomplete(testdir, monkeypatch): script = str(testdir.tmpdir.join("test_argcomplete")) pytest_bin = sys.argv[0] if "pytest" not in os.path.basename(pytest_bin): - pytest.skip("need to be run with pytest executable, not %s" %(pytest_bin,)) + pytest.skip("need to be run with pytest executable, not %s" % (pytest_bin,)) with open(str(script), 'w') as fp: # redirect output from argcomplete to stdin and stderr is not trivial @@ -283,7 +287,7 @@ def test_argcomplete(testdir, monkeypatch): # to handle a keyword argument env that replaces os.environ in popen or # extends the copy, advantage: could not forget to restore monkeypatch.setenv('_ARGCOMPLETE', "1") - monkeypatch.setenv('_ARGCOMPLETE_IFS',"\x0b") + monkeypatch.setenv('_ARGCOMPLETE_IFS', "\x0b") monkeypatch.setenv('COMP_WORDBREAKS', ' \\t\\n"\\\'><=;|&(:') arg = '--fu' @@ -296,12 
+300,12 @@ def test_argcomplete(testdir, monkeypatch): elif not result.stdout.str(): pytest.skip("bash provided no output, argcomplete not available?") else: - if py.std.sys.version_info < (2,7): + if py.std.sys.version_info < (2, 7): result.stdout.lines = result.stdout.lines[0].split('\x0b') result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) else: result.stdout.fnmatch_lines(["--funcargs", "--fulltrace"]) - if py.std.sys.version_info < (2,7): + if py.std.sys.version_info < (2, 7): return os.mkdir('test_argcomplete.d') arg = 'test_argc' diff --git a/testing/test_pastebin.py b/testing/test_pastebin.py index 8123424ca..6b1742d14 100644 --- a/testing/test_pastebin.py +++ b/testing/test_pastebin.py @@ -1,8 +1,10 @@ # encoding: utf-8 +from __future__ import absolute_import, division, print_function import sys import pytest -class TestPasteCapture: + +class TestPasteCapture(object): @pytest.fixture def pastebinlist(self, monkeypatch, request): @@ -25,7 +27,7 @@ class TestPasteCapture: assert len(pastebinlist) == 1 s = pastebinlist[0] assert s.find("def test_fail") != -1 - assert reprec.countoutcomes() == [1,1,1] + assert reprec.countoutcomes() == [1, 1, 1] def test_all(self, testdir, pastebinlist): from _pytest.pytester import LineMatcher @@ -39,7 +41,7 @@ class TestPasteCapture: pytest.skip("") """) reprec = testdir.inline_run(testpath, "--pastebin=all", '-v') - assert reprec.countoutcomes() == [1,1,1] + assert reprec.countoutcomes() == [1, 1, 1] assert len(pastebinlist) == 1 contents = pastebinlist[0].decode('utf-8') matcher = LineMatcher(contents.splitlines()) @@ -71,7 +73,7 @@ class TestPasteCapture: ]) -class TestPaste: +class TestPaste(object): @pytest.fixture def pastebin(self, request): @@ -88,7 +90,7 @@ class TestPaste: def mocked(url, data): calls.append((url, data)) - class DummyFile: + class DummyFile(object): def read(self): # part of html of a normal response return b'View raw.' 
@@ -113,5 +115,3 @@ class TestPaste: assert 'lexer=%s' % lexer in data.decode() assert 'code=full-paste-contents' in data.decode() assert 'expiry=1week' in data.decode() - - diff --git a/testing/test_pdb.py b/testing/test_pdb.py index df58dad87..70a5c3c5b 100644 --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import sys import platform @@ -9,11 +10,30 @@ def runpdb_and_get_report(testdir, source): p = testdir.makepyfile(source) result = testdir.runpytest_inprocess("--pdb", p) reports = result.reprec.getreports("pytest_runtest_logreport") - assert len(reports) == 3, reports # setup/call/teardown + assert len(reports) == 3, reports # setup/call/teardown return reports[1] -class TestPDB: +@pytest.fixture +def custom_pdb_calls(): + called = [] + + # install dummy debugger class and track which methods were called on it + class _CustomPdb(object): + def __init__(self, *args, **kwargs): + called.append("init") + + def reset(self): + called.append("reset") + + def interaction(self, *args): + called.append("interaction") + + _pytest._CustomPdb = _CustomPdb + return called + + +class TestPDB(object): @pytest.fixture def pdblist(self, request): @@ -106,6 +126,21 @@ class TestPDB: assert 'debug.me' in rest self.flush(child) + def test_pdb_unittest_skip(self, testdir): + """Test for issue #2137""" + p1 = testdir.makepyfile(""" + import unittest + @unittest.skipIf(True, 'Skipping also with pdb active') + class MyTestCase(unittest.TestCase): + def test_one(self): + assert 0 + """) + child = testdir.spawn_pytest("-rs --pdb %s" % p1) + child.expect('Skipping also with pdb active') + child.expect('1 skipped in') + child.sendeof() + self.flush(child) + def test_pdb_interaction_capture(self, testdir): p1 = testdir.makepyfile(""" def test_1(): @@ -145,7 +180,7 @@ class TestPDB: xxx """) child = testdir.spawn_pytest("--pdb %s" % p1) - #child.expect(".*import pytest.*") + # child.expect(".*import pytest.*") child.expect("(Pdb)") child.sendeof() child.expect("1 error") @@ -158,7 +193,7 @@ class TestPDB: """) p1 = testdir.makepyfile("def test_func(): pass") child = testdir.spawn_pytest("--pdb %s" % p1) - #child.expect(".*import pytest.*") + # child.expect(".*import pytest.*") child.expect("(Pdb)") child.sendeof() self.flush(child) @@ -180,7 +215,7 @@ class TestPDB: rest = child.read().decode("utf-8") assert "1 failed" in rest assert "def test_1" in rest - assert "hello17" in rest # out is captured + assert "hello17" in rest # out is captured self.flush(child) def test_pdb_set_trace_interception(self, testdir): @@ -273,8 +308,8 @@ class TestPDB: rest = child.read().decode("utf8") assert "1 failed" in rest assert "def test_1" in rest - assert "hello17" in rest # out is captured - assert "hello18" in rest # out is captured + assert "hello17" in rest # out is captured + assert "hello18" in rest # out is captured self.flush(child) def test_pdb_used_outside_test(self, testdir): @@ -283,7 +318,7 @@ class TestPDB: pytest.set_trace() x = 5 """) - child = testdir.spawn("%s %s" %(sys.executable, p1)) + child = testdir.spawn("%s %s" % (sys.executable, p1)) child.expect("x = 5") child.sendeof() self.flush(child) @@ -331,22 +366,17 @@ class TestPDB: child.sendeof() self.flush(child) - def test_pdb_custom_cls(self, testdir): - called = [] - - # install dummy debugger class and track which methods were called on it - class _CustomPdb: - def __init__(self, *args, **kwargs): - called.append("init") - - def reset(self): - called.append("reset") - 
- def interaction(self, *args): - called.append("interaction") - - _pytest._CustomPdb = _CustomPdb + def test_pdb_custom_cls(self, testdir, custom_pdb_calls): p1 = testdir.makepyfile("""xxx """) result = testdir.runpytest_inprocess( + "--pdb", "--pdbcls=_pytest:_CustomPdb", p1) + result.stdout.fnmatch_lines([ + "*NameError*xxx*", + "*1 error*", + ]) + assert custom_pdb_calls == ["init", "reset", "interaction"] + def test_pdb_custom_cls_without_pdb(self, testdir, custom_pdb_calls): p1 = testdir.makepyfile("""xxx """) result = testdir.runpytest_inprocess( "--pdbcls=_pytest:_CustomPdb", p1) @@ -354,4 +384,23 @@ class TestPDB: "*NameError*xxx*", "*1 error*", ]) - assert called == ["init", "reset", "interaction"] + assert custom_pdb_calls == [] + + def test_pdb_custom_cls_with_settrace(self, testdir, monkeypatch): + testdir.makepyfile(custom_pdb=""" + class CustomPdb(object): + def set_trace(*args, **kwargs): + print('custom set_trace>') + """) + p1 = testdir.makepyfile(""" + import pytest + + def test_foo(): + pytest.set_trace() + """) + monkeypatch.setenv('PYTHONPATH', str(testdir.tmpdir)) + child = testdir.spawn_pytest("--pdbcls=custom_pdb:CustomPdb %s" % str(p1)) + + child.expect('custom set_trace>') + if child.isalive(): + child.wait() diff --git a/testing/test_pluginmanager.py b/testing/test_pluginmanager.py index 45ad321a3..13c487c26 100644 --- a/testing/test_pluginmanager.py +++ b/testing/test_pluginmanager.py @@ -1,4 +1,5 @@ # encoding: UTF-8 +from __future__ import absolute_import, division, print_function import pytest import py import os @@ -11,7 +12,8 @@ from _pytest.main import EXIT_NOTESTSCOLLECTED, Session def pytestpm(): return PytestPluginManager() -class TestPytestPluginInteractions: + +class TestPytestPluginInteractions(object): def test_addhooks_conftestplugin(self, testdir): testdir.makepyfile(newhooks=""" def pytest_myhook(xyz): @@ -28,9 +30,9 @@ class TestPytestPluginInteractions: config = get_config() pm = config.pluginmanager pm.hook.pytest_addhooks.call_historic( - kwargs=dict(pluginmanager=config.pluginmanager)) + kwargs=dict(pluginmanager=config.pluginmanager)) config.pluginmanager._importconftest(conf) - #print(config.pluginmanager.get_plugins()) + # print(config.pluginmanager.get_plugins()) res = config.hook.pytest_myhook(xyz=10) assert res == [11] @@ -83,50 +85,50 @@ class TestPytestPluginInteractions: def test_configure(self, testdir): config = testdir.parseconfig() - l = [] + values = [] - class A: + class A(object): def pytest_configure(self, config): - l.append(self) + values.append(self) config.pluginmanager.register(A()) - assert len(l) == 0 + assert len(values) == 0 config._do_configure() - assert len(l) == 1 + assert len(values) == 1 config.pluginmanager.register(A()) # leads to a configured() plugin - assert len(l) == 2 - assert l[0] != l[1] + assert len(values) == 2 + assert values[0] != values[1] config._ensure_unconfigure() config.pluginmanager.register(A()) - assert len(l) == 2 + assert len(values) == 2 def test_hook_tracing(self): pytestpm = get_config().pluginmanager # fully initialized with plugins saveindent = [] - class api1: + class api1(object): def pytest_plugin_registered(self): saveindent.append(pytestpm.trace.root.indent) - class api2: + class api2(object): def pytest_plugin_registered(self): saveindent.append(pytestpm.trace.root.indent) raise ValueError() - l = [] - pytestpm.trace.root.setwriter(l.append) + values = [] + pytestpm.trace.root.setwriter(values.append) undo = pytestpm.enable_tracing() try: indent =
pytestpm.trace.root.indent p = api1() pytestpm.register(p) assert pytestpm.trace.root.indent == indent - assert len(l) >= 2 - assert 'pytest_plugin_registered' in l[0] - assert 'finish' in l[1] + assert len(values) >= 2 + assert 'pytest_plugin_registered' in values[0] + assert 'finish' in values[1] - l[:] = [] + values[:] = [] with pytest.raises(ValueError): pytestpm.register(api2()) assert pytestpm.trace.root.indent == indent @@ -156,11 +158,11 @@ class TestPytestPluginInteractions: def test_warn_on_deprecated_multicall(self, pytestpm): warnings = [] - class get_warnings: + class get_warnings(object): def pytest_logwarning(self, message): warnings.append(message) - class Plugin: + class Plugin(object): def pytest_configure(self, __multicall__): pass @@ -173,11 +175,11 @@ class TestPytestPluginInteractions: def test_warn_on_deprecated_addhooks(self, pytestpm): warnings = [] - class get_warnings: + class get_warnings(object): def pytest_logwarning(self, code, fslocation, message, nodeid): warnings.append(message) - class Plugin: + class Plugin(object): def pytest_testhook(): pass @@ -196,6 +198,7 @@ def test_namespace_has_default_and_env_plugins(testdir): result = testdir.runpython(p) assert result.ret == 0 + def test_default_markers(testdir): result = testdir.runpytest("--markers") result.stdout.fnmatch_lines([ @@ -221,18 +224,18 @@ def test_importplugin_error_message(testdir, pytestpm): assert py.std.re.match(expected, str(excinfo.value)) -class TestPytestPluginManager: +class TestPytestPluginManager(object): def test_register_imported_modules(self): pm = PytestPluginManager() mod = py.std.types.ModuleType("x.y.pytest_hello") pm.register(mod) assert pm.is_registered(mod) - l = pm.get_plugins() - assert mod in l + values = pm.get_plugins() + assert mod in values pytest.raises(ValueError, "pm.register(mod)") pytest.raises(ValueError, lambda: pm.register(mod)) - #assert not pm.is_registered(mod2) - assert pm.get_plugins() == l + # assert not pm.is_registered(mod2) + assert pm.get_plugins() == values def test_canonical_import(self, monkeypatch): mod = py.std.types.ModuleType("pytest_xyz") @@ -258,7 +261,7 @@ class TestPytestPluginManager: mod.pytest_plugins = "pytest_a" aplugin = testdir.makepyfile(pytest_a="#") reprec = testdir.make_hook_recorder(pytestpm) - #syspath.prepend(aplugin.dirpath()) + # syspath.prepend(aplugin.dirpath()) py.std.sys.path.insert(0, str(aplugin.dirpath())) pytestpm.consider_module(mod) call = reprec.getcall(pytestpm.hook.pytest_plugin_registered.name) @@ -266,8 +269,8 @@ class TestPytestPluginManager: # check that it is not registered twice pytestpm.consider_module(mod) - l = reprec.getcalls("pytest_plugin_registered") - assert len(l) == 1 + values = reprec.getcalls("pytest_plugin_registered") + assert len(values) == 1 def test_consider_env_fails_to_import(self, monkeypatch, pytestpm): monkeypatch.setenv('PYTEST_PLUGINS', 'nonexisting', prepend=",") @@ -284,8 +287,8 @@ class TestPytestPluginManager: result = testdir.runpytest("-rw", "-p", "skipping1", syspathinsert=True) assert result.ret == EXIT_NOTESTSCOLLECTED result.stdout.fnmatch_lines([ - "WI1*skipped plugin*skipping1*hello*", - "WI1*skipped plugin*skipping2*hello*", + "*skipped plugin*skipping1*hello*", + "*skipped plugin*skipping2*hello*", ]) def test_consider_env_plugin_instantiation(self, testdir, monkeypatch, pytestpm): @@ -348,10 +351,10 @@ class TestPytestPluginManager: pytestpm.consider_conftest(mod) -class TestPytestPluginManagerBootstrapming: +class TestPytestPluginManagerBootstrapming(object): def 
test_preparse_args(self, pytestpm): pytest.raises(ImportError, lambda: - pytestpm.consider_preparse(["xyz", "-p", "hello123"])) + pytestpm.consider_preparse(["xyz", "-p", "hello123"])) def test_plugin_prevent_register(self, pytestpm): pytestpm.consider_preparse(["xyz", "-p", "no:abc"]) diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 14548808c..0e8669698 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pytest import os from _pytest.pytester import HookRecorder @@ -12,7 +13,7 @@ def test_make_hook_recorder(testdir): pytest.xfail("internal reportrecorder tests need refactoring") - class rep: + class rep(object): excinfo = None passed = False failed = True @@ -25,7 +26,7 @@ def test_make_hook_recorder(testdir): failures = recorder.getfailures() assert failures == [rep] - class rep: + class rep(object): excinfo = None passed = False failed = False @@ -63,6 +64,7 @@ def test_parseconfig(testdir): assert config2 != config1 assert config1 != pytest.config + def test_testdir_runs_with_plugin(testdir): testdir.makepyfile(""" pytest_plugins = "pytester" @@ -74,9 +76,10 @@ def test_testdir_runs_with_plugin(testdir): def make_holder(): - class apiclass: + class apiclass(object): def pytest_xyz(self, arg): "x" + def pytest_xyz_noarg(self): "x" @@ -116,6 +119,7 @@ def test_makepyfile_unicode(testdir): unichr = chr testdir.makepyfile(unichr(0xfffd)) + def test_inline_run_clean_modules(testdir): test_mod = testdir.makepyfile("def test_foo(): assert True") result = testdir.inline_run(str(test_mod)) @@ -124,3 +128,11 @@ def test_inline_run_clean_modules(testdir): test_mod.write("def test_foo(): assert False") result2 = testdir.inline_run(str(test_mod)) assert result2.ret == EXIT_TESTSFAILED + + +def test_assert_outcomes_after_pytest_erro(testdir): + testdir.makepyfile("def test_foo(): assert True") + + result = testdir.runpytest('--unexpected-argument') + with pytest.raises(ValueError, message="Pytest terminal report not found"): + result.assert_outcomes(passed=0) diff --git a/testing/test_recwarn.py b/testing/test_recwarn.py index 87e5846c2..481bf0a04 100644 --- a/testing/test_recwarn.py +++ b/testing/test_recwarn.py @@ -1,5 +1,8 @@ +from __future__ import absolute_import, division, print_function import warnings +import re import py + import pytest from _pytest.recwarn import WarningsRecorder @@ -7,25 +10,19 @@ from _pytest.recwarn import WarningsRecorder def test_recwarn_functional(testdir): reprec = testdir.inline_runsource(""" import warnings - oldwarn = warnings.showwarning def test_method(recwarn): - assert warnings.showwarning != oldwarn warnings.warn("hello") warn = recwarn.pop() assert isinstance(warn.message, UserWarning) - def test_finalized(): - assert warnings.showwarning == oldwarn """) res = reprec.countoutcomes() - assert tuple(res) == (2, 0, 0), res + assert tuple(res) == (1, 0, 0), res class TestWarningsRecorderChecker(object): - def test_recording(self, recwarn): - showwarning = py.std.warnings.showwarning + def test_recording(self): rec = WarningsRecorder() with rec: - assert py.std.warnings.showwarning != showwarning assert not rec.list py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13) assert len(rec.list) == 1 @@ -33,14 +30,12 @@ class TestWarningsRecorderChecker(object): assert len(rec.list) == 2 warn = rec.pop() assert str(warn.message) == "hello" - l = rec.list + values = rec.list rec.clear() assert len(rec.list) == 0 - assert l is rec.list 
+ assert values is rec.list pytest.raises(AssertionError, "rec.pop()") - assert showwarning == py.std.warnings.showwarning - def test_typechecking(self): from _pytest.recwarn import WarningsChecker with pytest.raises(TypeError): @@ -81,7 +76,7 @@ class TestDeprecatedCall(object): def test_deprecated_call_raises(self): with pytest.raises(AssertionError) as excinfo: pytest.deprecated_call(self.dep, 3, 5) - assert str(excinfo).find("did not produce") != -1 + assert 'Did not produce' in str(excinfo) def test_deprecated_call(self): pytest.deprecated_call(self.dep, 0, 5) @@ -110,50 +105,70 @@ class TestDeprecatedCall(object): pytest.deprecated_call(self.dep_explicit, 0) pytest.deprecated_call(self.dep_explicit, 0) - def test_deprecated_call_as_context_manager_no_warning(self): - with pytest.raises(pytest.fail.Exception) as ex: - with pytest.deprecated_call(): - self.dep(1) - assert str(ex.value) == "DID NOT WARN" - - def test_deprecated_call_as_context_manager(self): - with pytest.deprecated_call(): - self.dep(0) - - def test_deprecated_call_pending(self): + @pytest.mark.parametrize('mode', ['context_manager', 'call']) + def test_deprecated_call_no_warning(self, mode): + """Ensure deprecated_call() raises the expected failure when its block/function does + not raise a deprecation warning. + """ def f(): - py.std.warnings.warn(PendingDeprecationWarning("hi")) - pytest.deprecated_call(f) + pass + + msg = 'Did not produce DeprecationWarning or PendingDeprecationWarning' + with pytest.raises(AssertionError, match=msg): + if mode == 'call': + pytest.deprecated_call(f) + else: + with pytest.deprecated_call(): + f() + + @pytest.mark.parametrize('warning_type', [PendingDeprecationWarning, DeprecationWarning]) + @pytest.mark.parametrize('mode', ['context_manager', 'call']) + @pytest.mark.parametrize('call_f_first', [True, False]) + @pytest.mark.filterwarnings('ignore') + def test_deprecated_call_modes(self, warning_type, mode, call_f_first): + """Ensure deprecated_call() captures a deprecation warning as expected inside its + block/function. + """ + def f(): + warnings.warn(warning_type("hi")) + return 10 + + # ensure deprecated_call() can capture the warning even if it has already been triggered + if call_f_first: + assert f() == 10 + if mode == 'call': + assert pytest.deprecated_call(f) == 10 + else: + with pytest.deprecated_call(): + assert f() == 10 + + @pytest.mark.parametrize('mode', ['context_manager', 'call']) + def test_deprecated_call_exception_is_raised(self, mode): + """If the block of the code being tested by deprecated_call() raises an exception, + it must raise the exception undisturbed. + """ + def f(): + raise ValueError('some exception') + + with pytest.raises(ValueError, match='some exception'): + if mode == 'call': + pytest.deprecated_call(f) + else: + with pytest.deprecated_call(): + f() def test_deprecated_call_specificity(self): other_warnings = [Warning, UserWarning, SyntaxWarning, RuntimeWarning, FutureWarning, ImportWarning, UnicodeWarning] for warning in other_warnings: def f(): - py.std.warnings.warn(warning("hi")) + warnings.warn(warning("hi")) + with pytest.raises(AssertionError): pytest.deprecated_call(f) - - def test_deprecated_function_already_called(self, testdir): - """deprecated_call should be able to catch a call to a deprecated - function even if that function has already been called in the same - module. See #1190.
- """ - testdir.makepyfile(""" - import warnings - import pytest - - def deprecated_function(): - warnings.warn("deprecated", DeprecationWarning) - - def test_one(): - deprecated_function() - - def test_two(): - pytest.deprecated_call(deprecated_function) - """) - result = testdir.runpytest() - result.stdout.fnmatch_lines('*=== 2 passed in *===') + with pytest.raises(AssertionError): + with pytest.deprecated_call(): + f() class TestWarns(object): @@ -185,17 +200,38 @@ class TestWarns(object): with pytest.warns(RuntimeWarning): warnings.warn("runtime", RuntimeWarning) - with pytest.raises(pytest.fail.Exception): - with pytest.warns(RuntimeWarning): - warnings.warn("user", UserWarning) - - with pytest.raises(pytest.fail.Exception): - with pytest.warns(UserWarning): - warnings.warn("runtime", RuntimeWarning) - with pytest.warns(UserWarning): warnings.warn("user", UserWarning) + with pytest.raises(pytest.fail.Exception) as excinfo: + with pytest.warns(RuntimeWarning): + warnings.warn("user", UserWarning) + excinfo.match(r"DID NOT WARN. No warnings of type \(.+RuntimeWarning.+,\) was emitted. " + r"The list of emitted warnings is: \[UserWarning\('user',\)\].") + + with pytest.raises(pytest.fail.Exception) as excinfo: + with pytest.warns(UserWarning): + warnings.warn("runtime", RuntimeWarning) + excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. " + r"The list of emitted warnings is: \[RuntimeWarning\('runtime',\)\].") + + with pytest.raises(pytest.fail.Exception) as excinfo: + with pytest.warns(UserWarning): + pass + excinfo.match(r"DID NOT WARN. No warnings of type \(.+UserWarning.+,\) was emitted. " + r"The list of emitted warnings is: \[\].") + + warning_classes = (UserWarning, FutureWarning) + with pytest.raises(pytest.fail.Exception) as excinfo: + with pytest.warns(warning_classes) as warninfo: + warnings.warn("runtime", RuntimeWarning) + warnings.warn("import", ImportWarning) + + message_template = ("DID NOT WARN. No warnings of type {0} was emitted. 
" + "The list of emitted warnings is: {1}.") + excinfo.match(re.escape(message_template.format(warning_classes, + [each.message for each in warninfo]))) + def test_record(self): with pytest.warns(UserWarning) as record: warnings.warn("user", UserWarning) @@ -212,6 +248,29 @@ class TestWarns(object): assert str(record[0].message) == "user" assert str(record[1].message) == "runtime" + def test_record_by_subclass(self): + with pytest.warns(Warning) as record: + warnings.warn("user", UserWarning) + warnings.warn("runtime", RuntimeWarning) + + assert len(record) == 2 + assert str(record[0].message) == "user" + assert str(record[1].message) == "runtime" + + class MyUserWarning(UserWarning): + pass + + class MyRuntimeWarning(RuntimeWarning): + pass + + with pytest.warns((UserWarning, RuntimeWarning)) as record: + warnings.warn("user", MyUserWarning) + warnings.warn("runtime", MyRuntimeWarning) + + assert len(record) == 2 + assert str(record[0].message) == "user" + assert str(record[1].message) == "runtime" + def test_double_test(self, testdir): """If a test is run again, the warning should still be raised""" testdir.makepyfile(''' diff --git a/testing/test_resultlog.py b/testing/test_resultlog.py index e2d4fc263..b7dd2687c 100644 --- a/testing/test_resultlog.py +++ b/testing/test_resultlog.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import os import _pytest._code @@ -5,7 +6,7 @@ import py import pytest from _pytest.main import Node, Item, FSCollector from _pytest.resultlog import generic_path, ResultLog, \ - pytest_configure, pytest_unconfigure + pytest_configure, pytest_unconfigure def test_generic_path(testdir): @@ -13,10 +14,10 @@ def test_generic_path(testdir): config = testdir.parseconfig() session = Session(config) p1 = Node('a', config=config, session=session) - #assert p1.fspath is None + # assert p1.fspath is None p2 = Node('B', parent=p1) - p3 = Node('()', parent = p2) - item = Item('c', parent = p3) + p3 = Node('()', parent=p2) + item = Item('c', parent=p3) res = generic_path(item) assert res == 'a.B().c' @@ -24,13 +25,14 @@ def test_generic_path(testdir): p0 = FSCollector('proj/test', config=config, session=session) p1 = FSCollector('proj/test/a', parent=p0) p2 = Node('B', parent=p1) - p3 = Node('()', parent = p2) + p3 = Node('()', parent=p2) p4 = Node('c', parent=p3) - item = Item('[1]', parent = p4) + item = Item('[1]', parent=p4) res = generic_path(item) assert res == 'test/a:B().c[1]' + def test_write_log_entry(): reslog = ResultLog(None, None) reslog.logfile = py.io.TextIO() @@ -67,10 +69,10 @@ def test_write_log_entry(): entry_lines = entry.splitlines() assert len(entry_lines) == 5 assert entry_lines[0] == 'F name' - assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()] + assert entry_lines[1:] == [' ' + line for line in longrepr.splitlines()] -class TestWithFunctionIntegration: +class TestWithFunctionIntegration(object): # XXX (hpk) i think that the resultlog plugin should # provide a Parser object so that one can remain # ignorant regarding formatting details. @@ -143,7 +145,7 @@ class TestWithFunctionIntegration: assert entry_lines[0].startswith('! 
') if style != "native": - assert os.path.basename(__file__)[:-9] in entry_lines[0] #.pyc/class + assert os.path.basename(__file__)[:-9] in entry_lines[0] # .pyc/class assert entry_lines[-1][0] == ' ' assert 'ValueError' in entry @@ -175,6 +177,7 @@ def test_generic(testdir, LineMatcher): "x *:test_xfail_norun", ]) + def test_makedir_for_resultlog(testdir, LineMatcher): """--resultlog should automatically create directories for the log file""" testdir.plugins.append("resultlog") @@ -223,5 +226,3 @@ def test_failure_issue380(testdir): """) result = testdir.runpytest("--resultlog=log") assert result.ret == 2 - - diff --git a/testing/test_runner.py b/testing/test_runner.py index 727defa92..84d8f6c71 100644 --- a/testing/test_runner.py +++ b/testing/test_runner.py @@ -1,23 +1,24 @@ # -*- coding: utf-8 -*- -from __future__ import with_statement +from __future__ import absolute_import, division, print_function import _pytest._code import os import py import pytest import sys -from _pytest import runner, main +from _pytest import runner, main, outcomes -class TestSetupState: + +class TestSetupState(object): def test_setup(self, testdir): ss = runner.SetupState() item = testdir.getitem("def test_func(): pass") - l = [1] + values = [1] ss.prepare(item) - ss.addfinalizer(l.pop, colitem=item) - assert l + ss.addfinalizer(values.pop, colitem=item) + assert values ss._pop_and_teardown() - assert not l + assert not values def test_teardown_exact_stack_empty(self, testdir): item = testdir.getitem("def test_func(): pass") @@ -31,7 +32,7 @@ class TestSetupState: def setup_module(mod): raise ValueError(42) def test_func(): pass - """) # noqa + """) # noqa ss = runner.SetupState() pytest.raises(ValueError, lambda: ss.prepare(item)) pytest.raises(ValueError, lambda: ss.prepare(item)) @@ -39,11 +40,14 @@ class TestSetupState: def test_teardown_multiple_one_fails(self, testdir): r = [] - def fin1(): r.append('fin1') + def fin1(): + r.append('fin1') - def fin2(): raise Exception('oops') + def fin2(): + raise Exception('oops') - def fin3(): r.append('fin3') + def fin3(): + r.append('fin3') item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() @@ -58,9 +62,11 @@ class TestSetupState: def test_teardown_multiple_fail(self, testdir): # Ensure the first exception is the one which is re-raised. # Ideally both would be reported however. 
- def fin1(): raise Exception('oops1') + def fin1(): + raise Exception('oops1') - def fin2(): raise Exception('oops2') + def fin2(): + raise Exception('oops2') item = testdir.getitem("def test_func(): pass") ss = runner.SetupState() @@ -71,7 +77,7 @@ class TestSetupState: assert err.value.args == ('oops2',) -class BaseFunctionalTests: +class BaseFunctionalTests(object): def test_passfunction(self, testdir): reports = testdir.runitem(""" def test_func(): @@ -94,7 +100,7 @@ class BaseFunctionalTests: assert rep.failed assert rep.when == "call" assert rep.outcome == "failed" - #assert isinstance(rep.longrepr, ReprExceptionInfo) + # assert isinstance(rep.longrepr, ReprExceptionInfo) def test_skipfunction(self, testdir): reports = testdir.runitem(""" @@ -107,12 +113,12 @@ class BaseFunctionalTests: assert not rep.passed assert rep.skipped assert rep.outcome == "skipped" - #assert rep.skipped.when == "call" - #assert rep.skipped.when == "call" - #assert rep.skipped == "%sreason == "hello" - #assert rep.skipped.location.lineno == 3 - #assert rep.skipped.location.path - #assert not rep.skipped.failurerepr + # assert rep.skipped.when == "call" + # assert rep.skipped.when == "call" + # assert rep.skipped == "%sreason == "hello" + # assert rep.skipped.location.lineno == 3 + # assert rep.skipped.location.path + # assert not rep.skipped.failurerepr def test_skip_in_setup_function(self, testdir): reports = testdir.runitem(""" @@ -127,11 +133,11 @@ class BaseFunctionalTests: assert not rep.failed assert not rep.passed assert rep.skipped - #assert rep.skipped.reason == "hello" - #assert rep.skipped.location.lineno == 3 - #assert rep.skipped.location.lineno == 3 + # assert rep.skipped.reason == "hello" + # assert rep.skipped.location.lineno == 3 + # assert rep.skipped.location.lineno == 3 assert len(reports) == 2 - assert reports[1].passed # teardown + assert reports[1].passed # teardown def test_failure_in_setup_function(self, testdir): reports = testdir.runitem(""" @@ -163,8 +169,8 @@ class BaseFunctionalTests: assert not rep.passed assert rep.failed assert rep.when == "teardown" - #assert rep.longrepr.reprcrash.lineno == 3 - #assert rep.longrepr.reprtraceback.reprentries + # assert rep.longrepr.reprcrash.lineno == 3 + # assert rep.longrepr.reprtraceback.reprentries def test_custom_failure_repr(self, testdir): testdir.makepyfile(conftest=""" @@ -182,10 +188,10 @@ class BaseFunctionalTests: assert not rep.skipped assert not rep.passed assert rep.failed - #assert rep.outcome.when == "call" - #assert rep.failed.where.lineno == 3 - #assert rep.failed.where.path.basename == "test_func.py" - #assert rep.failed.failurerepr == "hello" + # assert rep.outcome.when == "call" + # assert rep.failed.where.lineno == 3 + # assert rep.failed.where.path.basename == "test_func.py" + # assert rep.failed.failurerepr == "hello" def test_teardown_final_returncode(self, testdir): rec = testdir.inline_runsource(""" @@ -200,7 +206,7 @@ class BaseFunctionalTests: rec = testdir.inline_runsource(""" import pytest - class TestClass: + class TestClass(object): def test_method(self): pass def teardown_class(cls): @@ -220,14 +226,14 @@ class BaseFunctionalTests: raise ValueError(42) """) reps = rec.getreports("pytest_runtest_logreport") - print (reps) + print(reps) for i in range(2): assert reps[i].nodeid.endswith("test_method") assert reps[i].passed assert reps[2].when == "teardown" assert reps[2].failed assert len(reps) == 6 - for i in range(3,5): + for i in range(3, 5): assert reps[i].nodeid.endswith("test_func") assert 
reps[i].passed assert reps[5].when == "teardown" @@ -239,7 +245,7 @@ class BaseFunctionalTests: rec = testdir.inline_runsource(""" import pytest - class TestClass: + class TestClass(object): def teardown_method(self, x, y, z): pass @@ -247,7 +253,7 @@ class BaseFunctionalTests: assert True """) reps = rec.getreports("pytest_runtest_logreport") - print (reps) + print(reps) assert len(reps) == 3 # assert reps[0].nodeid.endswith("test_method") @@ -262,11 +268,11 @@ class BaseFunctionalTests: assert reps[2].failed assert reps[2].when == "teardown" assert reps[2].longrepr.reprcrash.message in ( - # python3 error - "TypeError: teardown_method() missing 2 required positional arguments: 'y' and 'z'", - # python2 error - 'TypeError: teardown_method() takes exactly 4 arguments (2 given)' - ) + # python3 error + "TypeError: teardown_method() missing 2 required positional arguments: 'y' and 'z'", + # python2 error + 'TypeError: teardown_method() takes exactly 4 arguments (2 given)' + ) def test_failure_in_setup_function_ignores_custom_repr(self, testdir): testdir.makepyfile(conftest=""" @@ -287,10 +293,10 @@ class BaseFunctionalTests: assert not rep.skipped assert not rep.passed assert rep.failed - #assert rep.outcome.when == "setup" - #assert rep.outcome.where.lineno == 3 - #assert rep.outcome.where.path.basename == "test_func.py" - #assert instanace(rep.failed.failurerepr, PythonFailureRepr) + # assert rep.outcome.when == "setup" + # assert rep.outcome.where.lineno == 3 + # assert rep.outcome.where.path.basename == "test_func.py" + # assert instanace(rep.failed.failurerepr, PythonFailureRepr) def test_systemexit_does_not_bail_out(self, testdir): try: @@ -316,6 +322,7 @@ class BaseFunctionalTests: else: pytest.fail("did not raise") + class TestExecutionNonForked(BaseFunctionalTests): def getrunner(self): def f(item): @@ -333,6 +340,7 @@ class TestExecutionNonForked(BaseFunctionalTests): else: pytest.fail("did not raise") + class TestExecutionForked(BaseFunctionalTests): pytestmark = pytest.mark.skipif("not hasattr(os, 'fork')") @@ -351,12 +359,13 @@ class TestExecutionForked(BaseFunctionalTests): assert rep.failed assert rep.when == "???" -class TestSessionReports: + +class TestSessionReports(object): def test_collect_result(self, testdir): col = testdir.getmodulecol(""" def test_func1(): pass - class TestClass: + class TestClass(object): pass """) rep = runner.collect_one_node(col) @@ -380,6 +389,7 @@ reporttypes = [ runner.CollectReport, ] + @pytest.mark.parametrize('reporttype', reporttypes, ids=[x.__name__ for x in reporttypes]) def test_report_extra_parameters(reporttype): if hasattr(py.std.inspect, 'signature'): @@ -390,12 +400,13 @@ def test_report_extra_parameters(reporttype): report = reporttype(newthing=1, **basekw) assert report.newthing == 1 + def test_callinfo(): ci = runner.CallInfo(lambda: 0, '123') assert ci.when == "123" assert ci.result == 0 assert "result" in repr(ci) - ci = runner.CallInfo(lambda: 0/0, '123') + ci = runner.CallInfo(lambda: 0 / 0, '123') assert ci.when == "123" assert not hasattr(ci, 'result') assert ci.excinfo @@ -403,13 +414,15 @@ def test_callinfo(): # design question: do we want general hooks in python files? # then something like the following functional tests makes sense + + @pytest.mark.xfail def test_runtest_in_module_ordering(testdir): p1 = testdir.makepyfile(""" import pytest def pytest_runtest_setup(item): # runs after class-level! 
item.function.mylist.append("module") - class TestClass: + class TestClass(object): def pytest_runtest_setup(self, item): assert not hasattr(item.function, 'mylist') item.function.mylist = ['class'] @@ -436,9 +449,18 @@ def test_runtest_in_module_ordering(testdir): def test_outcomeexception_exceptionattributes(): - outcome = runner.OutcomeException('test') + outcome = outcomes.OutcomeException('test') assert outcome.args[0] == outcome.msg + +def test_outcomeexception_passes_except_Exception(): + with pytest.raises(outcomes.OutcomeException): + try: + raise outcomes.OutcomeException('test') + except Exception: + pass + + def test_pytest_exit(): try: pytest.exit("hello") @@ -446,6 +468,7 @@ def test_pytest_exit(): excinfo = _pytest._code.ExceptionInfo() assert excinfo.errisinstance(KeyboardInterrupt) + def test_pytest_fail(): try: pytest.fail("hello") @@ -454,6 +477,7 @@ def test_pytest_fail(): s = excinfo.exconly(tryshort=True) assert s.startswith("Failed") + def test_pytest_exit_msg(testdir): testdir.makeconftest(""" import pytest @@ -466,6 +490,7 @@ def test_pytest_exit_msg(testdir): "Exit: oh noes", ]) + def test_pytest_fail_notrace(testdir): testdir.makepyfile(""" import pytest @@ -513,12 +538,12 @@ def test_pytest_no_tests_collected_exit_status(testdir): assert 1 """) result = testdir.runpytest() - result.stdout.fnmatch_lines('*collected 1 items*') + result.stdout.fnmatch_lines('*collected 1 item*') result.stdout.fnmatch_lines('*1 passed*') assert result.ret == main.EXIT_OK result = testdir.runpytest('-k nonmatch') - result.stdout.fnmatch_lines('*collected 1 items*') + result.stdout.fnmatch_lines('*collected 1 item*') result.stdout.fnmatch_lines('*1 deselected*') assert result.ret == main.EXIT_NOTESTSCOLLECTED @@ -531,6 +556,7 @@ def test_exception_printing_skip(): s = excinfo.exconly(tryshort=True) assert s.startswith("Skipped") + def test_importorskip(monkeypatch): importorskip = pytest.importorskip @@ -540,8 +566,8 @@ def test_importorskip(monkeypatch): try: sys = importorskip("sys") # noqa assert sys == py.std.sys - #path = pytest.importorskip("os.path") - #assert path == py.std.os.path + # path = pytest.importorskip("os.path") + # assert path == py.std.os.path excinfo = pytest.raises(pytest.skip.Exception, f) path = py.path.local(excinfo.getrepr().reprcrash.path) # check that importorskip reports the actual call @@ -561,10 +587,12 @@ def test_importorskip(monkeypatch): print(_pytest._code.ExceptionInfo()) pytest.fail("spurious skip") + def test_importorskip_imports_last_module_part(): ospath = pytest.importorskip("os.path") assert os.path == ospath + def test_importorskip_dev_module(monkeypatch): try: mod = py.std.types.ModuleType("mockmodule") @@ -680,7 +708,9 @@ def test_store_except_info_on_eror(): sys.last_traceback and friends. 
""" # Simulate item that raises a specific exception - class ItemThatRaises: + class ItemThatRaises(object): + nodeid = 'item_that_raises' + def runtest(self): raise IndexError('TEST') try: @@ -693,7 +723,32 @@ def test_store_except_info_on_eror(): assert sys.last_traceback -class TestReportContents: +def test_current_test_env_var(testdir, monkeypatch): + pytest_current_test_vars = [] + monkeypatch.setattr(sys, 'pytest_current_test_vars', pytest_current_test_vars, raising=False) + testdir.makepyfile(''' + import pytest + import sys + import os + + @pytest.fixture + def fix(): + sys.pytest_current_test_vars.append(('setup', os.environ['PYTEST_CURRENT_TEST'])) + yield + sys.pytest_current_test_vars.append(('teardown', os.environ['PYTEST_CURRENT_TEST'])) + + def test(fix): + sys.pytest_current_test_vars.append(('call', os.environ['PYTEST_CURRENT_TEST'])) + ''') + result = testdir.runpytest_inprocess() + assert result.ret == 0 + test_id = 'test_current_test_env_var.py::test' + assert pytest_current_test_vars == [ + ('setup', test_id + ' (setup)'), ('call', test_id + ' (call)'), ('teardown', test_id + ' (teardown)')] + assert 'PYTEST_CURRENT_TEST' not in os.environ + + +class TestReportContents(object): """ Test user-level API of ``TestReport`` objects. """ @@ -754,5 +809,3 @@ class TestReportContents: rep = reports[1] assert rep.capstdout == '' assert rep.capstderr == '' - - diff --git a/testing/test_runner_xunit.py b/testing/test_runner_xunit.py index e1f0924c6..fc931f867 100644 --- a/testing/test_runner_xunit.py +++ b/testing/test_runner_xunit.py @@ -1,6 +1,8 @@ -# -# test correct setup/teardowns at -# module, class, and instance level +""" + test correct setup/teardowns at + module, class, and instance level +""" +from __future__ import absolute_import, division, print_function import pytest @@ -24,7 +26,7 @@ def test_module_and_function_setup(testdir): assert modlevel[0] == 42 assert test_modlevel.answer == 17 - class TestFromClass: + class TestFromClass(object): def test_module(self): assert modlevel[0] == 42 assert not hasattr(test_modlevel, 'answer') @@ -34,22 +36,24 @@ def test_module_and_function_setup(testdir): rep = reprec.matchreport("test_module") assert rep.passed + def test_module_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" - l = [] + values = [] def setup_module(module): - l.append(1) + values.append(1) 0/0 def test_nothing(): pass def teardown_module(module): - l.append(2) + values.append(2) """) reprec.assertoutcome(failed=1) calls = reprec.getcalls("pytest_runtest_setup") - assert calls[0].item.module.l == [1] + assert calls[0].item.module.values == [1] + def test_setup_function_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" @@ -67,9 +71,10 @@ def test_setup_function_failure_no_teardown(testdir): calls = reprec.getcalls("pytest_runtest_setup") assert calls[0].item.module.modlevel == [1] + def test_class_setup(testdir): reprec = testdir.inline_runsource(""" - class TestSimpleClassSetup: + class TestSimpleClassSetup(object): clslevel = [] def setup_class(cls): cls.clslevel.append(23) @@ -88,11 +93,12 @@ def test_class_setup(testdir): assert not TestSimpleClassSetup.clslevel assert not TestInheritedClassSetupStillWorks.clslevel """) - reprec.assertoutcome(passed=1+2+1) + reprec.assertoutcome(passed=1 + 2 + 1) + def test_class_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" - class TestSimpleClassSetup: + class TestSimpleClassSetup(object): clslevel = [] def setup_class(cls): 0/0 @@ -108,9 +114,10 @@ 
def test_class_setup_failure_no_teardown(testdir): """) reprec.assertoutcome(failed=1, passed=1) + def test_method_setup(testdir): reprec = testdir.inline_runsource(""" - class TestSetupMethod: + class TestSetupMethod(object): def setup_method(self, meth): self.methsetup = meth def teardown_method(self, meth): @@ -124,9 +131,10 @@ def test_method_setup(testdir): """) reprec.assertoutcome(passed=2) + def test_method_setup_failure_no_teardown(testdir): reprec = testdir.inline_runsource(""" - class TestMethodSetup: + class TestMethodSetup(object): clslevel = [] def setup_method(self, method): self.clslevel.append(1) @@ -143,9 +151,10 @@ def test_method_setup_failure_no_teardown(testdir): """) reprec.assertoutcome(failed=1, passed=1) + def test_method_generator_setup(testdir): reprec = testdir.inline_runsource(""" - class TestSetupTeardownOnInstance: + class TestSetupTeardownOnInstance(object): def setup_class(cls): cls.classsetup = True @@ -165,6 +174,7 @@ def test_method_generator_setup(testdir): """) reprec.assertoutcome(passed=1, failed=1) + def test_func_generator_setup(testdir): reprec = testdir.inline_runsource(""" import sys @@ -193,9 +203,10 @@ def test_func_generator_setup(testdir): rep = reprec.matchreport("test_one", names="pytest_runtest_logreport") assert rep.passed + def test_method_setup_uses_fresh_instances(testdir): reprec = testdir.inline_runsource(""" - class TestSelfState1: + class TestSelfState1(object): memory = [] def test_hello(self): self.memory.append(self) @@ -205,6 +216,7 @@ def test_method_setup_uses_fresh_instances(testdir): """) reprec.assertoutcome(passed=2, failed=0) + def test_setup_that_skips_calledagain(testdir): p = testdir.makepyfile(""" import pytest @@ -218,6 +230,7 @@ def test_setup_that_skips_calledagain(testdir): reprec = testdir.inline_run(p) reprec.assertoutcome(skipped=2) + def test_setup_fails_again_on_all_tests(testdir): p = testdir.makepyfile(""" import pytest @@ -231,6 +244,7 @@ def test_setup_fails_again_on_all_tests(testdir): reprec = testdir.inline_run(p) reprec.assertoutcome(failed=2) + def test_setup_funcarg_setup_when_outer_scope_fails(testdir): p = testdir.makepyfile(""" import pytest @@ -276,7 +290,7 @@ def test_setup_teardown_function_level_with_optional_argument(testdir, monkeypat def test_function_1(): pass def test_function_2(): pass - class Test: + class Test(object): def setup_method(self, {arg}): trace('setup_method') def teardown_method(self, {arg}): trace('teardown_method') diff --git a/testing/test_session.py b/testing/test_session.py index a7dcb27a4..9ec13f523 100644 --- a/testing/test_session.py +++ b/testing/test_session.py @@ -1,8 +1,10 @@ +from __future__ import absolute_import, division, print_function import pytest from _pytest.main import EXIT_NOTESTSCOLLECTED -class SessionTests: + +class SessionTests(object): def test_basic_testitem_events(self, testdir): tfile = testdir.makepyfile(""" def test_one(): @@ -11,7 +13,7 @@ class SessionTests: assert 0 def test_other(): raise ValueError(23) - class TestClass: + class TestClass(object): def test_two(self, someargs): pass """) @@ -20,15 +22,18 @@ class SessionTests: assert len(skipped) == 0 assert len(passed) == 1 assert len(failed) == 3 - end = lambda x: x.nodeid.split("::")[-1] + + def end(x): + return x.nodeid.split("::")[-1] + assert end(failed[0]) == "test_one_one" assert end(failed[1]) == "test_other" itemstarted = reprec.getcalls("pytest_itemcollected") assert len(itemstarted) == 4 # XXX check for failing funcarg setup - #colreports = 
reprec.getcalls("pytest_collectreport") - #assert len(colreports) == 4 - #assert colreports[1].report.failed + # colreports = reprec.getcalls("pytest_collectreport") + # assert len(colreports) == 4 + # assert colreports[1].report.failed def test_nested_import_error(self, testdir): tfile = testdir.makepyfile(""" @@ -40,9 +45,9 @@ class SessionTests: a = 1 """) reprec = testdir.inline_run(tfile) - l = reprec.getfailedcollections() - assert len(l) == 1 - out = str(l[0].longrepr) + values = reprec.getfailedcollections() + assert len(values) == 1 + out = str(values[0].longrepr) assert out.find('does_not_work') != -1 def test_raises_output(self, testdir): @@ -70,9 +75,9 @@ class SessionTests: def test_syntax_error_module(self, testdir): reprec = testdir.inline_runsource("this is really not python") - l = reprec.getfailedcollections() - assert len(l) == 1 - out = str(l[0].longrepr) + values = reprec.getfailedcollections() + assert len(values) == 1 + out = str(values[0].longrepr) assert out.find(str('not python')) != -1 def test_exit_first_problem(self, testdir): @@ -97,12 +102,12 @@ class SessionTests: def test_broken_repr(self, testdir): p = testdir.makepyfile(""" import pytest - class BrokenRepr1: + class BrokenRepr1(object): foo=0 def __repr__(self): raise Exception("Ha Ha fooled you, I'm a broken repr().") - class TestBrokenClass: + class TestBrokenClass(object): def test_explicit_bad_repr(self): t = BrokenRepr1() pytest.raises(Exception, 'repr(t)') @@ -116,7 +121,7 @@ class SessionTests: passed, skipped, failed = reprec.listoutcomes() assert len(failed) == 1 out = failed[0].longrepr.reprcrash.message - assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 #' + assert out.find("""[Exception("Ha Ha fooled you, I'm a broken repr().") raised in repr()]""") != -1 # ' def test_skip_file_by_conftest(self, testdir): testdir.makepyfile(conftest=""" @@ -134,19 +139,20 @@ class SessionTests: assert len(reports) == 1 assert reports[0].skipped + class TestNewSession(SessionTests): def test_order_of_execution(self, testdir): reprec = testdir.inline_runsource(""" - l = [] + values = [] def test_1(): - l.append(1) + values.append(1) def test_2(): - l.append(2) + values.append(2) def test_3(): - assert l == [1,2] - class Testmygroup: - reslist = l + assert values == [1,2] + class Testmygroup(object): + reslist = values def test_1(self): self.reslist.append(1) def test_2(self): @@ -167,7 +173,7 @@ class TestNewSession(SessionTests): def test_one(): raise ValueError() - class TestX: + class TestX(object): def test_method_one(self): pass @@ -185,7 +191,7 @@ class TestNewSession(SessionTests): started = reprec.getcalls("pytest_collectstart") finished = reprec.getreports("pytest_collectreport") assert len(started) == len(finished) - assert len(started) == 7 # XXX extra TopCollector + assert len(started) == 7 # XXX extra TopCollector colfail = [x for x in finished if x.failed] assert len(colfail) == 1 @@ -197,7 +203,7 @@ class TestNewSession(SessionTests): colfail = [x for x in finished if x.failed] assert len(colfail) == 1 - def test_minus_x_overriden_by_maxfail(self, testdir): + def test_minus_x_overridden_by_maxfail(self, testdir): testdir.makepyfile(__init__="") testdir.makepyfile(test_one="xxxx", test_two="yyyy", test_third="zzz") reprec = testdir.inline_run("-x", "--maxfail=2", testdir.tmpdir) @@ -210,9 +216,10 @@ def test_plugin_specify(testdir): pytest.raises(ImportError, """ testdir.parseconfig("-p", "nqweotexistent") """) - #pytest.raises(ImportError, + # 
pytest.raises(ImportError, # "config.do_configure(config)" - #) + # ) + def test_plugin_already_exists(testdir): config = testdir.parseconfig("-p", "terminal") @@ -220,6 +227,7 @@ def test_plugin_already_exists(testdir): config._do_configure() config._ensure_unconfigure() + def test_exclude(testdir): hellodir = testdir.mkdir("hello") hellodir.join("test_hello.py").write("x y syntaxerror") @@ -230,16 +238,17 @@ def test_exclude(testdir): assert result.ret == 0 result.stdout.fnmatch_lines(["*1 passed*"]) + def test_sessionfinish_with_start(testdir): testdir.makeconftest(""" import os - l = [] + values = [] def pytest_sessionstart(): - l.append(os.getcwd()) + values.append(os.getcwd()) os.chdir("..") def pytest_sessionfinish(): - assert l[0] == os.getcwd() + assert values[0] == os.getcwd() """) res = testdir.runpytest("--collect-only") diff --git a/testing/test_skipping.py b/testing/test_skipping.py index 2e7868d3a..a25c9460a 100644 --- a/testing/test_skipping.py +++ b/testing/test_skipping.py @@ -1,3 +1,4 @@ +from __future__ import absolute_import, division, print_function import pytest import sys @@ -5,7 +6,7 @@ from _pytest.skipping import MarkEvaluator, folded_skips, pytest_runtest_setup from _pytest.runner import runtestprotocol -class TestEvaluator: +class TestEvaluator(object): def test_no_marker(self, testdir): item = testdir.getitem("def test_func(): pass") evalskipif = MarkEvaluator(item, 'skipif') @@ -79,7 +80,7 @@ class TestEvaluator: %s def test_func(): pass - """ % (lines[i], lines[(i+1) %2])) + """ % (lines[i], lines[(i + 1) % 2])) ev = MarkEvaluator(item, 'skipif') assert ev assert ev.istrue() @@ -114,7 +115,7 @@ class TestEvaluator: def test_skipif_class(self, testdir): item, = testdir.getitems(""" import pytest - class TestClass: + class TestClass(object): pytestmark = pytest.mark.skipif("config._hackxyz") def test_func(self): pass @@ -126,7 +127,7 @@ class TestEvaluator: assert expl == "condition: config._hackxyz" -class TestXFail: +class TestXFail(object): @pytest.mark.parametrize('strict', [True, False]) def test_xfail_simple(self, testdir, strict): @@ -206,9 +207,9 @@ class TestXFail: assert 0 """) testdir.runpytest(p, '-v') - #result.stdout.fnmatch_lines([ + # result.stdout.fnmatch_lines([ # "*HINT*use*-r*" - #]) + # ]) def test_xfail_not_run_xfail_reporting(self, testdir): p = testdir.makepyfile(test_one=""" @@ -349,7 +350,6 @@ class TestXFail: "*1 xfailed*", ]) - @pytest.mark.parametrize('expected, actual, matchline', [('TypeError', 'TypeError', "*1 xfailed*"), ('(AttributeError, TypeError)', 'TypeError', "*1 xfailed*"), @@ -452,7 +452,7 @@ class TestXFail: assert result.ret == (1 if strict else 0) -class TestXFailwithSetupTeardown: +class TestXFailwithSetupTeardown(object): def test_failing_setup_issue9(self, testdir): testdir.makepyfile(""" import pytest @@ -484,7 +484,7 @@ class TestXFailwithSetupTeardown: ]) -class TestSkip: +class TestSkip(object): def test_skip_class(self, testdir): testdir.makepyfile(""" import pytest @@ -575,20 +575,21 @@ class TestSkip: def test_hello(): pass """) - result = testdir.runpytest("-rs --strict") + result = testdir.runpytest("-rs") result.stdout.fnmatch_lines([ "*unconditional skip*", "*1 skipped*", ]) -class TestSkipif: + +class TestSkipif(object): def test_skipif_conditional(self, testdir): item = testdir.getitem(""" import pytest @pytest.mark.skipif("hasattr(os, 'sep')") def test_func(): pass - """) # noqa + """) # noqa x = pytest.raises(pytest.skip.Exception, lambda: pytest_runtest_setup(item)) assert x.value.msg == 
"condition: hasattr(os, 'sep')" @@ -639,7 +640,7 @@ def test_skip_not_report_default(testdir): """) result = testdir.runpytest(p, '-v') result.stdout.fnmatch_lines([ - #"*HINT*use*-r*", + # "*HINT*use*-r*", "*1 skipped*", ]) @@ -648,7 +649,7 @@ def test_skipif_class(testdir): p = testdir.makepyfile(""" import pytest - class TestClass: + class TestClass(object): pytestmark = pytest.mark.skipif("True") def test_that(self): assert 0 @@ -667,7 +668,7 @@ def test_skip_reasons_folding(): message = "justso" longrepr = (path, lineno, message) - class X: + class X(object): pass ev1 = X() ev1.when = "execute" @@ -678,14 +679,15 @@ def test_skip_reasons_folding(): ev2.longrepr = longrepr ev2.skipped = True - l = folded_skips([ev1, ev2]) - assert len(l) == 1 - num, fspath, lineno, reason = l[0] + values = folded_skips([ev1, ev2]) + assert len(values) == 1 + num, fspath, lineno, reason = values[0] assert num == 2 assert fspath == path assert lineno == lineno assert reason == message + def test_skipped_reasons_functional(testdir): testdir.makepyfile( test_one=""" @@ -694,11 +696,11 @@ def test_skipped_reasons_functional(testdir): doskip() def test_func(): pass - class TestClass: + class TestClass(object): def test_method(self): doskip() """, - conftest = """ + conftest=""" import pytest def doskip(): pytest.skip('test') @@ -706,10 +708,11 @@ def test_skipped_reasons_functional(testdir): ) result = testdir.runpytest('-rs') result.stdout.fnmatch_lines([ - "*SKIP*2*conftest.py:3: test", + "*SKIP*2*conftest.py:4: test", ]) assert result.ret == 0 + def test_reportchars(testdir): testdir.makepyfile(""" import pytest @@ -732,6 +735,7 @@ def test_reportchars(testdir): "SKIP*four*", ]) + def test_reportchars_error(testdir): testdir.makepyfile( conftest=""" @@ -747,6 +751,7 @@ def test_reportchars_error(testdir): 'ERROR*test_foo*', ]) + def test_reportchars_all(testdir): testdir.makepyfile(""" import pytest @@ -769,6 +774,7 @@ def test_reportchars_all(testdir): "XPASS*test_3*", ]) + def test_reportchars_all_error(testdir): testdir.makepyfile( conftest=""" @@ -784,6 +790,7 @@ def test_reportchars_all_error(testdir): 'ERROR*test_foo*', ]) + @pytest.mark.xfail("hasattr(sys, 'pypy_version_info')") def test_errors_in_xfail_skip_expressions(testdir): testdir.makepyfile(""" @@ -815,6 +822,7 @@ def test_errors_in_xfail_skip_expressions(testdir): "*1 pass*2 error*", ]) + def test_xfail_skipif_with_globals(testdir): testdir.makepyfile(""" import pytest @@ -833,6 +841,7 @@ def test_xfail_skipif_with_globals(testdir): "*x == 3*", ]) + def test_direct_gives_error(testdir): testdir.makepyfile(""" import pytest @@ -853,6 +862,7 @@ def test_default_markers(testdir): "*xfail(*condition, reason=None, run=True, raises=None, strict=False)*expected failure*", ]) + def test_xfail_test_setup_exception(testdir): testdir.makeconftest(""" def pytest_runtest_setup(): @@ -869,6 +879,7 @@ def test_xfail_test_setup_exception(testdir): assert 'xfailed' in result.stdout.str() assert 'xpassed' not in result.stdout.str() + def test_imperativeskip_on_xfail_test(testdir): testdir.makepyfile(""" import pytest @@ -892,7 +903,8 @@ def test_imperativeskip_on_xfail_test(testdir): *2 skipped* """) -class TestBooleanCondition: + +class TestBooleanCondition(object): def test_skipif(self, testdir): testdir.makepyfile(""" import pytest @@ -969,3 +981,26 @@ def test_module_level_skip_error(testdir): result.stdout.fnmatch_lines( "*Using pytest.skip outside of a test is not allowed*" ) + + +def test_mark_xfail_item(testdir): + # Ensure pytest.mark.xfail works 
with non-Python Item + testdir.makeconftest(""" + import pytest + + class MyItem(pytest.Item): + nodeid = 'foo' + def setup(self): + marker = pytest.mark.xfail(True, reason="Expected failure") + self.add_marker(marker) + def runtest(self): + assert False + + def pytest_collect_file(path, parent): + return MyItem("foo", parent) + """) + result = testdir.inline_run() + passed, skipped, failed = result.listoutcomes() + assert not failed + xfailed = [r for r in skipped if hasattr(r, 'wasxfail')] + assert xfailed diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 3efd7b1f9..0fa98b26e 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -1,6 +1,7 @@ """ terminal reporting of the full testing process. """ +from __future__ import absolute_import, division, print_function import collections import sys @@ -16,19 +17,20 @@ from _pytest.terminal import build_summary_stats_line, _plugin_nameversions DistInfo = collections.namedtuple('DistInfo', ['project_name', 'version']) -class Option: +class Option(object): def __init__(self, verbose=False, fulltrace=False): self.verbose = verbose self.fulltrace = fulltrace @property def args(self): - l = [] + values = [] if self.verbose: - l.append('-v') + values.append('-v') if self.fulltrace: - l.append('--fulltrace') - return l + values.append('--fulltrace') + return values + def pytest_generate_tests(metafunc): if "option" in metafunc.fixturenames: @@ -37,7 +39,7 @@ def pytest_generate_tests(metafunc): metafunc.addcall(id="verbose", funcargs={'option': Option(verbose=True)}) metafunc.addcall(id="quiet", - funcargs={'option': Option(verbose= -1)}) + funcargs={'option': Option(verbose=-1)}) metafunc.addcall(id="fulltrace", funcargs={'option': Option(fulltrace=True)}) @@ -56,7 +58,7 @@ def test_plugin_nameversion(input, expected): assert result == expected -class TestTerminal: +class TestTerminal(object): def test_pass_skip_fail(self, testdir, option): testdir.makepyfile(""" import pytest @@ -76,8 +78,8 @@ class TestTerminal: ]) else: result.stdout.fnmatch_lines([ - "*test_pass_skip_fail.py .sF" - ]) + "*test_pass_skip_fail.py .sF" + ]) result.stdout.fnmatch_lines([ " def test_func():", "> assert 0", @@ -109,7 +111,7 @@ class TestTerminal: item.config.pluginmanager.register(tr) location = item.reportinfo() tr.config.hook.pytest_runtest_logstart(nodeid=item.nodeid, - location=location, fspath=str(item.fspath)) + location=location, fspath=str(item.fspath)) linecomp.assert_contains_lines([ "*test_show_runtest_logstart.py*" ]) @@ -127,7 +129,7 @@ class TestTerminal: def test_itemreport_subclasses_show_subclassed_file(self, testdir): testdir.makepyfile(test_p1=""" - class BaseTests: + class BaseTests(object): def test_p1(self): pass class TestClass(BaseTests): @@ -151,7 +153,7 @@ class TestTerminal: def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir): a = testdir.mkpydir("a123") a.join("test_hello123.py").write(_pytest._code.Source(""" - class TestClass: + class TestClass(object): def test_method(self): pass """)) @@ -203,8 +205,27 @@ class TestTerminal: assert result.ret == 2 result.stdout.fnmatch_lines(['*KeyboardInterrupt*']) + def test_collect_single_item(self, testdir): + """Use singular 'item' when reporting a single test item""" + testdir.makepyfile(""" + def test_foobar(): + pass + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines(['collected 1 item']) -class TestCollectonly: + def test_rewrite(self, testdir, monkeypatch): + config = testdir.parseconfig() + f = py.io.TextIO() + 
monkeypatch.setattr(f, 'isatty', lambda *args: True) + tr = TerminalReporter(config, f) + tr.writer.fullwidth = 10 + tr.write('hello') + tr.rewrite('hey', erase=True) + assert f.getvalue() == 'hello' + '\r' + 'hey' + (7 * ' ') + + +class TestCollectonly(object): def test_collectonly_basic(self, testdir): testdir.makepyfile(""" def test_func(): @@ -212,8 +233,8 @@ class TestCollectonly: """) result = testdir.runpytest("--collect-only",) result.stdout.fnmatch_lines([ - "<Module 'test_collectonly_basic.py'>", - "  <Function 'test_func'>", + "<Module 'test_collectonly_basic.py'>", + "  <Function 'test_func'>", ]) def test_collectonly_skipped_module(self, testdir): @@ -249,18 +270,18 @@ class TestCollectonly: p = testdir.makepyfile(""" def test_func1(): pass - class TestClass: + class TestClass(object): def test_method(self): pass """) result = testdir.runpytest("--collect-only", p) - #assert stderr.startswith("inserting into sys.path") + # assert stderr.startswith("inserting into sys.path") assert result.ret == 0 result.stdout.fnmatch_lines([ "*<Module '*.py'>", "* <Function 'test_func1'>", "* <Class 'TestClass'>", - #"* <Instance '()'>", + # "* <Instance '()'>", "* <Function 'test_method'>", ]) @@ -308,9 +329,10 @@ def test_repr_python_version(monkeypatch): py.std.sys.version_info = x = (2, 3) assert repr_pythonversion() == str(x) finally: - monkeypatch.undo() # do this early as pytest can get confused + monkeypatch.undo() # do this early as pytest can get confused -class TestFixtureReporting: + + +class TestFixtureReporting(object): def test_setup_fixture_error(self, testdir): testdir.makepyfile(""" def setup_function(function): @@ -368,7 +390,7 @@ class TestFixtureReporting: "*def test_fail():", "*failingfunc*", "*1 failed*1 error*", - ]) + ]) def test_setup_teardown_output_and_test_failure(self, testdir): """ Test for issue #442 """ @@ -393,9 +415,10 @@ class TestFixtureReporting: "*teardown func*", "*1 failed*", - ]) + ]) -class TestTerminalFunctional: + + +class TestTerminalFunctional(object): def test_deselected(self, testdir): testpath = testdir.makepyfile(""" def test_one(): @@ -405,7 +428,7 @@ class TestTerminalFunctional: def test_three(): pass """ - ) + ) result = testdir.runpytest("-k", "test_two:", testpath) result.stdout.fnmatch_lines([ "*test_deselected.py ..", @@ -431,7 +454,7 @@ class TestTerminalFunctional: p1 = testdir.makepyfile(""" def test_passes(): pass - class TestClass: + class TestClass(object): def test_method(self): pass """) @@ -475,7 +498,7 @@ class TestTerminalFunctional: """) result = testdir.runpytest(p1, '-l') result.stdout.fnmatch_lines([ - #"_ _ * Locals *", + # "_ _ * Locals *", "x* = 3", "y* = 'xxxxxx*" ]) @@ -487,7 +510,7 @@ class TestTerminalFunctional: raise ValueError() def test_pass(): pass - class TestClass: + class TestClass(object): def test_skip(self): pytest.skip("hello") def test_gen(): @@ -531,6 +554,23 @@ class TestTerminalFunctional: assert "===" not in s assert "passed" not in s + def test_report_collectionfinish_hook(self, testdir): + testdir.makeconftest(""" + def pytest_report_collectionfinish(config, startdir, items): + return ['hello from hook: {0} items'.format(len(items))] + """) + testdir.makepyfile(""" + import pytest + @pytest.mark.parametrize('i', range(3)) + def test(i): + pass + """) + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + "collected 3 items", + "hello from hook: 3 items", + ]) + def test_fail_extra_reporting(testdir): testdir.makepyfile("def test_this(): assert 0") @@ -542,11 +582,13 @@ def test_fail_extra_reporting(testdir): "FAIL*test_fail_extra_reporting*", ]) + def test_fail_reporting_on_pass(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest('-rf') assert 'short test summary' not in
result.stdout.str() + def test_pass_extra_reporting(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest() @@ -557,11 +599,13 @@ def test_pass_extra_reporting(testdir): "PASS*test_pass_extra_reporting*", ]) + def test_pass_reporting_on_fail(testdir): testdir.makepyfile("def test_this(): assert 0") result = testdir.runpytest('-rp') assert 'short test summary' not in result.stdout.str() + def test_pass_output_reporting(testdir): testdir.makepyfile(""" def test_pass_output(): @@ -574,6 +618,7 @@ def test_pass_output_reporting(testdir): "Four score and seven years ago...", ]) + def test_color_yes(testdir): testdir.makepyfile("def test_this(): assert 1") result = testdir.runpytest('--color=yes') @@ -612,10 +657,10 @@ def test_color_yes_collection_on_non_atty(testdir, verbose): def test_getreportopt(): - class config: - class option: + class config(object): + class option(object): reportchars = "" - disablepytestwarnings = True + disable_warnings = True config.option.reportchars = "sf" assert getreportopt(config) == "sf" @@ -624,11 +669,11 @@ def test_getreportopt(): assert getreportopt(config) == "sfx" config.option.reportchars = "sfx" - config.option.disablepytestwarnings = False + config.option.disable_warnings = False assert getreportopt(config) == "sfxw" config.option.reportchars = "sfxw" - config.option.disablepytestwarnings = False + config.option.disable_warnings = False assert getreportopt(config) == "sfxw" @@ -650,6 +695,7 @@ def test_terminalreporter_reportopt_addopts(testdir): "*1 passed*" ]) + def test_tbstyle_short(testdir): p = testdir.makepyfile(""" import pytest @@ -675,6 +721,7 @@ def test_tbstyle_short(testdir): assert 'x = 0' in s assert 'assert x' in s + def test_traceconfig(testdir, monkeypatch): result = testdir.runpytest("--traceconfig") result.stdout.fnmatch_lines([ @@ -683,10 +730,11 @@ def test_traceconfig(testdir, monkeypatch): assert result.ret == EXIT_NOTESTSCOLLECTED -class TestGenericReporting: +class TestGenericReporting(object): """ this test class can be subclassed with a different option provider to run e.g. distributed tests. 
""" + def test_collect_fail(self, testdir, option): testdir.makepyfile("import xyz\n") result = testdir.runpytest(*option.args) @@ -713,7 +761,6 @@ class TestGenericReporting: "*2 failed*", ]) - def test_tb_option(self, testdir, option): testdir.makepyfile(""" import pytest @@ -777,6 +824,7 @@ def pytest_report_header(config, startdir): str(testdir.tmpdir), ]) + @pytest.mark.xfail("not hasattr(os, 'dup')") def test_fdopen_kept_alive_issue124(testdir): testdir.makepyfile(""" @@ -795,6 +843,7 @@ def test_fdopen_kept_alive_issue124(testdir): "*2 passed*" ]) + def test_tbstyle_native_setup_error(testdir): testdir.makepyfile(""" import pytest @@ -807,8 +856,9 @@ def test_tbstyle_native_setup_error(testdir): """) result = testdir.runpytest("--tb=native") result.stdout.fnmatch_lines([ - '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*' - ]) + '*File *test_tbstyle_native_setup_error.py", line *, in setup_error_fixture*' + ]) + def test_terminal_summary(testdir): testdir.makeconftest(""" @@ -837,8 +887,8 @@ def test_terminal_summary_warnings_are_displayed(testdir): """) result = testdir.runpytest('-rw') result.stdout.fnmatch_lines([ - '*C1*internal warning', - '*== 1 pytest-warnings in *', + '*internal warning', + '*== 1 warnings in *', ]) @@ -858,11 +908,11 @@ def test_terminal_summary_warnings_are_displayed(testdir): ("yellow", "1 weird", {"weird": (1,)}), ("yellow", "1 passed, 1 weird", {"weird": (1,), "passed": (1,)}), - ("yellow", "1 pytest-warnings", {"warnings": (1,)}), - ("yellow", "1 passed, 1 pytest-warnings", {"warnings": (1,), - "passed": (1,)}), + ("yellow", "1 warnings", {"warnings": (1,)}), + ("yellow", "1 passed, 1 warnings", {"warnings": (1,), + "passed": (1,)}), - ("green", "5 passed", {"passed": (1,2,3,4,5)}), + ("green", "5 passed", {"passed": (1, 2, 3, 4, 5)}), # "Boring" statuses. 
These have no effect on the color of the summary @@ -891,13 +941,13 @@ def test_terminal_summary_warnings_are_displayed(testdir): # A couple more complex combinations ("red", "1 failed, 2 passed, 3 xfailed", - {"passed": (1,2), "failed": (1,), "xfailed": (1,2,3)}), + {"passed": (1, 2), "failed": (1,), "xfailed": (1, 2, 3)}), ("green", "1 passed, 2 skipped, 3 deselected, 2 xfailed", {"passed": (1,), - "skipped": (1,2), - "deselected": (1,2,3), - "xfailed": (1,2)}), + "skipped": (1, 2), + "deselected": (1, 2, 3), + "xfailed": (1, 2)}), ]) def test_summary_stats(exp_line, exp_color, stats_arg): print("Based on stats: %s" % stats_arg) @@ -906,3 +956,12 @@ def test_summary_stats(exp_line, exp_color, stats_arg): print("Actually got: \"%s\"; with color \"%s\"" % (line, color)) assert line == exp_line assert color == exp_color + + +def test_no_trailing_whitespace_after_inifile_word(testdir): + result = testdir.runpytest('') + assert 'inifile:\n' in result.stdout.str() + + testdir.makeini('[pytest]') + result = testdir.runpytest('') + assert 'inifile: tox.ini\n' in result.stdout.str() diff --git a/testing/test_tmpdir.py b/testing/test_tmpdir.py index 232acb6d2..467e77252 100644 --- a/testing/test_tmpdir.py +++ b/testing/test_tmpdir.py @@ -1,9 +1,11 @@ +from __future__ import absolute_import, division, print_function import sys import py import pytest from _pytest.tmpdir import tmpdir + def test_funcarg(testdir): testdir.makepyfile(""" def pytest_generate_tests(metafunc): @@ -28,13 +30,15 @@ def test_funcarg(testdir): bn = p.basename.strip("0123456789") assert bn == "qwe__abc" + def test_ensuretemp(recwarn): d1 = pytest.ensuretemp('hello') d2 = pytest.ensuretemp('hello') assert d1 == d2 assert d1.check(dir=1) -class TestTempdirHandler: + +class TestTempdirHandler(object): def test_mktemp(self, testdir): from _pytest.tmpdir import TempdirFactory config = testdir.parseconfig() @@ -48,7 +52,8 @@ class TestTempdirHandler: assert tmp2.relto(t.getbasetemp()).startswith("this") assert tmp2 != tmp -class TestConfigTmpdir: + +class TestConfigTmpdir(object): def test_getbasetemp_custom_removes_old(self, testdir): mytemp = testdir.tmpdir.join("xyz") p = testdir.makepyfile(""" @@ -75,6 +80,7 @@ def test_basetemp(testdir): assert result.ret == 0 assert mytemp.join('hello').check() + @pytest.mark.skipif(not hasattr(py.path.local, 'mksymlinkto'), reason="symlink not available on this platform") def test_tmpdir_always_is_realpath(testdir): diff --git a/testing/test_unittest.py b/testing/test_unittest.py index 9625ae0f8..3273e81aa 100644 --- a/testing/test_unittest.py +++ b/testing/test_unittest.py @@ -1,29 +1,32 @@ +from __future__ import absolute_import, division, print_function from _pytest.main import EXIT_NOTESTSCOLLECTED import pytest import gc + def test_simple_unittest(testdir): testpath = testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): def testpassing(self): - self.assertEquals('foo', 'foo') + self.assertEqual('foo', 'foo') def test_failing(self): - self.assertEquals('foo', 'bar') + self.assertEqual('foo', 'bar') """) reprec = testdir.inline_run(testpath) assert reprec.matchreport("testpassing").passed assert reprec.matchreport("test_failing").failed + def test_runTest_method(testdir): testdir.makepyfile(""" import unittest class MyTestCaseWithRunTest(unittest.TestCase): def runTest(self): - self.assertEquals('foo', 'foo') + self.assertEqual('foo', 'foo') class MyTestCaseWithoutRunTest(unittest.TestCase): def runTest(self): - self.assertEquals('foo', 'foo') + 
self.assertEqual('foo', 'foo') def test_something(self): pass """) @@ -34,6 +37,7 @@ def test_runTest_method(testdir): *2 passed* """) + def test_isclasscheck_issue53(testdir): testpath = testdir.makepyfile(""" import unittest @@ -45,6 +49,7 @@ def test_isclasscheck_issue53(testdir): result = testdir.runpytest(testpath) assert result.ret == EXIT_NOTESTSCOLLECTED + def test_setup(testdir): testpath = testdir.makepyfile(""" import unittest @@ -54,7 +59,7 @@ def test_setup(testdir): def setup_method(self, method): self.foo2 = 1 def test_both(self): - self.assertEquals(1, self.foo) + self.assertEqual(1, self.foo) assert self.foo2 == 1 def teardown_method(self, method): assert 0, "42" @@ -65,36 +70,38 @@ def test_setup(testdir): rep = reprec.matchreport("test_both", when="teardown") assert rep.failed and '42' in str(rep.longrepr) + def test_setUpModule(testdir): testpath = testdir.makepyfile(""" - l = [] + values = [] def setUpModule(): - l.append(1) + values.append(1) def tearDownModule(): - del l[0] + del values[0] def test_hello(): - assert l == [1] + assert values == [1] def test_world(): - assert l == [1] + assert values == [1] """) result = testdir.runpytest(testpath) result.stdout.fnmatch_lines([ "*2 passed*", ]) + def test_setUpModule_failing_no_teardown(testdir): testpath = testdir.makepyfile(""" - l = [] + values = [] def setUpModule(): 0/0 def tearDownModule(): - l.append(1) + values.append(1) def test_hello(): pass @@ -102,7 +109,8 @@ def test_setUpModule_failing_no_teardown(testdir): reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=0, failed=1) call = reprec.getcalls("pytest_runtest_setup")[0] - assert not call.item.module.l + assert not call.item.module.values + def test_new_instances(testdir): testpath = testdir.makepyfile(""" @@ -116,18 +124,19 @@ def test_new_instances(testdir): reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=2) + def test_teardown(testdir): testpath = testdir.makepyfile(""" import unittest class MyTestCase(unittest.TestCase): - l = [] + values = [] def test_one(self): pass def tearDown(self): - self.l.append(None) + self.values.append(None) class Second(unittest.TestCase): def test_check(self): - self.assertEquals(MyTestCase.l, [None]) + self.assertEqual(MyTestCase.values, [None]) """) reprec = testdir.inline_run(testpath) passed, skipped, failed = reprec.countoutcomes() @@ -135,6 +144,7 @@ def test_teardown(testdir): assert passed == 2 assert passed + skipped + failed == 2 + def test_teardown_issue1649(testdir): """ Are TestCase objects cleaned up? 
Often unittest TestCase objects set @@ -157,6 +167,7 @@ def test_teardown_issue1649(testdir): for obj in gc.get_objects(): assert type(obj).__name__ != 'TestCaseObjectsShouldBeCleanedUp' + @pytest.mark.skipif("sys.version_info < (2,7)") def test_unittest_skip_issue148(testdir): testpath = testdir.makepyfile(""" @@ -176,6 +187,7 @@ def test_unittest_skip_issue148(testdir): reprec = testdir.inline_run(testpath) reprec.assertoutcome(skipped=1) + def test_method_and_teardown_failing_reporting(testdir): testdir.makepyfile(""" import unittest, pytest @@ -195,6 +207,7 @@ def test_method_and_teardown_failing_reporting(testdir): "*1 failed*1 error*", ]) + def test_setup_failure_is_shown(testdir): testdir.makepyfile(""" import unittest @@ -215,6 +228,7 @@ def test_setup_failure_is_shown(testdir): ]) assert 'never42' not in result.stdout.str() + def test_setup_setUpClass(testdir): testpath = testdir.makepyfile(""" import unittest @@ -237,6 +251,7 @@ def test_setup_setUpClass(testdir): reprec = testdir.inline_run(testpath) reprec.assertoutcome(passed=3) + def test_setup_class(testdir): testpath = testdir.makepyfile(""" import unittest @@ -278,6 +293,7 @@ def test_testcase_adderrorandfailure_defers(testdir, type): result = testdir.runpytest() assert 'should not raise' not in result.stdout.str() + @pytest.mark.parametrize("type", ['Error', 'Failure']) def test_testcase_custom_exception_info(testdir, type): testdir.makepyfile(""" @@ -309,6 +325,7 @@ def test_testcase_custom_exception_info(testdir, type): "*1 failed*", ]) + def test_testcase_totally_incompatible_exception_info(testdir): item, = testdir.getitems(""" from unittest import TestCase @@ -320,6 +337,7 @@ def test_testcase_totally_incompatible_exception_info(testdir): excinfo = item._excinfo.pop(0) assert 'ERROR: Unknown Incompatible' in str(excinfo.getrepr()) + def test_module_level_pytestmark(testdir): testpath = testdir.makepyfile(""" import unittest @@ -333,61 +351,12 @@ def test_module_level_pytestmark(testdir): reprec.assertoutcome(skipped=1) -def test_trial_testcase_skip_property(testdir): - pytest.importorskip('twisted.trial.unittest') - testpath = testdir.makepyfile(""" - from twisted.trial import unittest - class MyTestCase(unittest.TestCase): - skip = 'dont run' - def test_func(self): - pass - """) - reprec = testdir.inline_run(testpath, "-s") - reprec.assertoutcome(skipped=1) - - -def test_trial_testfunction_skip_property(testdir): - pytest.importorskip('twisted.trial.unittest') - testpath = testdir.makepyfile(""" - from twisted.trial import unittest - class MyTestCase(unittest.TestCase): - def test_func(self): - pass - test_func.skip = 'dont run' - """) - reprec = testdir.inline_run(testpath, "-s") - reprec.assertoutcome(skipped=1) - - -def test_trial_testcase_todo_property(testdir): - pytest.importorskip('twisted.trial.unittest') - testpath = testdir.makepyfile(""" - from twisted.trial import unittest - class MyTestCase(unittest.TestCase): - todo = 'dont run' - def test_func(self): - assert 0 - """) - reprec = testdir.inline_run(testpath, "-s") - reprec.assertoutcome(skipped=1) - - -def test_trial_testfunction_todo_property(testdir): - pytest.importorskip('twisted.trial.unittest') - testpath = testdir.makepyfile(""" - from twisted.trial import unittest - class MyTestCase(unittest.TestCase): - def test_func(self): - assert 0 - test_func.todo = 'dont run' - """) - reprec = testdir.inline_run(testpath, "-s") - reprec.assertoutcome(skipped=1) - - -class TestTrialUnittest: +class TestTrialUnittest(object): def setup_class(cls): cls.ut = 
pytest.importorskip("twisted.trial.unittest") + # on windows trial uses a socket for a reactor and apparently doesn't close it properly + # https://twistedmatrix.com/trac/ticket/9227 + cls.ignore_unclosed_socket_warning = ('-W', 'always') def test_trial_testcase_runtest_not_collected(self, testdir): testdir.makepyfile(""" @@ -397,7 +366,7 @@ class TestTrialUnittest: def test_hello(self): pass """) - reprec = testdir.inline_run() + reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning) reprec.assertoutcome(passed=1) testdir.makepyfile(""" from twisted.trial.unittest import TestCase @@ -406,7 +375,7 @@ class TestTrialUnittest: def runTest(self): pass """) - reprec = testdir.inline_run() + reprec = testdir.inline_run(*self.ignore_unclosed_socket_warning) reprec.assertoutcome(passed=1) def test_trial_exceptions_with_skips(self, testdir): @@ -444,7 +413,7 @@ class TestTrialUnittest: """) from _pytest.compat import _is_unittest_unexpected_success_a_failure should_fail = _is_unittest_unexpected_success_a_failure() - result = testdir.runpytest("-rxs") + result = testdir.runpytest("-rxs", *self.ignore_unclosed_socket_warning) result.stdout.fnmatch_lines_random([ "*XFAIL*test_trial_todo*", "*trialselfskip*", @@ -519,6 +488,51 @@ class TestTrialUnittest: child.expect("hellopdb") child.sendeof() + def test_trial_testcase_skip_property(self, testdir): + testpath = testdir.makepyfile(""" + from twisted.trial import unittest + class MyTestCase(unittest.TestCase): + skip = 'dont run' + def test_func(self): + pass + """) + reprec = testdir.inline_run(testpath, "-s") + reprec.assertoutcome(skipped=1) + + def test_trial_testfunction_skip_property(self, testdir): + testpath = testdir.makepyfile(""" + from twisted.trial import unittest + class MyTestCase(unittest.TestCase): + def test_func(self): + pass + test_func.skip = 'dont run' + """) + reprec = testdir.inline_run(testpath, "-s") + reprec.assertoutcome(skipped=1) + + def test_trial_testcase_todo_property(self, testdir): + testpath = testdir.makepyfile(""" + from twisted.trial import unittest + class MyTestCase(unittest.TestCase): + todo = 'dont run' + def test_func(self): + assert 0 + """) + reprec = testdir.inline_run(testpath, "-s") + reprec.assertoutcome(skipped=1) + + def test_trial_testfunction_todo_property(self, testdir): + testpath = testdir.makepyfile(""" + from twisted.trial import unittest + class MyTestCase(unittest.TestCase): + def test_func(self): + assert 0 + test_func.todo = 'dont run' + """) + reprec = testdir.inline_run(testpath, "-s", *self.ignore_unclosed_socket_warning) + reprec.assertoutcome(skipped=1) + + def test_djangolike_testcase(testdir): # contributed from Morten Breekevold testdir.makepyfile(""" @@ -579,11 +593,12 @@ def test_unittest_not_shown_in_traceback(testdir): class t(unittest.TestCase): def test_hello(self): x = 3 - self.assertEquals(x, 4) + self.assertEqual(x, 4) """) res = testdir.runpytest() assert "failUnlessEqual" not in res.stdout.str() + def test_unorderable_types(testdir): testdir.makepyfile(""" import unittest @@ -601,6 +616,7 @@ def test_unorderable_types(testdir): assert "TypeError" not in result.stdout.str() assert result.ret == EXIT_NOTESTSCOLLECTED + def test_unittest_typerror_traceback(testdir): testdir.makepyfile(""" import unittest @@ -704,7 +720,7 @@ def test_unittest_setup_interaction(testdir, fix_type, stmt): def test_non_unittest_no_setupclass_support(testdir): testpath = testdir.makepyfile(""" - class TestFoo: + class TestFoo(object): x = 0 @classmethod @@ -768,6 +784,7 @@ def 
test_issue333_result_clearing(testdir): reprec = testdir.inline_run() reprec.assertoutcome(failed=1) + @pytest.mark.skipif("sys.version_info < (2,7)") def test_unittest_raise_skip_issue748(testdir): testdir.makepyfile(test_foo=""" @@ -783,11 +800,12 @@ def test_unittest_raise_skip_issue748(testdir): *1 skipped* """) + @pytest.mark.skipif("sys.version_info < (2,7)") def test_unittest_skip_issue1169(testdir): testdir.makepyfile(test_foo=""" import unittest - + class MyTestCase(unittest.TestCase): @unittest.skip("skipping due to reasons") def test_skip(self): @@ -799,6 +817,7 @@ def test_unittest_skip_issue1169(testdir): *1 skipped* """) + def test_class_method_containing_test_issue1558(testdir): testdir.makepyfile(test_foo=""" import unittest diff --git a/testing/test_warnings.py b/testing/test_warnings.py new file mode 100644 index 000000000..12539f8ee --- /dev/null +++ b/testing/test_warnings.py @@ -0,0 +1,225 @@ +# -*- coding: utf8 -*- +from __future__ import unicode_literals + +import sys + +import pytest + + +WARNINGS_SUMMARY_HEADER = 'warnings summary' + + +@pytest.fixture +def pyfile_with_warnings(testdir, request): + """ + Create a test file which calls a function in a module which generates warnings. + """ + testdir.syspathinsert() + test_name = request.function.__name__ + module_name = test_name.lstrip('test_') + '_module' + testdir.makepyfile(**{ + module_name: ''' + import warnings + def foo(): + warnings.warn(UserWarning("user warning")) + warnings.warn(RuntimeWarning("runtime warning")) + return 1 + ''', + test_name: ''' + import {module_name} + def test_func(): + assert {module_name}.foo() == 1 + '''.format(module_name=module_name) + }) + + +@pytest.mark.filterwarnings('always') +def test_normal_flow(testdir, pyfile_with_warnings): + """ + Check that the warnings section is displayed, containing test node ids followed by + all warnings generated by that test node. 
+ """ + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*== %s ==*' % WARNINGS_SUMMARY_HEADER, + + '*test_normal_flow.py::test_func', + + '*normal_flow_module.py:3: UserWarning: user warning', + '* warnings.warn(UserWarning("user warning"))', + + '*normal_flow_module.py:4: RuntimeWarning: runtime warning', + '* warnings.warn(RuntimeWarning("runtime warning"))', + '* 1 passed, 2 warnings*', + ]) + assert result.stdout.str().count('test_normal_flow.py::test_func') == 1 + + +@pytest.mark.filterwarnings('always') +def test_setup_teardown_warnings(testdir, pyfile_with_warnings): + testdir.makepyfile(''' + import warnings + import pytest + + @pytest.fixture + def fix(): + warnings.warn(UserWarning("warning during setup")) + yield + warnings.warn(UserWarning("warning during teardown")) + + def test_func(fix): + pass + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*== %s ==*' % WARNINGS_SUMMARY_HEADER, + + '*test_setup_teardown_warnings.py:6: UserWarning: warning during setup', + '*warnings.warn(UserWarning("warning during setup"))', + + '*test_setup_teardown_warnings.py:8: UserWarning: warning during teardown', + '*warnings.warn(UserWarning("warning during teardown"))', + '* 1 passed, 2 warnings*', + ]) + + +@pytest.mark.parametrize('method', ['cmdline', 'ini']) +def test_as_errors(testdir, pyfile_with_warnings, method): + args = ('-W', 'error') if method == 'cmdline' else () + if method == 'ini': + testdir.makeini(''' + [pytest] + filterwarnings= error + ''') + result = testdir.runpytest(*args) + result.stdout.fnmatch_lines([ + 'E UserWarning: user warning', + 'as_errors_module.py:3: UserWarning', + '* 1 failed in *', + ]) + + +@pytest.mark.parametrize('method', ['cmdline', 'ini']) +def test_ignore(testdir, pyfile_with_warnings, method): + args = ('-W', 'ignore') if method == 'cmdline' else () + if method == 'ini': + testdir.makeini(''' + [pytest] + filterwarnings= ignore + ''') + + result = testdir.runpytest(*args) + result.stdout.fnmatch_lines([ + '* 1 passed in *', + ]) + assert WARNINGS_SUMMARY_HEADER not in result.stdout.str() + + +@pytest.mark.skipif(sys.version_info < (3, 0), + reason='warnings message is unicode is ok in python3') +@pytest.mark.filterwarnings('always') +def test_unicode(testdir, pyfile_with_warnings): + testdir.makepyfile(''' + # -*- coding: utf8 -*- + import warnings + import pytest + + + @pytest.fixture + def fix(): + warnings.warn(u"测试") + yield + + def test_func(fix): + pass + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*== %s ==*' % WARNINGS_SUMMARY_HEADER, + '*test_unicode.py:8: UserWarning: \u6d4b\u8bd5*', + '* 1 passed, 1 warnings*', + ]) + + +@pytest.mark.skipif(sys.version_info >= (3, 0), + reason='warnings message is broken as it is not str instance') +def test_py2_unicode(testdir, pyfile_with_warnings): + if getattr(sys, "pypy_version_info", ())[:2] == (5, 9) and sys.platform.startswith('win'): + pytest.xfail("fails with unicode error on PyPy2 5.9 and Windows (#2905)") + testdir.makepyfile(''' + # -*- coding: utf8 -*- + import warnings + import pytest + + + @pytest.fixture + def fix(): + warnings.warn(u"测试") + yield + + @pytest.mark.filterwarnings('always') + def test_func(fix): + pass + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*== %s ==*' % WARNINGS_SUMMARY_HEADER, + + '*test_py2_unicode.py:8: UserWarning: \u6d4b\u8bd5', + '*warnings.warn(u"\u6d4b\u8bd5")', + '*warnings.py:*: UnicodeWarning: Warning is using unicode non*', + '* 1 passed, 2 warnings*', + ]) + + 
+def test_works_with_filterwarnings(testdir): + """Ensure our warnings capture does not mess with pre-installed filters (#2430).""" + testdir.makepyfile(''' + import warnings + + class MyWarning(Warning): + pass + + warnings.filterwarnings("error", category=MyWarning) + + class TestWarnings(object): + def test_my_warning(self): + try: + warnings.warn(MyWarning("warn!")) + assert False + except MyWarning: + assert True + ''') + result = testdir.runpytest() + result.stdout.fnmatch_lines([ + '*== 1 passed in *', + ]) + + +@pytest.mark.parametrize('default_config', ['ini', 'cmdline']) +def test_filterwarnings_mark(testdir, default_config): + """ + Test ``filterwarnings`` mark works and takes precedence over command line and ini options. + """ + if default_config == 'ini': + testdir.makeini(""" + [pytest] + filterwarnings = always + """) + testdir.makepyfile(""" + import warnings + import pytest + + @pytest.mark.filterwarnings('ignore::RuntimeWarning') + def test_ignore_runtime_warning(): + warnings.warn(RuntimeWarning()) + + @pytest.mark.filterwarnings('error') + def test_warning_error(): + warnings.warn(RuntimeWarning()) + + def test_show_warning(): + warnings.warn(RuntimeWarning()) + """) + result = testdir.runpytest('-W always' if default_config == 'cmdline' else '') + result.stdout.fnmatch_lines(['*= 1 failed, 2 passed, 1 warnings in *']) diff --git a/tox.ini b/tox.ini index f3494e8be..48913bd7b 100644 --- a/tox.ini +++ b/tox.ini @@ -1,179 +1,217 @@ [tox] -minversion=2.0 -distshare={homedir}/.tox/distshare -# make sure to update enviroment list on appveyor.yml -envlist= - linting - py26 - py27 - py33 - py34 - py35 - pypy - {py27,py35}-{pexpect,xdist,trial} - py27-nobyte - doctesting - freeze - docs +minversion = 2.0 +distshare = {homedir}/.tox/distshare +# make sure to update environment list on appveyor.yml +envlist = + linting + py26 + py27 + py33 + py34 + py35 + py36 + py37 + pypy + {py27,py36}-{pexpect,xdist,trial,numpy} + py27-nobyte + doctesting + py35-freeze + docs [testenv] -commands= pytest --lsof -rfsxX {posargs:testing} +commands = pytest --lsof -rfsxX {posargs:testing} passenv = USER USERNAME -deps= +deps = hypothesis>=3.5.2 nose mock requests [testenv:py26] -commands= pytest --lsof -rfsxX {posargs:testing} +commands = pytest --lsof -rfsxX {posargs:testing} # pinning mock to last supported version for python 2.6 -deps= +deps = hypothesis<3.0 nose mock<1.1 [testenv:py27-subprocess] -changedir=. -basepython=python2.7 -deps=pytest-xdist>=1.13 +changedir = . 
+deps = + pytest-xdist>=1.13 mock nose -commands= - pytest -n3 -rfsxX --runpytest=subprocess {posargs:testing} +commands = + pytest -n3 -rfsxX --runpytest=subprocess {posargs:testing} [testenv:linting] +skipsdist = True +usedevelop = True basepython = python2.7 deps = flake8 # pygments required by rst-lint - pygments + pygments restructuredtext_lint - check-manifest commands = - {envpython} scripts/check-manifest.py flake8 pytest.py _pytest testing - rst-lint CHANGELOG.rst HOWTORELEASE.rst README.rst + {envpython} scripts/check-rst.py [testenv:py27-xdist] -deps=pytest-xdist>=1.13 +deps = + pytest-xdist>=1.13 mock nose hypothesis>=3.5.2 -commands= - pytest -n1 -rfsxX {posargs:testing} +changedir=testing +commands = + pytest -n1 -rfsxX {posargs:.} -[testenv:py35-xdist] -deps={[testenv:py27-xdist]deps} -commands= - pytest -n3 -rfsxX {posargs:testing} +[testenv:py36-xdist] +deps = {[testenv:py27-xdist]deps} +commands = + pytest -n3 -rfsxX {posargs:testing} [testenv:py27-pexpect] -changedir=testing -platform=linux|darwin -deps=pexpect -commands= - pytest -rfsxX test_pdb.py test_terminal.py test_unittest.py +changedir = testing +platform = linux|darwin +deps = pexpect +commands = + pytest -rfsxX test_pdb.py test_terminal.py test_unittest.py -[testenv:py35-pexpect] -changedir=testing -platform=linux|darwin -deps={[testenv:py27-pexpect]deps} -commands= - pytest -rfsxX test_pdb.py test_terminal.py test_unittest.py +[testenv:py36-pexpect] +changedir = testing +platform = linux|darwin +deps = {[testenv:py27-pexpect]deps} +commands = + pytest -rfsxX test_pdb.py test_terminal.py test_unittest.py [testenv:py27-nobyte] -deps= +deps = pytest-xdist>=1.13 hypothesis>=3.5.2 -distribute=true -setenv= +distribute = true +changedir=testing +setenv = PYTHONDONTWRITEBYTECODE=1 -commands= - pytest -n3 -rfsxX {posargs:testing} +commands = + pytest -n3 -rfsxX {posargs:.} [testenv:py27-trial] -deps=twisted -commands= - pytest -ra {posargs:testing/test_unittest.py} +deps = twisted +commands = + pytest -ra {posargs:testing/test_unittest.py} -[testenv:py35-trial] -deps={[testenv:py27-trial]deps} +[testenv:py36-trial] +deps = {[testenv:py27-trial]deps} +commands = + pytest -ra {posargs:testing/test_unittest.py} + +[testenv:py27-numpy] +deps=numpy commands= - pytest -ra {posargs:testing/test_unittest.py} + pytest -rfsxX {posargs:testing/python/approx.py} + +[testenv:py36-numpy] +deps=numpy +commands= + pytest -rfsxX {posargs:testing/python/approx.py} [testenv:docs] -basepython=python -changedir=doc/en -deps= +skipsdist = True +usedevelop = True +basepython = python +changedir = doc/en +deps = sphinx PyYAML -commands= +commands = sphinx-build -W -b html . 
_build [testenv:doctesting] basepython = python -usedevelop=True -skipsdist=True -deps= +usedevelop = True +skipsdist = True +# ensure the given pyargs can't mean anything else +changedir = doc/ +deps = PyYAML -commands= - pytest -rfsxX doc/en - pytest --doctest-modules {toxinidir}/_pytest +commands = + pytest -rfsxX en + pytest --doctest-modules --pyargs _pytest [testenv:regen] -changedir=doc/en +changedir = doc/en +skipsdist = True basepython = python3.5 -deps=sphinx - PyYAML - regendoc>=0.6.1 +deps = + sphinx + PyYAML + regendoc>=0.6.1 -whitelist_externals= +whitelist_externals = rm make -commands= +commands = rm -rf /tmp/doc-exec* make regen +[testenv:fix-lint] +skipsdist = True +usedevelop = True +deps = + autopep8 +commands = + autopep8 --in-place -r --max-line-length=120 --exclude=vendored_packages,test_source_multiline_block.py _pytest testing + [testenv:jython] -changedir=testing +changedir = testing -commands= +commands = {envpython} {envbindir}/py.test-jython -rfsxX {posargs} -[testenv:freeze] -changedir=testing/freeze -deps=pyinstaller -commands= +[testenv:py35-freeze] +changedir = testing/freeze +deps = pyinstaller +commands = {envpython} create_executable.py {envpython} tox_run.py [testenv:coveralls] passenv = TRAVIS TRAVIS_JOB_ID TRAVIS_BRANCH COVERALLS_REPO_TOKEN -usedevelop=True -basepython=python3.5 -changedir=. +usedevelop = True +changedir = . deps = {[testenv]deps} coveralls -commands= +commands = coverage run --source=_pytest -m pytest testing coverage report -m coveralls [pytest] -minversion=2.0 -plugins=pytester +minversion = 2.0 +plugins = pytester #--pyargs --doctest-modules --ignore=.tox -addopts= -rxsX -p pytester --ignore=testing/cx_freeze -rsyncdirs=tox.ini pytest.py _pytest testing -python_files=test_*.py *_test.py testing/*/*.py -python_classes=Test Acceptance -python_functions=test +addopts = -rxsX -p pytester --ignore=testing/cx_freeze +rsyncdirs = tox.ini pytest.py _pytest testing +python_files = test_*.py *_test.py testing/*/*.py +python_classes = Test Acceptance +python_functions = test norecursedirs = .tox ja .hg cx_freeze_source - +xfail_strict=true +filterwarnings = + error + # produced by path.local + ignore:bad escape.*:DeprecationWarning:re + # produced by path.readlines + ignore:.*U.*mode is deprecated:DeprecationWarning + # produced by pytest-xdist + ignore:.*type argument to addoption.*:DeprecationWarning + # produced by python >=3.5 on execnet (pytest-xdist) + ignore:.*inspect.getargspec.*deprecated, use inspect.signature.*:DeprecationWarning [flake8] -ignore =E401,E225,E261,E128,E124,E301,E302,E121,E303,W391,E501,E231,E126,E701,E265,E241,E251,E226,E101,W191,E131,E203,E122,E123,E271,E712,E222,E127,E125,E221,W292,E111,E113,E293,E262,W293,E129,E702,E201,E272,E202,E704,E731,E402 +max-line-length = 120 exclude = _pytest/vendored_packages/pluggy.py
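+# With the blanket flake8 `ignore` list removed, the default checks now apply
+# at a 120-column limit; the fix-lint environment above (autopep8) is the
+# assumed way to bring a working tree back into compliance, e.g.:
+#   tox -e fix-lint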