Merge pull request #3576 from RonnyPfannschmidt/addmarker-api

fix addmarker - extract mark from markdecorator
This commit is contained in:
Bruno Oliveira 2018-06-13 18:36:40 -03:00 committed by GitHub
commit 94c41bec64
29 changed files with 63 additions and 1077 deletions

View File

@ -162,11 +162,11 @@ Preparing Pull Requests
Short version
~~~~~~~~~~~~~
#. Fork the repository;
#. enable and install pre-commit https://pre-commit.com/ to ensure styleguides and codechecks are followed
#. Target ``master`` for bugfixes and doc changes;
#. Fork the repository.
#. Enable and install `pre-commit <https://pre-commit.com>`_ to ensure style-guides and code checks are followed.
#. Target ``master`` for bugfixes and doc changes.
#. Target ``features`` for new features or functionality changes.
#. Follow **PEP-8**. There's a ``tox`` command to help fixing it: ``tox -e fix-lint``.
#. Follow **PEP-8** for naming and `black <https://github.com/ambv/black>`_ for formatting.
#. Tests are run using ``tox``::
tox -e linting,py27,py36
@ -177,7 +177,7 @@ Short version
and one of ``bugfix``, ``removal``, ``feature``, ``vendor``, ``doc`` or
``trivial`` for the issue type.
#. Unless your change is a trivial or a documentation fix (e.g., a typo or reword of a small section) please
add yourself to the ``AUTHORS`` file, in alphabetical order;
add yourself to the ``AUTHORS`` file, in alphabetical order.
Long version
@ -217,15 +217,15 @@ Here is a simple overview, with pytest-specific bits:
If you need some help with Git, follow this quick start
guide: https://git.wiki.kernel.org/index.php/QuickStart
#. install pre-commit and install its hook on the pytest repo
https://pre-commit.com/ is a framework for managing and maintaining multi-language pre-commit hooks
pytest uses pre-commit to ensure code-style and code formatting is the same
#. Install `pre-commit <https://pre-commit.com>`_ and its hook on the pytest repo::
$ pip install --user pre-commit
$ pre-commit install
Afterwards pre-commit will run whenever you commit.
Afterwards ``pre-commit`` will run whenever you commit.
https://pre-commit.com/ is a framework for managing and maintaining multi-language pre-commit hooks
to ensure code-style and code formatting is consistent.
#. Install tox
@ -245,15 +245,7 @@ Here is a simple overview, with pytest-specific bits:
This command will run tests via the "tox" tool against Python 2.7 and 3.6
and also perform "lint" coding-style checks.
#. You can now edit your local working copy. Please follow PEP-8.
You can now make the changes you want and run the tests again as necessary.
If you have too much linting errors, try running::
$ tox -e fix-lint
To fix pep8 related errors.
#. You can now edit your local working copy and run the tests again as necessary. Please follow PEP-8 for naming.
You can pass different options to ``tox``. For example, to run tests on Python 2.7 and pass options
(e.g. enter pdb on failure) to pytest you can do::
@ -264,6 +256,9 @@ Here is a simple overview, with pytest-specific bits:
$ tox -e py36 -- testing/test_config.py
When committing, ``pre-commit`` will re-format the files if necessary.
#. Commit and push once your tests pass and you are happy with your change(s)::
$ git commit -a -m "<commit message>"

View File

@ -0,0 +1 @@
Fix regression in ``Node.add_marker`` by extracting the mark object of a ``MarkDecorator``.
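For context, a ``MarkDecorator`` is a thin wrapper around a ``Mark`` object, and the fix stores the wrapped ``Mark`` on the node rather than the decorator itself. A minimal sketch of the distinction (the marker name and arguments are made up for illustration)::

    import pytest

    deco = pytest.mark.skipif(False, reason="demo")  # a MarkDecorator
    mark = deco.mark                                 # the Mark object it wraps

    # the Mark carries the name/args/kwargs that mark-iteration code expects
    print(mark.name, mark.args, mark.kwargs)         # -> skipif (False,) {'reason': 'demo'}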

View File

@ -0,0 +1 @@
``Node.add_marker`` now supports an ``append=True/False`` parameter to determine whether the mark comes last (default) or first.
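A hedged sketch of how the new flag might be used from a ``conftest.py`` hook; ``pytest_collection_modifyitems`` is a real pytest hook, while the marker names are purely illustrative::

    import pytest

    def pytest_collection_modifyitems(items):
        for item in items:
            # default append=True: the mark is appended to item.own_markers
            item.add_marker("slow")
            # append=False: the mark is inserted at position 0,
            # so iter_markers() yields it before the others
            item.add_marker(pytest.mark.critical, append=False)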

View File

@ -1,17 +0,0 @@
import subprocess
def test_build_docs(tmpdir):
doctrees = tmpdir.join("doctrees")
htmldir = tmpdir.join("html")
subprocess.check_call(
["sphinx-build", "-W", "-bhtml", "-d", str(doctrees), ".", str(htmldir)]
)
def test_linkcheck(tmpdir):
doctrees = tmpdir.join("doctrees")
htmldir = tmpdir.join("html")
subprocess.check_call(
["sphinx-build", "-blinkcheck", "-d", str(doctrees), ".", str(htmldir)]
)

View File

@ -1,44 +0,0 @@
from __future__ import print_function
import textwrap
import inspect
class Writer(object):
def __init__(self, clsname):
self.clsname = clsname
def __enter__(self):
self.file = open("%s.api" % self.clsname, "w")
return self
def __exit__(self, *args):
self.file.close()
print("wrote", self.file.name)
def line(self, line):
self.file.write(line + "\n")
def docmethod(self, method):
doc = " ".join(method.__doc__.split())
indent = " "
w = textwrap.TextWrapper(initial_indent=indent, subsequent_indent=indent)
spec = inspect.getargspec(method)
del spec.args[0]
self.line(".. py:method:: " + method.__name__ + inspect.formatargspec(*spec))
self.line("")
self.line(w.fill(doc))
self.line("")
def pytest_funcarg__a(request):
with Writer("request") as writer:
writer.docmethod(request.getfixturevalue)
writer.docmethod(request.cached_setup)
writer.docmethod(request.addfinalizer)
writer.docmethod(request.applymarker)
def test_hello(a):
pass

View File

@ -1,117 +0,0 @@
===============================================
ATTIC documentation
===============================================
XXX REVIEW and remove the below XXX
Customizing the testing process
===============================
writing conftest.py files
-----------------------------------
You may put conftest.py files containing project-specific
configuration in your project's root directory, it's usually
best to put it just into the same directory level as your
topmost ``__init__.py``. In fact, ``pytest`` performs
an "upwards" search starting from the directory that you specify
to be tested and will look up configuration values right-to-left.
You may have options that reside e.g. in your home directory
but note that project specific settings will be considered
first. There is a flag that helps you debug your
conftest.py configurations::
pytest --trace-config
customizing the collecting and running process
-----------------------------------------------
To introduce different test items you can create
one or more ``conftest.py`` files in your project.
When the collection process traverses directories
and modules the default collectors will produce
custom Collectors and Items if they are found
in a local ``conftest.py`` file.
Customizing the collection process in a module
----------------------------------------------
If you have a module where you want to take responsibility for
collecting your own test Items and possibly even for executing
a test then you can provide `generative tests`_ that yield
callables and possibly arguments as a tuple. This is especially
useful for calling application test machinery with different
parameter sets but counting each of the calls as a separate
test.
.. _`generative tests`: features.html#generative-tests
The other extension possibility is about
specifying a custom test ``Item`` class which
is responsible for setting up and executing an underlying
test. Or you can extend the collection process for a whole
directory tree by putting Items in a ``conftest.py`` configuration file.
The collection process dynamically consults the *chain of conftest.py*
modules to determine collectors and items at ``Directory``, ``Module``,
``Class``, ``Function`` or ``Generator`` level respectively.
Customizing execution of Items and Functions
----------------------------------------------------
- ``pytest.Function`` test items control execution
of a test function through its ``function.runtest()`` method.
This method is responsible for performing setup and teardown
("Test Fixtures") for a test Function.
- ``Function.execute(target, *args)`` methods are invoked by
the default ``Function.run()`` to actually execute a python
function with the given (usually empty set of) arguments.
.. _`py-dev mailing list`: http://codespeak.net/mailman/listinfo/py-dev
.. _`test generators`: funcargs.html#test-generators
.. _`generative tests`:
generative tests: yielding parametrized tests
====================================================
Deprecated since 1.0 in favour of `test generators`_.
*Generative tests* are test methods that are *generator functions* which
``yield`` callables and their arguments. This is useful for running a
test function multiple times against different parameters. Example::
def test_generative():
for x in (42,17,49):
yield check, x
def check(arg):
assert arg % 7 == 0 # second generated tests fails!
Note that ``test_generative()`` will cause three tests
to get run, namely ``check(42)``, ``check(17)`` and ``check(49)``
of which the middle one will obviously fail.
To make it easier to distinguish the generated tests it is possible to specify an explicit name for them, like for example::
def test_generative():
for x in (42,17,49):
yield "case %d" % x, check, x
disabling a test class
----------------------
If you want to disable a complete test class you
can set the class-level attribute ``disabled``.
For example, in order to avoid running some tests on Win32::
class TestPosixOnly(object):
disabled = sys.platform == 'win32'
def test_xxx(self):
...

View File

@ -1,17 +0,0 @@
<html>
<head>
<meta http-equiv="refresh" content=" 1 ; URL=customize.html" />
</head>
<body>
<script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
try {
var pageTracker = _gat._getTracker("UA-7597274-3");
pageTracker._trackPageview();
} catch(err) {}</script>
</body>
</html>

View File

@ -1,17 +0,0 @@
<html>
<head>
<meta http-equiv="refresh" content=" 1 ; URL=plugin/xdist.html" />
</head>
<body>
<script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
try {
var pageTracker = _gat._getTracker("UA-7597274-3");
pageTracker._trackPageview();
} catch(err) {}</script>
</body>
</html>

View File

@ -1,17 +0,0 @@
<html>
<head>
<meta http-equiv="refresh" content=" 1 ; URL=customize.html" />
</head>
<body>
<script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
try {
var pageTracker = _gat._getTracker("UA-7597274-3");
pageTracker._trackPageview();
} catch(err) {}</script>
</body>
</html>

View File

@ -1,33 +0,0 @@
=======================================
pytest documentation index
=======================================
features_: overview and discussion of features.
quickstart_: getting started with writing a simple test.
`talks, tutorials, examples`_: tutorial examples, slides
funcargs_: powerful parametrized test function setup
`plugins`_: list of available plugins with usage examples and feature details.
customize_: configuration, customization, extensions
changelog_: history of changes covering last releases
**Continuous Integration of pytest's own tests and plugins with Hudson**:
`http://hudson.testrun.org/view/pytest`_
.. _`http://hudson.testrun.org/view/pytest`: http://hudson.testrun.org/view/pytest/
.. _changelog: ../changelog.html
.. _`plugins`: plugin/index.html
.. _`talks, tutorials, examples`: talks.html
.. _quickstart: quickstart.html
.. _features: features.html
.. _funcargs: funcargs.html
.. _customize: customize.html

View File

@ -1,13 +0,0 @@
Mission
====================================
``pytest`` strives to make testing a fun and no-boilerplate effort.
The tool is distributed as a `pytest` package. Its project independent
``pytest`` command line tool helps you to:
* rapidly collect and run tests
* run unit- or doctests, functional or integration tests
* distribute tests to multiple environments
* use local or global plugins for custom test types and setup

View File

@ -1,230 +0,0 @@
produce code coverage reports using the 'coverage' package, including support for distributed testing.
======================================================================================================
.. contents::
:local:
This plugin produces coverage reports. It supports centralised testing and distributed testing in
both load and each modes. It also supports coverage of subprocesses.
All features offered by the coverage package should be available, either through pytest-cov or
through coverage's config file.
Installation
------------
The `pytest-cov`_ package may be installed with pip or easy_install::
pip install pytest-cov
easy_install pytest-cov
.. _`pytest-cov`: https://pypi.org/project/pytest-cov/
Uninstallation
--------------
Uninstalling packages is supported by pip::
pip uninstall pytest-cov
However easy_install does not provide an uninstall facility.
.. IMPORTANT::
Ensure that you manually delete the init_covmain.pth file in your
site-packages directory.
This file starts coverage collection of subprocesses if appropriate during
site initialization at python startup.
Usage
-----
Centralised Testing
~~~~~~~~~~~~~~~~~~~
Centralised testing will report on the combined coverage of the main process and all of its
subprocesses.
Running centralised testing::
pytest --cov myproj tests/
Shows a terminal report::
-------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
Name Stmts Miss Cover
----------------------------------------
myproj/__init__ 2 0 100%
myproj/myproj 257 13 94%
myproj/feature4286 94 7 92%
----------------------------------------
TOTAL 353 20 94%
Distributed Testing: Load
~~~~~~~~~~~~~~~~~~~~~~~~~
Distributed testing with dist mode set to load will report on the combined coverage of all slaves.
The slaves may be spread out over any number of hosts and each slave may be located anywhere on the
file system. Each slave will have its subprocesses measured.
Running distributed testing with dist mode set to load::
pytest --cov myproj -n 2 tests/
Shows a terminal report::
-------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
Name Stmts Miss Cover
----------------------------------------
myproj/__init__ 2 0 100%
myproj/myproj 257 13 94%
myproj/feature4286 94 7 92%
----------------------------------------
TOTAL 353 20 94%
Again but spread over different hosts and different directories::
pytest --cov myproj --dist load
--tx ssh=memedough@host1//chdir=testenv1
--tx ssh=memedough@host2//chdir=/tmp/testenv2//python=/tmp/env1/bin/python
--rsyncdir myproj --rsyncdir tests --rsync examples
tests/
Shows a terminal report::
-------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
Name Stmts Miss Cover
----------------------------------------
myproj/__init__ 2 0 100%
myproj/myproj 257 13 94%
myproj/feature4286 94 7 92%
----------------------------------------
TOTAL 353 20 94%
Distributed Testing: Each
~~~~~~~~~~~~~~~~~~~~~~~~~
Distributed testing with dist mode set to each will report on the combined coverage of all slaves.
Since each slave is running all tests this allows generating a combined coverage report for multiple
environments.
Running distributed testing with dist mode set to each::
pytest --cov myproj --dist each
--tx popen//chdir=/tmp/testenv3//python=/usr/local/python27/bin/python
--tx ssh=memedough@host2//chdir=/tmp/testenv4//python=/tmp/env2/bin/python
--rsyncdir myproj --rsyncdir tests --rsync examples
tests/
Shows a terminal report::
---------------------------------------- coverage ----------------------------------------
platform linux2, python 2.6.5-final-0
platform linux2, python 2.7.0-final-0
Name Stmts Miss Cover
----------------------------------------
myproj/__init__ 2 0 100%
myproj/myproj 257 13 94%
myproj/feature4286 94 7 92%
----------------------------------------
TOTAL 353 20 94%
Reporting
---------
It is possible to generate any combination of the reports for a single test run.
The available reports are terminal (with or without missing line numbers shown), HTML, XML and
annotated source code.
The terminal report without line numbers (default)::
pytest --cov-report term --cov myproj tests/
-------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
Name Stmts Miss Cover
----------------------------------------
myproj/__init__ 2 0 100%
myproj/myproj 257 13 94%
myproj/feature4286 94 7 92%
----------------------------------------
TOTAL 353 20 94%
The terminal report with line numbers::
pytest --cov-report term-missing --cov myproj tests/
-------------------- coverage: platform linux2, python 2.6.4-final-0 ---------------------
Name Stmts Miss Cover Missing
--------------------------------------------------
myproj/__init__ 2 0 100%
myproj/myproj 257 13 94% 24-26, 99, 149, 233-236, 297-298, 369-370
myproj/feature4286 94 7 92% 183-188, 197
--------------------------------------------------
TOTAL 353 20 94%
The remaining three reports output to files without showing anything on the terminal (useful
when the output is going to a continuous integration server)::
pytest --cov-report html --cov-report xml --cov-report annotate --cov myproj tests/
Coverage Data File
------------------
The data file is erased at the beginning of testing to ensure clean data for each test run.
The data file is left at the end of testing so that it is possible to use normal coverage tools to
examine it.
Limitations
-----------
For distributed testing the slaves must have the pytest-cov package installed. This is needed since
the plugin must be registered through setuptools / distribute for pytest to start the plugin on the
slave.
For subprocess measurement environment variables must make it from the main process to the
subprocess. The python used by the subprocess must have pytest-cov installed. The subprocess must
do normal site initialization so that the environment variables can be detected and coverage
started.
Acknowledgments
----------------
Holger Krekel for pytest with its distributed testing support.
Ned Batchelder for coverage and its ability to combine the coverage results of parallel runs.
Whilst this plugin has been built fresh from the ground up to support distributed testing it has
been influenced by the work done on pytest-coverage (Ross Lawley, James Mills, Holger Krekel) and
nose-cover (Jason Pellerin) which are other coverage plugins for pytest and nose respectively.
No doubt others have contributed to these tools as well.
command line options
--------------------
``--cov=path``
measure coverage for filesystem path (multi-allowed)
``--cov-report=type``
type of report to generate: term, term-missing, annotate, html, xml (multi-allowed)
``--cov-config=path``
config file for coverage, default: .coveragerc
.. include:: links.txt

View File

@ -1,51 +0,0 @@
Write and report coverage data with the 'coverage' package.
===========================================================
.. contents::
:local:
Note: Original code by Ross Lawley.
Install
--------------
Use pip to (un)install::
pip install pytest-coverage
pip uninstall pytest-coverage
or alternatively use easy_install to install::
easy_install pytest-coverage
Usage
-------------
To get full test coverage reports for a particular package, type::
pytest --cover-report=report
command line options
--------------------
``--cover=COVERPACKAGES``
(multi allowed) only include info from specified package.
``--cover-report=REPORT_TYPE``
html: Directory for html output.
report: Output a text report.
annotate: Annotate your source code for which lines were executed and which were not.
xml: Output an xml report compatible with the cobertura plugin for hudson.
``--cover-directory=DIRECTORY``
Directory for the reports (html / annotate results) defaults to ./coverage
``--cover-xml-file=XML_FILE``
File for the xml report defaults to ./coverage.xml
``--cover-show-missing``
Show missing files
``--cover-ignore-errors=IGNORE_ERRORS``
Ignore errors of finding source files for code.
.. include:: links.txt

View File

@ -1,6 +0,0 @@
pytest_django plugin (EXTERNAL)
==========================================
pytest_django is a plugin for ``pytest`` that provides a set of useful tools for testing Django applications; check out Ben Firshman's `pytest_django github page`_.
.. _`pytest_django github page`: http://github.com/bfirsh/pytest_django/tree/master

View File

@ -1,44 +0,0 @@
report test coverage using the 'figleaf' package.
=================================================
.. contents::
:local:
Install
---------------
To install the plugin issue::
easy_install pytest-figleaf # or
pip install pytest-figleaf
and if you are using pip you can also uninstall::
pip uninstall pytest-figleaf
Usage
---------------
After installation you can simply type::
pytest --figleaf [...]
to enable figleaf coverage in your test run. A default ".figleaf" data file
and "html" directory will be created. You can use command line options
to control where data and html files are created.
command line options
--------------------
``--figleaf``
trace python coverage with figleaf and write HTML for files below the current working dir
``--fig-data=dir``
set tracing file, default: ".figleaf".
``--fig-html=dir``
set html reporting dir, default "html".
.. include:: links.txt

View File

@ -1,36 +0,0 @@
provide version info, conftest/environment config names.
========================================================
.. contents::
:local:
command line options
--------------------
``--version``
display py lib version and import information.
``-p name``
early-load given plugin (multi-allowed).
``--trace-config``
trace considerations of conftest.py files.
``--debug``
generate and show internal debugging information.
``--help-config``
show available conftest.py and ENV-variable names.
Start improving this plugin in 30 seconds
=========================================
1. Download `pytest_helpconfig.py`_ plugin source code
2. put it somewhere as ``pytest_helpconfig.py`` into your import path
3. a subsequent ``pytest`` run will use your local version
Check out customize_, other plugins_ or `get in contact`_.
.. include:: links.txt

View File

@ -1,68 +0,0 @@
advanced python testing
=======================
skipping_ advanced skipping for python test functions, classes or modules.
mark_ generic mechanism for marking python functions.
pdb_ interactive debugging with the Python Debugger.
figleaf_ (external) report test coverage using the 'figleaf' package.
monkeypatch_ safely patch object attributes, dicts and environment variables.
coverage_ (external) Write and report coverage data with the 'coverage' package.
cov_ (external) produce code coverage reports using the 'coverage' package, including support for distributed testing.
capture_ configurable per-test stdout/stderr capturing mechanisms.
capturelog_ (external) capture output of logging module.
recwarn_ helpers for asserting deprecation and other warnings.
tmpdir_ provide temporary directories to test functions.
distributed testing, CI and deployment
======================================
xdist_ (external) loop on failing tests, distribute test runs to CPUs and hosts.
pastebin_ submit failure or test session information to a pastebin service.
junitxml_ logging of test results in JUnit-XML format, for use with Hudson
resultlog_ non-xml machine-readable logging of test results.
genscript_ generate standalone test script to be distributed along with an application.
testing domains and conventions codecheckers
============================================
oejskit_ (external) run javascript tests in real life browsers
django_ (external) for testing django applications
unittest_ automatically discover and run traditional "unittest.py" style tests.
nose_ nose-compatibility plugin: allow to run nose test suites natively.
doctest_ collect and execute doctests from modules and test files.
restdoc_ perform ReST syntax, local and remote reference tests on .rst/.txt files.
internal, debugging, help functionality
=======================================
helpconfig_ provide version info, conftest/environment config names.
terminal_ Implements terminal reporting of the full testing process.
hooklog_ log invocations of extension hooks to a file.
.. include:: links.txt

View File

@ -1,45 +0,0 @@
.. _`helpconfig`: helpconfig.html
.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_recwarn.py
.. _`unittest`: unittest.html
.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_monkeypatch.py
.. _`pastebin`: pastebin.html
.. _`skipping`: skipping.html
.. _`plugins`: index.html
.. _`mark`: mark.html
.. _`tmpdir`: tmpdir.html
.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_doctest.py
.. _`capture`: capture.html
.. _`pytest_nose.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_nose.py
.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_restdoc.py
.. _`restdoc`: restdoc.html
.. _`xdist`: xdist.html
.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pastebin.py
.. _`pytest_tmpdir.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_tmpdir.py
.. _`terminal`: terminal.html
.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_hooklog.py
.. _`capturelog`: capturelog.html
.. _`junitxml`: junitxml.html
.. _`pytest_skipping.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_skipping.py
.. _`checkout the pytest development version`: ../../install.html#checkout
.. _`pytest_helpconfig.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_helpconfig.py
.. _`oejskit`: oejskit.html
.. _`doctest`: doctest.html
.. _`pytest_mark.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_mark.py
.. _`get in contact`: ../../contact.html
.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_capture.py
.. _`figleaf`: figleaf.html
.. _`customize`: ../customize.html
.. _`hooklog`: hooklog.html
.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_terminal.py
.. _`recwarn`: recwarn.html
.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_pdb.py
.. _`monkeypatch`: monkeypatch.html
.. _`coverage`: coverage.html
.. _`resultlog`: resultlog.html
.. _`cov`: cov.html
.. _`pytest_junitxml.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_junitxml.py
.. _`django`: django.html
.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_unittest.py
.. _`nose`: nose.html
.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/1.3.4/py/_plugin/pytest_resultlog.py
.. _`pdb`: pdb.html

View File

@ -1,56 +0,0 @@
nose-compatibility plugin: allow to run nose test suites natively.
==================================================================
.. contents::
:local:
This is an experimental plugin that allows running tests written
in 'nosetests' style with ``pytest``.
Usage
-------------
type::
pytest # instead of 'nosetests'
and you should be able to run nose-style tests and at the same
time make full use of pytest's capabilities.
Supported nose Idioms
----------------------
* setup and teardown at module/class/method level
* SkipTest exceptions and markers
* setup/teardown decorators
* yield-based tests and their setup
* general usage of nose utilities
Unsupported idioms / issues
----------------------------------
- nose-style doctests are not collected and executed correctly;
fixtures don't work either.
- no nose-configuration is recognized
If you find other issues or have suggestions please run::
pytest --pastebin=all
and send the resulting URL to a ``pytest`` contact channel,
at best to the mailing list.
Start improving this plugin in 30 seconds
=========================================
1. Download `pytest_nose.py`_ plugin source code
2. put it somewhere as ``pytest_nose.py`` into your import path
3. a subsequent ``pytest`` run will use your local version
Check out customize_, other plugins_ or `get in contact`_.
.. include:: links.txt

View File

@ -1,12 +0,0 @@
pytest_oejskit plugin (EXTERNAL)
==========================================
The `oejskit`_ offers a ``pytest`` plugin for running Javascript tests in live browsers. Running inside the browsers comes with some speed cost; on the other hand, it means the code is tested against real-world DOM implementations.
The approach makes it possible to write integration tests such that the JavaScript code is tested against server-side Python code mocked as necessary. Any server-side framework that can already be exposed through WSGI (or for which a subset of WSGI can be written to accommodate jskit's own needs) can play along.
For more info and download please visit the `oejskit PyPI`_ page.
.. _`oejskit`:
.. _`oejskit PyPI`: https://pypi.org/project/oejskit/
.. source link 'http://bitbucket.org/pedronis/js-infrastructure/src/tip/pytest_jstests.py',

View File

@ -1,38 +0,0 @@
Implements terminal reporting of the full testing process.
==========================================================
.. contents::
:local:
This is a good source for looking at the various reporting hooks.
command line options
--------------------
``-v, --verbose``
increase verbosity.
``-r chars``
show extra test summary info as specified by chars (f)ailed, (s)kipped, (x)failed, (X)passed.
``-l, --showlocals``
show locals in tracebacks (disabled by default).
``--tb=style``
traceback print mode (long/short/line/no).
``--full-trace``
don't cut any tracebacks (default is to cut).
``--fixtures``
show available fixtures, sorted by plugin appearance (fixtures with leading ``_`` are only shown with '-v')
Start improving this plugin in 30 seconds
=========================================
1. Download `pytest_terminal.py`_ plugin source code
2. put it somewhere as ``pytest_terminal.py`` into your import path
3. a subsequent ``pytest`` run will use your local version
Check out customize_, other plugins_ or `get in contact`_.
.. include:: links.txt

View File

@ -1,172 +0,0 @@
loop on failing tests, distribute test runs to CPUs and hosts.
==============================================================
.. contents::
:local:
The `pytest-xdist`_ plugin extends ``pytest`` with some unique
test execution modes:
* Looponfail: run your tests repeatedly in a subprocess. After each run
``pytest`` waits until a file in your project changes and then re-runs the
previously failing tests. This is repeated until all tests pass after which
again a full run is performed.
* Load-balancing: if you have multiple CPUs or hosts you can use
those for a combined test run. This allows you to speed up
development or to use special resources of remote machines.
* Multi-Platform coverage: you can specify different Python interpreters
or different platforms and run tests in parallel on all of them.
Before running tests remotely, ``pytest`` efficiently synchronizes your
program source code to the remote place. All test results
are reported back and displayed to your local test session.
You may specify different Python versions and interpreters.
.. _`pytest-xdist`: https://pypi.org/project/pytest-xdist/
Usage examples
---------------------
Speed up test runs by sending tests to multiple CPUs
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
To send tests to multiple CPUs, type::
pytest -n NUM
Especially for longer running tests or tests requiring
a lot of IO this can lead to considerable speed ups.
Running tests in a Python subprocess
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
To instantiate a python2.4 subprocess and send tests to it, you may type::
pytest -d --tx popen//python=python2.4
This will start a subprocess which is run with the "python2.4"
Python interpreter, found in your system binary lookup path.
If you prefix the --tx option value like this::
--tx 3*popen//python=python2.4
then three subprocesses will be created and tests
will be load-balanced across these three processes.
Sending tests to remote SSH accounts
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Suppose you have a package ``mypkg`` which contains some
tests that you can successfully run locally. And you
have a ssh-reachable machine ``myhost``. Then
you can ad-hoc distribute your tests by typing::
pytest -d --tx ssh=myhostpopen --rsyncdir mypkg mypkg
This will synchronize your ``mypkg`` package directory
to a remote ssh account and then locally collect tests
and send them to remote places for execution.
You can specify multiple ``--rsyncdir`` directories
to be sent to the remote side.
**NOTE:** For ``pytest`` to collect and send tests correctly
you not only need to make sure all code and tests
directories are rsynced, but that any test (sub) directory
also has an ``__init__.py`` file because internally
``pytest`` references tests using their fully qualified python
module path. **You will otherwise get strange errors**
during setup of the remote side.
Sending tests to remote Socket Servers
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Download the single-module `socketserver.py`_ Python program
and run it like this::
python socketserver.py
It will tell you that it starts listening on the default
port. You can now on your home machine specify this
new socket host with something like this::
pytest -d --tx socket=192.168.1.102:8888 --rsyncdir mypkg mypkg
.. _`atonce`:
Running tests on many platforms at once
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
The basic command to run tests on multiple platforms is::
pytest --dist=each --tx=spec1 --tx=spec2
If you specify a Windows host, an OSX host and a Linux
environment, this command will send each test to all
platforms - and report back failures from all platforms
at once. The specification strings use the `xspec syntax`_.
.. _`xspec syntax`: http://codespeak.net/execnet/trunk/basics.html#xspec
.. _`socketserver.py`: http://codespeak.net/svn/py/dist/py/execnet/script/socketserver.py
.. _`execnet`: http://codespeak.net/execnet
Specifying test exec environments in a conftest.py
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
Instead of specifying command line options, you can
put option values in a ``conftest.py`` file like this::
option_tx = ['ssh=myhost//python=python2.7', 'popen//python=python2.7']
option_dist = True
Any commandline ``--tx`` specifications will add to the list of
available execution environments.
Specifying "rsync" dirs in a conftest.py
+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
In your ``mypkg/conftest.py`` you may specify directories to synchronise
or to exclude::
rsyncdirs = ['.', '../plugins']
rsyncignore = ['_cache']
These directory specifications are relative to the directory
where the ``conftest.py`` is found.
command line options
--------------------
``-f, --looponfail``
run tests in subprocess, wait for modified files and re-run failing test set until all pass.
``-n numprocesses``
shortcut for '--dist=load --tx=NUM*popen'
``--boxed``
box each test run in a separate process (unix)
``--dist=distmode``
set mode for distributing tests to exec environments.
each: send each test to each available environment.
load: send each test to one available environment so it is run only once.
(default) no: run tests inprocess, don't distribute.
``--tx=xspec``
add a test execution environment. some examples: --tx popen//python=python2.7 --tx socket=192.168.1.102:8888 --tx ssh=user@codespeak.net//chdir=testcache
``-d``
load-balance tests. shortcut for '--dist=load'
``--rsyncdir=dir1``
add directory for rsyncing to remote tx nodes.
.. include:: links.txt

View File

@ -1,17 +0,0 @@
<html>
<head>
<meta http-equiv="refresh" content=" 1 ; URL=index.html" />
</head>
<body>
<script type="text/javascript">
var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
</script>
<script type="text/javascript">
try {
var pageTracker = _gat._getTracker("UA-7597274-3");
pageTracker._trackPageview();
} catch(err) {}</script>
</body>
</html>

View File

@ -281,7 +281,18 @@ def _marked(func, mark):
class MarkInfo(object):
""" Marking object created by :class:`MarkDecorator` instances. """
_marks = attr.ib()
_marks = attr.ib(convert=list)
@_marks.validator
def validate_marks(self, attribute, value):
for item in value:
if not isinstance(item, Mark):
raise ValueError(
"MarkInfo expects Mark instances, got {!r} ({!r})".format(
item, type(item)
)
)
combined = attr.ib(
repr=False,
default=attr.Factory(

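The ``attr.ib(convert=list)`` plus ``@_marks.validator`` combination above follows the usual ``attrs`` pattern: the converter runs first, then the validator checks each element. A standalone sketch of that pattern, assuming a recent ``attrs`` release (which spells the argument ``converter=``); the class and field names are invented::

    import attr

    @attr.s
    class IntBag(object):
        items = attr.ib(converter=list)  # any iterable is converted to a list first

        @items.validator
        def _check_items(self, attribute, value):
            # then every element is validated
            for item in value:
                if not isinstance(item, int):
                    raise ValueError("IntBag expects ints, got {!r}".format(item))

    IntBag(items=(1, 2, 3))   # ok, stored as [1, 2, 3]
    # IntBag(items=["a"])     # would raise ValueError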
View File

@ -173,10 +173,13 @@ class Node(object):
chain.reverse()
return chain
def add_marker(self, marker):
""" dynamically add a marker object to the node.
def add_marker(self, marker, append=True):
"""dynamically add a marker object to the node.
``marker`` can be a string or pytest.mark.* instance.
:type marker: ``str`` or ``pytest.mark.*`` object
:param marker:
``append=True`` whether to append the marker,
if ``False`` insert at position ``0``.
"""
from _pytest.mark import MarkDecorator, MARK_GEN
@ -185,7 +188,10 @@ class Node(object):
elif not isinstance(marker, MarkDecorator):
raise ValueError("is not a string or pytest.mark.* Marker")
self.keywords[marker.name] = marker
self.own_markers.append(marker)
if append:
self.own_markers.append(marker.mark)
else:
self.own_markers.insert(0, marker.mark)
def iter_markers(self, name=None):
"""

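The insertion position matters because mark consumers typically act on the first matching mark returned by ``iter_markers()``. A hedged sketch of such a consumer; ``first_marker`` is an invented helper (``Node.get_closest_marker`` serves a similar purpose in recent pytest versions)::

    def first_marker(item, name):
        """Return the first mark named *name* yielded by iter_markers(), or None."""
        for mark in item.iter_markers(name=name):
            return mark
        return None

    # With add_marker("a"); add_marker("b"); add_marker("c", append=False),
    # iter_markers() yields c, a, b -- so first_marker(item, name) sees "c" first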
View File

@ -108,6 +108,9 @@ class TestMockDecoration(object):
values = getfuncargnames(f)
assert values == ("x",)
@pytest.mark.xfail(
strict=False, reason="getfuncargnames breaks if mock is imported"
)
def test_wrapped_getfuncargnames_patching(self):
from _pytest.compat import getfuncargnames

View File

@ -1,7 +1,7 @@
from __future__ import absolute_import, division, print_function
import os
import sys
import mock
import pytest
from _pytest.mark import (
MarkGenerator as Mark,
@ -9,6 +9,7 @@ from _pytest.mark import (
transfer_markers,
EMPTY_PARAMETERSET_OPTION,
)
from _pytest.nodes import Node
ignore_markinfo = pytest.mark.filterwarnings(
"ignore:MarkInfo objects:_pytest.deprecated.RemovedInPytest4Warning"
@ -1123,3 +1124,20 @@ def test_mark_expressions_no_smear(testdir):
passed_k, skipped_k, failed_k = reprec_keywords.countoutcomes()
assert passed_k == 2
assert skipped_k == failed_k == 0
def test_addmarker_getmarker():
node = Node("Test", config=mock.Mock(), session=mock.Mock(), nodeid="Test")
node.add_marker(pytest.mark.a(1))
node.add_marker("b")
node.get_marker("a").combined
node.get_marker("b").combined
def test_addmarker_order():
node = Node("Test", config=mock.Mock(), session=mock.Mock(), nodeid="Test")
node.add_marker("a")
node.add_marker("b")
node.add_marker("c", append=False)
extracted = [x.name for x in node.iter_markers()]
assert extracted == ["c", "a", "b"]

View File

@ -73,6 +73,7 @@ commands = {[testenv:py27-pexpect]commands}
deps =
pytest-xdist>=1.13
hypothesis>=3.56
mock
distribute = true
changedir=testing
setenv =