commit cb9b8e4632

--- a/.hgtags
+++ b/.hgtags
@@ -13,3 +13,6 @@ c63f35c266cbb26dad6b87b5e115d65685adf448 1.0.0b8
 c63f35c266cbb26dad6b87b5e115d65685adf448 1.0.0b8
 0eaa0fdf2ba0163cf534dc2eff4ba2e5fc66c261 1.0.0b8
 e2a60653cb490aeed81bbbd83c070b99401c211c 1.0.0b9
+5ea0cdf7854c3d4278d36eda94a2b68483a0e211 1.0.0
+5ea0cdf7854c3d4278d36eda94a2b68483a0e211 1.0.0
+7acde360d94b6a2690ce3d03ff39301da84c0a2b 1.0.0

--- a/CHANGELOG
+++ b/CHANGELOG
@@ -1,3 +1,9 @@
+Changes between 1.0.0b9 and 1.0.0
+=====================================
+
+* more terse reporting: try to show filesystem paths relative to the current dir
+* improve xfail output a bit
+
 Changes between 1.0.0b8 and 1.0.0b9
 =====================================
 
--- a/MANIFEST
+++ b/MANIFEST
@@ -28,17 +28,18 @@ doc/test/examples.txt
 doc/test/extend.txt
 doc/test/features.txt
 doc/test/funcargs.txt
+doc/test/plugin/capture.txt
 doc/test/plugin/doctest.txt
 doc/test/plugin/figleaf.txt
 doc/test/plugin/hooklog.txt
 doc/test/plugin/hookspec.txt
 doc/test/plugin/index.txt
-doc/test/plugin/iocapture.txt
 doc/test/plugin/keyword.txt
 doc/test/plugin/links.txt
 doc/test/plugin/monkeypatch.txt
+doc/test/plugin/oejskit.txt
+doc/test/plugin/pastebin.txt
 doc/test/plugin/pdb.txt
-doc/test/plugin/pocoo.txt
 doc/test/plugin/recwarn.txt
 doc/test/plugin/restdoc.txt
 doc/test/plugin/resultlog.txt
@@ -59,7 +60,9 @@ example/execnet/svn-sync-repo.py
 example/execnet/sysinfo.py
 example/funcarg/conftest.py
 example/funcarg/costlysetup/conftest.py
+example/funcarg/costlysetup/sub1/__init__.py
 example/funcarg/costlysetup/sub1/test_quick.py
+example/funcarg/costlysetup/sub2/__init__.py
 example/funcarg/costlysetup/sub2/test_two.py
 example/funcarg/mysetup/__init__.py
 example/funcarg/mysetup/conftest.py
@@ -315,6 +318,7 @@ py/test/dist/dsession.py
 py/test/dist/mypickle.py
 py/test/dist/nodemanage.py
 py/test/dist/testing/__init__.py
+py/test/dist/testing/acceptance_test.py
 py/test/dist/testing/test_dsession.py
 py/test/dist/testing/test_mypickle.py
 py/test/dist/testing/test_nodemanage.py
@@ -333,28 +337,27 @@ py/test/plugin/__init__.py
 py/test/plugin/conftest.py
 py/test/plugin/hookspec.py
 py/test/plugin/pytest__pytest.py
+py/test/plugin/pytest_capture.py
 py/test/plugin/pytest_default.py
 py/test/plugin/pytest_doctest.py
 py/test/plugin/pytest_execnetcleanup.py
 py/test/plugin/pytest_figleaf.py
 py/test/plugin/pytest_hooklog.py
-py/test/plugin/pytest_iocapture.py
 py/test/plugin/pytest_keyword.py
 py/test/plugin/pytest_monkeypatch.py
+py/test/plugin/pytest_pastebin.py
 py/test/plugin/pytest_pdb.py
-py/test/plugin/pytest_pocoo.py
 py/test/plugin/pytest_pylint.py
 py/test/plugin/pytest_pytester.py
 py/test/plugin/pytest_recwarn.py
 py/test/plugin/pytest_restdoc.py
-py/test/plugin/pytest_resultdb.py
 py/test/plugin/pytest_resultlog.py
 py/test/plugin/pytest_runner.py
 py/test/plugin/pytest_terminal.py
 py/test/plugin/pytest_tmpdir.py
 py/test/plugin/pytest_unittest.py
 py/test/plugin/pytest_xfail.py
-py/test/plugin/test_pytest_iocapture.py
+py/test/plugin/test_pytest_capture.py
 py/test/plugin/test_pytest_runner.py
 py/test/plugin/test_pytest_runner_xunit.py
 py/test/plugin/test_pytest_terminal.py

@@ -1,54 +1,63 @@
-py.test / py lib 1.0.0: new test plugins, funcargs and cleanups
-============================================================================
+pylib 1.0.0 released: testing-with-python innovations continue
+--------------------------------------------------------------------
 
-Welcome to the 1.0 release bringing new flexibility and
-power to testing with Python. Main news:
-
-* funcargs - new flexibilty and zero-boilerplate fixtures for Python testing:
-
-  - separate test code, configuration and setup
+Took a few betas but finally I uploaded a `1.0.0 py lib release`_,
+featuring the mature and powerful py.test tool and "execnet-style"
+*elastic* distributed programming. With the new release, there are
+many new advanced automated testing features - here is a quick summary:
+
+* funcargs_ - pythonic zero-boilerplate fixtures for Python test functions:
+
+  - totally separates test code, test configuration and test setup
   - ideal for integration and functional tests
-  - more powerful dynamic generation of tests
+  - allows for flexible and natural test parametrization schemes
 
-* new plugin architecture, allowing project-specific and
-  cross-project single-file plugins. Many useful examples
-  shipped by default:
+* new `plugin architecture`_, allowing easy-to-write project-specific and cross-project single-file plugins. The most notable new external plugin is `oejskit`_ which naturally enables **running and reporting of javascript-unittests in real-life browsers**.
 
-  * pytest_unittest.py: run and integrate traditional unittest.py tests
-  * pytest_xfail.py: mark tests as "expected to fail" and report separately.
-  * pytest_pocoo.py: automatically send tracebacks to pocoo paste service
-  * pytest_monkeypatch.py: safely monkeypatch from tests
-  * pytest_figleaf.py: generate html coverage reports
-  * pytest_resultlog.py: generate buildbot-friendly reporting output
+* many new features done in easy-to-improve `default plugins`_, highlights:
 
-  and many more!
+  * xfail: mark tests as "expected to fail" and report separately.
+  * pastebin: automatically send tracebacks to pocoo paste service
+  * capture: flexibly capture stdout/stderr of subprocesses, per-test ...
+  * monkeypatch: safely monkeypatch modules/classes from within tests
+  * unittest: run and integrate traditional unittest.py tests
+  * figleaf: generate html coverage reports with the figleaf module
+  * resultlog: generate buildbot-friendly reporting output
+  * ...
 
-* distributed testing and distributed execution (py.execnet):
+* `distributed testing`_ and `elastic distributed execution`_:
 
-  - new unified "TX" URL scheme for specifying remote resources
-  - new sync/async ways to handle multiple remote processes
+  - new unified "TX" URL scheme for specifying remote processes
+  - new distribution modes "--dist=each" and "--dist=load"
+  - new sync/async ways to handle 1:N communication
   - improved documentation
 
-See the py.test and py lib documentation for more info:
+The py lib continues to offer most of the functionality used by
+the testing tool in `independent namespaces`_.
+
+Some non-test related code, notably greenlets/co-routines and
+api-generation now live as their own projects which simplifies the
+installation procedure because no C-Extensions are required anymore.
+
+The whole package should work well with Linux, Win32 and OSX, on Python
+2.3, 2.4, 2.5 and 2.6. (Expect Python3 compatibility soon!)
+
+For more info, see the py.test and py lib documentation:
 
 http://pytest.org
 
 http://pylib.org
 
-The py lib now is smaller and focuses more on offering
-functionality used by the py.test tool in independent
-namespaces:
-
-* py.execnet: elastic code deployment to SSH, Socket and local sub processes
-* py.code: higher-level introspection and dynamic generation of python code
-* py.path: path abstractions over local and subversion files
-
-Some non-strictly-test related code, notably greenlets/co-routines
-and apigen now live on their own and have been removed, also simplifying
-the installation procedures.
-
-The whole package works well with Linux, OSX and Win32, on
-Python 2.3, 2.4, 2.5 and 2.6. (Expect Python3 compatibility soon!)
-
-best,
+have fun,
 holger
+
+.. _`independent namespaces`: http://pylib.org
+.. _`funcargs`: http://codespeak.net/py/dist/test/funcargs.html
+.. _`plugin architecture`: http://codespeak.net/py/dist/test/extend.html
+.. _`default plugins`: http://codespeak.net/py/dist/test/plugin/index.html
+.. _`distributed testing`: http://codespeak.net/py/dist/test/dist.html
+.. _`elastic distributed execution`: http://codespeak.net/py/dist/execnet.html
+.. _`1.0.0 py lib release`: http://pypi.python.org/pypi/py
+.. _`oejskit`: http://codespeak.net/py/dist/test/plugin/oejskit.html
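
The "TX" URL scheme and the new distribution modes named above combine on the
command line. A rough sketch, based on the option spellings exercised by the
acceptance tests later in this patch (treat the exact specs as illustrative)::

    # three local subprocesses share the test load
    py.test --dist=load --tx=3*popen

    # run the whole suite once per interpreter
    py.test --dist=each --tx popen//python=python2.4 --tx popen//python=python2.5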

@@ -14,6 +14,17 @@ class css:
 class Page(object): 
     doctype = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
                ' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
+    googlefragment = """
+<script type="text/javascript">
+var gaJsHost = (("https:" == document.location.protocol) ? "https://ssl." : "http://www.");
+document.write(unescape("%3Cscript src='" + gaJsHost + "google-analytics.com/ga.js' type='text/javascript'%3E%3C/script%3E"));
+</script>
+<script type="text/javascript">
+try {
+var pageTracker = _gat._getTracker("UA-7597274-3");
+pageTracker._trackPageview();
+} catch(err) {}</script>
+"""
 
     def __init__(self, project, title, targetpath, stylesheeturl=None,
                  type="text/html", encoding="ISO-8859-1"):
@@ -47,8 +58,10 @@ class Page(object):
     def fill_menubar(self):
         items = [
             self.a_docref("pylib index", "index.html"),
-            self.a_docref("py.test index", "test/test.html"),
-            self.a_docref("py.test plugins", "test/plugin/index.html"),
+            self.a_docref("test doc-index", "test/test.html"),
+            self.a_docref("test quickstart", "test/quickstart.html"),
+            self.a_docref("test features", "test/features.html"),
+            self.a_docref("test plugins", "test/plugin/index.html"),
             self.a_docref("py.execnet", "execnet.html"),
             #self.a_docref("py.code", "code.html"),
             #self.a_apigenref("api", "api/index.html"),
@@ -91,6 +104,7 @@ class Page(object):
 
     def unicode(self, doctype=True):
         page = self._root.unicode()
+        page = page.replace("</body>", self.googlefragment + "</body>")
         if doctype:
             return self.doctype + page
         else:

@@ -27,7 +27,7 @@ Other (minor) support functionality
 `miscellaneous features`_ describes some small but nice py lib features.
 
 
-.. _`PyPI project page`: http://pypi.python.org/pypi?%3Aaction=pkg_edit&name=py
+.. _`PyPI project page`: http://pypi.python.org/pypi/py/
 
 For the latest Release, see `PyPI project page`_
 

--- a/doc/test/funcargs.txt
+++ b/doc/test/funcargs.txt
@@ -3,19 +3,21 @@
 ==========================================================
 
 Since version 1.0 py.test features the "funcarg" mechanism which
-allows a test function to take arguments which will be independently
-provided by factory functions. Factory functions are automatically
-discovered and allow to encapsulate all neccessary setup and glue code
-for running tests. Compared to `xUnit style`_ the new mechanism is
-meant to:
+allows a test function to take arguments independently provided
+by factory functions. Factory functions allow encapsulating
+all setup and fixture glue code into nicely separated objects
+and provide a natural way for writing python test functions.
+Compared to `xUnit style`_ the new mechanism is meant to:
 
 * make test functions easier to write and to read
 * isolate test fixture creation to a single place
 * bring new flexibility and power to test state management
-* enable running of a test function with different values
+* naturally extend towards parametrizing test functions
+  with multiple argument sets
+  (superseding `old-style generative tests`_)
-* to enable creation of helper objects that interact with the execution
-  of a test function, see the `blog post about the monkeypatch funcarg`_.
+* enable creation of zero-boilerplate test helper objects that
+  interact with the execution of a test function, see the
+  `blog post about the monkeypatch funcarg`_.
 
 If you find issues or have further suggestions for improving
 the mechanism you are welcome to checkout `contact possibilities`_ page.
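
For readers new to the mechanism, a minimal funcarg setup looks roughly
like this (``MyApp`` and its methods are hypothetical placeholders; the
factory-naming convention and ``request.addfinalizer`` are the documented
1.0 API)::

    # conftest.py - the factory encapsulates setup and teardown
    def pytest_funcarg__mysetup(request):
        app = MyApp()
        request.addfinalizer(app.shutdown)  # runs after the test finishes
        return app

    # test_app.py - the test simply names the argument it needs
    def test_answer(mysetup):
        assert mysetup.question() == 42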

--- a/doc/test/plugin/iocapture.txt
+++ b/doc/test/plugin/capture.txt
@@ -1,6 +1,6 @@
 
-pytest_iocapture plugin
-=======================
+pytest_capture plugin
+=====================
 
 configurable per-test stdout/stderr capturing mechanisms.
 
@@ -113,8 +113,8 @@ command line options
 
 ``-s``
     shortcut for --capture=no.
-``--capture=capture``
-    set IO capturing method during tests: sys|fd|no.
+``--capture=method``
+    set capturing method during tests: fd (default)|sys|no.
 
 Start improving this plugin in 30 seconds
 =========================================
@@ -122,8 +122,8 @@ Start improving this plugin in 30 seconds
 
 Do you find the above documentation or the plugin itself lacking?
 
-1. Download `pytest_iocapture.py`_ plugin source code
-2. put it somewhere as ``pytest_iocapture.py`` into your import path
+1. Download `pytest_capture.py`_ plugin source code
+2. put it somewhere as ``pytest_capture.py`` into your import path
 3. a subsequent ``py.test`` run will use your local version
 
 Further information: extend_ documentation, other plugins_ or contact_.
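
Putting the options above together, typical invocations look like::

    py.test                  # capture via file descriptors (the default)
    py.test --capture=sys    # replace sys.stdout/sys.stderr with in-memory files
    py.test -s               # shortcut for --capture=no: show output live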

--- a/doc/test/plugin/hookspec.txt
+++ b/doc/test/plugin/hookspec.txt
@@ -39,7 +39,7 @@ hook specification sourcecode
 def pytest_collectstart(collector):
     """ collector starts collecting. """
 
-def pytest_collectreport(rep):
+def pytest_collectreport(report):
     """ collector finished collecting. """
 
 def pytest_deselected(items):
@@ -89,7 +89,7 @@ hook specification sourcecode
     """ make ItemTestReport for the given item and call outcome. """
 pytest_runtest_makereport.firstresult = True
 
-def pytest_runtest_logreport(rep):
+def pytest_runtest_logreport(report):
     """ process item test report. """
 
 # special handling for final teardown - somewhat internal for now
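
A conftest.py or single-file plugin implements such a hook simply by
defining a function of the same name; a minimal sketch against the
renamed ``report`` argument::

    # conftest.py
    def pytest_runtest_logreport(report):
        # report carries .item, .passed/.failed/.skipped, .longrepr, ...
        if report.failed:
            print "failed:", report.item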
--- a/doc/test/plugin/index.txt
+++ b/doc/test/plugin/index.txt
@@ -2,13 +2,13 @@
 Plugins related to Python test functions and programs
 =====================================================
 
-xfail_ mark python tests as expected-to-fail and report them separately.
+xfail_ mark python test functions as expected-to-fail and report them separately.
 
 figleaf_ write and report coverage data with 'figleaf'.
 
 monkeypatch_ safely patch object attributes, dicts and environment variables.
 
-iocapture_ configurable per-test stdout/stderr capturing mechanisms.
+capture_ configurable per-test stdout/stderr capturing mechanisms.
 
 recwarn_ helpers for asserting deprecation and other warnings.
 
@@ -28,7 +28,7 @@ restdoc_ perform ReST syntax, local and remote reference tests on .rst/.txt file
 Plugins for generic reporting and failure logging
 =================================================
 
-pocoo_ submit failure information to paste.pocoo.org
+pastebin_ submit failure or test session information to a pastebin service.
 
 resultlog_ resultlog plugin for machine-readable logging of test results.
 

--- a/doc/test/plugin/links.txt
+++ b/doc/test/plugin/links.txt
@@ -1,33 +1,33 @@
-.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_recwarn.py
-.. _`pytest_iocapture.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_iocapture.py
-.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_monkeypatch.py
-.. _`plugins`: index.html
-.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_doctest.py
 .. _`terminal`: terminal.html
+.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_recwarn.py
-.. _`unittest`: unittest.html
+.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_monkeypatch.py
+.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_keyword.py
+.. _`pastebin`: pastebin.html
+.. _`plugins`: index.html
+.. _`pytest_capture.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_capture.py
+.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_doctest.py
+.. _`capture`: capture.html
 .. _`hooklog`: hooklog.html
-.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_restdoc.py
+.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_restdoc.py
+.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_hooklog.py
+.. _`pytest_pastebin.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_pastebin.py
+.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_figleaf.py
 .. _`xfail`: xfail.html
-.. _`pytest_pocoo.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_pocoo.py
-.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_keyword.py
-.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_figleaf.py
-.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_hooklog.py
 .. _`contact`: ../../contact.html
-.. _`pocoo`: pocoo.html
 .. _`checkout the py.test development version`: ../../download.html#checkout
 .. _`oejskit`: oejskit.html
+.. _`unittest`: unittest.html
-.. _`iocapture`: iocapture.html
-.. _`pytest_xfail.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_xfail.py
+.. _`pytest_xfail.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_xfail.py
 .. _`figleaf`: figleaf.html
 .. _`extend`: ../extend.html
-.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_terminal.py
+.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_terminal.py
 .. _`recwarn`: recwarn.html
-.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_pdb.py
+.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_pdb.py
 .. _`monkeypatch`: monkeypatch.html
 .. _`resultlog`: resultlog.html
 .. _`keyword`: keyword.html
 .. _`restdoc`: restdoc.html
-.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_unittest.py
+.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_unittest.py
 .. _`doctest`: doctest.html
-.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/69bd12627e4d304c89c2003842703ccb10dfe838/py/test/plugin/pytest_resultlog.py
+.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/3b3ea41060652c47739450a590c4d71625bc05bd/py/test/plugin/pytest_resultlog.py
 .. _`pdb`: pdb.html

--- /dev/null
+++ b/doc/test/plugin/pastebin.txt
@@ -0,0 +1,46 @@
+
+pytest_pastebin plugin
+======================
+
+submit failure or test session information to a pastebin service.
+
+.. contents::
+  :local:
+
+Usage
+----------
+
+**Creating a URL for each test failure**::
+
+    py.test --pastebin=failed
+
+This will submit full failure information to a remote Paste service and
+provide a URL for each failure. You may select tests as usual or add
+for example ``-x`` if you only want to send one particular failure.
+
+**Creating a URL for a whole test session log**::
+
+    py.test --pastebin=all
+
+Currently only pasting to the http://paste.pocoo.org service is implemented.
+
+command line options
+--------------------
+
+
+``--pastebin=mode``
+    send failed|all info to Pocoo pastebin service.
+
+Start improving this plugin in 30 seconds
+=========================================
+
+
+Do you find the above documentation or the plugin itself lacking?
+
+1. Download `pytest_pastebin.py`_ plugin source code
+2. put it somewhere as ``pytest_pastebin.py`` into your import path
+3. a subsequent ``py.test`` run will use your local version
+
+Further information: extend_ documentation, other plugins_ or contact_.
+
+.. include:: links.txt
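
Under the hood the service is driven over XML-RPC (see the
``pytest_pastebin.py`` source later in this patch); the equivalent manual
call is roughly::

    import xmlrpclib
    pastes = xmlrpclib.ServerProxy("http://paste.pocoo.org/xmlrpc/").pastes
    paste_id = pastes.newPaste("python", "print 'hello'")
    print "http://paste.pocoo.org/show/%s" % paste_id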

--- a/doc/test/plugin/pocoo.txt
+++ /dev/null
@@ -1,31 +0,0 @@
-
-pytest_pocoo plugin
-===================
-
-submit failure information to paste.pocoo.org
-
-.. contents::
-  :local:
-
-
-
-command line options
---------------------
-
-
-``-P, --pocoo-sendfailures``
-    send failures to http://paste.pocoo.org paste service
-
-Start improving this plugin in 30 seconds
-=========================================
-
-
-Do you find the above documentation or the plugin itself lacking?
-
-1. Download `pytest_pocoo.py`_ plugin source code
-2. put it somewhere as ``pytest_pocoo.py`` into your import path
-3. a subsequent ``py.test`` run will use your local version
-
-Further information: extend_ documentation, other plugins_ or contact_.
-
-.. include:: links.txt

--- a/doc/test/plugin/terminal.txt
+++ b/doc/test/plugin/terminal.txt
@@ -9,6 +9,21 @@ Implements terminal reporting of the full testing process.
 
 This is a good source for looking at the various reporting hooks.
 
+command line options
+--------------------
+
+
+``--collectonly``
+    only collect tests, don't execute them.
+``--traceconfig``
+    trace considerations of conftest.py files.
+``--nomagic``
+    don't reinterpret asserts, no traceback cutting.
+``--fulltrace``
+    don't cut any tracebacks (default is to cut).
+``--debug``
+    generate and show debugging information.
+
 Start improving this plugin in 30 seconds
 =========================================
 

--- a/doc/test/plugin/xfail.txt
+++ b/doc/test/plugin/xfail.txt
@@ -2,7 +2,7 @@
 pytest_xfail plugin
 ===================
 
-mark python tests as expected-to-fail and report them separately.
+mark python test functions as expected-to-fail and report them separately.
 
 .. contents::
   :local:
@@ -10,8 +10,8 @@ mark python tests as expected-to-fail and report them separately.
 usage
 ------------
 
-Use the generic mark decorator to add the 'xfail' keyword to your
-test function::
+Use the generic mark decorator to mark your test functions as
+'expected to fail'::
 
     @py.test.mark.xfail
     def test_hello():
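
Completing the snippet the hunk cuts off, a minimal expected-to-fail
test reads::

    @py.test.mark.xfail
    def test_hello():
        assert 0   # fails, but is reported separately as an expected failure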

--- /dev/null
+++ b/example/funcarg/costlysetup/sub1/__init__.py
@@ -0,0 +1 @@
+#

--- /dev/null
+++ b/example/funcarg/costlysetup/sub2/__init__.py
@@ -0,0 +1 @@
+#
@@ -5,11 +5,11 @@ WIDTH = 75
 
 plugins = [
     ('Plugins related to Python test functions and programs',
-     'xfail figleaf monkeypatch iocapture recwarn',),
+     'xfail figleaf monkeypatch capture recwarn',),
     ('Plugins for other testing styles and languages',
      'unittest doctest oejskit restdoc'),
     ('Plugins for generic reporting and failure logging',
-     'pocoo resultlog terminal',),
+     'pastebin resultlog terminal',),
     ('internal plugins / core functionality',
      'pdb keyword hooklog')
     #('internal plugins / core functionality',

--- a/py/__init__.py
+++ b/py/__init__.py
@@ -20,7 +20,7 @@ For questions please check out http://pylib.org/contact.html
 from initpkg import initpkg
 trunk = None
 
-version = trunk or "1.0.0b9"
+version = trunk or "1.0.0"
 
 initpkg(__name__,
     description = "py.test and pylib: advanced testing tool and networking lib",
@@ -32,7 +32,7 @@ initpkg(__name__,
     author_email = "holger at merlinux.eu, py-dev at codespeak.net",
     long_description = globals()['__doc__'],
     classifiers = [
-        "Development Status :: 4 - Beta",
+        "Development Status :: 5 - Production/Stable",
         "Intended Audience :: Developers",
         "License :: OSI Approved :: MIT License",
         "Operating System :: POSIX",

@@ -57,14 +57,15 @@ class ExceptionInfo(object):
         reprcrash = ReprFileLocation(path, lineno+1, exconly)
         return reprcrash
 
-    def getrepr(self, showlocals=False, style="long", tbfilter=True, funcargs=False):
+    def getrepr(self, showlocals=False, style="long",
+                abspath=False, tbfilter=True, funcargs=False):
         """ return str()able representation of this exception info.
             showlocals: show locals per traceback entry
             style: long|short|no traceback style
            tbfilter: hide entries (where __tracebackhide__ is true)
         """
         fmt = FormattedExcinfo(showlocals=showlocals, style=style,
-            tbfilter=tbfilter, funcargs=funcargs)
+            abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
         return fmt.repr_excinfo(self)
 
     def __str__(self):
@@ -78,11 +79,12 @@ class FormattedExcinfo(object):
     flow_marker = ">"
     fail_marker = "E"
 
-    def __init__(self, showlocals=False, style="long", tbfilter=True, funcargs=False):
+    def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
         self.showlocals = showlocals
         self.style = style
         self.tbfilter = tbfilter
         self.funcargs = funcargs
+        self.abspath = abspath
 
     def _getindent(self, source):
         # figure out indent for given source
@@ -154,8 +156,9 @@ class FormattedExcinfo(object):
             if name == '__builtins__':
                 lines.append("__builtins__ = <builtins>")
             else:
-                # This formatting could all be handled by the _repr() function, which is
-                # only repr.Repr in disguise, so is very configurable.
+                # This formatting could all be handled by the
+                # _repr() function, which is only repr.Repr in
+                # disguise, so is very configurable.
                 str_repr = self._saferepr(value)
                 #if len(str_repr) < 70 or not isinstance(value,
                 #    (list, tuple, dict)):
@@ -180,7 +183,8 @@ class FormattedExcinfo(object):
             reprargs = self.repr_args(entry)
             lines.extend(self.get_source(source, line_index, excinfo))
             message = excinfo and excinfo.typename or ""
-            filelocrepr = ReprFileLocation(entry.path, entry.lineno+1, message)
+            path = self._makepath(entry.path)
+            filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
             localsrepr = self.repr_locals(entry.locals)
             return ReprEntry(lines, reprargs, localsrepr, filelocrepr)
         else:
@@ -193,6 +197,13 @@ class FormattedExcinfo(object):
             lines.extend(self.get_exconly(excinfo, indent=4))
             return ReprEntry(lines, None, None, None)
 
+    def _makepath(self, path):
+        if not self.abspath:
+            np = py.path.local().bestrelpath(path)
+            if len(np) < len(str(path)):
+                path = np
+        return path
+
     def repr_traceback(self, excinfo):
         traceback = excinfo.traceback
         if self.tbfilter:
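
The effect of the new ``abspath`` flag, sketched with the APIs used in this
patch (``mod.f`` stands in for any failing callable)::

    excinfo = py.test.raises(ValueError, mod.f)
    # file locations are shown relative to the current dir when that is shorter
    text = str(excinfo.getrepr(abspath=False))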

@@ -625,6 +625,29 @@ raise ValueError()
         assert tw.lines[9] == ""
         assert tw.lines[10].endswith("mod.py:3: ValueError")
 
+    def test_toterminal_long_filenames(self):
+        mod = self.importasmod("""
+            def f():
+                raise ValueError()
+        """)
+        excinfo = py.test.raises(ValueError, mod.f)
+        tw = TWMock()
+        path = py.path.local(mod.__file__)
+        old = path.dirpath().chdir()
+        try:
+            repr = excinfo.getrepr(abspath=False)
+            repr.toterminal(tw)
+            line = tw.lines[-1]
+            x = py.path.local().bestrelpath(path)
+            if len(x) < len(str(path)):
+                assert line == "mod.py:3: ValueError"
+
+            repr = excinfo.getrepr(abspath=True)
+            repr.toterminal(tw)
+            line = tw.lines[-1]
+            assert line == "%s:3: ValueError" %(path,)
+        finally:
+            old.chdir()
+
     def test_format_excinfo(self):
         mod = self.importasmod("""

@@ -201,13 +201,8 @@ class TerminalWriter(object):
         self._file.flush()
 
     def line(self, s='', **kw):
-        if s:
-            s = self.markup(s, **kw)
-            self._file.write(s + '\n')
-        else:
-            self._file.write('\n')
-        self._file.flush()
-
+        self.write(s, **kw)
+        self.write('\n')
 
 class Win32ConsoleWriter(object):
 

@@ -152,14 +152,14 @@ class PathBase(object):
         return ""
 
     def bestrelpath(self, dest):
-        """ return relative path from self to dest
-            such that self.join(bestrelpath) == dest.
+        """ return a string which is a relative path from self
+            to dest such that self.join(bestrelpath) == dest and
+            if no such path can be determined return dest.
         """
         try:
             base = self.common(dest)
             if not base: # can be the case on windows
-                return dest
+                return str(dest)
             self2base = self.relto(base)
             reldest = dest.relto(base)
             if self2base:
@@ -172,7 +172,7 @@ class PathBase(object):
             target = dest.sep.join(l)
             return target
         except AttributeError:
-            return dest
+            return str(dest)
 
 
     def parts(self, reverse=False):
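
Usage sketch for ``bestrelpath`` on a POSIX-style layout (paths are
illustrative)::

    import py
    p = py.path.local("/tmp/proj")
    dest = py.path.local("/tmp/proj/tests/test_x.py")
    assert p.bestrelpath(dest) == "tests/test_x.py"   # p.join(result) == dest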

@@ -10,6 +10,6 @@ Generator = py.test.collect.Generator
 Function = py.test.collect.Function
 Instance = py.test.collect.Instance
 
-pytest_plugins = "default runner iocapture terminal keyword xfail tmpdir execnetcleanup monkeypatch recwarn pdb unittest".split()
+pytest_plugins = "default runner capture terminal keyword xfail tmpdir execnetcleanup monkeypatch recwarn pdb pastebin unittest".split()
 
 conf_capture = "fd"
--- a/py/test/dist/dsession.py
+++ b/py/test/dist/dsession.py
@@ -34,16 +34,16 @@ class LoopState(object):
         return "<LoopState exitstatus=%r shuttingdown=%r len(colitems)=%d>" % (
             self.exitstatus, self.shuttingdown, len(self.colitems))
 
-    def pytest_runtest_logreport(self, rep):
-        if rep.item in self.dsession.item2nodes:
-            if rep.when != "teardown": # otherwise we have already managed it
-                self.dsession.removeitem(rep.item, rep.node)
-        if rep.failed:
+    def pytest_runtest_logreport(self, report):
+        if report.item in self.dsession.item2nodes:
+            if report.when != "teardown": # otherwise we already managed it
+                self.dsession.removeitem(report.item, report.node)
+        if report.failed:
             self.testsfailed = True
 
-    def pytest_collectreport(self, rep):
-        if rep.passed:
-            self.colitems.extend(rep.result)
+    def pytest_collectreport(self, report):
+        if report.passed:
+            self.colitems.extend(report.result)
 
     def pytest_testnodeready(self, node):
         self.dsession.addnode(node)
@@ -199,7 +199,7 @@ class DSession(Session):
         else:
             self.config.hook.pytest_collectstart(collector=next)
             colrep = self.config.hook.pytest_make_collect_report(collector=next)
-            self.queueevent("pytest_collectreport", rep=colrep)
+            self.queueevent("pytest_collectreport", report=colrep)
         if self.config.option.dist == "each":
             self.senditems_each(senditems)
         else:
@@ -267,7 +267,7 @@ class DSession(Session):
         info = "!!! Node %r crashed during running of test %r" %(node, item)
         rep = runner.ItemTestReport(item=item, excinfo=info, when="???")
         rep.node = node
-        self.config.hook.pytest_runtest_logreport(rep=rep)
+        self.config.hook.pytest_runtest_logreport(report=rep)
 
     def setup(self):
         """ setup any neccessary resources ahead of the test run. """
--- /dev/null
+++ b/py/test/dist/testing/acceptance_test.py
@@ -0,0 +1,148 @@
+import py
+
+class TestDistribution:
+    def test_dist_conftest_options(self, testdir):
+        p1 = testdir.tmpdir.ensure("dir", 'p1.py')
+        p1.dirpath("__init__.py").write("")
+        p1.dirpath("conftest.py").write(py.code.Source("""
+            print "importing conftest", __file__
+            import py
+            Option = py.test.config.Option
+            option = py.test.config.addoptions("someopt",
+                Option('--someopt', action="store_true", dest="someopt", default=False))
+            dist_rsync_roots = ['../dir']
+            print "added options", option
+            print "config file seen from conftest", py.test.config
+        """))
+        p1.write(py.code.Source("""
+            import py, conftest
+            def test_1():
+                print "config from test_1", py.test.config
+                print "conftest from test_1", conftest.__file__
+                print "test_1: py.test.config.option.someopt", py.test.config.option.someopt
+                print "test_1: conftest", conftest
+                print "test_1: conftest.option.someopt", conftest.option.someopt
+                assert conftest.option.someopt
+        """))
+        result = testdir.runpytest('-d', '--tx=popen', p1, '--someopt')
+        assert result.ret == 0
+        extra = result.stdout.fnmatch_lines([
+            "*1 passed*",
+        ])
+
+    def test_manytests_to_one_popen(self, testdir):
+        p1 = testdir.makepyfile("""
+                import py
+                def test_fail0():
+                    assert 0
+                def test_fail1():
+                    raise ValueError()
+                def test_ok():
+                    pass
+                def test_skip():
+                    py.test.skip("hello")
+            """,
+        )
+        result = testdir.runpytest(p1, '-d', '--tx=popen', '--tx=popen')
+        result.stdout.fnmatch_lines([
+            "*1*popen*Python*",
+            "*2*popen*Python*",
+            "*2 failed, 1 passed, 1 skipped*",
+        ])
+        assert result.ret == 1
+
+    def test_dist_conftest_specified(self, testdir):
+        p1 = testdir.makepyfile("""
+                import py
+                def test_fail0():
+                    assert 0
+                def test_fail1():
+                    raise ValueError()
+                def test_ok():
+                    pass
+                def test_skip():
+                    py.test.skip("hello")
+            """,
+        )
+        testdir.makeconftest("""
+            pytest_option_tx = 'popen popen popen'.split()
+        """)
+        result = testdir.runpytest(p1, '-d')
+        result.stdout.fnmatch_lines([
+            "*1*popen*Python*",
+            "*2*popen*Python*",
+            "*3*popen*Python*",
+            "*2 failed, 1 passed, 1 skipped*",
+        ])
+        assert result.ret == 1
+
+    def test_dist_tests_with_crash(self, testdir):
+        if not hasattr(py.std.os, 'kill'):
+            py.test.skip("no os.kill")
+
+        p1 = testdir.makepyfile("""
+                import py
+                def test_fail0():
+                    assert 0
+                def test_fail1():
+                    raise ValueError()
+                def test_ok():
+                    pass
+                def test_skip():
+                    py.test.skip("hello")
+                def test_crash():
+                    import time
+                    import os
+                    time.sleep(0.5)
+                    os.kill(os.getpid(), 15)
+            """
+        )
+        result = testdir.runpytest(p1, '-d', '--tx=3*popen')
+        result.stdout.fnmatch_lines([
+            "*popen*Python*",
+            "*popen*Python*",
+            "*popen*Python*",
+            "*node down*",
+            "*3 failed, 1 passed, 1 skipped*"
+        ])
+        assert result.ret == 1
+
+    def test_distribution_rsyncdirs_example(self, testdir):
+        source = testdir.mkdir("source")
+        dest = testdir.mkdir("dest")
+        subdir = source.mkdir("example_pkg")
+        subdir.ensure("__init__.py")
+        p = subdir.join("test_one.py")
+        p.write("def test_5(): assert not __file__.startswith(%r)" % str(p))
+        result = testdir.runpytest("-d", "--rsyncdir=%(subdir)s" % locals(),
+            "--tx=popen//chdir=%(dest)s" % locals(), p)
+        assert result.ret == 0
+        result.stdout.fnmatch_lines([
+            "*1* *popen*platform*",
+            #"RSyncStart: [G1]",
+            #"RSyncFinished: [G1]",
+            "*1 passed*"
+        ])
+        assert dest.join(subdir.basename).check(dir=1)
+
+    def test_dist_each(self, testdir):
+        interpreters = []
+        for name in ("python2.4", "python2.5"):
+            interp = py.path.local.sysfind(name)
+            if interp is None:
+                py.test.skip("%s not found" % name)
+            interpreters.append(interp)
+
+        testdir.makepyfile(__init__="", test_one="""
+            import sys
+            def test_hello():
+                print "%s...%s" % sys.version_info[:2]
+                assert 0
+        """)
+        args = ["--dist=each"]
+        args += ["--tx", "popen//python=%s" % interpreters[0]]
+        args += ["--tx", "popen//python=%s" % interpreters[1]]
+        result = testdir.runpytest(*args)
+        result.stdout.fnmatch_lines(["2...4"])
+        result.stdout.fnmatch_lines(["2...5"])
--- a/py/test/dist/testing/test_dsession.py
+++ b/py/test/dist/testing/test_dsession.py
@@ -81,8 +81,8 @@ class TestDSession:
         session.triggertesting([modcol])
         name, args, kwargs = session.queue.get(block=False)
         assert name == 'pytest_collectreport'
-        rep = kwargs['rep']
-        assert len(rep.result) == 1
+        report = kwargs['report']
+        assert len(report.result) == 1
 
     def test_triggertesting_item(self, testdir):
         item = testdir.getitem("def test_func(): pass")
@@ -134,7 +134,7 @@ class TestDSession:
         session.queueevent(None)
         session.loop_once(loopstate)
         assert node.sent == [[item]]
-        session.queueevent("pytest_runtest_logreport", rep=run(item, node))
+        session.queueevent("pytest_runtest_logreport", report=run(item, node))
         session.loop_once(loopstate)
         assert loopstate.shuttingdown
         assert not loopstate.testsfailed
@@ -182,7 +182,7 @@ class TestDSession:
             item = item1
             node = nodes[0]
            when = "call"
-        session.queueevent("pytest_runtest_logreport", rep=rep)
+        session.queueevent("pytest_runtest_logreport", report=rep)
         reprec = testdir.getreportrecorder(session)
         print session.item2nodes
         loopstate = session._initloopstate([])
@@ -190,7 +190,7 @@ class TestDSession:
         session.loop_once(loopstate)
         assert len(session.item2nodes[item1]) == 1
         rep.when = "teardown"
-        session.queueevent("pytest_runtest_logreport", rep=rep)
+        session.queueevent("pytest_runtest_logreport", report=rep)
         session.loop_once(loopstate)
         assert len(session.item2nodes[item1]) == 1
 
@@ -249,7 +249,7 @@ class TestDSession:
 
         assert node.sent == [[item]]
         ev = run(item, node, excinfo=excinfo)
-        session.queueevent("pytest_runtest_logreport", rep=ev)
+        session.queueevent("pytest_runtest_logreport", report=ev)
         session.loop_once(loopstate)
         assert loopstate.shuttingdown
         session.queueevent("pytest_testnodedown", node=node, error=None)
@@ -286,8 +286,8 @@ class TestDSession:
         # run tests ourselves and produce reports
         ev1 = run(items[0], node, "fail")
         ev2 = run(items[1], node, None)
-        session.queueevent("pytest_runtest_logreport", rep=ev1) # a failing one
-        session.queueevent("pytest_runtest_logreport", rep=ev2)
+        session.queueevent("pytest_runtest_logreport", report=ev1) # a failing one
+        session.queueevent("pytest_runtest_logreport", report=ev2)
         # now call the loop
         loopstate = session._initloopstate(items)
         session.loop_once(loopstate)
@@ -302,7 +302,7 @@ class TestDSession:
         loopstate = session._initloopstate([])
         loopstate.shuttingdown = True
         reprec = testdir.getreportrecorder(session)
-        session.queueevent("pytest_runtest_logreport", rep=run(item, node))
+        session.queueevent("pytest_runtest_logreport", report=run(item, node))
         session.loop_once(loopstate)
         assert not reprec.getcalls("pytest_testnodedown")
         session.queueevent("pytest_testnodedown", node=node, error=None)
@@ -343,7 +343,7 @@ class TestDSession:
         node = MockNode()
         session.addnode(node)
         session.senditems_load([item])
-        session.queueevent("pytest_runtest_logreport", rep=run(item, node))
+        session.queueevent("pytest_runtest_logreport", report=run(item, node))
         loopstate = session._initloopstate([])
         session.loop_once(loopstate)
         assert node._shutdown is True
@@ -369,10 +369,10 @@ class TestDSession:
         session.senditems_load([item1])
         # node2pending will become empty when the loop sees the report
         rep = run(item1, node)
-        session.queueevent("pytest_runtest_logreport", rep=run(item1, node))
+        session.queueevent("pytest_runtest_logreport", report=run(item1, node))
 
         # but we have a collection pending
-        session.queueevent("pytest_collectreport", rep=colreport)
+        session.queueevent("pytest_collectreport", report=colreport)
 
         loopstate = session._initloopstate([])
         session.loop_once(loopstate)
@@ -396,11 +396,11 @@ class TestDSession:
         dsession = DSession(config)
         hookrecorder = testdir.getreportrecorder(config).hookrecorder
         dsession.main([config.getfsnode(p1)])
-        rep = hookrecorder.popcall("pytest_runtest_logreport").rep
+        rep = hookrecorder.popcall("pytest_runtest_logreport").report
         assert rep.passed
-        rep = hookrecorder.popcall("pytest_runtest_logreport").rep
+        rep = hookrecorder.popcall("pytest_runtest_logreport").report
         assert rep.skipped
-        rep = hookrecorder.popcall("pytest_runtest_logreport").rep
+        rep = hookrecorder.popcall("pytest_runtest_logreport").report
         assert rep.failed
         # see that the node is really down
         node = hookrecorder.popcall("pytest_testnodedown").node
@@ -115,7 +115,7 @@ class TestMasterSlaveConnection:
         node = mysetup.makenode(item.config)
         node.send(item)
         kwargs = mysetup.geteventargs("pytest_runtest_logreport")
-        rep = kwargs['rep']
+        rep = kwargs['report']
         assert rep.passed
         print rep
         assert rep.item == item
@@ -135,10 +135,10 @@ class TestMasterSlaveConnection:
         node.send(item)
         for outcome in "passed failed skipped".split():
             kwargs = mysetup.geteventargs("pytest_runtest_logreport")
-            rep = kwargs['rep']
-            assert getattr(rep, outcome)
+            report = kwargs['report']
+            assert getattr(report, outcome)
 
         node.sendlist(items)
         for outcome in "passed failed skipped".split():
-            rep = mysetup.geteventargs("pytest_runtest_logreport")['rep']
+            rep = mysetup.geteventargs("pytest_runtest_logreport")['report']
             assert getattr(rep, outcome)
@@ -56,9 +56,9 @@ class TXNode(object):
                 self._down = True
                 self.notify("pytest_testnodedown", error=None, node=self)
             elif eventname == "pytest_runtest_logreport":
-                rep = kwargs['rep']
+                rep = kwargs['report']
                 rep.node = self
-                self.notify("pytest_runtest_logreport", rep=rep)
+                self.notify("pytest_runtest_logreport", report=rep)
             else:
                 self.notify(eventname, *args, **kwargs)
         except KeyboardInterrupt:
@@ -110,8 +110,8 @@ class SlaveNode(object):
     def sendevent(self, eventname, *args, **kwargs):
         self.channel.send((eventname, args, kwargs))
 
-    def pytest_runtest_logreport(self, rep):
-        self.sendevent("pytest_runtest_logreport", rep=rep)
+    def pytest_runtest_logreport(self, report):
+        self.sendevent("pytest_runtest_logreport", report=report)
 
     def run(self):
         channel = self.channel
@@ -137,9 +137,9 @@ def slave_runsession(channel, config, fullwidth, hasmarkup):
     session.shouldclose = channel.isclosed
 
     class Failures(list):
-        def pytest_runtest_logreport(self, rep):
-            if rep.failed:
-                self.append(rep)
+        def pytest_runtest_logreport(self, report):
+            if report.failed:
+                self.append(report)
         pytest_collectreport = pytest_runtest_logreport
 
     failreports = Failures()
--- a/py/test/plugin/hookspec.py
+++ b/py/test/plugin/hookspec.py
@@ -33,7 +33,7 @@ def pytest_collect_file(path, parent):
 def pytest_collectstart(collector):
     """ collector starts collecting. """
 
-def pytest_collectreport(rep):
+def pytest_collectreport(report):
     """ collector finished collecting. """
 
 def pytest_deselected(items):
@@ -83,7 +83,7 @@ def pytest_runtest_makereport(item, call):
     """ make ItemTestReport for the given item and call outcome. """
 pytest_runtest_makereport.firstresult = True
 
-def pytest_runtest_logreport(rep):
+def pytest_runtest_logreport(report):
     """ process item test report. """
 
 # special handling for final teardown - somewhat internal for now
--- a/py/test/plugin/pytest_capture.py
+++ b/py/test/plugin/pytest_capture.py
@@ -89,8 +89,8 @@ def pytest_addoption(parser):
     group._addoption('-s', action="store_const", const="no", dest="capture",
         help="shortcut for --capture=no.")
     group._addoption('--capture', action="store", default=None,
-        metavar="capture", type="choice", choices=['fd', 'sys', 'no'],
-        help="set IO capturing method during tests: sys|fd|no.")
+        metavar="method", type="choice", choices=['fd', 'sys', 'no'],
+        help="set capturing method during tests: fd (default)|sys|no.")
 
 def addouterr(rep, outerr):
     repr = getattr(rep, 'longrepr', None)
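
The same parser API serves single-file plugins; a hypothetical plugin would
register and read its own flag like this (``--myflag`` is invented for
illustration)::

    def pytest_addoption(parser):
        group = parser.getgroup("general")
        group.addoption('--myflag', action="store_true",
            dest="myflag", default=False,
            help="hypothetical example flag.")

    def pytest_configure(config):
        if config.option.myflag:
            print "myflag is enabled"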
--- a/py/test/plugin/pytest_default.py
+++ b/py/test/plugin/pytest_default.py
@@ -38,7 +38,7 @@ def pytest_report_iteminfo(item):
     return item.reportinfo()
 
 def pytest_addoption(parser):
-    group = parser.getgroup("general", "test collection and failure interaction options")
+    group = parser.getgroup("general", "general testing options")
     group._addoption('-v', '--verbose', action="count",
         dest="verbose", default=0, help="increase verbosity."),
     group._addoption('-x', '--exitfirst',
@@ -67,23 +67,8 @@ def pytest_addoption(parser):
         help="run tests, re-run failing test set until all pass.")
 
-    group = parser.addgroup("test process debugging")
-    group.addoption('--collectonly',
-        action="store_true", dest="collectonly",
-        help="only collect tests, don't execute them."),
-    group.addoption('--traceconfig',
-        action="store_true", dest="traceconfig", default=False,
-        help="trace considerations of conftest.py files."),
-    group._addoption('--nomagic',
-        action="store_true", dest="nomagic", default=False,
-        help="don't reinterpret asserts, no traceback cutting. ")
-    group._addoption('--fulltrace',
-        action="store_true", dest="fulltrace", default=False,
-        help="don't cut any tracebacks (default is to cut).")
     group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
         help="base temporary directory for this test run.")
-    group.addoption('--debug',
-        action="store_true", dest="debug", default=False,
-        help="generate and show debugging information.")
 
     group = parser.addgroup("dist", "distributed testing") # see http://pytest.org/help/dist")
     group._addoption('--dist', metavar="distmode",
--- a/py/test/plugin/pytest_doctest.py
+++ b/py/test/plugin/pytest_doctest.py
@@ -132,8 +132,8 @@ class TestDoctests:
         """)
         reprec = testdir.inline_run(p)
         call = reprec.getcall("pytest_runtest_logreport")
-        assert call.rep.failed
-        assert call.rep.longrepr
+        assert call.report.failed
+        assert call.report.longrepr
         # XXX
         #testitem, = items
         #excinfo = py.test.raises(Failed, "testitem.runtest()")
--- /dev/null
+++ b/py/test/plugin/pytest_pastebin.py
@@ -0,0 +1,130 @@
+"""
+submit failure or test session information to a pastebin service.
+
+Usage
+----------
+
+**Creating a URL for each test failure**::
+
+    py.test --pastebin=failed
+
+This will submit full failure information to a remote Paste service and
+provide a URL for each failure. You may select tests as usual or add
+for example ``-x`` if you only want to send one particular failure.
+
+**Creating a URL for a whole test session log**::
+
+    py.test --pastebin=all
+
+Currently only pasting to the http://paste.pocoo.org service is implemented.
+
+"""
+import py, sys
+
+class url:
+    base = "http://paste.pocoo.org"
+    xmlrpc = base + "/xmlrpc/"
+    show = base + "/show/"
+
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group._addoption('--pastebin', metavar="mode",
+        action='store', dest="pastebin", default=None,
+        type="choice", choices=['failed', 'all'],
+        help="send failed|all info to Pocoo pastebin service.")
+
+def pytest_configure(__call__, config):
+    import tempfile
+    __call__.execute()
+    if config.option.pastebin == "all":
+        config._pastebinfile = tempfile.TemporaryFile()
+        tr = config.pluginmanager.impname2plugin['terminalreporter']
+        oldwrite = tr._tw.write
+        def tee_write(s, **kwargs):
+            oldwrite(s, **kwargs)
+            config._pastebinfile.write(str(s))
+        tr._tw.write = tee_write
+
+def pytest_unconfigure(config):
+    if hasattr(config, '_pastebinfile'):
+        config._pastebinfile.seek(0)
+        sessionlog = config._pastebinfile.read()
+        config._pastebinfile.close()
+        del config._pastebinfile
+        proxyid = getproxy().newPaste("python", sessionlog)
+        pastebinurl = "%s%s" % (url.show, proxyid)
+        print >>sys.stderr, "session-log:", pastebinurl
+        tr = config.pluginmanager.impname2plugin['terminalreporter']
+        del tr._tw.__dict__['write']
+
+def getproxy():
+    return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
+
+def pytest_terminal_summary(terminalreporter):
+    if terminalreporter.config.option.pastebin != "failed":
+        return
+    tr = terminalreporter
+    if 'failed' in tr.stats:
+        terminalreporter.write_sep("=", "Sending information to Paste Service")
+        if tr.config.option.debug:
+            terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,))
+        serverproxy = getproxy()
+        for rep in terminalreporter.stats.get('failed'):
+            try:
+                msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
+            except AttributeError:
+                msg = tr._getfailureheadline(rep)
+            tw = py.io.TerminalWriter(stringio=True)
+            rep.toterminal(tw)
+            s = tw.stringio.getvalue()
+            assert len(s)
+            proxyid = serverproxy.newPaste("python", s)
+            pastebinurl = "%s%s" % (url.show, proxyid)
+            tr.write_line("%s --> %s" %(msg, pastebinurl))
+
+
+class TestPasting:
+    def pytest_funcarg__pastebinlist(self, request):
+        mp = request.getfuncargvalue("monkeypatch")
+        pastebinlist = []
+        class MockProxy:
+            def newPaste(self, language, code):
+                pastebinlist.append((language, code))
+        mp.setitem(globals(), 'getproxy', MockProxy)
+        return pastebinlist
+
+    def test_failed(self, testdir, pastebinlist):
+        testpath = testdir.makepyfile("""
+            import py
+            def test_pass():
+                pass
+            def test_fail():
+                assert 0
+            def test_skip():
+                py.test.skip("")
+        """)
+        reprec = testdir.inline_run(testpath, "--paste=failed")
+        assert len(pastebinlist) == 1
+        assert pastebinlist[0][0] == "python"
+        s = pastebinlist[0][1]
+        assert s.find("def test_fail") != -1
+        assert reprec.countoutcomes() == [1,1,1]
+
+    def test_all(self, testdir, pastebinlist):
+        testpath = testdir.makepyfile("""
+            import py
+            def test_pass():
+                pass
+            def test_fail():
+                assert 0
+            def test_skip():
+                py.test.skip("")
+        """)
+        reprec = testdir.inline_run(testpath, "--pastebin=all")
+        assert reprec.countoutcomes() == [1,1,1]
+        assert len(pastebinlist) == 1
+        assert pastebinlist[0][0] == "python"
+        s = pastebinlist[0][1]
+        for x in 'test_fail test_skip skipped'.split():
+            assert s.find(x), (s, x)
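
Note how ``TestPasting`` isolates itself from the network:
``monkeypatch.setitem(globals(), 'getproxy', MockProxy)`` swaps the
module-global factory for the test's duration and restores it at teardown.
The same pattern works in any test that should stub a module global::

    def test_quiet(monkeypatch):
        monkeypatch.setitem(globals(), 'getproxy', MockProxy)  # undone automatically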

--- a/py/test/plugin/pytest_pocoo.py
+++ /dev/null
@@ -1,63 +0,0 @@
-"""
-submit failure information to paste.pocoo.org
-"""
-import py
-
-class url:
-    base = "http://paste.pocoo.org"
-    xmlrpc = base + "/xmlrpc/"
-    show = base + "/show/"
-
-def pytest_addoption(parser):
-    group = parser.addgroup("pocoo plugin")
-    group.addoption('-P', '--pocoo-sendfailures',
-        action='store_true', dest="pocoo_sendfailures",
-        help="send failures to %s paste service" %(url.base,))
-
-def getproxy():
-    return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
-
-def pytest_terminal_summary(terminalreporter):
-    if terminalreporter.config.option.pocoo_sendfailures:
-        tr = terminalreporter
-        if 'failed' in tr.stats and tr.config.option.tbstyle != "no":
-            terminalreporter.write_sep("=", "Sending failures to %s" %(url.base,))
-            terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,))
-            #print self.__class__.getproxy
-            #print self.__class__, id(self.__class__)
-            serverproxy = getproxy()
-            for ev in terminalreporter.stats.get('failed'):
-                tw = py.io.TerminalWriter(stringio=True)
-                ev.toterminal(tw)
-                s = tw.stringio.getvalue()
-                # XXX add failure summary
-                assert len(s)
-                terminalreporter.write_line("newpaste() ...")
-                proxyid = serverproxy.newPaste("python", s)
-                terminalreporter.write_line("%s%s\n" % (url.show, proxyid))
-                break
-
-
-def test_toproxy(testdir, monkeypatch):
-    l = []
-    class MockProxy:
-        def newPaste(self, language, code):
-            l.append((language, code))
-    monkeypatch.setitem(globals(), 'getproxy', MockProxy)
-    testdir.plugins.insert(0, globals())
-    testpath = testdir.makepyfile("""
-        import py
-        def test_pass():
-            pass
-        def test_fail():
-            assert 0
-        def test_skip():
-            py.test.skip("")
-    """)
-    reprec = testdir.inline_run(testpath, "-P")
-    assert len(l) == 1
-    assert l[0][0] == "python"
-    s = l[0][1]
-    assert s.find("def test_fail") != -1
-    assert reprec.countoutcomes() == [1,1,1]
@@ -4,38 +4,33 @@ XXX: Currently in progress, NOT IN WORKING STATE.
 """
 import py

-lint = py.test.importorskip("pylint")
+pylint = py.test.importorskip("pylint.lint")

 def pytest_addoption(parser):
     group = parser.addgroup('pylint options')
     group.addoption('--pylint', action='store_true',
         default=False, dest='pylint',
-        help='Pylint coverate of test files.')
+        help='run pylint on python files.')

 def pytest_collect_file(path, parent):
     if path.ext == ".py":
         if parent.config.getvalue('pylint'):
-            return PylintItem(path, parent, self.lint)
+            return PylintItem(path, parent)

-def pytest_terminal_summary(terminalreporter):
-    print 'placeholder for pylint output'
+#def pytest_terminal_summary(terminalreporter):
+#    print 'placeholder for pylint output'

 class PylintItem(py.test.collect.Item):
     def __init__(self, path, parent, lintlib):
         name = self.__class__.__name__ + ":" + path.basename
         super(PylintItem, self).__init__(name=name, parent=parent)
         self.fspath = path
         self.lint = lintlib

     def runtest(self):
         # run lint here
         capture = py.io.StdCaptureFD()
         #pylib.org has docs on py.io.stdcaptureFD
-        self.linter = self.lint.PyLinter() #TODO: should this be in the PylintPlugin?
-        self.linter.check(str(self.fspath))
-        out, err = capture.reset()
+        try:
+            linter = pylint.lint.PyLinter()
+            linter.check(str(self.fspath))
+        finally:
+            out, err = capture.reset()
         rating = out.strip().split('\n')[-1]
         print ">>>",
         print rating
         assert 0
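py.io.StdCaptureFD, used by runtest() above, intercepts output at the file-descriptor level (so even non-Python output from pylint's reporter is caught), and reset() undoes the redirection and hands back everything written in between::

    import py

    capture = py.io.StdCaptureFD()   # start intercepting fd 1 and fd 2
    print "hidden while captured"
    out, err = capture.reset()       # stop capturing, return collected text
    assert "hidden" in out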
@@ -341,7 +341,7 @@ class ReportRecorder(object):
     # functionality for test reports

     def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
-        return [x.rep for x in self.getcalls(names)]
+        return [x.report for x in self.getcalls(names)]

     def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport"):
         """ return a testreport whose dotted import path matches """
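The rep= to report= renames that dominate this commit are not cosmetic: hook implementations receive their arguments by keyword, matched on parameter name, so callers and implementers must agree on the new spelling. A minimal conforming plugin after the rename::

    # conftest.py -- the parameter must now be spelled "report";
    # a hook still written as pytest_runtest_logreport(rep) would
    # no longer be passed the argument
    def pytest_runtest_logreport(report):
        if report.failed:
            print "failed:", report.item.name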
@@ -406,7 +406,7 @@ def test_reportrecorder(testdir):
         skipped = False
         when = "call"

-    recorder.hook.pytest_runtest_logreport(rep=rep)
+    recorder.hook.pytest_runtest_logreport(report=rep)
     failures = recorder.getfailures()
     assert failures == [rep]
     failures = recorder.getfailures()
@@ -420,14 +420,14 @@ def test_reportrecorder(testdir):
         when = "call"
     rep.passed = False
     rep.skipped = True
-    recorder.hook.pytest_runtest_logreport(rep=rep)
+    recorder.hook.pytest_runtest_logreport(report=rep)

     modcol = testdir.getmodulecol("")
     rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
     rep.passed = False
     rep.failed = True
     rep.skipped = False
-    recorder.hook.pytest_collectreport(rep=rep)
+    recorder.hook.pytest_collectreport(report=rep)

     passed, skipped, failed = recorder.listoutcomes()
     assert not passed and skipped and failed
@@ -440,7 +440,7 @@ def test_reportrecorder(testdir):

     recorder.unregister()
     recorder.clear()
-    recorder.hook.pytest_runtest_logreport(rep=rep)
+    recorder.hook.pytest_runtest_logreport(report=rep)
     py.test.raises(ValueError, "recorder.getfailures()")

 class LineComp:
@@ -59,25 +59,25 @@ class ResultLog(object):
         testpath = generic_path(node)
         self.write_log_entry(testpath, shortrepr, longrepr)

-    def pytest_runtest_logreport(self, rep):
-        code = rep.shortrepr
-        if rep.passed:
+    def pytest_runtest_logreport(self, report):
+        code = report.shortrepr
+        if report.passed:
             longrepr = ""
-        elif rep.failed:
-            longrepr = str(rep.longrepr)
-        elif rep.skipped:
-            longrepr = str(rep.longrepr.reprcrash.message)
-        self.log_outcome(rep.item, code, longrepr)
+        elif report.failed:
+            longrepr = str(report.longrepr)
+        elif report.skipped:
+            longrepr = str(report.longrepr.reprcrash.message)
+        self.log_outcome(report.item, code, longrepr)

-    def pytest_collectreport(self, rep):
-        if not rep.passed:
-            if rep.failed:
+    def pytest_collectreport(self, report):
+        if not report.passed:
+            if report.failed:
                 code = "F"
             else:
-                assert rep.skipped
+                assert report.skipped
                 code = "S"
-            longrepr = str(rep.longrepr.reprcrash)
-            self.log_outcome(rep.collector, code, longrepr)
+            longrepr = str(report.longrepr.reprcrash)
+            self.log_outcome(report.collector, code, longrepr)

     def pytest_internalerror(self, excrepr):
         path = excrepr.reprcrash.path
@@ -40,7 +40,7 @@ def pytest_runtest_protocol(item):
     if item.config.getvalue("boxed"):
         reports = forked_run_report(item)
         for rep in reports:
-            item.config.hook.pytest_runtest_logreport(rep=rep)
+            item.config.hook.pytest_runtest_logreport(report=rep)
     else:
         runtestprotocol(item)
     return True
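With --boxed, forked_run_report executes the item in a forked child process, so even a hard crash only yields failure reports in the parent, each fed through pytest_runtest_logreport above. For illustration, a test that only the boxed mode survives (it mirrors the os.kill trick used by the dist tests further down)::

    import os

    def test_hard_crash():
        # kills the current process; under --boxed only the forked
        # child dies and the parent session reports a failure
        os.kill(os.getpid(), 15)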
@@ -89,7 +89,7 @@ def call_and_report(item, when, log=True):
     hook = item.config.hook
     report = hook.pytest_runtest_makereport(item=item, call=call)
     if log and (when == "call" or not report.passed):
-        hook.pytest_runtest_logreport(rep=report)
+        hook.pytest_runtest_logreport(report=report)
     return report

 def call_runtest_hook(item, when):
@@ -6,6 +6,24 @@ This is a good source for looking at the various reporting hooks.
 import py
 import sys

+def pytest_addoption(parser):
+    group = parser.getgroup("test process debugging")
+    group.addoption('--collectonly',
+        action="store_true", dest="collectonly",
+        help="only collect tests, don't execute them."),
+    group.addoption('--traceconfig',
+        action="store_true", dest="traceconfig", default=False,
+        help="trace considerations of conftest.py files."),
+    group._addoption('--nomagic',
+        action="store_true", dest="nomagic", default=False,
+        help="don't reinterpret asserts, no traceback cutting. ")
+    group._addoption('--fulltrace',
+        action="store_true", dest="fulltrace", default=False,
+        help="don't cut any tracebacks (default is to cut).")
+    group.addoption('--debug',
+        action="store_true", dest="debug", default=False,
+        help="generate and show debugging information.")
+
 def pytest_configure(config):
     if config.option.collectonly:
         reporter = CollectonlyReporter(config)
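Options declared in pytest_addoption land on the config object under their dest names, which is how pytest_configure right above branches on collectonly; the same pattern works from any conftest.py::

    # reading the options registered above from another hook
    def pytest_configure(config):
        if config.option.traceconfig:
            print "tracing conftest consideration"
        if config.getvalue("debug"):
            print "debug output requested"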
@@ -18,7 +36,7 @@ def pytest_configure(config):
             name = attr.split("_")[-1]
             assert hasattr(self.reporter._tw, name), name
             setattr(reporter._tw, name, getattr(config, attr))
-    config.pluginmanager.register(reporter)
+    config.pluginmanager.register(reporter, 'terminalreporter')

 class TerminalReporter:
     def __init__(self, config, file=None):
@@ -169,7 +187,8 @@ class TerminalReporter:
     def pytest__teardown_final_logerror(self, rep):
         self.stats.setdefault("error", []).append(rep)

-    def pytest_runtest_logreport(self, rep):
+    def pytest_runtest_logreport(self, report):
+        rep = report
         cat, letter, word = self.getcategoryletterword(rep)
         if not letter and not word:
             # probably passed setup/teardown
@@ -194,15 +213,15 @@ class TerminalReporter:
             self._tw.write(" " + line)
         self.currentfspath = -2

-    def pytest_collectreport(self, rep):
-        if not rep.passed:
-            if rep.failed:
-                self.stats.setdefault("error", []).append(rep)
-                msg = rep.longrepr.reprcrash.message
-                self.write_fspath_result(rep.collector.fspath, "E")
-            elif rep.skipped:
-                self.stats.setdefault("skipped", []).append(rep)
-                self.write_fspath_result(rep.collector.fspath, "S")
+    def pytest_collectreport(self, report):
+        if not report.passed:
+            if report.failed:
+                self.stats.setdefault("error", []).append(report)
+                msg = report.longrepr.reprcrash.message
+                self.write_fspath_result(report.collector.fspath, "E")
+            elif report.skipped:
+                self.stats.setdefault("skipped", []).append(report)
+                self.write_fspath_result(report.collector.fspath, "S")

     def pytest_sessionstart(self, session):
         self.write_sep("=", "test session starts", bold=True)
@@ -399,10 +418,10 @@ class CollectonlyReporter:
     def pytest_itemstart(self, item, node=None):
         self.outindent(item)

-    def pytest_collectreport(self, rep):
-        if not rep.passed:
-            self.outindent("!!! %s !!!" % rep.longrepr.reprcrash.message)
-            self._failed.append(rep)
+    def pytest_collectreport(self, report):
+        if not report.passed:
+            self.outindent("!!! %s !!!" % report.longrepr.reprcrash.message)
+            self._failed.append(report)
         self.indent = self.indent[:-len(self.INDENT)]

     def pytest_sessionfinish(self, session, exitstatus):
@@ -1,11 +1,11 @@
 """
-mark python tests as expected-to-fail and report them separately.
+mark python test functions as expected-to-fail and report them separately.

 usage
 ------------

-Use the generic mark decorator to add the 'xfail' keyword to your
-test function::
+Use the generic mark decorator to mark your test functions as
+'expected to fail'::

     @py.test.mark.xfail
     def test_hello():
@@ -14,6 +14,7 @@ test function::
 This test will be executed but no traceback will be reported
 when it fails. Instead terminal reporting will list it in the
 "expected to fail" section or "unexpectedly passing" section.
+
 """

 import py
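Joining the docstring fragments, a complete example module for the marker looks like this::

    import py

    @py.test.mark.xfail
    def test_hello():
        assert 0   # listed under "expected failures", no traceback shown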
@@ -48,28 +49,29 @@ def pytest_terminal_summary(terminalreporter):
     xfailed = tr.stats.get("xfailed")
     if xfailed:
         tr.write_sep("_", "expected failures")
-        for event in xfailed:
-            entry = event.longrepr.reprcrash
-            key = entry.path, entry.lineno, entry.message
-            reason = event.longrepr.reprcrash.message
-            modpath = event.item.getmodpath(includemodule=True)
-            #tr._tw.line("%s %s:%d: %s" %(modpath, entry.path, entry.lineno, entry.message))
-            tr._tw.line("%s %s:%d: " %(modpath, entry.path, entry.lineno))
+        for rep in xfailed:
+            entry = rep.longrepr.reprcrash
+            modpath = rep.item.getmodpath(includemodule=True)
+            pos = "%s %s:%d: " %(modpath, entry.path, entry.lineno)
+            reason = rep.longrepr.reprcrash.message
+            tr._tw.line("%s %s" %(pos, reason))

     xpassed = terminalreporter.stats.get("xpassed")
     if xpassed:
         tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
-        for event in xpassed:
-            tr._tw.line("%s: xpassed" %(event.item,))
+        for rep in xpassed:
+            fspath, lineno, modpath = rep.item.reportinfo()
+            pos = "%s %s:%d: unexpectedly passing" %(modpath, fspath, lineno)
+            tr._tw.line(pos)


-# ===============================================================================
+# =============================================================================
 #
 # plugin tests
 #
-# ===============================================================================
+# =============================================================================

-def test_xfail(testdir, linecomp):
+def test_xfail(testdir):
     p = testdir.makepyfile(test_one="""
         import py
         @py.test.mark.xfail
@@ -1,5 +1,5 @@
 import py, os, sys
-from py.__.test.plugin.pytest_iocapture import CaptureManager
+from py.__.test.plugin.pytest_capture import CaptureManager

 class TestCaptureManager:
@@ -88,6 +88,31 @@ class TestPerTestCapturing:
             "in func2*",
         ])

+    @py.test.mark.xfail
+    def test_capture_scope_cache(self, testdir):
+        p = testdir.makepyfile("""
+            import sys
+            def setup_module(func):
+                print "module-setup"
+            def setup_function(func):
+                print "function-setup"
+            def test_func():
+                print "in function"
+                assert 0
+            def teardown_function(func):
+                print "in teardown"
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*test_func():*",
+            "*Captured stdout during setup*",
+            "module-setup*",
+            "function-setup*",
+            "*Captured stdout*",
+            "in teardown*",
+        ])
+
+
     def test_no_carry_over(self, testdir):
         p = testdir.makepyfile("""
             def test_func1():
@@ -230,9 +255,7 @@ class TestLoggingInteraction:
             # verify proper termination
             assert "closed" not in s

-    @py.test.mark.xfail
     def test_logging_and_crossscope_fixtures(self, testdir):
-        # XXX also needs final teardown reporting to work!
         p = testdir.makepyfile("""
             import logging
             def setup_module(function):
@@ -246,14 +269,14 @@ class TestLoggingInteraction:
                 logging.warn("hello3")
                 assert 0
         """)
-        for optargs in (('--iocapture=sys',), ('--iocapture=fd',)):
+        for optargs in (('--capture=sys',), ('--capture=fd',)):
             print optargs
             result = testdir.runpytest(p, *optargs)
             s = result.stdout.str()
             result.stdout.fnmatch_lines([
                 "*WARN*hello3",  # errors come first
                 "*WARN*hello1",
                 "*WARN*hello2",
                 "*WARN*hello3",
             ])
             # verify proper termination
             assert "closed" not in s
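--capture is the renamed spelling of the old --iocapture option, following the plugin rename: capture=sys swaps out the sys.stdout/sys.stderr objects, while capture=fd redirects file descriptors 1 and 2 and therefore also catches output from subprocesses and C-level code. Exercised the same way as the loop above::

    for opt in ('--capture=sys', '--capture=fd'):
        result = testdir.runpytest(p, opt)
        assert "closed" not in result.stdout.str()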
@@ -303,5 +326,3 @@ class TestCaptureFuncarg:
         ])
         assert result.ret == 2

-
-
@@ -193,24 +193,6 @@ class BaseFunctionalTests:
         else:
             py.test.fail("did not raise")

-    @py.test.mark.xfail
-    def test_capture_per_func(self, testdir):
-        reports = testdir.runitem("""
-            import sys
-            def setup_function(func):
-                print "in setup"
-            def test_func():
-                print "in function"
-                assert 0
-            def teardown_function(func):
-                print "in teardown"
-        """)
-        assert reports[0].outerr[0] == "in setup\n"
-        assert reports[1].outerr[0] == "in function\n"
-        assert reports[2].outerr[0] == "in teardown\n"
-
-
-
 class TestExecutionNonForked(BaseFunctionalTests):
     def getrunner(self):
         def f(item):
@@ -311,7 +311,7 @@ class TestCollectonly:
                 " <Function 'test_func'>",
         ])
         rep.config.hook.pytest_collectreport(
-            rep=runner.CollectReport(modcol, [], excinfo=None))
+            report=runner.CollectReport(modcol, [], excinfo=None))
         assert rep.indent == indent

     def test_collectonly_skipped_module(self, testdir, linecomp):
@@ -352,6 +352,40 @@ class TestCollectonly:
         ])
         assert result.ret == 3

+    def test_collectonly_simple(self, testdir):
+        p = testdir.makepyfile("""
+            def test_func1():
+                pass
+            class TestClass:
+                def test_method(self):
+                    pass
+        """)
+        result = testdir.runpytest("--collectonly", p)
+        stderr = result.stderr.str().strip()
+        assert stderr.startswith("inserting into sys.path")
+        assert result.ret == 0
+        extra = result.stdout.fnmatch_lines(py.code.Source("""
+            <Module '*.py'>
+              <Function 'test_func1'*>
+              <Class 'TestClass'>
+                <Instance '()'>
+                  <Function 'test_method'*>
+        """).strip())
+
+    def test_collectonly_error(self, testdir):
+        p = testdir.makepyfile("import Errlkjqweqwe")
+        result = testdir.runpytest("--collectonly", p)
+        stderr = result.stderr.str().strip()
+        assert stderr.startswith("inserting into sys.path")
+        assert result.ret == 1
+        extra = result.stdout.fnmatch_lines(py.code.Source("""
+            <Module '*.py'>
+                *ImportError*
+            !!!*failures*!!!
+            *test_collectonly_error.py:1*
+        """).strip())
+
+
 def test_repr_python_version(monkeypatch):
     monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0))
     assert repr_pythonversion() == "2.5.1-final-0"
@@ -417,3 +451,174 @@ class TestFixtureReporting:
             "*failingfunc*",
             "*1 failed*1 error*",
         ])
+
+class TestTerminalFunctional:
+    def test_skipped_reasons(self, testdir):
+        testdir.makepyfile(
+            test_one="""
+                from conftest import doskip
+                def setup_function(func):
+                    doskip()
+                def test_func():
+                    pass
+                class TestClass:
+                    def test_method(self):
+                        doskip()
+            """,
+            test_two = """
+                from conftest import doskip
+                doskip()
+            """,
+            conftest = """
+                import py
+                def doskip():
+                    py.test.skip('test')
+            """
+        )
+        result = testdir.runpytest()
+        extra = result.stdout.fnmatch_lines([
+            "*test_one.py ss",
+            "*test_two.py S",
+            "___* skipped test summary *_",
+            "*conftest.py:3: *3* Skipped: 'test'",
+        ])
+        assert result.ret == 0
+
+    def test_deselected(self, testdir):
+        testpath = testdir.makepyfile("""
+            def test_one():
+                pass
+            def test_two():
+                pass
+            def test_three():
+                pass
+        """
+        )
+        result = testdir.runpytest("-k", "test_two:", testpath)
+        extra = result.stdout.fnmatch_lines([
+            "*test_deselected.py ..",
+            "=* 1 test*deselected by 'test_two:'*=",
+        ])
+        assert result.ret == 0
+
+    def test_no_skip_summary_if_failure(self, testdir):
+        testdir.makepyfile("""
+            import py
+            def test_ok():
+                pass
+            def test_fail():
+                assert 0
+            def test_skip():
+                py.test.skip("dontshow")
+        """)
+        result = testdir.runpytest()
+        assert result.stdout.str().find("skip test summary") == -1
+        assert result.ret == 1
+
+    def test_passes(self, testdir):
+        p1 = testdir.makepyfile("""
+            def test_passes():
+                pass
+            class TestClass:
+                def test_method(self):
+                    pass
+        """)
+        old = p1.dirpath().chdir()
+        try:
+            result = testdir.runpytest()
+        finally:
+            old.chdir()
+        extra = result.stdout.fnmatch_lines([
+            "test_passes.py ..",
+            "* 2 pass*",
+        ])
+        assert result.ret == 0
+
+    def test_header_trailer_info(self, testdir):
+        p1 = testdir.makepyfile("""
+            def test_passes():
+                pass
+        """)
+        result = testdir.runpytest()
+        verinfo = ".".join(map(str, py.std.sys.version_info[:3]))
+        extra = result.stdout.fnmatch_lines([
+            "*===== test session starts ====*",
+            "python: platform %s -- Python %s*" %(
+                    py.std.sys.platform, verinfo), # , py.std.sys.executable),
+            "*test_header_trailer_info.py .",
+            "=* 1 passed in *.[0-9][0-9] seconds *=",
+        ])
+
+    def test_traceback_failure(self, testdir):
+        p1 = testdir.makepyfile("""
+            def g():
+                return 2
+            def f(x):
+                assert x == g()
+            def test_onefails():
+                f(3)
+        """)
+        result = testdir.runpytest(p1)
+        result.stdout.fnmatch_lines([
+            "*test_traceback_failure.py F",
+            "====* FAILURES *====",
+            "____*____",
+            "",
+            "    def test_onefails():",
+            ">       f(3)",
+            "",
+            "*test_*.py:6: ",
+            "_ _ _ *",
+            #"",
+            "    def f(x):",
+            ">       assert x == g()",
+            "E       assert 3 == 2",
+            "E        +  where 2 = g()",
+            "",
+            "*test_traceback_failure.py:4: AssertionError"
+        ])
+
+    def test_showlocals(self, testdir):
+        p1 = testdir.makepyfile("""
+            def test_showlocals():
+                x = 3
+                y = "x" * 5000
+                assert 0
+        """)
+        result = testdir.runpytest(p1, '-l')
+        result.stdout.fnmatch_lines([
+            #"_ _ * Locals *",
+            "x* = 3",
+            "y* = 'xxxxxx*"
+        ])
+
+    def test_verbose_reporting(self, testdir):
+        p1 = testdir.makepyfile("""
+            import py
+            def test_fail():
+                raise ValueError()
+            def test_pass():
+                pass
+            class TestClass:
+                def test_skip(self):
+                    py.test.skip("hello")
+            def test_gen():
+                def check(x):
+                    assert x == 1
+                yield check, 0
+        """)
+        result = testdir.runpytest(p1, '-v')
+        result.stdout.fnmatch_lines([
+            "*test_verbose_reporting.py:2: test_fail*FAIL*",
+            "*test_verbose_reporting.py:4: test_pass*PASS*",
+            "*test_verbose_reporting.py:7: TestClass.test_skip*SKIP*",
+            "*test_verbose_reporting.py:10: test_gen*FAIL*",
+        ])
+        assert result.ret == 1
+        result = testdir.runpytest(p1, '-v', '-n 1')
+        result.stdout.fnmatch_lines([
+            "*FAIL*test_verbose_reporting.py:2: test_fail*",
+        ])
+        assert result.ret == 1
@@ -45,7 +45,7 @@ class Session(object):
             if rep.passed:
                 for x in self.genitems(rep.result, keywordexpr):
                     yield x
-            self.config.hook.pytest_collectreport(rep=rep)
+            self.config.hook.pytest_collectreport(report=rep)
         if self.shouldstop:
             break

@@ -79,8 +79,8 @@ class Session(object):
         """ setup any neccessary resources ahead of the test run. """
         self.config.hook.pytest_sessionstart(session=self)

-    def pytest_runtest_logreport(self, rep):
-        if rep.failed:
+    def pytest_runtest_logreport(self, report):
+        if report.failed:
             self._testsfailed = True
             if self.config.option.exitfirst:
                 self.shouldstop = True
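Session ends a run cooperatively: the logreport hook above only sets shouldstop when --exitfirst is given, and the genitems loop in the previous hunk checks the same flag between collection events. In test form::

    # sketch: -x / --exitfirst stops the session after the first failure
    result = testdir.runpytest(p1, '-x')
    result.stdout.fnmatch_lines(["*1 failed*"])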
@@ -54,40 +54,6 @@ class TestGeneralUsage:
         ])
         assert result.ret == 1

-    def test_collectonly_simple(self, testdir):
-        p = testdir.makepyfile("""
-            def test_func1():
-                pass
-            class TestClass:
-                def test_method(self):
-                    pass
-        """)
-        result = testdir.runpytest("--collectonly", p)
-        stderr = result.stderr.str().strip()
-        assert stderr.startswith("inserting into sys.path")
-        assert result.ret == 0
-        extra = result.stdout.fnmatch_lines(py.code.Source("""
-            <Module '*.py'>
-              <Function 'test_func1'*>
-              <Class 'TestClass'>
-                <Instance '()'>
-                  <Function 'test_method'*>
-        """).strip())
-
-    def test_collectonly_error(self, testdir):
-        p = testdir.makepyfile("import Errlkjqweqwe")
-        result = testdir.runpytest("--collectonly", p)
-        stderr = result.stderr.str().strip()
-        assert stderr.startswith("inserting into sys.path")
-        assert result.ret == 1
-        extra = result.stdout.fnmatch_lines(py.code.Source("""
-            <Module '*.py'>
-                *ImportError*
-            !!!*failures*!!!
-            *test_collectonly_error.py:1*
-        """).strip())
-
-
     def test_nested_import_error(self, testdir):
         p = testdir.makepyfile("""
             import import_fails
@@ -101,356 +67,3 @@ class TestGeneralUsage:
             "E   ImportError: No module named does_not_work",
         ])
         assert result.ret == 1
-
-    def test_skipped_reasons(self, testdir):
-        testdir.makepyfile(
-            test_one="""
-                from conftest import doskip
-                def setup_function(func):
-                    doskip()
-                def test_func():
-                    pass
-                class TestClass:
-                    def test_method(self):
-                        doskip()
-            """,
-            test_two = """
-                from conftest import doskip
-                doskip()
-            """,
-            conftest = """
-                import py
-                def doskip():
-                    py.test.skip('test')
-            """
-        )
-        result = testdir.runpytest()
-        extra = result.stdout.fnmatch_lines([
-            "*test_one.py ss",
-            "*test_two.py S",
-            "___* skipped test summary *_",
-            "*conftest.py:3: *3* Skipped: 'test'",
-        ])
-        assert result.ret == 0
-
-    def test_deselected(self, testdir):
-        testpath = testdir.makepyfile("""
-            def test_one():
-                pass
-            def test_two():
-                pass
-            def test_three():
-                pass
-        """
-        )
-        result = testdir.runpytest("-k", "test_two:", testpath)
-        extra = result.stdout.fnmatch_lines([
-            "*test_deselected.py ..",
-            "=* 1 test*deselected by 'test_two:'*=",
-        ])
-        assert result.ret == 0
-
-    def test_no_skip_summary_if_failure(self, testdir):
-        testdir.makepyfile("""
-            import py
-            def test_ok():
-                pass
-            def test_fail():
-                assert 0
-            def test_skip():
-                py.test.skip("dontshow")
-        """)
-        result = testdir.runpytest()
-        assert result.stdout.str().find("skip test summary") == -1
-        assert result.ret == 1
-
-    def test_passes(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_passes():
-                pass
-            class TestClass:
-                def test_method(self):
-                    pass
-        """)
-        old = p1.dirpath().chdir()
-        try:
-            result = testdir.runpytest()
-        finally:
-            old.chdir()
-        extra = result.stdout.fnmatch_lines([
-            "test_passes.py ..",
-            "* 2 pass*",
-        ])
-        assert result.ret == 0
-
-    def test_header_trailer_info(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_passes():
-                pass
-        """)
-        result = testdir.runpytest()
-        verinfo = ".".join(map(str, py.std.sys.version_info[:3]))
-        extra = result.stdout.fnmatch_lines([
-            "*===== test session starts ====*",
-            "python: platform %s -- Python %s*" %(
-                    py.std.sys.platform, verinfo), # , py.std.sys.executable),
-            "*test_header_trailer_info.py .",
-            "=* 1 passed in *.[0-9][0-9] seconds *=",
-        ])
-
-    def test_traceback_failure(self, testdir):
-        p1 = testdir.makepyfile("""
-            def g():
-                return 2
-            def f(x):
-                assert x == g()
-            def test_onefails():
-                f(3)
-        """)
-        result = testdir.runpytest(p1)
-        result.stdout.fnmatch_lines([
-            "*test_traceback_failure.py F",
-            "====* FAILURES *====",
-            "____*____",
-            "",
-            "    def test_onefails():",
-            ">       f(3)",
-            "",
-            "*test_*.py:6: ",
-            "_ _ _ *",
-            #"",
-            "    def f(x):",
-            ">       assert x == g()",
-            "E       assert 3 == 2",
-            "E        +  where 2 = g()",
-            "",
-            "*test_traceback_failure.py:4: AssertionError"
-        ])
-
-    def test_showlocals(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_showlocals():
-                x = 3
-                y = "x" * 5000
-                assert 0
-        """)
-        result = testdir.runpytest(p1, '-l')
-        result.stdout.fnmatch_lines([
-            #"_ _ * Locals *",
-            "x* = 3",
-            "y* = 'xxxxxx*"
-        ])
-
-    def test_verbose_reporting(self, testdir):
-        p1 = testdir.makepyfile("""
-            import py
-            def test_fail():
-                raise ValueError()
-            def test_pass():
-                pass
-            class TestClass:
-                def test_skip(self):
-                    py.test.skip("hello")
-            def test_gen():
-                def check(x):
-                    assert x == 1
-                yield check, 0
-        """)
-        result = testdir.runpytest(p1, '-v')
-        result.stdout.fnmatch_lines([
-            "*test_verbose_reporting.py:2: test_fail*FAIL*",
-            "*test_verbose_reporting.py:4: test_pass*PASS*",
-            "*test_verbose_reporting.py:7: TestClass.test_skip*SKIP*",
-            "*test_verbose_reporting.py:10: test_gen*FAIL*",
-        ])
-        assert result.ret == 1
-        result = testdir.runpytest(p1, '-v', '-n 1')
-        result.stdout.fnmatch_lines([
-            "*FAIL*test_verbose_reporting.py:2: test_fail*",
-        ])
-        assert result.ret == 1
-
-class TestDistribution:
-    def test_dist_conftest_options(self, testdir):
-        p1 = testdir.tmpdir.ensure("dir", 'p1.py')
-        p1.dirpath("__init__.py").write("")
-        p1.dirpath("conftest.py").write(py.code.Source("""
-            print "importing conftest", __file__
-            import py
-            Option = py.test.config.Option
-            option = py.test.config.addoptions("someopt",
-                Option('--someopt', action="store_true", dest="someopt", default=False))
-            dist_rsync_roots = ['../dir']
-            print "added options", option
-            print "config file seen from conftest", py.test.config
-        """))
-        p1.write(py.code.Source("""
-            import py, conftest
-            def test_1():
-                print "config from test_1", py.test.config
-                print "conftest from test_1", conftest.__file__
-                print "test_1: py.test.config.option.someopt", py.test.config.option.someopt
-                print "test_1: conftest", conftest
-                print "test_1: conftest.option.someopt", conftest.option.someopt
-                assert conftest.option.someopt
-        """))
-        result = testdir.runpytest('-d', '--tx=popen', p1, '--someopt')
-        assert result.ret == 0
-        extra = result.stdout.fnmatch_lines([
-            "*1 passed*",
-        ])
-
-    def test_manytests_to_one_popen(self, testdir):
-        p1 = testdir.makepyfile("""
-            import py
-            def test_fail0():
-                assert 0
-            def test_fail1():
-                raise ValueError()
-            def test_ok():
-                pass
-            def test_skip():
-                py.test.skip("hello")
-        """,
-        )
-        result = testdir.runpytest(p1, '-d', '--tx=popen', '--tx=popen')
-        result.stdout.fnmatch_lines([
-            "*1*popen*Python*",
-            "*2*popen*Python*",
-            "*2 failed, 1 passed, 1 skipped*",
-        ])
-        assert result.ret == 1
-
-    def test_dist_conftest_specified(self, testdir):
-        p1 = testdir.makepyfile("""
-            import py
-            def test_fail0():
-                assert 0
-            def test_fail1():
-                raise ValueError()
-            def test_ok():
-                pass
-            def test_skip():
-                py.test.skip("hello")
-        """,
-        )
-        testdir.makeconftest("""
-            pytest_option_tx = 'popen popen popen'.split()
-        """)
-        result = testdir.runpytest(p1, '-d')
-        result.stdout.fnmatch_lines([
-            "*1*popen*Python*",
-            "*2*popen*Python*",
-            "*3*popen*Python*",
-            "*2 failed, 1 passed, 1 skipped*",
-        ])
-        assert result.ret == 1
-
-    def test_dist_tests_with_crash(self, testdir):
-        if not hasattr(py.std.os, 'kill'):
-            py.test.skip("no os.kill")
-
-        p1 = testdir.makepyfile("""
-            import py
-            def test_fail0():
-                assert 0
-            def test_fail1():
-                raise ValueError()
-            def test_ok():
-                pass
-            def test_skip():
-                py.test.skip("hello")
-            def test_crash():
-                import time
-                import os
-                time.sleep(0.5)
-                os.kill(os.getpid(), 15)
-        """
-        )
-        result = testdir.runpytest(p1, '-d', '--tx=3*popen')
-        result.stdout.fnmatch_lines([
-            "*popen*Python*",
-            "*popen*Python*",
-            "*popen*Python*",
-            "*node down*",
-            "*3 failed, 1 passed, 1 skipped*"
-        ])
-        assert result.ret == 1
-
-    def test_distribution_rsyncdirs_example(self, testdir):
-        source = testdir.mkdir("source")
-        dest = testdir.mkdir("dest")
-        subdir = source.mkdir("example_pkg")
-        subdir.ensure("__init__.py")
-        p = subdir.join("test_one.py")
-        p.write("def test_5(): assert not __file__.startswith(%r)" % str(p))
-        result = testdir.runpytest("-d", "--rsyncdir=%(subdir)s" % locals(),
-            "--tx=popen//chdir=%(dest)s" % locals(), p)
-        assert result.ret == 0
-        result.stdout.fnmatch_lines([
-            "*1* *popen*platform*",
-            #"RSyncStart: [G1]",
-            #"RSyncFinished: [G1]",
-            "*1 passed*"
-        ])
-        assert dest.join(subdir.basename).check(dir=1)
-
-    def test_dist_each(self, testdir):
-        interpreters = []
-        for name in ("python2.4", "python2.5"):
-            interp = py.path.local.sysfind(name)
-            if interp is None:
-                py.test.skip("%s not found" % name)
-            interpreters.append(interp)
-
-        testdir.makepyfile(__init__="", test_one="""
-            import sys
-            def test_hello():
-                print "%s...%s" % sys.version_info[:2]
-                assert 0
-        """)
-        args = ["--dist=each"]
-        args += ["--tx", "popen//python=%s" % interpreters[0]]
-        args += ["--tx", "popen//python=%s" % interpreters[1]]
-        result = testdir.runpytest(*args)
-        result.stdout.fnmatch_lines(["2...4"])
-        result.stdout.fnmatch_lines(["2...5"])
-
-
-class TestInteractive:
-    def test_simple_looponfail_interaction(self, testdir):
-        p1 = testdir.makepyfile("""
-            def test_1():
-                assert 1 == 0
-        """)
-        p1.setmtime(p1.mtime() - 50.0)
-        child = testdir.spawn_pytest("--looponfail %s" % p1)
-        child.expect("assert 1 == 0")
-        child.expect("test_simple_looponfail_interaction.py:")
-        child.expect("1 failed")
-        child.expect("waiting for changes")
-        p1.write(py.code.Source("""
-            def test_1():
-                assert 1 == 1
-        """))
-        child.expect("MODIFIED.*test_simple_looponfail_interaction.py", timeout=4.0)
-        child.expect("1 passed", timeout=5.0)
-        child.kill(15)
-
-class TestKeyboardInterrupt:
-    def test_raised_in_testfunction(self, testdir):
-        p1 = testdir.makepyfile("""
-            import py
-            def test_fail():
-                raise ValueError()
-            def test_inter():
-                raise KeyboardInterrupt()
-        """)
-        result = testdir.runpytest(p1)
-        result.stdout.fnmatch_lines([
-            #"*test_inter() INTERRUPTED",
-            "*KEYBOARD INTERRUPT*",
-            "*1 failed*",
-        ])
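The distributed runs exercised above are driven by execnet-style 'tx' specifications: --tx=popen opens a local Python subprocess worker, a 3* prefix multiplies the spec, and //-separated attributes such as python= or chdir= configure the gateway; --dist=each sends every test to every node instead of load-balancing. Spec strings as used in the tests above::

    # three local worker processes, then one pinned to another interpreter
    result = testdir.runpytest(p1, '-d', '--tx=3*popen')
    result = testdir.runpytest(p1, '--dist=each',
                               '--tx', 'popen//python=python2.5')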
setup.py
@@ -31,7 +31,7 @@ def main():
         name='py',
         description='py.test and pylib: advanced testing tool and networking lib',
         long_description = long_description,
-        version= trunk or '1.0.0b9',
+        version= trunk or '1.0.0',
         url='http://pylib.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -45,7 +45,7 @@ def main():
                 'py.svnwcrevert = py.cmdline:pysvnwcrevert',
                 'py.test = py.cmdline:pytest',
                 'py.which = py.cmdline:pywhich']},
-        classifiers=['Development Status :: 4 - Beta',
+        classifiers=['Development Status :: 5 - Production/Stable',
             'Intended Audience :: Developers',
             'License :: OSI Approved :: MIT License',
             'Operating System :: POSIX',