commit
8336df9929
|
@ -1,6 +1,11 @@
|
|||
Changes between 1.0.0b7 and 1.0.0b8
|
||||
=====================================
|
||||
|
||||
* pytest_unittest-plugin are now enabled by default
|
||||
|
||||
* introduced pytest_keyboardinterrupt hook and
|
||||
refined pytest_sessionfinish hooked.
|
||||
|
||||
* workaround a buggy logging module interaction ("closing already closed
|
||||
files"). Thanks to Sridhar Ratnakumar for triggering.
|
||||
|
||||
|
|
|
@ -4,39 +4,41 @@ py.test / py lib 1.0.0: new test plugins, funcargs and cleanups
|
|||
Welcome to the 1.0 release bringing new flexibility and
|
||||
power to testing with Python. Main news:
|
||||
|
||||
* improved test architecture, featuring super-simple project
|
||||
specific or cross-project single-file plugins, e.g:
|
||||
* funcargs - new flexibilty and zero-boilerplate fixtures for Python testing:
|
||||
|
||||
* pytest_unittest.py: run traditional unittest.py tests
|
||||
* pytest_xfail.py: mark tests as "expected to fail"
|
||||
- separate test code, configuration and setup
|
||||
- ideal for integration and functional tests
|
||||
- more powerful dynamic generation of tests
|
||||
|
||||
* new plugin architecture, allowing project-specific and
|
||||
cross-project single-file plugins. Many useful examples
|
||||
shipped by default:
|
||||
|
||||
* pytest_unittest.py: run and integrate traditional unittest.py tests
|
||||
* pytest_xfail.py: mark tests as "expected to fail" and report separately.
|
||||
* pytest_pocoo.py: automatically send tracebacks to pocoo paste service
|
||||
* pytest_monkeypatch.py: safely patch parts of your environment in a test function
|
||||
* pytest_monkeypatch.py: safely monkeypatch from tests
|
||||
* pytest_figleaf.py: generate html coverage reports
|
||||
* pytest_resultlog.py: generate buildbot-friendly output
|
||||
* pytest_resultlog.py: generate buildbot-friendly reporting output
|
||||
|
||||
and many more!
|
||||
|
||||
* funcargs - bringing new flexibilty and zero-boilerplate to Python testing:
|
||||
|
||||
- cleanly separated test code and test configuration and test value setup
|
||||
- ideal for integration and functional tests
|
||||
- new generative tests -> deprecation of yield-generated tests
|
||||
|
||||
* distributed testing and distributed execution (py.execnet):
|
||||
|
||||
- new unified "TX" URL scheme for specifying remote resources
|
||||
- new sync/async ways to handle multiple remote processes
|
||||
- much improved documentation
|
||||
- improved documentation
|
||||
|
||||
|
||||
See the py.test documentation for more info:
|
||||
See the py.test and py lib documentation for more info:
|
||||
|
||||
http://pytest.org
|
||||
http://pylib.org
|
||||
|
||||
The py lib also got smaller and focuses on offering much of the
|
||||
well-tested py.test code in independent namespaces:
|
||||
The py lib now is smaller and focuses more on offering
|
||||
functionality used by the py.test tool in independent
|
||||
namespaces:
|
||||
|
||||
* py.execnet: ad-hoc code distribution to SSH, Socket and local sub processes
|
||||
* py.execnet: elastic code deployment to SSH, Socket and local sub processes
|
||||
* py.code: higher-level introspection and dynamic generation of python code
|
||||
* py.path: path abstractions over local and subversion files
|
||||
|
||||
|
@ -47,8 +49,6 @@ the installation procedures.
|
|||
The whole package works well with Linux, OSX and Win32, on
|
||||
Python 2.3, 2.4, 2.5 and 2.6. (Expect Python3 compatibility soon!)
|
||||
|
||||
Download/Install: http://codespeak.net/py/dist/download.html
|
||||
|
||||
best,
|
||||
holger
|
||||
|
||||
|
|
|
@ -1,8 +1,8 @@
|
|||
=======
|
||||
py.code
|
||||
=======
|
||||
================================================================================
|
||||
py.code: higher level python code and introspection objects
|
||||
================================================================================
|
||||
|
||||
The :api:`py.code` part of the 'py lib' contains some functionality to help
|
||||
The :api:`py.code` part of the pylib contains some functionality to help
|
||||
dealing with Python code objects. Even though working with Python's internal
|
||||
code objects (as found on frames and callables) can be very powerful, it's
|
||||
usually also quite cumbersome, because the API provided by core Python is
|
||||
|
|
|
@ -4,6 +4,13 @@ from py.__.misc.difftime import worded_time
|
|||
|
||||
html = py.xml.html
|
||||
|
||||
class css:
|
||||
#pagetitle = "pagetitle"
|
||||
contentspace = "contentspace"
|
||||
menubar = "menubar"
|
||||
navspace = "navspace"
|
||||
versioninfo = "versioninfo"
|
||||
|
||||
class Page(object):
|
||||
doctype = ('<!DOCTYPE html PUBLIC "-//W3C//DTD XHTML 1.0 Transitional//EN"'
|
||||
' "http://www.w3.org/TR/xhtml1/DTD/xhtml1-transitional.dtd">\n')
|
||||
|
@ -22,8 +29,8 @@ class Page(object):
|
|||
self._root = html.html(self.head, self.body)
|
||||
self.fill()
|
||||
|
||||
def a_href(self, name, url):
|
||||
return html.a(name, class_="menu", href=url)
|
||||
def a_href(self, name, url, **kwargs):
|
||||
return html.a(name, class_="menu", href=url, **kwargs)
|
||||
|
||||
def a_docref(self, name, relhtmlpath):
|
||||
docpath = self.project.docpath
|
||||
|
@ -39,20 +46,27 @@ class Page(object):
|
|||
|
||||
def fill_menubar(self):
|
||||
items = [
|
||||
self.a_docref("index", "index.html"),
|
||||
self.a_docref("pylib index", "index.html"),
|
||||
self.a_docref("py.test index", "test/test.html"),
|
||||
self.a_docref("py.test plugins", "test/plugin/index.html"),
|
||||
self.a_docref("py.execnet", "execnet.html"),
|
||||
#self.a_docref("py.code", "code.html"),
|
||||
#self.a_apigenref("api", "api/index.html"),
|
||||
#self.a_apigenref("source", "source/index.html"),
|
||||
#self.a_href("source", "http://bitbucket.org/hpk42/py-trunk/src/"),
|
||||
self.a_href("issues", "http://bitbucket.org/hpk42/py-trunk/issues/"),
|
||||
self.a_docref("contact", "contact.html"),
|
||||
self.a_docref("download", "download.html"),
|
||||
self.a_docref("install", "download.html"),
|
||||
]
|
||||
items2 = [items.pop(0)]
|
||||
sep = " "
|
||||
for item in items:
|
||||
items2.append(sep)
|
||||
items2.append(item)
|
||||
self.menubar = html.div(id="menubar", *items2)
|
||||
self.menubar = html.div(id=css.menubar, *[
|
||||
html.div(item) for item in items])
|
||||
version = py.version
|
||||
self.menubar.insert(0,
|
||||
html.div("%s" % (py.version), style="font-style: italic;")
|
||||
)
|
||||
#self.a_href("%s-%s" % (self.title, py.version),
|
||||
# "http://pypi.python.org/pypi/py/%s" % version,
|
||||
#id="versioninfo",
|
||||
|
||||
def fill(self):
|
||||
content_type = "%s;charset=%s" %(self.type, self.encoding)
|
||||
|
@ -65,14 +79,14 @@ class Page(object):
|
|||
type="text/css"))
|
||||
self.fill_menubar()
|
||||
|
||||
self.metaspace = html.div(
|
||||
html.div(self.title, class_="project_title"),
|
||||
self.menubar,
|
||||
id='metaspace')
|
||||
|
||||
self.body.append(self.project.logo)
|
||||
self.body.append(self.metaspace)
|
||||
self.contentspace = html.div(id="contentspace")
|
||||
self.body.append(html.div(
|
||||
self.project.logo,
|
||||
self.menubar,
|
||||
id=css.navspace,
|
||||
))
|
||||
|
||||
#self.body.append(html.div(self.title, id=css.pagetitle))
|
||||
self.contentspace = html.div(id=css.contentspace)
|
||||
self.body.append(self.contentspace)
|
||||
|
||||
def unicode(self, doctype=True):
|
||||
|
@ -115,9 +129,9 @@ class Project:
|
|||
encoding = 'latin1'
|
||||
logo = html.div(
|
||||
html.a(
|
||||
html.img(alt="py lib", id='pyimg', height=114, width=154,
|
||||
html.img(alt="py lib", id='pyimg', height=114/2, width=154/2,
|
||||
src="http://codespeak.net/img/pylib.png"),
|
||||
href="http://codespeak.net"))
|
||||
href="http://pylib.org"))
|
||||
Page = PyPage
|
||||
|
||||
def __init__(self, sourcepath=None):
|
||||
|
@ -173,21 +187,21 @@ class Project:
|
|||
stylesheet=stylesheet, encoding=encoding)
|
||||
content = strip_html_header(content, encoding=encoding)
|
||||
|
||||
title = "[%s] %s" % (txtpath.purebasename, py.version)
|
||||
title = txtpath.purebasename
|
||||
if txtpath.dirpath().basename == "test":
|
||||
title = "py.test " + title
|
||||
# title = "[%s] %s" % (txtpath.purebasename, py.version)
|
||||
page = self.Page(self, title,
|
||||
outputpath, stylesheeturl=stylesheet)
|
||||
|
||||
try:
|
||||
modified = py.process.cmdexec(
|
||||
"hg tip --template 'last modified {date|shortdate}'"
|
||||
"hg tip --template 'modified {date|shortdate}'"
|
||||
)
|
||||
except py.process.cmdexec.Error:
|
||||
modified = " "
|
||||
|
||||
page.contentspace.append(
|
||||
html.div(html.div(modified,
|
||||
style="float: right; font-style: italic;"),
|
||||
id = 'docinfoline'))
|
||||
#page.body.append(html.div(modified, id="docinfoline"))
|
||||
|
||||
page.contentspace.append(py.xml.raw(content))
|
||||
outputpath.ensure().write(page.unicode().encode(encoding))
|
||||
|
|
|
@ -1,18 +1,20 @@
|
|||
Contact and communication
|
||||
Contact and Communication points
|
||||
===================================
|
||||
|
||||
- **#pylib on irc.freenode.net**: you are welcome to lurk or ask questions!
|
||||
- `py-dev developers list`_ announcements and discussions.
|
||||
|
||||
- `py-dev developers list`_ development mailing list.
|
||||
- #pylib on irc.freenode.net IRC channel for random questions.
|
||||
|
||||
- `tetamap`_: Holger Krekel's blog, often about testing and py.test related news.
|
||||
|
||||
- `py-svn general commit mailing list`_ to follow all development commits.
|
||||
- `py-svn general commit mailing list`_ to follow development commits,
|
||||
|
||||
- `development bug/feature tracker`_ this roundup instance serves to file bugs and track issues.
|
||||
(soon to be substitued by a google-code or other hosted one).
|
||||
- `bitbucket issue tracker`_ use this bitbucket issue tracker to report
|
||||
bugs or request features.
|
||||
|
||||
- `merlinux.eu`_ offers teaching and consulting services.
|
||||
- `merlinux.eu`_ offers on-site teaching and consulting services.
|
||||
|
||||
.. _`bitbucket issue tracker`: http://bitbucket.org/hpk42/py-trunk/issues/
|
||||
|
||||
.. _`merlinux.eu`: http://merlinux.eu
|
||||
|
||||
|
|
|
@ -1,10 +1,11 @@
|
|||
==============
|
||||
Downloading
|
||||
==============
|
||||
..
|
||||
==============
|
||||
Downloading
|
||||
==============
|
||||
|
||||
.. _`PyPI project page`: http://pypi.python.org/pypi/py/
|
||||
.. _`PyPI project page`: http://pypi.python.org/pypi/py/
|
||||
|
||||
Latest Release, see `PyPI project page`_
|
||||
Latest Release, see `PyPI project page`_
|
||||
|
||||
using setuptools / easy_install
|
||||
===================================================
|
||||
|
@ -52,8 +53,13 @@ With a working `setuptools installation`_ you can then issue::
|
|||
|
||||
in order to work with your checkout version.
|
||||
|
||||
For enhancing one of the plugins you may go to
|
||||
the ``py/test/plugin/`` sub directory.
|
||||
|
||||
.. _mercurial: http://mercurial.selenic.com/wiki/
|
||||
|
||||
.. _`no-setuptools`:
|
||||
|
||||
Working without setuptools / from source
|
||||
==========================================
|
||||
|
||||
|
|
|
@ -1,17 +1,25 @@
|
|||
==========
|
||||
py.execnet
|
||||
==========
|
||||
==============================================================================
|
||||
py.execnet: *elastic* distributed programming
|
||||
==============================================================================
|
||||
|
||||
``py.execnet`` allows to:
|
||||
``execnet`` helps you to:
|
||||
|
||||
* instantiate local or remote Python Processes
|
||||
* ad-hoc instantiate local or remote Python Processes
|
||||
* send code for execution in one or many processes
|
||||
* asynchronously send and receive data between processes through channels
|
||||
* completely avoid manual installation steps on remote places
|
||||
* send and receive data between processes through channels
|
||||
|
||||
One of it's unique features is that it uses a **zero-install**
|
||||
technique: no manual installation steps are required on
|
||||
remote places, only a basic working Python interpreter
|
||||
and some input/output connection to it.
|
||||
|
||||
There is a `EuroPython2009 talk`_ from July 2009 with
|
||||
examples and some pictures.
|
||||
|
||||
.. contents::
|
||||
:local:
|
||||
:depth: 2
|
||||
|
||||
.. _`EuroPython2009 talk`: http://codespeak.net/download/py/ep2009-execnet.pdf
|
||||
|
||||
Gateways: immediately spawn local or remote process
|
||||
|
|
|
@ -1,19 +1,19 @@
|
|||
py lib: Main tools and APIs
|
||||
===================================
|
||||
py lib: testing and distributed programming library
|
||||
====================================================
|
||||
|
||||
.. _`PyPI project page`: http://pypi.python.org/pypi?%3Aaction=pkg_edit&name=py
|
||||
|
||||
Latest Release, see `PyPI project page`_
|
||||
The ``py`` lib has several namespaces which help with testing,
|
||||
generating and distributing code across machines. Here is
|
||||
documentation on the most interesting ones:
|
||||
|
||||
`py.test`_ write and deploy unit- and functional tests to multiple machines.
|
||||
|
||||
`py.execnet`_ rapidly deploy local or remote processes from your program.
|
||||
`py.execnet`_ elastic distributed programming.
|
||||
|
||||
`py.code`_: generate code and use advanced introspection/traceback support.
|
||||
|
||||
`py.path`_: use path objects to transparently access local and svn filesystems.
|
||||
|
||||
`py.code`_: generate python code and use advanced introspection/traceback support.
|
||||
|
||||
Minor support functionality
|
||||
Other (minor) support functionality
|
||||
===================================
|
||||
|
||||
`py lib scripts`_ to make python development easier.
|
||||
|
@ -27,6 +27,10 @@ Minor support functionality
|
|||
`miscellaneous features`_ describes some small but nice py lib features.
|
||||
|
||||
|
||||
.. _`PyPI project page`: http://pypi.python.org/pypi?%3Aaction=pkg_edit&name=py
|
||||
|
||||
For the latest Release, see `PyPI project page`_
|
||||
|
||||
.. _`download and installation`: download.html
|
||||
.. _`py-dev at codespeak net`: http://codespeak.net/mailman/listinfo/py-dev
|
||||
.. _`py.execnet`: execnet.html
|
||||
|
|
|
@ -67,10 +67,6 @@ ul a, ol a {
|
|||
dl {
|
||||
}
|
||||
|
||||
dt {
|
||||
font-weight: bold;
|
||||
}
|
||||
|
||||
dd {
|
||||
line-height: 1.5em;
|
||||
margin-bottom: 1em;
|
||||
|
@ -85,15 +81,18 @@ blockquote {
|
|||
code {
|
||||
color: Black;
|
||||
/*background-color: #dee7ec;*/
|
||||
background-color: #cccccc;
|
||||
/*background-color: #cccccc;*/
|
||||
}
|
||||
|
||||
pre {
|
||||
padding: 1em;
|
||||
border: 1px solid #8cacbb;
|
||||
border: 1px dotted #8cacbb;
|
||||
color: Black;
|
||||
/*
|
||||
background-color: #dee7ec;
|
||||
background-color: #cccccc;
|
||||
background-color: #dee7ec;
|
||||
*/
|
||||
overflow: auto;
|
||||
}
|
||||
|
||||
|
@ -111,7 +110,6 @@ a[href] { color: black; text-decoration: underline; }
|
|||
|
||||
span.menu_selected {
|
||||
color: black;
|
||||
font: 140% Verdana, Helvetica, Arial, sans-serif;
|
||||
text-decoration: none;
|
||||
padding-right: 0.3em;
|
||||
background-color: #cccccc;
|
||||
|
@ -120,14 +118,13 @@ span.menu_selected {
|
|||
|
||||
a.menu {
|
||||
/*color: #3ba6ec; */
|
||||
font: 140% Verdana, Helvetica, Arial, sans-serif;
|
||||
font: 120% Verdana, Helvetica, Arial, sans-serif;
|
||||
text-decoration: none;
|
||||
padding-right: 0.3em;
|
||||
}
|
||||
|
||||
a.menu[href]:visited, a.menu[href]:link{
|
||||
/*color: #3ba6ec; */
|
||||
font: 140% Verdana, Helvetica, Arial, sans-serif;
|
||||
text-decoration: none;
|
||||
}
|
||||
|
||||
|
@ -135,11 +132,12 @@ a.menu[href]:hover {
|
|||
/*color: black;*/
|
||||
}
|
||||
|
||||
div.project_title{
|
||||
div#pagetitle{
|
||||
/*border-spacing: 20px;*/
|
||||
font: 160% Verdana, Helvetica, Arial, sans-serif;
|
||||
color: #3ba6ec;
|
||||
vertical-align: middle;
|
||||
left: 80 px;
|
||||
padding-bottom: 0.3em;
|
||||
}
|
||||
|
||||
|
@ -566,7 +564,11 @@ td.navibar {
|
|||
padding: 0px;
|
||||
}
|
||||
|
||||
div.pagename {
|
||||
a#versioninfo {
|
||||
color: blue;
|
||||
}
|
||||
|
||||
div#pagename {
|
||||
font-size: 140%;
|
||||
color: blue;
|
||||
text-align: center;
|
||||
|
@ -593,37 +595,6 @@ a.wikiaction[href]:hover {
|
|||
/*background-color: #dddddd; */
|
||||
}
|
||||
|
||||
span.wikiuserpref {
|
||||
padding-top: 1em;
|
||||
font-size: 120%;
|
||||
}
|
||||
|
||||
div.wikitrail {
|
||||
vertical-align: bottom;
|
||||
/*font-size: -1;*/
|
||||
padding-top: 1em;
|
||||
display: none;
|
||||
}
|
||||
|
||||
div.wikiaction {
|
||||
vertical-align: middle;
|
||||
/*border-bottom: 1px solid #8cacbb;*/
|
||||
padding-bottom:1em;
|
||||
text-align: left;
|
||||
width: 100%;
|
||||
}
|
||||
|
||||
div.wikieditmenu {
|
||||
text-align: right;
|
||||
}
|
||||
|
||||
form.wikiedit {
|
||||
border: 1px solid #8cacbb;
|
||||
background-color: #f0f0f0;
|
||||
background-color: #fabf00;
|
||||
padding: 1em;
|
||||
padding-right: 0em;
|
||||
}
|
||||
|
||||
div.legenditem {
|
||||
padding-top: 0.5em;
|
||||
|
@ -769,25 +740,16 @@ td.toplist {
|
|||
}
|
||||
|
||||
img#pyimg {
|
||||
position: absolute;
|
||||
top: 4px;
|
||||
left: 4px;
|
||||
float: left;
|
||||
}
|
||||
|
||||
div#navspace {
|
||||
position: absolute;
|
||||
top: 100px;
|
||||
left: 11px;
|
||||
font-size: 100%;
|
||||
width: 150px;
|
||||
overflow: hidden; /* scroll; */
|
||||
}
|
||||
|
||||
div#metaspace {
|
||||
position: absolute;
|
||||
top: 10px;
|
||||
left: 170px;
|
||||
}
|
||||
|
||||
div#errorline {
|
||||
position: relative;
|
||||
|
@ -799,7 +761,6 @@ div#contentspace {
|
|||
position: absolute;
|
||||
/* font: 120% "Times New Roman", serif;*/
|
||||
font: 110% Verdana, Helvetica, Arial, sans-serif;
|
||||
top: 100px;
|
||||
left: 170px;
|
||||
margin-right: 5px;
|
||||
}
|
||||
|
@ -810,16 +771,17 @@ div#menubar {
|
|||
}
|
||||
|
||||
/* for the documentation page */
|
||||
div#docinfoline {
|
||||
position: relative;
|
||||
top: 5px;
|
||||
left: 0px;
|
||||
|
||||
/*background-color: #dee7ec; */
|
||||
padding: 5pt;
|
||||
padding-bottom: 1em;
|
||||
div#title{
|
||||
|
||||
font-size: 110%;
|
||||
color: black;
|
||||
/*border-width: 1pt;
|
||||
|
||||
|
||||
/*background-color: #dee7ec;
|
||||
#padding: 5pt;
|
||||
#padding-bottom: 1em;
|
||||
#color: black;
|
||||
border-width: 1pt;
|
||||
border-style: solid;*/
|
||||
|
||||
}
|
||||
|
|
|
@ -1,5 +1,6 @@
|
|||
Test configuration
|
||||
========================
|
||||
.. contents::
|
||||
:local:
|
||||
:depth: 2
|
||||
|
||||
available test options
|
||||
-----------------------------
|
||||
|
|
|
@ -15,7 +15,7 @@ need to follow a naming pattern; they have an all lowercase ``pytest_``
|
|||
prefixed name. While conftest plugins are discovered automatically,
|
||||
named plugins must be explicitely specified.
|
||||
|
||||
.. _`named plugins`: plugins.html
|
||||
.. _`named plugins`: plugin/index.html
|
||||
|
||||
.. _`tool startup`:
|
||||
.. _`test tool starts up`:
|
||||
|
@ -98,7 +98,7 @@ and minimizes version incompatibilites. Below you find some introductory
|
|||
information on particular hooks. It's sensible to look at existing
|
||||
plugins so see example usages and start off with your own plugin.
|
||||
|
||||
.. _`hook definition specification`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/hookspec.py
|
||||
.. _`hook definition specification`: plugin/hookspec.html
|
||||
|
||||
.. _`configuration hooks`:
|
||||
|
||||
|
@ -135,8 +135,8 @@ adding global py.test helpers and functionality
|
|||
If you want to make global helper functions or objects available
|
||||
to your test code you can implement:
|
||||
|
||||
def pytest_namespace(config):
|
||||
""" return dictionary with items to be made available on py.test. """
|
||||
def pytest_namespace():
|
||||
""" return dictionary with items to be made available on py.test. namespace """
|
||||
|
||||
All such returned items will be made available directly on
|
||||
the ``py.test`` namespace.
|
||||
|
|
|
@ -1,26 +1,23 @@
|
|||
==================================================
|
||||
py.test Features
|
||||
py.test features
|
||||
==================================================
|
||||
|
||||
py.test is an extensible tool for running all kinds
|
||||
of tests one one or more machines. It supports a variety
|
||||
of testing methods for your Python application and modules,
|
||||
including unit, functional, integration and doc-testing.
|
||||
|
||||
It is used in projects that run more than 10000 tests
|
||||
daily as well as single-python-module projects.
|
||||
of tests on one or more machines. It supports a variety
|
||||
of testing methods including unit, functional, integration
|
||||
and doc-testing. It is used in projects that run more
|
||||
than 10 thousand tests regularly as well as in single-file projects.
|
||||
|
||||
py.test presents a clean and powerful command line interface
|
||||
and strives to generally make testing a fun effort.
|
||||
and strives to generally make testing a fun no-boilerplate effort.
|
||||
It works and is tested against linux, windows and osx
|
||||
on CPython 2.3 - CPython 2.6.
|
||||
|
||||
py.test 1.0 works across linux, windows and osx
|
||||
and on Python 2.3 - Python 2.6.
|
||||
|
||||
More detailed feature list:
|
||||
|
||||
.. contents::
|
||||
.. contents:: List of Contents
|
||||
:depth: 1
|
||||
|
||||
.. _`autocollect`:
|
||||
|
||||
automatically collects and executes tests
|
||||
===============================================
|
||||
|
||||
|
@ -302,7 +299,7 @@ purposes such as:
|
|||
* running non-python tests
|
||||
* managing custom test state setup
|
||||
|
||||
.. _`list of plugins`: plugins.html
|
||||
.. _`list of plugins`: plugin/index.html
|
||||
.. _`extension mechanisms`: extend.html
|
||||
|
||||
.. _`reStructured Text`: http://docutils.sourceforge.net
|
||||
|
|
|
@ -136,12 +136,12 @@ managing fixtures across test modules and test runs
|
|||
|
||||
.. sourcecode:: python
|
||||
|
||||
def cached_setup(setup, teardown=None, scope="module", keyextra=None):
|
||||
def cached_setup(setup, teardown=None, scope="module", extrakey=None):
|
||||
""" cache and return result of calling setup().
|
||||
|
||||
The scope determines the cache key and ``keyextra`` adds to the cachekey.
|
||||
The scope also determines when teardown(result) will be called.
|
||||
valid scopes:
|
||||
The scope and the ``extrakey`` determine the cache key.
|
||||
The scope also determines when teardown(result)
|
||||
will be called. valid scopes are:
|
||||
scope == 'function': when the single test function run finishes.
|
||||
scope == 'module': when tests in a different module are run
|
||||
scope == 'session': when tests of the session have run.
|
||||
|
|
|
@ -0,0 +1,240 @@
|
|||
|
||||
pytest_doctest plugin
|
||||
=====================
|
||||
|
||||
collect and execute doctests from modules and test files.
|
||||
|
||||
Usage
|
||||
-------------
|
||||
|
||||
By default all files matching the ``test_*.txt`` pattern will
|
||||
be run with the ``doctest`` module. If you issue::
|
||||
|
||||
py.test --doctest-modules
|
||||
|
||||
all python files in your projects will be doctest-run
|
||||
as well.
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``--doctest-modules``
|
||||
search all python files for doctests
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_doctest.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_doctest.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_doctest.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
collect and execute doctests from modules and test files.
|
||||
|
||||
Usage
|
||||
-------------
|
||||
|
||||
By default all files matching the ``test_*.txt`` pattern will
|
||||
be run with the ``doctest`` module. If you issue::
|
||||
|
||||
py.test --doctest-modules
|
||||
|
||||
all python files in your projects will be doctest-run
|
||||
as well.
|
||||
"""
|
||||
|
||||
import py
|
||||
from py.__.code.excinfo import Repr, ReprFileLocation
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.addgroup("doctest options")
|
||||
group.addoption("--doctest-modules",
|
||||
action="store_true", default=False,
|
||||
help="search all python files for doctests",
|
||||
dest="doctestmodules")
|
||||
|
||||
def pytest_collect_file(path, parent):
|
||||
if path.ext == ".py":
|
||||
if parent.config.getvalue("doctestmodules"):
|
||||
return DoctestModule(path, parent)
|
||||
if path.check(fnmatch="test_*.txt"):
|
||||
return DoctestTextfile(path, parent)
|
||||
|
||||
class ReprFailDoctest(Repr):
|
||||
def __init__(self, reprlocation, lines):
|
||||
self.reprlocation = reprlocation
|
||||
self.lines = lines
|
||||
def toterminal(self, tw):
|
||||
for line in self.lines:
|
||||
tw.line(line)
|
||||
self.reprlocation.toterminal(tw)
|
||||
|
||||
class DoctestItem(py.test.collect.Item):
|
||||
def __init__(self, path, parent):
|
||||
name = self.__class__.__name__ + ":" + path.basename
|
||||
super(DoctestItem, self).__init__(name=name, parent=parent)
|
||||
self.fspath = path
|
||||
|
||||
def repr_failure(self, excinfo, outerr):
|
||||
if excinfo.errisinstance(py.compat.doctest.DocTestFailure):
|
||||
doctestfailure = excinfo.value
|
||||
example = doctestfailure.example
|
||||
test = doctestfailure.test
|
||||
filename = test.filename
|
||||
lineno = test.lineno + example.lineno + 1
|
||||
message = excinfo.type.__name__
|
||||
reprlocation = ReprFileLocation(filename, lineno, message)
|
||||
checker = py.compat.doctest.OutputChecker()
|
||||
REPORT_UDIFF = py.compat.doctest.REPORT_UDIFF
|
||||
filelines = py.path.local(filename).readlines(cr=0)
|
||||
i = max(test.lineno, max(0, lineno - 10)) # XXX?
|
||||
lines = []
|
||||
for line in filelines[i:lineno]:
|
||||
lines.append("%03d %s" % (i+1, line))
|
||||
i += 1
|
||||
lines += checker.output_difference(example,
|
||||
doctestfailure.got, REPORT_UDIFF).split("\n")
|
||||
return ReprFailDoctest(reprlocation, lines)
|
||||
elif excinfo.errisinstance(py.compat.doctest.UnexpectedException):
|
||||
excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
|
||||
return super(DoctestItem, self).repr_failure(excinfo, outerr)
|
||||
else:
|
||||
return super(DoctestItem, self).repr_failure(excinfo, outerr)
|
||||
|
||||
class DoctestTextfile(DoctestItem):
|
||||
def runtest(self):
|
||||
if not self._deprecated_testexecution():
|
||||
failed, tot = py.compat.doctest.testfile(
|
||||
str(self.fspath), module_relative=False,
|
||||
raise_on_error=True, verbose=0)
|
||||
|
||||
class DoctestModule(DoctestItem):
|
||||
def runtest(self):
|
||||
module = self.fspath.pyimport()
|
||||
failed, tot = py.compat.doctest.testmod(
|
||||
module, raise_on_error=True, verbose=0)
|
||||
|
||||
|
||||
#
|
||||
# Plugin tests
|
||||
#
|
||||
|
||||
class TestDoctests:
|
||||
|
||||
def test_collect_testtextfile(self, testdir):
|
||||
testdir.maketxtfile(whatever="")
|
||||
checkfile = testdir.maketxtfile(test_something="""
|
||||
alskdjalsdk
|
||||
>>> i = 5
|
||||
>>> i-1
|
||||
4
|
||||
""")
|
||||
for x in (testdir.tmpdir, checkfile):
|
||||
#print "checking that %s returns custom items" % (x,)
|
||||
items, reprec = testdir.inline_genitems(x)
|
||||
assert len(items) == 1
|
||||
assert isinstance(items[0], DoctestTextfile)
|
||||
|
||||
def test_collect_module(self, testdir):
|
||||
path = testdir.makepyfile(whatever="#")
|
||||
for p in (path, testdir.tmpdir):
|
||||
items, reprec = testdir.inline_genitems(p, '--doctest-modules')
|
||||
assert len(items) == 1
|
||||
assert isinstance(items[0], DoctestModule)
|
||||
|
||||
def test_simple_doctestfile(self, testdir):
|
||||
p = testdir.maketxtfile(test_doc="""
|
||||
>>> x = 1
|
||||
>>> x == 1
|
||||
False
|
||||
""")
|
||||
reprec = testdir.inline_run(p)
|
||||
reprec.assertoutcome(failed=1)
|
||||
|
||||
def test_doctest_unexpected_exception(self, testdir):
|
||||
from py.__.test.outcome import Failed
|
||||
|
||||
p = testdir.maketxtfile("""
|
||||
>>> i = 0
|
||||
>>> i = 1
|
||||
>>> x
|
||||
2
|
||||
""")
|
||||
reprec = testdir.inline_run(p)
|
||||
call = reprec.getcall("pytest_runtest_logreport")
|
||||
assert call.rep.failed
|
||||
assert call.rep.longrepr
|
||||
# XXX
|
||||
#testitem, = items
|
||||
#excinfo = py.test.raises(Failed, "testitem.runtest()")
|
||||
#repr = testitem.repr_failure(excinfo, ("", ""))
|
||||
#assert repr.reprlocation
|
||||
|
||||
def test_doctestmodule(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
'''
|
||||
>>> x = 1
|
||||
>>> x == 1
|
||||
False
|
||||
|
||||
'''
|
||||
""")
|
||||
reprec = testdir.inline_run(p, "--doctest-modules")
|
||||
reprec.assertoutcome(failed=1)
|
||||
|
||||
def test_doctestmodule_external(self, testdir):
|
||||
p = testdir.makepyfile("""
|
||||
#
|
||||
def somefunc():
|
||||
'''
|
||||
>>> i = 0
|
||||
>>> i + 1
|
||||
2
|
||||
'''
|
||||
""")
|
||||
result = testdir.runpytest(p, "--doctest-modules")
|
||||
result.stdout.fnmatch_lines([
|
||||
'004 *>>> i = 0',
|
||||
'005 *>>> i + 1',
|
||||
'*Expected:',
|
||||
"* 2",
|
||||
"*Got:",
|
||||
"* 1",
|
||||
"*:5: DocTestFailure"
|
||||
])
|
||||
|
||||
|
||||
def test_txtfile_failing(self, testdir):
|
||||
p = testdir.maketxtfile("""
|
||||
>>> i = 0
|
||||
>>> i + 1
|
||||
2
|
||||
""")
|
||||
result = testdir.runpytest(p)
|
||||
result.stdout.fnmatch_lines([
|
||||
'001 >>> i = 0',
|
||||
'002 >>> i + 1',
|
||||
'Expected:',
|
||||
" 2",
|
||||
"Got:",
|
||||
" 1",
|
||||
"*test_txtfile_failing.txt:2: DocTestFailure"
|
||||
])
|
||||
|
||||
.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_doctest.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,86 @@
|
|||
|
||||
pytest_execnetcleanup plugin
|
||||
============================
|
||||
|
||||
cleanup execnet gateways during test function runs.
|
||||
|
||||
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_execnetcleanup.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_execnetcleanup.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_execnetcleanup.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
cleanup execnet gateways during test function runs.
|
||||
"""
|
||||
import py
|
||||
|
||||
pytest_plugins = "xfail"
|
||||
|
||||
def pytest_configure(config):
|
||||
config.pluginmanager.register(Execnetcleanup())
|
||||
|
||||
class Execnetcleanup:
|
||||
_gateways = None
|
||||
def __init__(self, debug=False):
|
||||
self._debug = debug
|
||||
|
||||
def pyexecnet_gateway_init(self, gateway):
|
||||
if self._gateways is not None:
|
||||
self._gateways.append(gateway)
|
||||
|
||||
def pyexecnet_gateway_exit(self, gateway):
|
||||
if self._gateways is not None:
|
||||
self._gateways.remove(gateway)
|
||||
|
||||
def pytest_sessionstart(self, session):
|
||||
self._gateways = []
|
||||
|
||||
def pytest_sessionfinish(self, session, exitstatus):
|
||||
l = []
|
||||
for gw in self._gateways:
|
||||
gw.exit()
|
||||
l.append(gw)
|
||||
#for gw in l:
|
||||
# gw.join()
|
||||
|
||||
def pytest_pyfunc_call(self, __call__, pyfuncitem):
|
||||
if self._gateways is not None:
|
||||
gateways = self._gateways[:]
|
||||
res = __call__.execute(firstresult=True)
|
||||
while len(self._gateways) > len(gateways):
|
||||
self._gateways[-1].exit()
|
||||
return res
|
||||
|
||||
def test_execnetplugin(testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
import py
|
||||
import sys
|
||||
def test_hello():
|
||||
sys._gw = py.execnet.PopenGateway()
|
||||
def test_world():
|
||||
assert hasattr(sys, '_gw')
|
||||
py.test.raises(KeyError, "sys._gw.exit()") # already closed
|
||||
|
||||
""", "-s", "--debug")
|
||||
reprec.assertoutcome(passed=2)
|
||||
|
||||
.. _`pytest_execnetcleanup.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_execnetcleanup.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,110 @@
|
|||
|
||||
pytest_figleaf plugin
|
||||
=====================
|
||||
|
||||
write and report coverage data with 'figleaf'.
|
||||
|
||||
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``-F``
|
||||
trace python coverage with figleaf and write HTML for files below the current working dir
|
||||
``--figleaf-data=FIGLEAFDATA``
|
||||
path to coverage tracing file.
|
||||
``--figleaf-html=FIGLEAFHTML``
|
||||
path to the coverage html dir.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_figleaf.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_figleaf.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_figleaf.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
write and report coverage data with 'figleaf'.
|
||||
|
||||
"""
|
||||
import py
|
||||
|
||||
figleaf = py.test.importorskip("figleaf.annotate_html")
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.addgroup('figleaf options')
|
||||
group.addoption('-F', action='store_true', default=False,
|
||||
dest = 'figleaf',
|
||||
help=('trace python coverage with figleaf and write HTML '
|
||||
'for files below the current working dir'))
|
||||
group.addoption('--figleaf-data', action='store', default='.figleaf',
|
||||
dest='figleafdata',
|
||||
help='path to coverage tracing file.')
|
||||
group.addoption('--figleaf-html', action='store', default='html',
|
||||
dest='figleafhtml',
|
||||
help='path to the coverage html dir.')
|
||||
|
||||
def pytest_configure(config):
|
||||
figleaf.start()
|
||||
|
||||
def pytest_terminal_summary(terminalreporter):
|
||||
config = terminalreporter.config
|
||||
datafile = py.path.local(config.getvalue('figleafdata'))
|
||||
tw = terminalreporter._tw
|
||||
tw.sep('-', 'figleaf')
|
||||
tw.line('Writing figleaf data to %s' % (datafile))
|
||||
figleaf.stop()
|
||||
figleaf.write_coverage(str(datafile))
|
||||
coverage = get_coverage(datafile, config)
|
||||
reportdir = py.path.local(config.getvalue('figleafhtml'))
|
||||
tw.line('Writing figleaf html to file://%s' % (reportdir))
|
||||
figleaf.annotate_html.prepare_reportdir(str(reportdir))
|
||||
exclude = []
|
||||
figleaf.annotate_html.report_as_html(coverage,
|
||||
str(reportdir), exclude, {})
|
||||
|
||||
def get_coverage(datafile, config):
|
||||
# basepath = config.topdir
|
||||
basepath = py.path.local()
|
||||
data = figleaf.read_coverage(str(datafile))
|
||||
d = {}
|
||||
coverage = figleaf.combine_coverage(d, data)
|
||||
for path in coverage.keys():
|
||||
if not py.path.local(path).relto(basepath):
|
||||
del coverage[path]
|
||||
return coverage
|
||||
|
||||
|
||||
def test_functional(testdir):
|
||||
py.test.importorskip("figleaf")
|
||||
testdir.plugins.append("figleaf")
|
||||
testdir.makepyfile("""
|
||||
def f():
|
||||
x = 42
|
||||
def test_whatever():
|
||||
pass
|
||||
""")
|
||||
result = testdir.runpytest('-F')
|
||||
assert result.ret == 0
|
||||
assert result.stdout.fnmatch_lines([
|
||||
'*figleaf html*'
|
||||
])
|
||||
#print result.stdout.str()
|
||||
|
||||
.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_figleaf.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,72 @@
|
|||
|
||||
pytest_hooklog plugin
|
||||
=====================
|
||||
|
||||
log invocations of extension hooks to a file.
|
||||
|
||||
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``--hooklog=HOOKLOG``
|
||||
write hook calls to the given file.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_hooklog.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_hooklog.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_hooklog.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
""" log invocations of extension hooks to a file. """
|
||||
import py
|
||||
|
||||
def pytest_addoption(parser):
|
||||
parser.addoption("--hooklog", dest="hooklog", default=None,
|
||||
help="write hook calls to the given file.")
|
||||
|
||||
def pytest_configure(config):
|
||||
hooklog = config.getvalue("hooklog")
|
||||
if hooklog:
|
||||
assert not config.pluginmanager.comregistry.logfile
|
||||
config.pluginmanager.comregistry.logfile = open(hooklog, 'w')
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
f = config.pluginmanager.comregistry.logfile
|
||||
if f:
|
||||
f.close()
|
||||
config.pluginmanager.comregistry.logfile = None
|
||||
|
||||
# ===============================================================================
|
||||
# plugin tests
|
||||
# ===============================================================================
|
||||
|
||||
def test_functional(testdir):
|
||||
testdir.makepyfile("""
|
||||
def test_pass():
|
||||
pass
|
||||
""")
|
||||
testdir.runpytest("--hooklog=hook.log")
|
||||
s = testdir.tmpdir.join("hook.log").read()
|
||||
assert s.find("pytest_sessionstart") != -1
|
||||
assert s.find("ItemTestReport") != -1
|
||||
assert s.find("sessionfinish") != -1
|
||||
|
||||
.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_hooklog.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,165 @@
|
|||
|
||||
hook specification sourcecode
|
||||
=============================
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
hook specifications for py.test plugins
|
||||
"""
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Command line and configuration
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_addoption(parser):
|
||||
""" called before commandline parsing. """
|
||||
|
||||
def pytest_namespace():
|
||||
    """ return dict of name->object which will get stored at the py.test namespace. """
|
||||
|
||||
def pytest_configure(config):
|
||||
""" called after command line options have been parsed.
|
||||
and all plugins and initial conftest files been loaded.
|
||||
"""
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
""" called before test process is exited. """
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# collection hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_collect_directory(path, parent):
|
||||
""" return Collection node or None for the given path. """
|
||||
|
||||
def pytest_collect_file(path, parent):
|
||||
""" return Collection node or None for the given path. """
|
||||
|
||||
def pytest_collectstart(collector):
|
||||
""" collector starts collecting. """
|
||||
|
||||
def pytest_collectreport(rep):
|
||||
""" collector finished collecting. """
|
||||
|
||||
def pytest_deselected(items):
|
||||
""" called for test items deselected by keyword. """
|
||||
|
||||
def pytest_make_collect_report(collector):
|
||||
""" perform a collection and return a collection. """
|
||||
pytest_make_collect_report.firstresult = True
|
||||
|
||||
# XXX rename to item_collected()? meaning in distribution context?
|
||||
def pytest_itemstart(item, node=None):
|
||||
""" test item gets collected. """
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# Python test function related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_pycollect_makeitem(collector, name, obj):
|
||||
""" return custom item/collector for a python object in a module, or None. """
|
||||
pytest_pycollect_makeitem.firstresult = True
|
||||
|
||||
def pytest_pyfunc_call(pyfuncitem):
|
||||
""" perform function call to the with the given function arguments. """
|
||||
pytest_pyfunc_call.firstresult = True
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
""" generate (multiple) parametrized calls to a test function."""
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# generic runtest related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_runtest_protocol(item):
|
||||
""" implement fixture, run and report protocol. """
|
||||
pytest_runtest_protocol.firstresult = True
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
""" called before pytest_runtest_call(). """
|
||||
|
||||
def pytest_runtest_call(item):
|
||||
""" execute test item. """
|
||||
|
||||
def pytest_runtest_teardown(item):
|
||||
""" called after pytest_runtest_call(). """
|
||||
|
||||
def pytest_runtest_makereport(item, call):
|
||||
""" make ItemTestReport for the given item and call outcome. """
|
||||
pytest_runtest_makereport.firstresult = True
|
||||
|
||||
def pytest_runtest_logreport(rep):
|
||||
""" process item test report. """
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# test session related hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_sessionstart(session):
|
||||
""" before session.main() is called. """
|
||||
|
||||
def pytest_sessionfinish(session, exitstatus):
|
||||
""" whole test run finishes. """
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# hooks for influencing reporting (invoked from pytest_terminal)
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_report_teststatus(rep):
|
||||
""" return shortletter and verbose word. """
|
||||
pytest_report_teststatus.firstresult = True
|
||||
|
||||
def pytest_terminal_summary(terminalreporter):
|
||||
""" add additional section in terminal summary reporting. """
|
||||
|
||||
def pytest_report_iteminfo(item):
|
||||
""" return (fspath, lineno, name) for the item.
|
||||
the information is used for result display and to sort tests
|
||||
"""
|
||||
pytest_report_iteminfo.firstresult = True
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# doctest hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_doctest_prepare_content(content):
|
||||
""" return processed content for a given doctest"""
|
||||
pytest_doctest_prepare_content.firstresult = True
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# distributed testing
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_testnodeready(node):
|
||||
""" Test Node is ready to operate. """
|
||||
|
||||
def pytest_testnodedown(node, error):
|
||||
""" Test Node is down. """
|
||||
|
||||
def pytest_rescheduleitems(items):
|
||||
""" reschedule Items from a node that went down. """
|
||||
|
||||
def pytest_looponfailinfo(failreports, rootdirs):
|
||||
""" info for repeating failing tests. """
|
||||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# error handling and internal debugging hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_plugin_registered(plugin):
|
||||
""" a new py lib plugin got registered. """
|
||||
|
||||
def pytest_plugin_unregistered(plugin):
|
||||
""" a py lib plugin got unregistered. """
|
||||
|
||||
def pytest_internalerror(excrepr):
|
||||
""" called for internal errors. """
|
||||
|
||||
def pytest_keyboard_interrupt(excinfo):
|
||||
""" called for keyboard interrupt. """
|
||||
|
||||
def pytest_trace(category, msg):
|
||||
""" called for debug info. """
|
||||
|
|
@ -0,0 +1,71 @@
|
|||
|
||||
Plugins related to Python test functions and programs
|
||||
=====================================================
|
||||
|
||||
xfail_ mark python tests as expected-to-fail and report them separately.
|
||||
|
||||
figleaf_ write and report coverage data with 'figleaf'.
|
||||
|
||||
monkeypatch_ safely patch object attributes, dicts and environment variables.
|
||||
|
||||
iocapture_ convenient capturing of writes to stdout/stderror streams
|
||||
|
||||
recwarn_ helpers for asserting deprecation and other warnings.
|
||||
|
||||
|
||||
Plugins for other testing styles and languages
|
||||
==============================================
|
||||
|
||||
unittest_ automatically discover and run traditional "unittest.py" style tests.
|
||||
|
||||
doctest_ collect and execute doctests from modules and test files.
|
||||
|
||||
oejskit_ run javascript tests in real life browsers
|
||||
|
||||
restdoc_ perform ReST syntax, local and remote reference tests on .rst/.txt files.
|
||||
|
||||
|
||||
Plugins for generic reporting and failure logging
|
||||
=================================================
|
||||
|
||||
pocoo_ submit failure information to paste.pocoo.org
|
||||
|
||||
resultlog_ resultlog plugin for machine-readable logging of test results.
|
||||
|
||||
terminal_ terminal reporting of the full testing process.
|
||||
|
||||
|
||||
internal plugins / core functionality
|
||||
=====================================
|
||||
|
||||
pdb_ interactive debugging with the Python Debugger.
|
||||
|
||||
keyword_ py.test.mark / keyword plugin
|
||||
|
||||
hooklog_ log invocations of extension hooks to a file.
|
||||
|
||||
runner_ collect and run test items and create reports.
|
||||
|
||||
execnetcleanup_ cleanup execnet gateways during test function runs.
|
||||
|
||||
pytester_ funcargs and support code for testing py.test's own functionality.
|
||||
|
||||
|
||||
.. _`xfail`: xfail.html
|
||||
.. _`figleaf`: figleaf.html
|
||||
.. _`monkeypatch`: monkeypatch.html
|
||||
.. _`iocapture`: iocapture.html
|
||||
.. _`recwarn`: recwarn.html
|
||||
.. _`unittest`: unittest.html
|
||||
.. _`doctest`: doctest.html
|
||||
.. _`oejskit`: oejskit.html
|
||||
.. _`restdoc`: restdoc.html
|
||||
.. _`pocoo`: pocoo.html
|
||||
.. _`resultlog`: resultlog.html
|
||||
.. _`terminal`: terminal.html
|
||||
.. _`pdb`: pdb.html
|
||||
.. _`keyword`: keyword.html
|
||||
.. _`hooklog`: hooklog.html
|
||||
.. _`runner`: runner.html
|
||||
.. _`execnetcleanup`: execnetcleanup.html
|
||||
.. _`pytester`: pytester.html
|
|
@ -0,0 +1,171 @@
|
|||
|
||||
pytest_iocapture plugin
|
||||
=======================
|
||||
|
||||
convenient capturing of writes to stdout/stderror streams
|
||||
|
||||
and file descriptors.
|
||||
|
||||
Example Usage
|
||||
----------------------
|
||||
|
||||
You can use the `capsys funcarg`_ to capture writes
|
||||
to stdout and stderr streams by using it in a test
|
||||
likes this:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
def test_myoutput(capsys):
|
||||
print "hello"
|
||||
print >>sys.stderr, "world"
|
||||
out, err = capsys.reset()
|
||||
assert out == "hello\n"
|
||||
assert err == "world\n"
|
||||
print "next"
|
||||
out, err = capsys.reset()
|
||||
assert out == "next\n"
|
||||
|
||||
The ``reset()`` call returns a tuple and will restart
|
||||
capturing so that you can successively check for output.
|
||||
After the test function finishes the original streams
|
||||
will be restored.
|
||||
.. _`capsys funcarg`:
|
||||
|
||||
|
||||
the 'capsys' test function argument
|
||||
-----------------------------------
|
||||
|
||||
captures writes to sys.stdout/sys.stderr and makes
|
||||
them available successively via a ``capsys.reset()`` method
|
||||
which returns a ``(out, err)`` tuple of captured strings.
|
||||
.. _`capfd funcarg`:
|
||||
|
||||
|
||||
the 'capfd' test function argument
|
||||
----------------------------------
|
||||
|
||||
captures writes to file descriptors 1 and 2 and makes
|
||||
them available successively via a ``capfd.reset()`` method
|
||||
which returns a ``(out, err)`` tuple of captured strings.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_iocapture.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_iocapture.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_iocapture.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
convenient capturing of writes to stdout/stderror streams
|
||||
and file descriptors.
|
||||
|
||||
Example Usage
|
||||
----------------------
|
||||
|
||||
You can use the `capsys funcarg`_ to capture writes
|
||||
to stdout and stderr streams by using it in a test
|
||||
    like this:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
def test_myoutput(capsys):
|
||||
print "hello"
|
||||
print >>sys.stderr, "world"
|
||||
out, err = capsys.reset()
|
||||
assert out == "hello\\n"
|
||||
assert err == "world\\n"
|
||||
print "next"
|
||||
out, err = capsys.reset()
|
||||
assert out == "next\\n"
|
||||
|
||||
The ``reset()`` call returns a tuple and will restart
|
||||
capturing so that you can successively check for output.
|
||||
After the test function finishes the original streams
|
||||
will be restored.
|
||||
"""
|
||||
|
||||
import py
|
||||
|
||||
def pytest_funcarg__capsys(request):
|
||||
"""captures writes to sys.stdout/sys.stderr and makes
|
||||
them available successively via a ``capsys.reset()`` method
|
||||
which returns a ``(out, err)`` tuple of captured strings.
|
||||
"""
|
||||
capture = Capture(py.io.StdCapture)
|
||||
request.addfinalizer(capture.finalize)
|
||||
return capture
|
||||
|
||||
def pytest_funcarg__capfd(request):
|
||||
"""captures writes to file descriptors 1 and 2 and makes
|
||||
    them available successively via a ``capfd.reset()`` method
|
||||
which returns a ``(out, err)`` tuple of captured strings.
|
||||
"""
|
||||
capture = Capture(py.io.StdCaptureFD)
|
||||
request.addfinalizer(capture.finalize)
|
||||
return capture
|
||||
|
||||
def pytest_pyfunc_call(pyfuncitem):
|
||||
if hasattr(pyfuncitem, 'funcargs'):
|
||||
for funcarg, value in pyfuncitem.funcargs.items():
|
||||
if funcarg == "capsys" or funcarg == "capfd":
|
||||
value.reset()
|
||||
|
||||
class Capture:
|
||||
_capture = None
|
||||
def __init__(self, captureclass):
|
||||
self._captureclass = captureclass
|
||||
|
||||
def finalize(self):
|
||||
if self._capture:
|
||||
self._capture.reset()
|
||||
|
||||
def reset(self):
|
||||
res = None
|
||||
if self._capture:
|
||||
res = self._capture.reset()
|
||||
self._capture = self._captureclass()
|
||||
return res
|
||||
|
||||
class TestCapture:
|
||||
def test_std_functional(self, testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
def test_hello(capsys):
|
||||
print 42
|
||||
out, err = capsys.reset()
|
||||
assert out.startswith("42")
|
||||
""")
|
||||
reprec.assertoutcome(passed=1)
|
||||
|
||||
def test_stdfd_functional(self, testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
def test_hello(capfd):
|
||||
import os
|
||||
os.write(1, "42")
|
||||
out, err = capfd.reset()
|
||||
assert out.startswith("42")
|
||||
""")
|
||||
reprec.assertoutcome(passed=1)
|
||||
|
||||
def test_funcall_yielded_no_funcargs(self, testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
def test_hello():
|
||||
yield lambda: None
|
||||
""")
|
||||
reprec.assertoutcome(passed=1)
|
||||
|
||||
.. _`pytest_iocapture.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_iocapture.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,110 @@
|
|||
|
||||
pytest_keyword plugin
|
||||
=====================
|
||||
|
||||
py.test.mark / keyword plugin
|
||||
|
||||
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_keyword.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_keyword.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_keyword.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
py.test.mark / keyword plugin
|
||||
"""
|
||||
import py
|
||||
|
||||
def pytest_namespace():
|
||||
mark = KeywordDecorator({})
|
||||
return {'mark': mark}
|
||||
|
||||
class KeywordDecorator:
|
||||
""" decorator for setting function attributes. """
|
||||
def __init__(self, keywords, lastname=None):
|
||||
self._keywords = keywords
|
||||
self._lastname = lastname
|
||||
|
||||
def __call__(self, func=None, **kwargs):
|
||||
if func is None:
|
||||
kw = self._keywords.copy()
|
||||
kw.update(kwargs)
|
||||
return KeywordDecorator(kw)
|
||||
elif not hasattr(func, 'func_dict'):
|
||||
kw = self._keywords.copy()
|
||||
name = self._lastname
|
||||
if name is None:
|
||||
name = "mark"
|
||||
kw[name] = func
|
||||
return KeywordDecorator(kw)
|
||||
func.func_dict.update(self._keywords)
|
||||
return func
|
||||
|
||||
def __getattr__(self, name):
|
||||
if name[0] == "_":
|
||||
raise AttributeError(name)
|
||||
kw = self._keywords.copy()
|
||||
kw[name] = True
|
||||
return self.__class__(kw, lastname=name)
|
||||
|
||||
def test_pytest_mark_getattr():
|
||||
mark = KeywordDecorator({})
|
||||
def f(): pass
|
||||
|
||||
mark.hello(f)
|
||||
assert f.hello == True
|
||||
|
||||
mark.hello("test")(f)
|
||||
assert f.hello == "test"
|
||||
|
||||
py.test.raises(AttributeError, "mark._hello")
|
||||
py.test.raises(AttributeError, "mark.__str__")
|
||||
|
||||
def test_pytest_mark_call():
|
||||
mark = KeywordDecorator({})
|
||||
def f(): pass
|
||||
mark(x=3)(f)
|
||||
assert f.x == 3
|
||||
def g(): pass
|
||||
mark(g)
|
||||
assert not g.func_dict
|
||||
|
||||
mark.hello(f)
|
||||
assert f.hello == True
|
||||
|
||||
mark.hello("test")(f)
|
||||
assert f.hello == "test"
|
||||
|
||||
mark("x1")(f)
|
||||
assert f.mark == "x1"
|
||||
|
||||
def test_mark_plugin(testdir):
|
||||
p = testdir.makepyfile("""
|
||||
import py
|
||||
pytest_plugins = "keyword"
|
||||
@py.test.mark.hello
|
||||
def test_hello():
|
||||
assert hasattr(test_hello, 'hello')
|
||||
""")
|
||||
result = testdir.runpytest(p)
|
||||
assert result.stdout.fnmatch_lines(["*passed*"])
|
||||
|
||||
.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_keyword.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,189 @@
|
|||
|
||||
pytest_monkeypatch plugin
|
||||
=========================
|
||||
|
||||
safely patch object attributes, dicts and environment variables.
|
||||
|
||||
Usage
|
||||
----------------
|
||||
|
||||
Use the `monkeypatch funcarg`_ to safely patch the environment
|
||||
variables, object attributes or dictionaries. For example, if you want
|
||||
to set the environment variable ``ENV1`` and patch the
|
||||
``os.path.abspath`` function to return a particular value during a test
|
||||
function execution you can write it down like this:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
def test_mytest(monkeypatch):
|
||||
monkeypatch.setenv('ENV1', 'myval')
|
||||
monkeypatch.setattr(os.path, 'abspath', lambda x: '/')
|
||||
... # your test code
|
||||
|
||||
The function argument will do the modifications and memorize the
|
||||
old state. After the test function finished execution all
|
||||
modifications will be reverted. See the `monkeypatch blog post`_
|
||||
for an extensive discussion.
|
||||
|
||||
.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
|
||||
.. _`monkeypatch funcarg`:
|
||||
|
||||
|
||||
the 'monkeypatch' test function argument
|
||||
----------------------------------------
|
||||
|
||||
The returned ``monkeypatch`` funcarg provides three
|
||||
helper methods to modify objects, dictionaries or os.environ::
|
||||
|
||||
monkeypatch.setattr(obj, name, value)
|
||||
monkeypatch.setitem(mapping, name, value)
|
||||
monkeypatch.setenv(name, value)
|
||||
|
||||
All such modifications will be undone when the requesting
|
||||
test function finished its execution.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_monkeypatch.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_monkeypatch.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_monkeypatch.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
safely patch object attributes, dicts and environment variables.
|
||||
|
||||
Usage
|
||||
----------------
|
||||
|
||||
Use the `monkeypatch funcarg`_ to safely patch the environment
|
||||
variables, object attributes or dictionaries. For example, if you want
|
||||
to set the environment variable ``ENV1`` and patch the
|
||||
``os.path.abspath`` function to return a particular value during a test
|
||||
function execution you can write it down like this:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
def test_mytest(monkeypatch):
|
||||
monkeypatch.setenv('ENV1', 'myval')
|
||||
monkeypatch.setattr(os.path, 'abspath', lambda x: '/')
|
||||
... # your test code
|
||||
|
||||
The function argument will do the modifications and memorize the
|
||||
old state. After the test function finished execution all
|
||||
modifications will be reverted. See the `monkeypatch blog post`_
|
||||
for an extensive discussion.
|
||||
|
||||
.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
def pytest_funcarg__monkeypatch(request):
|
||||
"""The returned ``monkeypatch`` funcarg provides three
|
||||
helper methods to modify objects, dictionaries or os.environ::
|
||||
|
||||
monkeypatch.setattr(obj, name, value)
|
||||
monkeypatch.setitem(mapping, name, value)
|
||||
monkeypatch.setenv(name, value)
|
||||
|
||||
All such modifications will be undone when the requesting
|
||||
test function finished its execution.
|
||||
"""
|
||||
monkeypatch = MonkeyPatch()
|
||||
request.addfinalizer(monkeypatch.finalize)
|
||||
return monkeypatch
|
||||
|
||||
notset = object()
|
||||
|
||||
class MonkeyPatch:
|
||||
def __init__(self):
|
||||
self._setattr = []
|
||||
self._setitem = []
|
||||
|
||||
def setattr(self, obj, name, value):
|
||||
self._setattr.insert(0, (obj, name, getattr(obj, name, notset)))
|
||||
setattr(obj, name, value)
|
||||
|
||||
def setitem(self, dictionary, name, value):
|
||||
self._setitem.insert(0, (dictionary, name, dictionary.get(name, notset)))
|
||||
dictionary[name] = value
|
||||
|
||||
def setenv(self, name, value):
|
||||
self.setitem(os.environ, name, str(value))
|
||||
|
||||
def finalize(self):
|
||||
for obj, name, value in self._setattr:
|
||||
if value is not notset:
|
||||
setattr(obj, name, value)
|
||||
else:
|
||||
delattr(obj, name)
|
||||
for dictionary, name, value in self._setitem:
|
||||
if value is notset:
|
||||
del dictionary[name]
|
||||
else:
|
||||
dictionary[name] = value
|
||||
|
||||
|
||||
def test_setattr():
|
||||
class A:
|
||||
x = 1
|
||||
monkeypatch = MonkeyPatch()
|
||||
monkeypatch.setattr(A, 'x', 2)
|
||||
assert A.x == 2
|
||||
monkeypatch.setattr(A, 'x', 3)
|
||||
assert A.x == 3
|
||||
monkeypatch.finalize()
|
||||
assert A.x == 1
|
||||
|
||||
monkeypatch.setattr(A, 'y', 3)
|
||||
assert A.y == 3
|
||||
monkeypatch.finalize()
|
||||
assert not hasattr(A, 'y')
|
||||
|
||||
|
||||
def test_setitem():
|
||||
d = {'x': 1}
|
||||
monkeypatch = MonkeyPatch()
|
||||
monkeypatch.setitem(d, 'x', 2)
|
||||
monkeypatch.setitem(d, 'y', 1700)
|
||||
assert d['x'] == 2
|
||||
assert d['y'] == 1700
|
||||
monkeypatch.setitem(d, 'x', 3)
|
||||
assert d['x'] == 3
|
||||
monkeypatch.finalize()
|
||||
assert d['x'] == 1
|
||||
assert 'y' not in d
|
||||
|
||||
def test_setenv():
|
||||
monkeypatch = MonkeyPatch()
|
||||
monkeypatch.setenv('XYZ123', 2)
|
||||
import os
|
||||
assert os.environ['XYZ123'] == "2"
|
||||
monkeypatch.finalize()
|
||||
assert 'XYZ123' not in os.environ
|
||||
|
||||
def test_monkeypatch_plugin(testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
pytest_plugins = 'pytest_monkeypatch',
|
||||
def test_method(monkeypatch):
|
||||
assert monkeypatch.__class__.__name__ == "MonkeyPatch"
|
||||
""")
|
||||
res = reprec.countoutcomes()
|
||||
assert tuple(res) == (1, 0, 0), res
|
||||
|
||||
.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_monkeypatch.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,192 @@
|
|||
|
||||
pytest_pdb plugin
|
||||
=================
|
||||
|
||||
interactive debugging with the Python Debugger.
|
||||
|
||||
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``--pdb``
|
||||
start pdb (the Python debugger) on errors.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_pdb.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_pdb.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_pdb.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
interactive debugging with the Python Debugger.
|
||||
"""
|
||||
import py
|
||||
import pdb, sys, linecache
|
||||
from py.__.test.outcome import Skipped
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("general")
|
||||
group._addoption('--pdb',
|
||||
action="store_true", dest="usepdb", default=False,
|
||||
help="start pdb (the Python debugger) on errors.")
|
||||
|
||||
|
||||
def pytest_configure(config):
|
||||
if config.option.usepdb:
|
||||
if config.getvalue("looponfail"):
|
||||
raise config.Error("--pdb incompatible with --looponfail.")
|
||||
if config.option.dist != "no":
|
||||
raise config.Error("--pdb incomptaible with distributing tests.")
|
||||
config.pluginmanager.register(PdbInvoke())
|
||||
|
||||
class PdbInvoke:
|
||||
def pytest_runtest_makereport(self, item, call):
|
||||
if call.excinfo and not call.excinfo.errisinstance(Skipped):
|
||||
tw = py.io.TerminalWriter()
|
||||
repr = call.excinfo.getrepr()
|
||||
repr.toterminal(tw)
|
||||
post_mortem(call.excinfo._excinfo[2])
|
||||
|
||||
class Pdb(py.std.pdb.Pdb):
|
||||
def do_list(self, arg):
|
||||
self.lastcmd = 'list'
|
||||
last = None
|
||||
if arg:
|
||||
try:
|
||||
x = eval(arg, {}, {})
|
||||
if type(x) == type(()):
|
||||
first, last = x
|
||||
first = int(first)
|
||||
last = int(last)
|
||||
if last < first:
|
||||
# Assume it's a count
|
||||
last = first + last
|
||||
else:
|
||||
first = max(1, int(x) - 5)
|
||||
except:
|
||||
print '*** Error in argument:', repr(arg)
|
||||
return
|
||||
elif self.lineno is None:
|
||||
first = max(1, self.curframe.f_lineno - 5)
|
||||
else:
|
||||
first = self.lineno + 1
|
||||
if last is None:
|
||||
last = first + 10
|
||||
filename = self.curframe.f_code.co_filename
|
||||
breaklist = self.get_file_breaks(filename)
|
||||
try:
|
||||
for lineno in range(first, last+1):
|
||||
# start difference from normal do_line
|
||||
line = self._getline(filename, lineno)
|
||||
# end difference from normal do_line
|
||||
if not line:
|
||||
print '[EOF]'
|
||||
break
|
||||
else:
|
||||
s = repr(lineno).rjust(3)
|
||||
if len(s) < 4: s = s + ' '
|
||||
if lineno in breaklist: s = s + 'B'
|
||||
else: s = s + ' '
|
||||
if lineno == self.curframe.f_lineno:
|
||||
s = s + '->'
|
||||
print s + '\t' + line,
|
||||
self.lineno = lineno
|
||||
except KeyboardInterrupt:
|
||||
pass
|
||||
do_l = do_list
|
||||
|
||||
def _getline(self, filename, lineno):
|
||||
if hasattr(filename, "__source__"):
|
||||
try:
|
||||
return filename.__source__.lines[lineno - 1] + "\n"
|
||||
except IndexError:
|
||||
return None
|
||||
return linecache.getline(filename, lineno)
|
||||
|
||||
def get_stack(self, f, t):
|
||||
# Modified from bdb.py to be able to walk the stack beyond generators,
|
||||
# which does not work in the normal pdb :-(
|
||||
stack, i = pdb.Pdb.get_stack(self, f, t)
|
||||
if f is None:
|
||||
i = max(0, len(stack) - 1)
|
||||
return stack, i
|
||||
|
||||
def post_mortem(t):
|
||||
# modified from pdb.py for the new get_stack() implementation
|
||||
p = Pdb()
|
||||
p.reset()
|
||||
p.interaction(None, t)
|
||||
|
||||
def set_trace():
|
||||
# again, a copy of the version in pdb.py
|
||||
Pdb().set_trace(sys._getframe().f_back)
|
||||
|
||||
|
||||
class TestPDB:
|
||||
def pytest_funcarg__pdblist(self, request):
|
||||
monkeypatch = request.getfuncargvalue("monkeypatch")
|
||||
pdblist = []
|
||||
def mypdb(*args):
|
||||
pdblist.append(args)
|
||||
monkeypatch.setitem(globals(), 'post_mortem', mypdb)
|
||||
return pdblist
|
||||
|
||||
def test_incompatibility_messages(self, testdir):
|
||||
Error = py.test.config.Error
|
||||
py.test.raises(Error, "testdir.parseconfigure('--pdb', '--looponfail')")
|
||||
py.test.raises(Error, "testdir.parseconfigure('--pdb', '-n 3')")
|
||||
py.test.raises(Error, "testdir.parseconfigure('--pdb', '-d')")
|
||||
|
||||
def test_pdb_on_fail(self, testdir, pdblist):
|
||||
rep = testdir.inline_runsource1('--pdb', """
|
||||
def test_func():
|
||||
assert 0
|
||||
""")
|
||||
assert rep.failed
|
||||
assert len(pdblist) == 1
|
||||
tb = py.code.Traceback(pdblist[0][0])
|
||||
assert tb[-1].name == "test_func"
|
||||
|
||||
def test_pdb_on_skip(self, testdir, pdblist):
|
||||
rep = testdir.inline_runsource1('--pdb', """
|
||||
import py
|
||||
def test_func():
|
||||
py.test.skip("hello")
|
||||
""")
|
||||
assert rep.skipped
|
||||
assert len(pdblist) == 0
|
||||
|
||||
def test_pdb_interaction(self, testdir):
|
||||
p1 = testdir.makepyfile("""
|
||||
def test_1():
|
||||
i = 0
|
||||
assert i == 1
|
||||
""")
|
||||
child = testdir.spawn_pytest("--pdb %s" % p1)
|
||||
#child.expect(".*def test_1.*")
|
||||
child.expect(".*i = 0.*")
|
||||
child.expect("(Pdb)")
|
||||
child.sendeof()
|
||||
child.expect("1 failed")
|
||||
if child.isalive():
|
||||
child.wait()
|
||||
|
||||
.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_pdb.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,101 @@
|
|||
|
||||
pytest_pocoo plugin
|
||||
===================
|
||||
|
||||
submit failure information to paste.pocoo.org
|
||||
|
||||
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``-P, --pocoo-sendfailures``
|
||||
send failures to http://paste.pocoo.org paste service
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_pocoo.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_pocoo.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_pocoo.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
submit failure information to paste.pocoo.org
|
||||
"""
|
||||
import py
|
||||
|
||||
class url:
|
||||
base = "http://paste.pocoo.org"
|
||||
xmlrpc = base + "/xmlrpc/"
|
||||
show = base + "/show/"
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.addgroup("pocoo plugin")
|
||||
group.addoption('-P', '--pocoo-sendfailures',
|
||||
action='store_true', dest="pocoo_sendfailures",
|
||||
help="send failures to %s paste service" %(url.base,))
|
||||
|
||||
def getproxy():
|
||||
return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
|
||||
|
||||
def pytest_terminal_summary(terminalreporter):
|
||||
if terminalreporter.config.option.pocoo_sendfailures:
|
||||
tr = terminalreporter
|
||||
if 'failed' in tr.stats and tr.config.option.tbstyle != "no":
|
||||
terminalreporter.write_sep("=", "Sending failures to %s" %(url.base,))
|
||||
terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,))
|
||||
#print self.__class__.getproxy
|
||||
#print self.__class__, id(self.__class__)
|
||||
serverproxy = getproxy()
|
||||
for ev in terminalreporter.stats.get('failed'):
|
||||
tw = py.io.TerminalWriter(stringio=True)
|
||||
ev.toterminal(tw)
|
||||
s = tw.stringio.getvalue()
|
||||
# XXX add failure summary
|
||||
assert len(s)
|
||||
terminalreporter.write_line("newpaste() ...")
|
||||
proxyid = serverproxy.newPaste("python", s)
|
||||
terminalreporter.write_line("%s%s\n" % (url.show, proxyid))
|
||||
break
|
||||
|
||||
|
||||
def test_toproxy(testdir, monkeypatch):
|
||||
l = []
|
||||
class MockProxy:
|
||||
def newPaste(self, language, code):
|
||||
l.append((language, code))
|
||||
monkeypatch.setitem(globals(), 'getproxy', MockProxy)
|
||||
testdir.plugins.insert(0, globals())
|
||||
testpath = testdir.makepyfile("""
|
||||
import py
|
||||
def test_pass():
|
||||
pass
|
||||
def test_fail():
|
||||
assert 0
|
||||
def test_skip():
|
||||
py.test.skip("")
|
||||
""")
|
||||
reprec = testdir.inline_run(testpath, "-P")
|
||||
assert len(l) == 1
|
||||
assert l[0][0] == "python"
|
||||
s = l[0][1]
|
||||
assert s.find("def test_fail") != -1
|
||||
assert reprec.countoutcomes() == [1,1,1]
|
||||
|
||||
.. _`pytest_pocoo.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_pocoo.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,568 @@
|
|||
|
||||
pytest_pytester plugin
|
||||
======================
|
||||
|
||||
funcargs and support code for testing py.test's own functionality.
|
||||
|
||||
|
||||
.. _`testdir funcarg`:
|
||||
|
||||
|
||||
the 'testdir' test function argument
|
||||
------------------------------------
|
||||
|
||||
XXX missing docstring
|
||||
.. _`reportrecorder funcarg`:
|
||||
|
||||
|
||||
the 'reportrecorder' test function argument
|
||||
-------------------------------------------
|
||||
|
||||
XXX missing docstring
|
||||
.. _`linecomp funcarg`:
|
||||
|
||||
|
||||
the 'linecomp' test function argument
|
||||
-------------------------------------
|
||||
|
||||
XXX missing docstring
|
||||
.. _`LineMatcher funcarg`:
|
||||
|
||||
|
||||
the 'LineMatcher' test function argument
|
||||
----------------------------------------
|
||||
|
||||
XXX missing docstring
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_pytester.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_pytester.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_pytester.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
funcargs and support code for testing py.test's own functionality.
|
||||
"""
|
||||
|
||||
import py
|
||||
import sys, os
|
||||
import inspect
|
||||
from py.__.test.config import Config as pytestConfig
|
||||
import hookspec
|
||||
|
||||
pytest_plugins = '_pytest'
|
||||
|
||||
def pytest_funcarg__linecomp(request):
|
||||
return LineComp()
|
||||
|
||||
def pytest_funcarg__LineMatcher(request):
|
||||
return LineMatcher
|
||||
|
||||
def pytest_funcarg__testdir(request):
|
||||
tmptestdir = TmpTestdir(request)
|
||||
return tmptestdir
|
||||
|
||||
def pytest_funcarg__reportrecorder(request):
|
||||
reprec = ReportRecorder(py._com.comregistry)
|
||||
request.addfinalizer(lambda: reprec.comregistry.unregister(reprec))
|
||||
return reprec
|
||||
|
||||
class RunResult:
|
||||
def __init__(self, ret, outlines, errlines):
|
||||
self.ret = ret
|
||||
self.outlines = outlines
|
||||
self.errlines = errlines
|
||||
self.stdout = LineMatcher(outlines)
|
||||
self.stderr = LineMatcher(errlines)
|
||||
|
||||
class TmpTestdir:
|
||||
def __init__(self, request):
|
||||
self.request = request
|
||||
self._pytest = request.getfuncargvalue("_pytest")
|
||||
# XXX remove duplication with tmpdir plugin
|
||||
basetmp = request.config.ensuretemp("testdir")
|
||||
name = request.function.__name__
|
||||
for i in range(100):
|
||||
try:
|
||||
tmpdir = basetmp.mkdir(name + str(i))
|
||||
except py.error.EEXIST:
|
||||
continue
|
||||
break
|
||||
# we need to create another subdir
|
||||
# because Directory.collect() currently loads
|
||||
# conftest.py from sibling directories
|
||||
self.tmpdir = tmpdir.mkdir(name)
|
||||
self.plugins = []
|
||||
self._syspathremove = []
|
||||
self.chdir() # always chdir
|
||||
assert hasattr(self, '_olddir')
|
||||
self.request.addfinalizer(self.finalize)
|
||||
|
||||
def __repr__(self):
|
||||
return "<TmpTestdir %r>" % (self.tmpdir,)
|
||||
|
||||
def Config(self, comregistry=None, topdir=None):
|
||||
if topdir is None:
|
||||
topdir = self.tmpdir.dirpath()
|
||||
return pytestConfig(comregistry, topdir=topdir)
|
||||
|
||||
def finalize(self):
|
||||
for p in self._syspathremove:
|
||||
py.std.sys.path.remove(p)
|
||||
if hasattr(self, '_olddir'):
|
||||
self._olddir.chdir()
|
||||
|
||||
def getreportrecorder(self, obj):
|
||||
if isinstance(obj, py._com.Registry):
|
||||
registry = obj
|
||||
elif hasattr(obj, 'comregistry'):
|
||||
registry = obj.comregistry
|
||||
elif hasattr(obj, 'pluginmanager'):
|
||||
registry = obj.pluginmanager.comregistry
|
||||
elif hasattr(obj, 'config'):
|
||||
registry = obj.config.pluginmanager.comregistry
|
||||
else:
|
||||
raise ValueError("obj %r provides no comregistry" %(obj,))
|
||||
assert isinstance(registry, py._com.Registry)
|
||||
reprec = ReportRecorder(registry)
|
||||
reprec.hookrecorder = self._pytest.gethookrecorder(hookspec, registry)
|
||||
reprec.hook = reprec.hookrecorder.hook
|
||||
return reprec
|
||||
|
||||
def chdir(self):
|
||||
old = self.tmpdir.chdir()
|
||||
if not hasattr(self, '_olddir'):
|
||||
self._olddir = old
|
||||
|
||||
def _makefile(self, ext, args, kwargs):
|
||||
items = kwargs.items()
|
||||
if args:
|
||||
source = "\n".join(map(str, args))
|
||||
basename = self.request.function.__name__
|
||||
items.insert(0, (basename, source))
|
||||
ret = None
|
||||
for name, value in items:
|
||||
p = self.tmpdir.join(name).new(ext=ext)
|
||||
source = py.code.Source(value)
|
||||
p.write(str(py.code.Source(value)).lstrip())
|
||||
if ret is None:
|
||||
ret = p
|
||||
return ret
|
||||
|
||||
|
||||
def makefile(self, ext, *args, **kwargs):
|
||||
return self._makefile(ext, args, kwargs)
|
||||
|
||||
def makeconftest(self, source):
|
||||
return self.makepyfile(conftest=source)
|
||||
|
||||
def makepyfile(self, *args, **kwargs):
|
||||
return self._makefile('.py', args, kwargs)
|
||||
|
||||
def maketxtfile(self, *args, **kwargs):
|
||||
return self._makefile('.txt', args, kwargs)
|
||||
|
||||
def syspathinsert(self, path=None):
|
||||
if path is None:
|
||||
path = self.tmpdir
|
||||
py.std.sys.path.insert(0, str(path))
|
||||
self._syspathremove.append(str(path))
|
||||
|
||||
def mkdir(self, name):
|
||||
return self.tmpdir.mkdir(name)
|
||||
|
||||
def genitems(self, colitems):
|
||||
return list(self.session.genitems(colitems))
|
||||
|
||||
def inline_genitems(self, *args):
|
||||
#config = self.parseconfig(*args)
|
||||
config = self.parseconfig(*args)
|
||||
session = config.initsession()
|
||||
rec = self.getreportrecorder(config)
|
||||
colitems = [config.getfsnode(arg) for arg in config.args]
|
||||
items = list(session.genitems(colitems))
|
||||
return items, rec
|
||||
|
||||
def runitem(self, source):
|
||||
# used from runner functional tests
|
||||
item = self.getitem(source)
|
||||
# the test class where we are called from wants to provide the runner
|
||||
testclassinstance = self.request.function.im_self
|
||||
runner = testclassinstance.getrunner()
|
||||
return runner(item)
|
||||
|
||||
def inline_runsource(self, source, *cmdlineargs):
|
||||
p = self.makepyfile(source)
|
||||
l = list(cmdlineargs) + [p]
|
||||
return self.inline_run(*l)
|
||||
|
||||
def inline_runsource1(self, *args):
|
||||
args = list(args)
|
||||
source = args.pop()
|
||||
p = self.makepyfile(source)
|
||||
l = list(args) + [p]
|
||||
reprec = self.inline_run(*l)
|
||||
reports = reprec.getreports("pytest_runtest_logreport")
|
||||
assert len(reports) == 1, reports
|
||||
return reports[0]
|
||||
|
||||
def inline_run(self, *args):
|
||||
config = self.parseconfig(*args)
|
||||
config.pluginmanager.do_configure(config)
|
||||
session = config.initsession()
|
||||
reprec = self.getreportrecorder(config)
|
||||
session.main()
|
||||
config.pluginmanager.do_unconfigure(config)
|
||||
return reprec
|
||||
|
||||
def config_preparse(self):
|
||||
config = self.Config()
|
||||
for plugin in self.plugins:
|
||||
if isinstance(plugin, str):
|
||||
config.pluginmanager.import_plugin(plugin)
|
||||
else:
|
||||
if isinstance(plugin, dict):
|
||||
plugin = PseudoPlugin(plugin)
|
||||
if not config.pluginmanager.isregistered(plugin):
|
||||
config.pluginmanager.register(plugin)
|
||||
#print "config.pluginmanager.impname2plugin", config.pluginmanager.impname2plugin
|
||||
return config
|
||||
|
||||
def parseconfig(self, *args):
|
||||
if not args:
|
||||
args = (self.tmpdir,)
|
||||
config = self.config_preparse()
|
||||
args = list(args) + ["--basetemp=%s" % self.tmpdir.dirpath('basetemp')]
|
||||
config.parse(args)
|
||||
return config
|
||||
|
||||
def parseconfigure(self, *args):
|
||||
config = self.parseconfig(*args)
|
||||
config.pluginmanager.do_configure(config)
|
||||
return config
|
||||
|
||||
def getitem(self, source, funcname="test_func"):
|
||||
modcol = self.getmodulecol(source)
|
||||
moditems = modcol.collect()
|
||||
for item in modcol.collect():
|
||||
if item.name == funcname:
|
||||
return item
|
||||
else:
|
||||
assert 0, "%r item not found in module:\n%s" %(funcname, source)
|
||||
|
||||
def getitems(self, source):
|
||||
modcol = self.getmodulecol(source)
|
||||
return list(modcol.config.initsession().genitems([modcol]))
|
||||
#assert item is not None, "%r item not found in module:\n%s" %(funcname, source)
|
||||
#return item
|
||||
|
||||
def getfscol(self, path, configargs=()):
|
||||
self.config = self.parseconfig(path, *configargs)
|
||||
self.session = self.config.initsession()
|
||||
return self.config.getfsnode(path)
|
||||
|
||||
def getmodulecol(self, source, configargs=(), withinit=False):
|
||||
kw = {self.request.function.__name__: py.code.Source(source).strip()}
|
||||
path = self.makepyfile(**kw)
|
||||
if withinit:
|
||||
self.makepyfile(__init__ = "#")
|
||||
self.config = self.parseconfig(path, *configargs)
|
||||
self.session = self.config.initsession()
|
||||
#self.config.pluginmanager.do_configure(config=self.config)
|
||||
# XXX
|
||||
self.config.pluginmanager.import_plugin("runner")
|
||||
plugin = self.config.pluginmanager.getplugin("runner")
|
||||
plugin.pytest_configure(config=self.config)
|
||||
|
||||
return self.config.getfsnode(path)
|
||||
|
||||
def prepare(self):
|
||||
p = self.tmpdir.join("conftest.py")
|
||||
if not p.check():
|
||||
plugins = [x for x in self.plugins if isinstance(x, str)]
|
||||
if not plugins:
|
||||
return
|
||||
p.write("import py ; pytest_plugins = %r" % plugins)
|
||||
else:
|
||||
if self.plugins:
|
||||
print "warning, ignoring reusing existing con", p
|
||||
|
||||
def popen(self, cmdargs, stdout, stderr, **kw):
|
||||
if not hasattr(py.std, 'subprocess'):
|
||||
py.test.skip("no subprocess module")
|
||||
env = os.environ.copy()
|
||||
env['PYTHONPATH'] = ":".join(filter(None, [
|
||||
str(os.getcwd()), env.get('PYTHONPATH', '')]))
|
||||
kw['env'] = env
|
||||
#print "env", env
|
||||
return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
|
||||
|
||||
def run(self, *cmdargs):
|
||||
self.prepare()
|
||||
old = self.tmpdir.chdir()
|
||||
#print "chdir", self.tmpdir
|
||||
try:
|
||||
return self._run(*cmdargs)
|
||||
finally:
|
||||
old.chdir()
|
||||
|
||||
def _run(self, *cmdargs):
|
||||
cmdargs = map(str, cmdargs)
|
||||
p1 = py.path.local("stdout")
|
||||
p2 = py.path.local("stderr")
|
||||
print "running", cmdargs, "curdir=", py.path.local()
|
||||
f1 = p1.open("w")
|
||||
f2 = p2.open("w")
|
||||
popen = self.popen(cmdargs, stdout=f1, stderr=f2,
|
||||
close_fds=(sys.platform != "win32"))
|
||||
ret = popen.wait()
|
||||
f1.close()
|
||||
f2.close()
|
||||
out, err = p1.readlines(cr=0), p2.readlines(cr=0)
|
||||
if err:
|
||||
for line in err:
|
||||
print >>py.std.sys.stderr, line
|
||||
if out:
|
||||
for line in out:
|
||||
print >>py.std.sys.stdout, line
|
||||
return RunResult(ret, out, err)
|
||||
|
||||
def runpybin(self, scriptname, *args):
|
||||
fullargs = self._getpybinargs(scriptname) + args
|
||||
return self.run(*fullargs)
|
||||
|
||||
def _getpybinargs(self, scriptname):
|
||||
bindir = py.path.local(py.__file__).dirpath("bin")
|
||||
script = bindir.join(scriptname)
|
||||
assert script.check()
|
||||
return py.std.sys.executable, script
|
||||
|
||||
def runpytest(self, *args):
|
||||
p = py.path.local.make_numbered_dir(prefix="runpytest-",
|
||||
keep=None, rootdir=self.tmpdir)
|
||||
args = ('--basetemp=%s' % p, ) + args
|
||||
return self.runpybin("py.test", *args)
|
||||
|
||||
def spawn_pytest(self, string, expect_timeout=10.0):
|
||||
pexpect = py.test.importorskip("pexpect", "2.3")
|
||||
basetemp = self.tmpdir.mkdir("pexpect")
|
||||
invoke = "%s %s" % self._getpybinargs("py.test")
|
||||
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
|
||||
child = pexpect.spawn(cmd, logfile=basetemp.join("spawn.out").open("w"))
|
||||
child.timeout = expect_timeout
|
||||
return child
|
||||
|
||||
class PseudoPlugin:
|
||||
def __init__(self, vars):
|
||||
self.__dict__.update(vars)
|
||||
|
||||
class ReportRecorder(object):
|
||||
def __init__(self, comregistry):
|
||||
self.comregistry = comregistry
|
||||
comregistry.register(self)
|
||||
|
||||
def getcall(self, name):
|
||||
return self.hookrecorder.getcall(name)
|
||||
|
||||
def popcall(self, name):
|
||||
return self.hookrecorder.popcall(name)
|
||||
|
||||
def getcalls(self, names):
|
||||
""" return list of ParsedCall instances matching the given eventname. """
|
||||
return self.hookrecorder.getcalls(names)
|
||||
|
||||
# functionality for test reports
|
||||
|
||||
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
|
||||
return [x.rep for x in self.getcalls(names)]
|
||||
|
||||
def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport"):
|
||||
""" return a testreport whose dotted import path matches """
|
||||
l = []
|
||||
for rep in self.getreports(names=names):
|
||||
colitem = rep.getnode()
|
||||
if not inamepart or inamepart in colitem.listnames():
|
||||
l.append(rep)
|
||||
if not l:
|
||||
raise ValueError("could not find test report matching %r: no test reports at all!" %
|
||||
(inamepart,))
|
||||
if len(l) > 1:
|
||||
raise ValueError("found more than one testreport matching %r: %s" %(
|
||||
inamepart, l))
|
||||
return l[0]
|
||||
|
||||
def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'):
|
||||
return [rep for rep in self.getreports(names) if rep.failed]
|
||||
|
||||
def getfailedcollections(self):
|
||||
return self.getfailures('pytest_collectreport')
|
||||
|
||||
def listoutcomes(self):
|
||||
passed = []
|
||||
skipped = []
|
||||
failed = []
|
||||
for rep in self.getreports("pytest_runtest_logreport"):
|
||||
if rep.passed:
|
||||
if rep.when == "call":
|
||||
passed.append(rep)
|
||||
elif rep.skipped:
|
||||
skipped.append(rep)
|
||||
elif rep.failed:
|
||||
failed.append(rep)
|
||||
return passed, skipped, failed
|
||||
|
||||
def countoutcomes(self):
|
||||
return map(len, self.listoutcomes())
|
||||
|
||||
def assertoutcome(self, passed=0, skipped=0, failed=0):
|
||||
realpassed, realskipped, realfailed = self.listoutcomes()
|
||||
assert passed == len(realpassed)
|
||||
assert skipped == len(realskipped)
|
||||
assert failed == len(realfailed)
|
||||
|
||||
def clear(self):
|
||||
self.hookrecorder.calls[:] = []
|
||||
|
||||
def unregister(self):
|
||||
self.comregistry.unregister(self)
|
||||
self.hookrecorder.finish_recording()
|
||||
|
||||
def test_reportrecorder(testdir):
|
||||
registry = py._com.Registry()
|
||||
recorder = testdir.getreportrecorder(registry)
|
||||
assert not recorder.getfailures()
|
||||
item = testdir.getitem("def test_func(): pass")
|
||||
class rep:
|
||||
excinfo = None
|
||||
passed = False
|
||||
failed = True
|
||||
skipped = False
|
||||
when = "call"
|
||||
|
||||
recorder.hook.pytest_runtest_logreport(rep=rep)
|
||||
failures = recorder.getfailures()
|
||||
assert failures == [rep]
|
||||
failures = recorder.getfailures()
|
||||
assert failures == [rep]
|
||||
|
||||
class rep:
|
||||
excinfo = None
|
||||
passed = False
|
||||
failed = False
|
||||
skipped = True
|
||||
when = "call"
|
||||
rep.passed = False
|
||||
rep.skipped = True
|
||||
recorder.hook.pytest_runtest_logreport(rep=rep)
|
||||
|
||||
modcol = testdir.getmodulecol("")
|
||||
rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
|
||||
rep.passed = False
|
||||
rep.failed = True
|
||||
rep.skipped = False
|
||||
recorder.hook.pytest_collectreport(rep=rep)
|
||||
|
||||
passed, skipped, failed = recorder.listoutcomes()
|
||||
assert not passed and skipped and failed
|
||||
|
||||
numpassed, numskipped, numfailed = recorder.countoutcomes()
|
||||
assert numpassed == 0
|
||||
assert numskipped == 1
|
||||
assert numfailed == 1
|
||||
assert len(recorder.getfailedcollections()) == 1
|
||||
|
||||
recorder.unregister()
|
||||
recorder.clear()
|
||||
recorder.hook.pytest_runtest_logreport(rep=rep)
|
||||
py.test.raises(ValueError, "recorder.getfailures()")
|
||||
|
||||
class LineComp:
|
||||
def __init__(self):
|
||||
self.stringio = py.std.StringIO.StringIO()
|
||||
|
||||
def assert_contains_lines(self, lines2):
|
||||
""" assert that lines2 are contained (linearly) in lines1.
|
||||
return a list of extralines found.
|
||||
"""
|
||||
__tracebackhide__ = True
|
||||
val = self.stringio.getvalue()
|
||||
self.stringio.truncate(0) # remove what we got
|
||||
lines1 = val.split("\n")
|
||||
return LineMatcher(lines1).fnmatch_lines(lines2)
|
||||
|
||||
class LineMatcher:
|
||||
def __init__(self, lines):
|
||||
self.lines = lines
|
||||
|
||||
def str(self):
|
||||
return "\n".join(self.lines)
|
||||
|
||||
def fnmatch_lines(self, lines2):
|
||||
if isinstance(lines2, str):
|
||||
lines2 = py.code.Source(lines2)
|
||||
if isinstance(lines2, py.code.Source):
|
||||
lines2 = lines2.strip().lines
|
||||
|
||||
from fnmatch import fnmatch
|
||||
__tracebackhide__ = True
|
||||
lines1 = self.lines[:]
|
||||
nextline = None
|
||||
extralines = []
|
||||
for line in lines2:
|
||||
nomatchprinted = False
|
||||
while lines1:
|
||||
nextline = lines1.pop(0)
|
||||
if line == nextline:
|
||||
print "exact match:", repr(line)
|
||||
break
|
||||
elif fnmatch(nextline, line):
|
||||
print "fnmatch:", repr(line)
|
||||
print " with:", repr(nextline)
|
||||
break
|
||||
else:
|
||||
if not nomatchprinted:
|
||||
print "nomatch:", repr(line)
|
||||
nomatchprinted = True
|
||||
print " and:", repr(nextline)
|
||||
extralines.append(nextline)
|
||||
else:
|
||||
if line != nextline:
|
||||
#__tracebackhide__ = True
|
||||
raise AssertionError("expected line not found: %r" % line)
|
||||
extralines.extend(lines1)
|
||||
return extralines
|
||||
|
||||
def test_parseconfig(testdir):
|
||||
config1 = testdir.parseconfig()
|
||||
config2 = testdir.parseconfig()
|
||||
assert config2 != config1
|
||||
assert config1 != py.test.config
|
||||
|
||||
def test_testdir_runs_with_plugin(testdir):
|
||||
testdir.makepyfile("""
|
||||
pytest_plugins = "pytest_pytester"
|
||||
def test_hello(testdir):
|
||||
assert 1
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
assert result.stdout.fnmatch_lines([
|
||||
"*1 passed*"
|
||||
])
|
||||
|
||||
.. _`pytest_pytester.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_pytester.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,211 @@
|
|||
|
||||
pytest_recwarn plugin
|
||||
=====================
|
||||
|
||||
helpers for asserting deprecation and other warnings.
|
||||
|
||||
**recwarn**: function argument where one can call recwarn.pop() to get
|
||||
the last warning that would have been shown.
|
||||
|
||||
**py.test.deprecated_call(func, *args, **kwargs)**: assert that the given function call triggers a deprecation warning.
|
||||
.. _`recwarn funcarg`:
|
||||
|
||||
|
||||
the 'recwarn' test function argument
|
||||
------------------------------------
|
||||
|
||||
check that warnings have been raised.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_recwarn.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_recwarn.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_recwarn.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
helpers for asserting deprecation and other warnings.
|
||||
|
||||
**recwarn**: function argument where one can call recwarn.pop() to get
|
||||
the last warning that would have been shown.
|
||||
|
||||
**py.test.deprecated_call(func, *args, **kwargs)**: assert that the given function call triggers a deprecation warning.
|
||||
"""
|
||||
|
||||
import py
|
||||
import os
|
||||
|
||||
def pytest_funcarg__recwarn(request):
|
||||
""" check that warnings have been raised. """
|
||||
warnings = WarningsRecorder()
|
||||
request.addfinalizer(warnings.finalize)
|
||||
return warnings
|
||||
|
||||
def pytest_namespace():
|
||||
return {'deprecated_call': deprecated_call}
|
||||
|
||||
def deprecated_call(func, *args, **kwargs):
|
||||
""" assert that calling func(*args, **kwargs)
|
||||
triggers a DeprecationWarning.
|
||||
"""
|
||||
warningmodule = py.std.warnings
|
||||
l = []
|
||||
oldwarn_explicit = getattr(warningmodule, 'warn_explicit')
|
||||
def warn_explicit(*args, **kwargs):
|
||||
l.append(args)
|
||||
oldwarn_explicit(*args, **kwargs)
|
||||
oldwarn = getattr(warningmodule, 'warn')
|
||||
def warn(*args, **kwargs):
|
||||
l.append(args)
|
||||
oldwarn(*args, **kwargs)
|
||||
|
||||
warningmodule.warn_explicit = warn_explicit
|
||||
warningmodule.warn = warn
|
||||
try:
|
||||
ret = func(*args, **kwargs)
|
||||
finally:
|
||||
warningmodule.warn_explicit = warn_explicit
|
||||
warningmodule.warn = warn
|
||||
if not l:
|
||||
#print warningmodule
|
||||
raise AssertionError("%r did not produce DeprecationWarning" %(func,))
|
||||
return ret
|
||||
|
||||
|
||||
class RecordedWarning:
|
||||
def __init__(self, message, category, filename, lineno, line):
|
||||
self.message = message
|
||||
self.category = category
|
||||
self.filename = filename
|
||||
self.lineno = lineno
|
||||
self.line = line
|
||||
|
||||
class WarningsRecorder:
|
||||
def __init__(self):
|
||||
warningmodule = py.std.warnings
|
||||
self.list = []
|
||||
def showwarning(message, category, filename, lineno, line=0):
|
||||
self.list.append(RecordedWarning(
|
||||
message, category, filename, lineno, line))
|
||||
try:
|
||||
self.old_showwarning(message, category,
|
||||
filename, lineno, line=line)
|
||||
except TypeError:
|
||||
# < python2.6
|
||||
self.old_showwarning(message, category, filename, lineno)
|
||||
self.old_showwarning = warningmodule.showwarning
|
||||
warningmodule.showwarning = showwarning
|
||||
|
||||
def pop(self, cls=Warning):
|
||||
""" pop the first recorded warning, raise exception if not exists."""
|
||||
for i, w in py.builtin.enumerate(self.list):
|
||||
if issubclass(w.category, cls):
|
||||
return self.list.pop(i)
|
||||
__tracebackhide__ = True
|
||||
assert 0, "%r not found in %r" %(cls, self.list)
|
||||
|
||||
#def resetregistry(self):
|
||||
# import warnings
|
||||
# warnings.onceregistry.clear()
|
||||
# warnings.__warningregistry__.clear()
|
||||
|
||||
def clear(self):
|
||||
self.list[:] = []
|
||||
|
||||
def finalize(self):
|
||||
py.std.warnings.showwarning = self.old_showwarning
|
||||
|
||||
def test_WarningRecorder():
|
||||
showwarning = py.std.warnings.showwarning
|
||||
rec = WarningsRecorder()
|
||||
assert py.std.warnings.showwarning != showwarning
|
||||
assert not rec.list
|
||||
py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
|
||||
assert len(rec.list) == 1
|
||||
py.std.warnings.warn(DeprecationWarning("hello"))
|
||||
assert len(rec.list) == 2
|
||||
warn = rec.pop()
|
||||
assert str(warn.message) == "hello"
|
||||
l = rec.list
|
||||
rec.clear()
|
||||
assert len(rec.list) == 0
|
||||
assert l is rec.list
|
||||
py.test.raises(AssertionError, "rec.pop()")
|
||||
rec.finalize()
|
||||
assert showwarning == py.std.warnings.showwarning
|
||||
|
||||
def test_recwarn_functional(testdir):
|
||||
reprec = testdir.inline_runsource("""
|
||||
pytest_plugins = 'pytest_recwarn',
|
||||
import warnings
|
||||
oldwarn = warnings.showwarning
|
||||
def test_method(recwarn):
|
||||
assert warnings.showwarning != oldwarn
|
||||
warnings.warn("hello")
|
||||
warn = recwarn.pop()
|
||||
assert isinstance(warn.message, UserWarning)
|
||||
def test_finalized():
|
||||
assert warnings.showwarning == oldwarn
|
||||
""")
|
||||
res = reprec.countoutcomes()
|
||||
assert tuple(res) == (2, 0, 0), res
|
||||
|
||||
#
|
||||
# ============ test py.test.deprecated_call() ==============
|
||||
#
|
||||
|
||||
def dep(i):
|
||||
if i == 0:
|
||||
py.std.warnings.warn("is deprecated", DeprecationWarning)
|
||||
return 42
|
||||
|
||||
reg = {}
|
||||
def dep_explicit(i):
|
||||
if i == 0:
|
||||
py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
|
||||
filename="hello", lineno=3)
|
||||
|
||||
def test_deprecated_call_raises():
|
||||
excinfo = py.test.raises(AssertionError,
|
||||
"py.test.deprecated_call(dep, 3)")
|
||||
assert str(excinfo).find("did not produce") != -1
|
||||
|
||||
def test_deprecated_call():
|
||||
py.test.deprecated_call(dep, 0)
|
||||
|
||||
def test_deprecated_call_ret():
|
||||
ret = py.test.deprecated_call(dep, 0)
|
||||
assert ret == 42
|
||||
|
||||
def test_deprecated_call_preserves():
|
||||
r = py.std.warnings.onceregistry.copy()
|
||||
f = py.std.warnings.filters[:]
|
||||
test_deprecated_call_raises()
|
||||
test_deprecated_call()
|
||||
assert r == py.std.warnings.onceregistry
|
||||
assert f == py.std.warnings.filters
|
||||
|
||||
def test_deprecated_explicit_call_raises():
|
||||
py.test.raises(AssertionError,
|
||||
"py.test.deprecated_call(dep_explicit, 3)")
|
||||
|
||||
def test_deprecated_explicit_call():
|
||||
py.test.deprecated_call(dep_explicit, 0)
|
||||
py.test.deprecated_call(dep_explicit, 0)
|
||||
|
||||
.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_recwarn.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,531 @@
|
|||
|
||||
pytest_restdoc plugin
|
||||
=====================
|
||||
|
||||
perform ReST syntax, local and remote reference tests on .rst/.txt files.
|
||||
|
||||
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``-R, --urlcheck``
|
||||
urlopen() remote links found in ReST text files.
|
||||
``--urltimeout=secs``
|
||||
timeout in seconds for remote urlchecks
|
||||
``--forcegen``
|
||||
force generation of html files.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_restdoc.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_restdoc.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_restdoc.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
perform ReST syntax, local and remote reference tests on .rst/.txt files.
|
||||
"""
|
||||
import py
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.addgroup("ReST", "ReST documentation check options")
|
||||
group.addoption('-R', '--urlcheck',
|
||||
action="store_true", dest="urlcheck", default=False,
|
||||
help="urlopen() remote links found in ReST text files.")
|
||||
group.addoption('--urltimeout', action="store", metavar="secs",
|
||||
type="int", dest="urlcheck_timeout", default=5,
|
||||
help="timeout in seconds for remote urlchecks")
|
||||
group.addoption('--forcegen',
|
||||
action="store_true", dest="forcegen", default=False,
|
||||
help="force generation of html files.")
|
||||
|
||||
def pytest_collect_file(path, parent):
|
||||
if path.ext in (".txt", ".rst"):
|
||||
project = getproject(path)
|
||||
if project is not None:
|
||||
return ReSTFile(path, parent=parent, project=project)
|
||||
|
||||
def getproject(path):
|
||||
for parent in path.parts(reverse=True):
|
||||
confrest = parent.join("confrest.py")
|
||||
if confrest.check():
|
||||
Project = confrest.pyimport().Project
|
||||
return Project(parent)
|
||||
|
||||
class ReSTFile(py.test.collect.File):
|
||||
def __init__(self, fspath, parent, project=None):
|
||||
super(ReSTFile, self).__init__(fspath=fspath, parent=parent)
|
||||
if project is None:
|
||||
project = getproject(fspath)
|
||||
assert project is not None
|
||||
self.project = project
|
||||
|
||||
def collect(self):
|
||||
return [
|
||||
ReSTSyntaxTest(self.project, "ReSTSyntax", parent=self),
|
||||
LinkCheckerMaker("checklinks", parent=self),
|
||||
DoctestText("doctest", parent=self),
|
||||
]
|
||||
|
||||
def deindent(s, sep='\n'):
|
||||
leastspaces = -1
|
||||
lines = s.split(sep)
|
||||
for line in lines:
|
||||
if not line.strip():
|
||||
continue
|
||||
spaces = len(line) - len(line.lstrip())
|
||||
if leastspaces == -1 or spaces < leastspaces:
|
||||
leastspaces = spaces
|
||||
if leastspaces == -1:
|
||||
return s
|
||||
for i, line in py.builtin.enumerate(lines):
|
||||
if not line.strip():
|
||||
lines[i] = ''
|
||||
else:
|
||||
lines[i] = line[leastspaces:]
|
||||
return sep.join(lines)
|
||||
|
||||
class ReSTSyntaxTest(py.test.collect.Item):
|
||||
def __init__(self, project, *args, **kwargs):
|
||||
super(ReSTSyntaxTest, self).__init__(*args, **kwargs)
|
||||
self.project = project
|
||||
|
||||
def reportinfo(self):
|
||||
return self.fspath, None, "syntax check"
|
||||
|
||||
def runtest(self):
|
||||
self.restcheck(py.path.svnwc(self.fspath))
|
||||
|
||||
def restcheck(self, path):
|
||||
py.test.importorskip("docutils")
|
||||
self.register_linkrole()
|
||||
from docutils.utils import SystemMessage
|
||||
try:
|
||||
self._checkskip(path, self.project.get_htmloutputpath(path))
|
||||
self.project.process(path)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except SystemMessage:
|
||||
# we assume docutils printed info on stdout
|
||||
py.test.fail("docutils processing failed, see captured stderr")
|
||||
|
||||
def register_linkrole(self):
|
||||
from py.__.rest import directive
|
||||
directive.register_linkrole('api', self.resolve_linkrole)
|
||||
directive.register_linkrole('source', self.resolve_linkrole)
|
||||
|
||||
# XXX fake sphinx' "toctree" and refs
|
||||
directive.register_linkrole('ref', self.resolve_linkrole)
|
||||
|
||||
from docutils.parsers.rst import directives
|
||||
def toctree_directive(name, arguments, options, content, lineno,
|
||||
content_offset, block_text, state, state_machine):
|
||||
return []
|
||||
toctree_directive.content = 1
|
||||
toctree_directive.options = {'maxdepth': int, 'glob': directives.flag,
|
||||
'hidden': directives.flag}
|
||||
directives.register_directive('toctree', toctree_directive)
|
||||
self.register_pygments()
|
||||
|
||||
def register_pygments(self):
|
||||
# taken from pygments-main/external/rst-directive.py
|
||||
try:
|
||||
from pygments.formatters import HtmlFormatter
|
||||
except ImportError:
|
||||
def pygments_directive(name, arguments, options, content, lineno,
|
||||
content_offset, block_text, state, state_machine):
|
||||
return []
|
||||
else:
|
||||
# The default formatter
|
||||
DEFAULT = HtmlFormatter(noclasses=True)
|
||||
# Add name -> formatter pairs for every variant you want to use
|
||||
VARIANTS = {
|
||||
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
|
||||
}
|
||||
|
||||
from docutils import nodes
|
||||
from docutils.parsers.rst import directives
|
||||
|
||||
from pygments import highlight
|
||||
from pygments.lexers import get_lexer_by_name, TextLexer
|
||||
|
||||
def pygments_directive(name, arguments, options, content, lineno,
|
||||
content_offset, block_text, state, state_machine):
|
||||
try:
|
||||
lexer = get_lexer_by_name(arguments[0])
|
||||
except ValueError:
|
||||
# no lexer found - use the text one instead of an exception
|
||||
lexer = TextLexer()
|
||||
# take an arbitrary option if more than one is given
|
||||
formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
|
||||
parsed = highlight(u'\n'.join(content), lexer, formatter)
|
||||
return [nodes.raw('', parsed, format='html')]
|
||||
|
||||
pygments_directive.arguments = (1, 0, 1)
|
||||
pygments_directive.content = 1
|
||||
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
|
||||
|
||||
directives.register_directive('sourcecode', pygments_directive)
|
||||
|
||||
def resolve_linkrole(self, name, text, check=True):
|
||||
apigen_relpath = self.project.apigen_relpath
|
||||
|
||||
if name == 'api':
|
||||
if text == 'py':
|
||||
return ('py', apigen_relpath + 'api/index.html')
|
||||
else:
|
||||
assert text.startswith('py.'), (
|
||||
'api link "%s" does not point to the py package') % (text,)
|
||||
dotted_name = text
|
||||
if dotted_name.find('(') > -1:
|
||||
dotted_name = dotted_name[:text.find('(')]
|
||||
# remove pkg root
|
||||
path = dotted_name.split('.')[1:]
|
||||
dotted_name = '.'.join(path)
|
||||
obj = py
|
||||
if check:
|
||||
for chunk in path:
|
||||
try:
|
||||
obj = getattr(obj, chunk)
|
||||
except AttributeError:
|
||||
raise AssertionError(
|
||||
'problem with linkrole :api:`%s`: can not resolve '
|
||||
'dotted name %s' % (text, dotted_name,))
|
||||
return (text, apigen_relpath + 'api/%s.html' % (dotted_name,))
|
||||
elif name == 'source':
|
||||
assert text.startswith('py/'), ('source link "%s" does not point '
|
||||
'to the py package') % (text,)
|
||||
relpath = '/'.join(text.split('/')[1:])
|
||||
if check:
|
||||
pkgroot = py.__pkg__.getpath()
|
||||
abspath = pkgroot.join(relpath)
|
||||
assert pkgroot.join(relpath).check(), (
|
||||
'problem with linkrole :source:`%s`: '
|
||||
'path %s does not exist' % (text, relpath))
|
||||
if relpath.endswith('/') or not relpath:
|
||||
relpath += 'index.html'
|
||||
else:
|
||||
relpath += '.html'
|
||||
return (text, apigen_relpath + 'source/%s' % (relpath,))
|
||||
elif name == 'ref':
|
||||
return ("", "")
|
||||
|
||||
def _checkskip(self, lpath, htmlpath=None):
|
||||
if not self.config.getvalue("forcegen"):
|
||||
lpath = py.path.local(lpath)
|
||||
if htmlpath is not None:
|
||||
htmlpath = py.path.local(htmlpath)
|
||||
if lpath.ext == '.txt':
|
||||
htmlpath = htmlpath or lpath.new(ext='.html')
|
||||
if htmlpath.check(file=1) and htmlpath.mtime() >= lpath.mtime():
|
||||
py.test.skip("html file is up to date, use --forcegen to regenerate")
|
||||
#return [] # no need to rebuild
|
||||
|
||||
class DoctestText(py.test.collect.Item):
|
||||
def reportinfo(self):
|
||||
return self.fspath, None, "doctest"
|
||||
|
||||
def runtest(self):
|
||||
content = self._normalize_linesep()
|
||||
newcontent = self.config.hook.pytest_doctest_prepare_content(content=content)
|
||||
if newcontent is not None:
|
||||
content = newcontent
|
||||
s = content
|
||||
l = []
|
||||
prefix = '.. >>> '
|
||||
mod = py.std.types.ModuleType(self.fspath.purebasename)
|
||||
skipchunk = False
|
||||
for line in deindent(s).split('\n'):
|
||||
stripped = line.strip()
|
||||
if skipchunk and line.startswith(skipchunk):
|
||||
print "skipping", line
|
||||
continue
|
||||
skipchunk = False
|
||||
if stripped.startswith(prefix):
|
||||
try:
|
||||
exec py.code.Source(stripped[len(prefix):]).compile() in \
|
||||
mod.__dict__
|
||||
except ValueError, e:
|
||||
if e.args and e.args[0] == "skipchunk":
|
||||
skipchunk = " " * (len(line) - len(line.lstrip()))
|
||||
else:
|
||||
raise
|
||||
else:
|
||||
l.append(line)
|
||||
docstring = "\n".join(l)
|
||||
mod.__doc__ = docstring
|
||||
failed, tot = py.compat.doctest.testmod(mod, verbose=1)
|
||||
if failed:
|
||||
py.test.fail("doctest %s: %s failed out of %s" %(
|
||||
self.fspath, failed, tot))
|
||||
|
||||
def _normalize_linesep(self):
|
||||
# XXX quite nasty... but it works (fixes win32 issues)
|
||||
s = self.fspath.read()
|
||||
linesep = '\n'
|
||||
if '\r' in s:
|
||||
if '\n' not in s:
|
||||
linesep = '\r'
|
||||
else:
|
||||
linesep = '\r\n'
|
||||
s = s.replace(linesep, '\n')
|
||||
return s
|
||||
|
||||
class LinkCheckerMaker(py.test.collect.Collector):
|
||||
def collect(self):
|
||||
return list(self.genlinkchecks())
|
||||
|
||||
def genlinkchecks(self):
|
||||
path = self.fspath
|
||||
# generating functions + args as single tests
|
||||
timeout = self.config.getvalue("urlcheck_timeout")
|
||||
for lineno, line in py.builtin.enumerate(path.readlines()):
|
||||
line = line.strip()
|
||||
if line.startswith('.. _'):
|
||||
if line.startswith('.. _`'):
|
||||
delim = '`:'
|
||||
else:
|
||||
delim = ':'
|
||||
l = line.split(delim, 1)
|
||||
if len(l) != 2:
|
||||
continue
|
||||
tryfn = l[1].strip()
|
||||
name = "%s:%d" %(tryfn, lineno)
|
||||
if tryfn.startswith('http:') or tryfn.startswith('https'):
|
||||
if self.config.getvalue("urlcheck"):
|
||||
yield CheckLink(name, parent=self,
|
||||
args=(tryfn, path, lineno, timeout), checkfunc=urlcheck)
|
||||
elif tryfn.startswith('webcal:'):
|
||||
continue
|
||||
else:
|
||||
i = tryfn.find('#')
|
||||
if i != -1:
|
||||
checkfn = tryfn[:i]
|
||||
else:
|
||||
checkfn = tryfn
|
||||
if checkfn.strip() and (1 or checkfn.endswith('.html')):
|
||||
yield CheckLink(name, parent=self,
|
||||
args=(tryfn, path, lineno), checkfunc=localrefcheck)
|
||||
|
||||
class CheckLink(py.test.collect.Item):
|
||||
def __init__(self, name, parent, args, checkfunc):
|
||||
super(CheckLink, self).__init__(name, parent)
|
||||
self.args = args
|
||||
self.checkfunc = checkfunc
|
||||
|
||||
def runtest(self):
|
||||
return self.checkfunc(*self.args)
|
||||
|
||||
def reportinfo(self, basedir=None):
|
||||
return (self.fspath, self.args[2], "checklink: %s" % self.args[0])
|
||||
|
||||
def urlcheck(tryfn, path, lineno, TIMEOUT_URLOPEN):
|
||||
old = py.std.socket.getdefaulttimeout()
|
||||
py.std.socket.setdefaulttimeout(TIMEOUT_URLOPEN)
|
||||
try:
|
||||
try:
|
||||
print "trying remote", tryfn
|
||||
py.std.urllib2.urlopen(tryfn)
|
||||
finally:
|
||||
py.std.socket.setdefaulttimeout(old)
|
||||
except (py.std.urllib2.URLError, py.std.urllib2.HTTPError), e:
|
||||
if getattr(e, 'code', None) in (401, 403): # authorization required, forbidden
|
||||
py.test.skip("%s: %s" %(tryfn, str(e)))
|
||||
else:
|
||||
py.test.fail("remote reference error %r in %s:%d\n%s" %(
|
||||
tryfn, path.basename, lineno+1, e))
|
||||
|
||||
def localrefcheck(tryfn, path, lineno):
|
||||
# assume it should be a file
|
||||
i = tryfn.find('#')
|
||||
if tryfn.startswith('javascript:'):
|
||||
return # don't check JS refs
|
||||
if i != -1:
|
||||
anchor = tryfn[i+1:]
|
||||
tryfn = tryfn[:i]
|
||||
else:
|
||||
anchor = ''
|
||||
fn = path.dirpath(tryfn)
|
||||
ishtml = fn.ext == '.html'
|
||||
fn = ishtml and fn.new(ext='.txt') or fn
|
||||
print "filename is", fn
|
||||
if not fn.check(): # not ishtml or not fn.check():
|
||||
if not py.path.local(tryfn).check(): # the html could be there
|
||||
py.test.fail("reference error %r in %s:%d" %(
|
||||
tryfn, path.basename, lineno+1))
|
||||
if anchor:
|
||||
source = unicode(fn.read(), 'latin1')
|
||||
source = source.lower().replace('-', ' ') # aehem
|
||||
|
||||
anchor = anchor.replace('-', ' ')
|
||||
match2 = ".. _`%s`:" % anchor
|
||||
match3 = ".. _%s:" % anchor
|
||||
candidates = (anchor, match2, match3)
|
||||
print "candidates", repr(candidates)
|
||||
for line in source.split('\n'):
|
||||
line = line.strip()
|
||||
if line in candidates:
|
||||
break
|
||||
else:
|
||||
py.test.fail("anchor reference error %s#%s in %s:%d" %(
|
||||
tryfn, anchor, path.basename, lineno+1))
|
||||
|
||||
|
||||
#
|
||||
# PLUGIN tests
|
||||
#
|
||||
|
||||
def test_deindent():
|
||||
assert deindent('foo') == 'foo'
|
||||
assert deindent('foo\n bar') == 'foo\n bar'
|
||||
assert deindent(' foo\n bar\n') == 'foo\nbar\n'
|
||||
assert deindent(' foo\n\n bar\n') == 'foo\n\nbar\n'
|
||||
assert deindent(' foo\n bar\n') == 'foo\n bar\n'
|
||||
assert deindent(' foo\n bar\n') == ' foo\nbar\n'
|
||||
|
||||
class TestApigenLinkRole:
|
||||
disabled = True
|
||||
|
||||
# these tests are moved here from the former py/doc/conftest.py
|
||||
def test_resolve_linkrole(self):
|
||||
from py.__.doc.conftest import get_apigen_relpath
|
||||
apigen_relpath = get_apigen_relpath()
|
||||
|
||||
assert resolve_linkrole('api', 'py.foo.bar', False) == (
|
||||
'py.foo.bar', apigen_relpath + 'api/foo.bar.html')
|
||||
assert resolve_linkrole('api', 'py.foo.bar()', False) == (
|
||||
'py.foo.bar()', apigen_relpath + 'api/foo.bar.html')
|
||||
assert resolve_linkrole('api', 'py', False) == (
|
||||
'py', apigen_relpath + 'api/index.html')
|
||||
py.test.raises(AssertionError, 'resolve_linkrole("api", "foo.bar")')
|
||||
assert resolve_linkrole('source', 'py/foo/bar.py', False) == (
|
||||
'py/foo/bar.py', apigen_relpath + 'source/foo/bar.py.html')
|
||||
assert resolve_linkrole('source', 'py/foo/', False) == (
|
||||
'py/foo/', apigen_relpath + 'source/foo/index.html')
|
||||
assert resolve_linkrole('source', 'py/', False) == (
|
||||
'py/', apigen_relpath + 'source/index.html')
|
||||
py.test.raises(AssertionError, 'resolve_linkrole("source", "/foo/bar/")')
|
||||
|
||||
def test_resolve_linkrole_check_api(self):
|
||||
assert resolve_linkrole('api', 'py.test.ensuretemp')
|
||||
py.test.raises(AssertionError, "resolve_linkrole('api', 'py.foo.baz')")
|
||||
|
||||
def test_resolve_linkrole_check_source(self):
|
||||
assert resolve_linkrole('source', 'py/path/common.py')
|
||||
py.test.raises(AssertionError,
|
||||
"resolve_linkrole('source', 'py/foo/bar.py')")
|
||||
|
||||
|
||||
class TestDoctest:
|
||||
def pytest_funcarg__testdir(self, request):
|
||||
testdir = request.getfuncargvalue("testdir")
|
||||
assert request.module.__name__ == __name__
|
||||
testdir.makepyfile(confrest="from py.__.misc.rest import Project")
|
||||
for p in testdir.plugins:
|
||||
if p == globals():
|
||||
break
|
||||
else:
|
||||
testdir.plugins.append(globals())
|
||||
return testdir
|
||||
|
||||
def test_doctest_extra_exec(self, testdir):
|
||||
xtxt = testdir.maketxtfile(x="""
|
||||
hello::
|
||||
.. >>> raise ValueError
|
||||
>>> None
|
||||
""")
|
||||
reprec = testdir.inline_run(xtxt)
|
||||
passed, skipped, failed = reprec.countoutcomes()
|
||||
assert failed == 1
|
||||
|
||||
def test_doctest_basic(self, testdir):
|
||||
xtxt = testdir.maketxtfile(x="""
|
||||
..
|
||||
>>> from os.path import abspath
|
||||
|
||||
hello world
|
||||
|
||||
>>> assert abspath
|
||||
>>> i=3
|
||||
>>> print i
|
||||
3
|
||||
|
||||
yes yes
|
||||
|
||||
>>> i
|
||||
3
|
||||
|
||||
end
|
||||
""")
|
||||
reprec = testdir.inline_run(xtxt)
|
||||
passed, skipped, failed = reprec.countoutcomes()
|
||||
assert failed == 0
|
||||
assert passed + skipped == 2
|
||||
|
||||
def test_doctest_eol(self, testdir):
|
||||
ytxt = testdir.maketxtfile(y=".. >>> 1 + 1\r\n 2\r\n\r\n")
|
||||
reprec = testdir.inline_run(ytxt)
|
||||
passed, skipped, failed = reprec.countoutcomes()
|
||||
assert failed == 0
|
||||
assert passed + skipped == 2
|
||||
|
||||
def test_doctest_indentation(self, testdir):
|
||||
footxt = testdir.maketxtfile(foo=
|
||||
'..\n >>> print "foo\\n bar"\n foo\n bar\n')
|
||||
reprec = testdir.inline_run(footxt)
|
||||
passed, skipped, failed = reprec.countoutcomes()
|
||||
assert failed == 0
|
||||
assert skipped + passed == 2
|
||||
|
||||
def test_js_ignore(self, testdir):
|
||||
xtxt = testdir.maketxtfile(xtxt="""
|
||||
`blah`_
|
||||
|
||||
.. _`blah`: javascript:some_function()
|
||||
""")
|
||||
reprec = testdir.inline_run(xtxt)
|
||||
passed, skipped, failed = reprec.countoutcomes()
|
||||
assert failed == 0
|
||||
assert skipped + passed == 3
|
||||
|
||||
def test_pytest_doctest_prepare_content(self, testdir):
|
||||
l = []
|
||||
class MyPlugin:
|
||||
def pytest_doctest_prepare_content(self, content):
|
||||
l.append(content)
|
||||
return content.replace("False", "True")
|
||||
|
||||
testdir.plugins.append(MyPlugin())
|
||||
|
||||
xtxt = testdir.maketxtfile(x="""
|
||||
hello:
|
||||
|
||||
>>> 2 == 2
|
||||
False
|
||||
|
||||
""")
|
||||
reprec = testdir.inline_run(xtxt)
|
||||
assert len(l) == 1
|
||||
passed, skipped, failed = reprec.countoutcomes()
|
||||
assert passed >= 1
|
||||
assert not failed
|
||||
assert skipped <= 1
|
||||
|
||||
.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_restdoc.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,281 @@
|
|||
|
||||
pytest_resultlog plugin
|
||||
=======================
|
||||
|
||||
resultlog plugin for machine-readable logging of test results.
|
||||
|
||||
Useful for buildbot integration code.
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``--resultlog=path``
|
||||
path for machine-readable result log.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_resultlog.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_resultlog.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_resultlog.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""resultlog plugin for machine-readable logging of test results.
|
||||
Useful for buildbot integration code.
|
||||
"""
|
||||
|
||||
import py
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.addgroup("resultlog", "resultlog plugin options")
|
||||
group.addoption('--resultlog', action="store", dest="resultlog", metavar="path", default=None,
|
||||
help="path for machine-readable result log.")
|
||||
|
||||
def pytest_configure(config):
|
||||
resultlog = config.option.resultlog
|
||||
if resultlog:
|
||||
logfile = open(resultlog, 'w', 1) # line buffered
|
||||
config._resultlog = ResultLog(logfile)
|
||||
config.pluginmanager.register(config._resultlog)
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
resultlog = getattr(config, '_resultlog', None)
|
||||
if resultlog:
|
||||
resultlog.logfile.close()
|
||||
del config._resultlog
|
||||
config.pluginmanager.unregister(resultlog)
|
||||
|
||||
def generic_path(item):
|
||||
chain = item.listchain()
|
||||
gpath = [chain[0].name]
|
||||
fspath = chain[0].fspath
|
||||
fspart = False
|
||||
for node in chain[1:]:
|
||||
newfspath = node.fspath
|
||||
if newfspath == fspath:
|
||||
if fspart:
|
||||
gpath.append(':')
|
||||
fspart = False
|
||||
else:
|
||||
gpath.append('.')
|
||||
else:
|
||||
gpath.append('/')
|
||||
fspart = True
|
||||
name = node.name
|
||||
if name[0] in '([':
|
||||
gpath.pop()
|
||||
gpath.append(name)
|
||||
fspath = newfspath
|
||||
return ''.join(gpath)
|
||||
|
||||
class ResultLog(object):
|
||||
def __init__(self, logfile):
|
||||
self.logfile = logfile # preferably line buffered
|
||||
|
||||
def write_log_entry(self, testpath, shortrepr, longrepr):
|
||||
print >>self.logfile, "%s %s" % (shortrepr, testpath)
|
||||
for line in longrepr.splitlines():
|
||||
print >>self.logfile, " %s" % line
|
||||
|
||||
def log_outcome(self, node, shortrepr, longrepr):
|
||||
testpath = generic_path(node)
|
||||
self.write_log_entry(testpath, shortrepr, longrepr)
|
||||
|
||||
def pytest_runtest_logreport(self, rep):
|
||||
code = rep.shortrepr
|
||||
if rep.passed:
|
||||
longrepr = ""
|
||||
elif rep.failed:
|
||||
longrepr = str(rep.longrepr)
|
||||
elif rep.skipped:
|
||||
longrepr = str(rep.longrepr.reprcrash.message)
|
||||
self.log_outcome(rep.item, code, longrepr)
|
||||
|
||||
def pytest_collectreport(self, rep):
|
||||
if not rep.passed:
|
||||
if rep.failed:
|
||||
code = "F"
|
||||
else:
|
||||
assert rep.skipped
|
||||
code = "S"
|
||||
longrepr = str(rep.longrepr.reprcrash)
|
||||
self.log_outcome(rep.collector, code, longrepr)
|
||||
|
||||
def pytest_internalerror(self, excrepr):
|
||||
path = excrepr.reprcrash.path
|
||||
self.write_log_entry(path, '!', str(excrepr))
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
#
|
||||
# plugin tests
|
||||
#
|
||||
# ===============================================================================
|
||||
|
||||
import os, StringIO
|
||||
|
||||
def test_generic_path():
|
||||
from py.__.test.collect import Node, Item, FSCollector
|
||||
p1 = Node('a')
|
||||
assert p1.fspath is None
|
||||
p2 = Node('B', parent=p1)
|
||||
p3 = Node('()', parent = p2)
|
||||
item = Item('c', parent = p3)
|
||||
|
||||
res = generic_path(item)
|
||||
assert res == 'a.B().c'
|
||||
|
||||
p0 = FSCollector('proj/test')
|
||||
p1 = FSCollector('proj/test/a', parent=p0)
|
||||
p2 = Node('B', parent=p1)
|
||||
p3 = Node('()', parent = p2)
|
||||
p4 = Node('c', parent=p3)
|
||||
item = Item('[1]', parent = p4)
|
||||
|
||||
res = generic_path(item)
|
||||
assert res == 'test/a:B().c[1]'
|
||||
|
||||
def test_write_log_entry():
|
||||
reslog = ResultLog(None)
|
||||
reslog.logfile = StringIO.StringIO()
|
||||
reslog.write_log_entry('name', '.', '')
|
||||
entry = reslog.logfile.getvalue()
|
||||
assert entry[-1] == '\n'
|
||||
entry_lines = entry.splitlines()
|
||||
assert len(entry_lines) == 1
|
||||
assert entry_lines[0] == '. name'
|
||||
|
||||
reslog.logfile = StringIO.StringIO()
|
||||
reslog.write_log_entry('name', 's', 'Skipped')
|
||||
entry = reslog.logfile.getvalue()
|
||||
assert entry[-1] == '\n'
|
||||
entry_lines = entry.splitlines()
|
||||
assert len(entry_lines) == 2
|
||||
assert entry_lines[0] == 's name'
|
||||
assert entry_lines[1] == ' Skipped'
|
||||
|
||||
reslog.logfile = StringIO.StringIO()
|
||||
reslog.write_log_entry('name', 's', 'Skipped\n')
|
||||
entry = reslog.logfile.getvalue()
|
||||
assert entry[-1] == '\n'
|
||||
entry_lines = entry.splitlines()
|
||||
assert len(entry_lines) == 2
|
||||
assert entry_lines[0] == 's name'
|
||||
assert entry_lines[1] == ' Skipped'
|
||||
|
||||
reslog.logfile = StringIO.StringIO()
|
||||
longrepr = ' tb1\n tb 2\nE tb3\nSome Error'
|
||||
reslog.write_log_entry('name', 'F', longrepr)
|
||||
entry = reslog.logfile.getvalue()
|
||||
assert entry[-1] == '\n'
|
||||
entry_lines = entry.splitlines()
|
||||
assert len(entry_lines) == 5
|
||||
assert entry_lines[0] == 'F name'
|
||||
assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()]
|
||||
|
||||
|
||||
class TestWithFunctionIntegration:
|
||||
# XXX (hpk) i think that the resultlog plugin should
|
||||
# provide a Parser object so that one can remain
|
||||
# ignorant regarding formatting details.
|
||||
def getresultlog(self, testdir, arg):
|
||||
resultlog = testdir.tmpdir.join("resultlog")
|
||||
testdir.plugins.append("resultlog")
|
||||
args = ["--resultlog=%s" % resultlog] + [arg]
|
||||
testdir.runpytest(*args)
|
||||
return filter(None, resultlog.readlines(cr=0))
|
||||
|
||||
def test_collection_report(self, testdir):
|
||||
ok = testdir.makepyfile(test_collection_ok="")
|
||||
skip = testdir.makepyfile(test_collection_skip="import py ; py.test.skip('hello')")
|
||||
fail = testdir.makepyfile(test_collection_fail="XXX")
|
||||
lines = self.getresultlog(testdir, ok)
|
||||
assert not lines
|
||||
|
||||
lines = self.getresultlog(testdir, skip)
|
||||
assert len(lines) == 2
|
||||
assert lines[0].startswith("S ")
|
||||
assert lines[0].endswith("test_collection_skip.py")
|
||||
assert lines[1].startswith(" ")
|
||||
assert lines[1].endswith("test_collection_skip.py:1: Skipped: 'hello'")
|
||||
|
||||
lines = self.getresultlog(testdir, fail)
|
||||
assert lines
|
||||
assert lines[0].startswith("F ")
|
||||
assert lines[0].endswith("test_collection_fail.py"), lines[0]
|
||||
for x in lines[1:]:
|
||||
assert x.startswith(" ")
|
||||
assert "XXX" in "".join(lines[1:])
|
||||
|
||||
def test_log_test_outcomes(self, testdir):
|
||||
mod = testdir.makepyfile(test_mod="""
|
||||
import py
|
||||
def test_pass(): pass
|
||||
def test_skip(): py.test.skip("hello")
|
||||
def test_fail(): raise ValueError("val")
|
||||
""")
|
||||
lines = self.getresultlog(testdir, mod)
|
||||
assert len(lines) >= 3
|
||||
assert lines[0].startswith(". ")
|
||||
assert lines[0].endswith("test_pass")
|
||||
assert lines[1].startswith("s "), lines[1]
|
||||
assert lines[1].endswith("test_skip")
|
||||
assert lines[2].find("hello") != -1
|
||||
|
||||
assert lines[3].startswith("F ")
|
||||
assert lines[3].endswith("test_fail")
|
||||
tb = "".join(lines[4:])
|
||||
assert tb.find("ValueError") != -1
|
||||
|
||||
def test_internal_exception(self):
|
||||
# they are produced for example by a teardown failing
|
||||
# at the end of the run
|
||||
try:
|
||||
raise ValueError
|
||||
except ValueError:
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
reslog = ResultLog(StringIO.StringIO())
|
||||
reslog.pytest_internalerror(excinfo.getrepr())
|
||||
entry = reslog.logfile.getvalue()
|
||||
entry_lines = entry.splitlines()
|
||||
|
||||
assert entry_lines[0].startswith('! ')
|
||||
assert os.path.basename(__file__)[:-1] in entry_lines[0] #.py/.pyc
|
||||
assert entry_lines[-1][0] == ' '
|
||||
assert 'ValueError' in entry
|
||||
|
||||
def test_generic(testdir, LineMatcher):
|
||||
testdir.plugins.append("resultlog")
|
||||
testdir.makepyfile("""
|
||||
import py
|
||||
def test_pass():
|
||||
pass
|
||||
def test_fail():
|
||||
assert 0
|
||||
def test_skip():
|
||||
py.test.skip("")
|
||||
""")
|
||||
testdir.runpytest("--resultlog=result.log")
|
||||
lines = testdir.tmpdir.join("result.log").readlines(cr=0)
|
||||
LineMatcher(lines).fnmatch_lines([
|
||||
". *:test_pass",
|
||||
"F *:test_fail",
|
||||
"s *:test_skip",
|
||||
])
|
||||
|
||||
.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_resultlog.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,304 @@
|
|||
|
||||
pytest_runner plugin
|
||||
====================
|
||||
|
||||
collect and run test items and create reports.
|
||||
|
||||
|
||||
|
||||
command line options
|
||||
--------------------
|
||||
|
||||
|
||||
``--boxed``
|
||||
box each test run in a separate process
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_runner.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_runner.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_runner.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
collect and run test items and create reports.
|
||||
"""
|
||||
|
||||
import py
|
||||
|
||||
from py.__.test.outcome import Skipped
|
||||
|
||||
#
|
||||
# pytest plugin hooks
|
||||
|
||||
def pytest_addoption(parser):
|
||||
group = parser.getgroup("general")
|
||||
group.addoption('--boxed',
|
||||
action="store_true", dest="boxed", default=False,
|
||||
help="box each test run in a separate process")
|
||||
|
||||
# XXX move to pytest_sessionstart and fix py.test owns tests
|
||||
def pytest_configure(config):
|
||||
config._setupstate = SetupState()
|
||||
|
||||
def pytest_sessionfinish(session, exitstatus):
    # XXX see above
    # Tear down everything that SetupState.prepare() stacked up during the
    # run (only if pytest_configure actually ran and attached the state).
    if hasattr(session.config, '_setupstate'):
        session.config._setupstate.teardown_all()
    # prevent logging module atexit handler from choking on
    # its attempt to close already closed streams
    # see http://bugs.python.org/issue6333
    mod = py.std.sys.modules.get("logging", None)
    if mod is not None:
        mod.raiseExceptions = False
|
||||
|
||||
def pytest_make_collect_report(collector):
    """Run collection for *collector* and wrap the outcome in a CollectReport.

    Collection happens inside config.guardedcall() so any exception raised
    while collecting (including skips) is captured on the call object and
    reported instead of propagating.
    """
    call = collector.config.guardedcall(
        lambda: collector._memocollect()
    )
    result = None
    if not call.excinfo:
        result = call.result
    # excinfo/outerr travel with the report so reporters can render failures.
    # NOTE: the original had an unreachable `return report` after this line,
    # referencing an undefined name; it has been removed.
    return CollectReport(collector, result, call.excinfo, call.outerr)
|
||||
|
||||
def pytest_runtest_protocol(item):
|
||||
if item.config.getvalue("boxed"):
|
||||
reports = forked_run_report(item)
|
||||
for rep in reports:
|
||||
item.config.hook.pytest_runtest_logreport(rep=rep)
|
||||
else:
|
||||
runtestprotocol(item)
|
||||
return True
|
||||
|
||||
def runtestprotocol(item, log=True):
    # Run the setup/call/teardown phases for a single test item and return
    # the list of reports produced.  The "call" phase only happens when
    # setup passed; teardown is always attempted.  With log=False (used by
    # the --boxed subprocess) reports are collected but not emitted here.
    rep = call_and_report(item, "setup", log)
    reports = [rep]
    if rep.passed:
        reports.append(call_and_report(item, "call", log))
    reports.append(call_and_report(item, "teardown", log))
    return reports
|
||||
|
||||
def pytest_runtest_setup(item):
|
||||
item.config._setupstate.prepare(item)
|
||||
|
||||
def pytest_runtest_call(item):
|
||||
if not item._deprecated_testexecution():
|
||||
item.runtest()
|
||||
|
||||
def pytest_runtest_makereport(item, call):
|
||||
return ItemTestReport(item, call.excinfo, call.when, call.outerr)
|
||||
|
||||
def pytest_runtest_teardown(item):
|
||||
item.config._setupstate.teardown_exact(item)
|
||||
|
||||
#
|
||||
# Implementation
|
||||
|
||||
def call_and_report(item, when, log=True):
|
||||
call = RuntestHookCall(item, when)
|
||||
hook = item.config.hook
|
||||
report = hook.pytest_runtest_makereport(item=item, call=call)
|
||||
if log and (when == "call" or not report.passed):
|
||||
hook.pytest_runtest_logreport(rep=report)
|
||||
return report
|
||||
|
||||
|
||||
class RuntestHookCall:
|
||||
excinfo = None
|
||||
_prefix = "pytest_runtest_"
|
||||
def __init__(self, item, when):
|
||||
self.when = when
|
||||
hookname = self._prefix + when
|
||||
hook = getattr(item.config.hook, hookname)
|
||||
capture = item.config._getcapture()
|
||||
try:
|
||||
try:
|
||||
self.result = hook(item=item)
|
||||
except KeyboardInterrupt:
|
||||
raise
|
||||
except:
|
||||
self.excinfo = py.code.ExceptionInfo()
|
||||
finally:
|
||||
self.outerr = capture.reset()
|
||||
|
||||
def forked_run_report(item):
|
||||
# for now, we run setup/teardown in the subprocess
|
||||
# XXX optionally allow sharing of setup/teardown
|
||||
EXITSTATUS_TESTEXIT = 4
|
||||
from py.__.test.dist.mypickle import ImmutablePickler
|
||||
ipickle = ImmutablePickler(uneven=0)
|
||||
ipickle.selfmemoize(item.config)
|
||||
# XXX workaround the issue that 2.6 cannot pickle
|
||||
# instances of classes defined in global conftest.py files
|
||||
ipickle.selfmemoize(item)
|
||||
def runforked():
|
||||
try:
|
||||
reports = runtestprotocol(item, log=False)
|
||||
except KeyboardInterrupt:
|
||||
py.std.os._exit(EXITSTATUS_TESTEXIT)
|
||||
return ipickle.dumps(reports)
|
||||
|
||||
ff = py.process.ForkedFunc(runforked)
|
||||
result = ff.waitfinish()
|
||||
if result.retval is not None:
|
||||
return ipickle.loads(result.retval)
|
||||
else:
|
||||
if result.exitstatus == EXITSTATUS_TESTEXIT:
|
||||
py.test.exit("forked test item %s raised Exit" %(item,))
|
||||
return [report_process_crash(item, result)]
|
||||
|
||||
def report_process_crash(item, result):
    # Build a failure report for a --boxed test whose forked subprocess died
    # from a signal (*result* comes from ForkedFunc.waitfinish()).
    path, lineno = item._getfslineno()
    info = "%s:%s: running the test CRASHED with signal %d" %(
        path, lineno, result.signal)
    # Passing a plain string as excinfo makes ItemTestReport mark the test
    # failed with shortrepr "?" (see ItemTestReport.__init__).
    return ItemTestReport(item, excinfo=info, when="???")
|
||||
|
||||
class BaseReport(object):
|
||||
def __repr__(self):
|
||||
l = ["%s=%s" %(key, value)
|
||||
for key, value in self.__dict__.items()]
|
||||
return "<%s %s>" %(self.__class__.__name__, " ".join(l),)
|
||||
|
||||
def toterminal(self, out):
|
||||
longrepr = self.longrepr
|
||||
if hasattr(longrepr, 'toterminal'):
|
||||
longrepr.toterminal(out)
|
||||
else:
|
||||
out.line(str(longrepr))
|
||||
|
||||
class ItemTestReport(BaseReport):
|
||||
failed = passed = skipped = False
|
||||
|
||||
def __init__(self, item, excinfo=None, when=None, outerr=None):
|
||||
self.item = item
|
||||
self.when = when
|
||||
self.outerr = outerr
|
||||
if item and when != "setup":
|
||||
self.keywords = item.readkeywords()
|
||||
else:
|
||||
# if we fail during setup it might mean
|
||||
# we are not able to access the underlying object
|
||||
# this might e.g. happen if we are unpickled
|
||||
# and our parent collector did not collect us
|
||||
# (because it e.g. skipped for platform reasons)
|
||||
self.keywords = {}
|
||||
if not excinfo:
|
||||
self.passed = True
|
||||
self.shortrepr = "."
|
||||
else:
|
||||
if not isinstance(excinfo, py.code.ExceptionInfo):
|
||||
self.failed = True
|
||||
shortrepr = "?"
|
||||
longrepr = excinfo
|
||||
elif excinfo.errisinstance(Skipped):
|
||||
self.skipped = True
|
||||
shortrepr = "s"
|
||||
longrepr = self.item._repr_failure_py(excinfo, outerr)
|
||||
else:
|
||||
self.failed = True
|
||||
shortrepr = self.item.shortfailurerepr
|
||||
if self.when == "call":
|
||||
longrepr = self.item.repr_failure(excinfo, outerr)
|
||||
else: # exception in setup or teardown
|
||||
longrepr = self.item._repr_failure_py(excinfo, outerr)
|
||||
shortrepr = shortrepr.lower()
|
||||
self.shortrepr = shortrepr
|
||||
self.longrepr = longrepr
|
||||
|
||||
def getnode(self):
|
||||
return self.item
|
||||
|
||||
class CollectReport(BaseReport):
|
||||
skipped = failed = passed = False
|
||||
|
||||
def __init__(self, collector, result, excinfo=None, outerr=None):
|
||||
self.collector = collector
|
||||
if not excinfo:
|
||||
self.passed = True
|
||||
self.result = result
|
||||
else:
|
||||
self.outerr = outerr
|
||||
self.longrepr = self.collector._repr_failure_py(excinfo, outerr)
|
||||
if excinfo.errisinstance(Skipped):
|
||||
self.skipped = True
|
||||
self.reason = str(excinfo.value)
|
||||
else:
|
||||
self.failed = True
|
||||
|
||||
def getnode(self):
|
||||
return self.collector
|
||||
|
||||
class SetupState(object):
|
||||
""" shared state for setting up/tearing down test items or collectors. """
|
||||
def __init__(self):
|
||||
self.stack = []
|
||||
self._finalizers = {}
|
||||
|
||||
def addfinalizer(self, finalizer, colitem):
|
||||
""" attach a finalizer to the given colitem.
|
||||
if colitem is None, this will add a finalizer that
|
||||
is called at the end of teardown_all().
|
||||
"""
|
||||
assert callable(finalizer)
|
||||
#assert colitem in self.stack
|
||||
self._finalizers.setdefault(colitem, []).append(finalizer)
|
||||
|
||||
def _pop_and_teardown(self):
|
||||
colitem = self.stack.pop()
|
||||
self._teardown_with_finalization(colitem)
|
||||
|
||||
def _callfinalizers(self, colitem):
|
||||
finalizers = self._finalizers.pop(colitem, None)
|
||||
while finalizers:
|
||||
fin = finalizers.pop()
|
||||
fin()
|
||||
|
||||
def _teardown_with_finalization(self, colitem):
|
||||
self._callfinalizers(colitem)
|
||||
if colitem:
|
||||
colitem.teardown()
|
||||
for colitem in self._finalizers:
|
||||
assert colitem is None or colitem in self.stack
|
||||
|
||||
def teardown_all(self):
|
||||
while self.stack:
|
||||
self._pop_and_teardown()
|
||||
self._teardown_with_finalization(None)
|
||||
assert not self._finalizers
|
||||
|
||||
def teardown_exact(self, item):
|
||||
if item == self.stack[-1]:
|
||||
self._pop_and_teardown()
|
||||
else:
|
||||
self._callfinalizers(item)
|
||||
|
||||
def prepare(self, colitem):
|
||||
""" setup objects along the collector chain to the test-method
|
||||
and teardown previously setup objects."""
|
||||
needed_collectors = colitem.listchain()
|
||||
while self.stack:
|
||||
if self.stack == needed_collectors[:len(self.stack)]:
|
||||
break
|
||||
self._pop_and_teardown()
|
||||
for col in needed_collectors[len(self.stack):]:
|
||||
col.setup()
|
||||
self.stack.append(col)
|
||||
|
||||
.. _`pytest_runner.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_runner.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,432 @@
|
|||
|
||||
pytest_terminal plugin
|
||||
======================
|
||||
|
||||
terminal reporting of the full testing process.
|
||||
|
||||
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_terminal.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_terminal.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_terminal.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
terminal reporting of the full testing process.
|
||||
"""
|
||||
import py
|
||||
import sys
|
||||
|
||||
def pytest_configure(config):
    """Instantiate and register the appropriate terminal reporter.

    --collectonly gets the lightweight CollectonlyReporter; otherwise a
    full TerminalReporter is created and any terminal options forwarded
    from a remote/master side (hasmarkup, fullwidth) are copied onto its
    TerminalWriter.
    """
    if config.option.collectonly:
        reporter = CollectonlyReporter(config)
    else:
        reporter = TerminalReporter(config)
        # XXX see remote.py's XXX
        for attr in 'pytest_terminal_hasmarkup', 'pytest_terminal_fullwidth':
            if hasattr(config, attr):
                #print "SETTING TERMINAL OPTIONS", attr, getattr(config, attr)
                name = attr.split("_")[-1]
                # BUGFIX: the original asserted on `self.reporter._tw`, but
                # `self` does not exist in this module-level function and
                # would raise NameError whenever the attr was present.
                assert hasattr(reporter._tw, name), name
                setattr(reporter._tw, name, getattr(config, attr))
    config.pluginmanager.register(reporter)
|
||||
|
||||
class TerminalReporter:
|
||||
def __init__(self, config, file=None):
|
||||
self.config = config
|
||||
self.stats = {}
|
||||
self.curdir = py.path.local()
|
||||
if file is None:
|
||||
file = py.std.sys.stdout
|
||||
self._tw = py.io.TerminalWriter(file)
|
||||
self.currentfspath = None
|
||||
self.gateway2info = {}
|
||||
|
||||
def write_fspath_result(self, fspath, res):
|
||||
fspath = self.curdir.bestrelpath(fspath)
|
||||
if fspath != self.currentfspath:
|
||||
self._tw.line()
|
||||
relpath = self.curdir.bestrelpath(fspath)
|
||||
self._tw.write(relpath + " ")
|
||||
self.currentfspath = fspath
|
||||
self._tw.write(res)
|
||||
|
||||
def write_ensure_prefix(self, prefix, extra="", **kwargs):
|
||||
if self.currentfspath != prefix:
|
||||
self._tw.line()
|
||||
self.currentfspath = prefix
|
||||
self._tw.write(prefix)
|
||||
if extra:
|
||||
self._tw.write(extra, **kwargs)
|
||||
self.currentfspath = -2
|
||||
|
||||
def ensure_newline(self):
|
||||
if self.currentfspath:
|
||||
self._tw.line()
|
||||
self.currentfspath = None
|
||||
|
||||
def write_line(self, line, **markup):
|
||||
line = str(line)
|
||||
self.ensure_newline()
|
||||
self._tw.line(line, **markup)
|
||||
|
||||
def write_sep(self, sep, title=None, **markup):
|
||||
self.ensure_newline()
|
||||
self._tw.sep(sep, title, **markup)
|
||||
|
||||
def getcategoryletterword(self, rep):
|
||||
res = self.config.hook.pytest_report_teststatus(rep=rep)
|
||||
if res:
|
||||
return res
|
||||
for cat in 'skipped failed passed ???'.split():
|
||||
if getattr(rep, cat, None):
|
||||
break
|
||||
return cat, self.getoutcomeletter(rep), self.getoutcomeword(rep)
|
||||
|
||||
def getoutcomeletter(self, rep):
|
||||
return rep.shortrepr
|
||||
|
||||
def getoutcomeword(self, rep):
|
||||
if rep.passed:
|
||||
return "PASS", dict(green=True)
|
||||
elif rep.failed:
|
||||
return "FAIL", dict(red=True)
|
||||
elif rep.skipped:
|
||||
return "SKIP"
|
||||
else:
|
||||
return "???", dict(red=True)
|
||||
|
||||
def pytest_internalerror(self, excrepr):
|
||||
for line in str(excrepr).split("\n"):
|
||||
self.write_line("INTERNALERROR> " + line)
|
||||
|
||||
def pyexecnet_gwmanage_newgateway(self, gateway, platinfo):
|
||||
#self.write_line("%s instantiated gateway from spec %r" %(gateway.id, gateway.spec._spec))
|
||||
d = {}
|
||||
d['version'] = repr_pythonversion(platinfo.version_info)
|
||||
d['id'] = gateway.id
|
||||
d['spec'] = gateway.spec._spec
|
||||
d['platform'] = platinfo.platform
|
||||
if self.config.option.verbose:
|
||||
d['extra'] = "- " + platinfo.executable
|
||||
else:
|
||||
d['extra'] = ""
|
||||
d['cwd'] = platinfo.cwd
|
||||
infoline = ("%(id)s %(spec)s -- platform %(platform)s, "
|
||||
"Python %(version)s "
|
||||
"cwd: %(cwd)s"
|
||||
"%(extra)s" % d)
|
||||
self.write_line(infoline)
|
||||
self.gateway2info[gateway] = infoline
|
||||
|
||||
def pyexecnet_gwmanage_rsyncstart(self, source, gateways):
|
||||
targets = ", ".join([gw.id for gw in gateways])
|
||||
msg = "rsyncstart: %s -> %s" %(source, targets)
|
||||
if not self.config.option.verbose:
|
||||
msg += " # use --verbose to see rsync progress"
|
||||
self.write_line(msg)
|
||||
|
||||
def pyexecnet_gwmanage_rsyncfinish(self, source, gateways):
|
||||
targets = ", ".join([gw.id for gw in gateways])
|
||||
self.write_line("rsyncfinish: %s -> %s" %(source, targets))
|
||||
|
||||
def pytest_plugin_registered(self, plugin):
|
||||
if self.config.option.traceconfig:
|
||||
msg = "PLUGIN registered: %s" %(plugin,)
|
||||
# XXX this event may happen during setup/teardown time
|
||||
# which unfortunately captures our output here
|
||||
# which garbles our output if we use self.write_line
|
||||
self.write_line(msg)
|
||||
|
||||
def pytest_testnodeready(self, node):
|
||||
self.write_line("%s txnode ready to receive tests" %(node.gateway.id,))
|
||||
|
||||
def pytest_testnodedown(self, node, error):
|
||||
if error:
|
||||
self.write_line("%s node down, error: %s" %(node.gateway.id, error))
|
||||
|
||||
def pytest_trace(self, category, msg):
|
||||
if self.config.option.debug or \
|
||||
self.config.option.traceconfig and category.find("config") != -1:
|
||||
self.write_line("[%s] %s" %(category, msg))
|
||||
|
||||
def pytest_rescheduleitems(self, items):
|
||||
if self.config.option.debug:
|
||||
self.write_sep("!", "RESCHEDULING %s " %(items,))
|
||||
|
||||
def pytest_deselected(self, items):
|
||||
self.stats.setdefault('deselected', []).append(items)
|
||||
|
||||
def pytest_itemstart(self, item, node=None):
|
||||
if self.config.option.dist != "no":
|
||||
# for dist-testing situations itemstart means we
|
||||
# queued the item for sending, not interesting (unless debugging)
|
||||
if self.config.option.debug:
|
||||
line = self._reportinfoline(item)
|
||||
extra = ""
|
||||
if node:
|
||||
extra = "-> " + str(node.gateway.id)
|
||||
self.write_ensure_prefix(line, extra)
|
||||
else:
|
||||
if self.config.option.verbose:
|
||||
line = self._reportinfoline(item)
|
||||
self.write_ensure_prefix(line, "")
|
||||
else:
|
||||
# ensure that the path is printed before the
|
||||
# 1st test of a module starts running
|
||||
fspath, lineno, msg = self._getreportinfo(item)
|
||||
self.write_fspath_result(fspath, "")
|
||||
|
||||
def pytest_runtest_logreport(self, rep):
|
||||
if rep.passed and rep.when in ("setup", "teardown"):
|
||||
return
|
||||
fspath = rep.item.fspath
|
||||
cat, letter, word = self.getcategoryletterword(rep)
|
||||
if isinstance(word, tuple):
|
||||
word, markup = word
|
||||
else:
|
||||
markup = {}
|
||||
self.stats.setdefault(cat, []).append(rep)
|
||||
if not self.config.option.verbose:
|
||||
fspath, lineno, msg = self._getreportinfo(rep.item)
|
||||
self.write_fspath_result(fspath, letter)
|
||||
else:
|
||||
line = self._reportinfoline(rep.item)
|
||||
if not hasattr(rep, 'node'):
|
||||
self.write_ensure_prefix(line, word, **markup)
|
||||
else:
|
||||
self.ensure_newline()
|
||||
if hasattr(rep, 'node'):
|
||||
self._tw.write("%s " % rep.node.gateway.id)
|
||||
self._tw.write(word, **markup)
|
||||
self._tw.write(" " + line)
|
||||
self.currentfspath = -2
|
||||
|
||||
def pytest_collectreport(self, rep):
|
||||
if not rep.passed:
|
||||
if rep.failed:
|
||||
self.stats.setdefault("failed", []).append(rep)
|
||||
msg = rep.longrepr.reprcrash.message
|
||||
self.write_fspath_result(rep.collector.fspath, "F")
|
||||
elif rep.skipped:
|
||||
self.stats.setdefault("skipped", []).append(rep)
|
||||
self.write_fspath_result(rep.collector.fspath, "S")
|
||||
|
||||
def pytest_sessionstart(self, session):
|
||||
self.write_sep("=", "test session starts", bold=True)
|
||||
self._sessionstarttime = py.std.time.time()
|
||||
|
||||
verinfo = ".".join(map(str, sys.version_info[:3]))
|
||||
msg = "python: platform %s -- Python %s" % (sys.platform, verinfo)
|
||||
if self.config.option.verbose or self.config.option.debug:
|
||||
msg += " -- " + str(sys.executable)
|
||||
msg += " -- pytest-%s" % (py.__version__)
|
||||
self.write_line(msg)
|
||||
|
||||
if self.config.option.debug or self.config.option.traceconfig:
|
||||
rev = py.__pkg__.getrev()
|
||||
self.write_line("using py lib: %s <rev %s>" % (
|
||||
py.path.local(py.__file__).dirpath(), rev))
|
||||
if self.config.option.traceconfig:
|
||||
plugins = []
|
||||
for plugin in self.config.pluginmanager.comregistry:
|
||||
name = plugin.__class__.__name__
|
||||
if name.endswith("Plugin"):
|
||||
name = name[:-6]
|
||||
#if name == "Conftest":
|
||||
# XXX get filename
|
||||
plugins.append(name)
|
||||
else:
|
||||
plugins.append(str(plugin))
|
||||
|
||||
plugins = ", ".join(plugins)
|
||||
self.write_line("active plugins: %s" %(plugins,))
|
||||
for i, testarg in py.builtin.enumerate(self.config.args):
|
||||
self.write_line("test object %d: %s" %(i+1, testarg))
|
||||
|
||||
def pytest_sessionfinish(self, __call__, session, exitstatus):
|
||||
__call__.execute()
|
||||
self._tw.line("")
|
||||
if exitstatus in (0, 1, 2):
|
||||
self.summary_failures()
|
||||
self.summary_skips()
|
||||
self.config.hook.pytest_terminal_summary(terminalreporter=self)
|
||||
if exitstatus == 2:
|
||||
self._report_keyboardinterrupt()
|
||||
self.summary_deselected()
|
||||
self.summary_stats()
|
||||
|
||||
def pytest_keyboard_interrupt(self, excinfo):
|
||||
self._keyboardinterrupt_memo = excinfo.getrepr()
|
||||
|
||||
def _report_keyboardinterrupt(self):
|
||||
self.write_sep("!", "KEYBOARD INTERRUPT")
|
||||
excrepr = self._keyboardinterrupt_memo
|
||||
if self.config.option.verbose:
|
||||
excrepr.toterminal(self._tw)
|
||||
else:
|
||||
excrepr.reprcrash.toterminal(self._tw)
|
||||
|
||||
def pytest_looponfailinfo(self, failreports, rootdirs):
|
||||
if failreports:
|
||||
self.write_sep("#", "LOOPONFAILING", red=True)
|
||||
for report in failreports:
|
||||
try:
|
||||
loc = report.longrepr.reprcrash
|
||||
except AttributeError:
|
||||
loc = str(report.longrepr)[:50]
|
||||
self.write_line(loc, red=True)
|
||||
self.write_sep("#", "waiting for changes")
|
||||
for rootdir in rootdirs:
|
||||
self.write_line("### Watching: %s" %(rootdir,), bold=True)
|
||||
|
||||
def _reportinfoline(self, item):
|
||||
fspath, lineno, msg = self._getreportinfo(item)
|
||||
if fspath:
|
||||
fspath = self.curdir.bestrelpath(fspath)
|
||||
if lineno is not None:
|
||||
lineno += 1
|
||||
if fspath and lineno and msg:
|
||||
line = "%(fspath)s:%(lineno)s: %(msg)s"
|
||||
elif fspath and msg:
|
||||
line = "%(fspath)s: %(msg)s"
|
||||
elif fspath and lineno:
|
||||
line = "%(fspath)s:%(lineno)s"
|
||||
else:
|
||||
line = "[noreportinfo]"
|
||||
return line % locals() + " "
|
||||
|
||||
def _getfailureheadline(self, rep):
|
||||
if hasattr(rep, "collector"):
|
||||
return str(rep.collector.fspath)
|
||||
else:
|
||||
fspath, lineno, msg = self._getreportinfo(rep.item)
|
||||
return msg
|
||||
|
||||
def _getreportinfo(self, item):
|
||||
try:
|
||||
return item.__reportinfo
|
||||
except AttributeError:
|
||||
pass
|
||||
reportinfo = item.config.hook.pytest_report_iteminfo(item=item)
|
||||
# cache on item
|
||||
item.__reportinfo = reportinfo
|
||||
return reportinfo
|
||||
|
||||
#
|
||||
# summaries for sessionfinish
|
||||
#
|
||||
|
||||
def summary_failures(self):
|
||||
if 'failed' in self.stats and self.config.option.tbstyle != "no":
|
||||
self.write_sep("=", "FAILURES")
|
||||
for rep in self.stats['failed']:
|
||||
msg = self._getfailureheadline(rep)
|
||||
self.write_sep("_", msg)
|
||||
if hasattr(rep, 'node'):
|
||||
self.write_line(self.gateway2info.get(
|
||||
rep.node.gateway, "node %r (platinfo not found? strange)")
|
||||
[:self._tw.fullwidth-1])
|
||||
rep.toterminal(self._tw)
|
||||
|
||||
def summary_stats(self):
|
||||
session_duration = py.std.time.time() - self._sessionstarttime
|
||||
|
||||
keys = "failed passed skipped deselected".split()
|
||||
parts = []
|
||||
for key in keys:
|
||||
val = self.stats.get(key, None)
|
||||
if val:
|
||||
parts.append("%d %s" %(len(val), key))
|
||||
line = ", ".join(parts)
|
||||
# XXX coloring
|
||||
self.write_sep("=", "%s in %.2f seconds" %(line, session_duration))
|
||||
|
||||
def summary_deselected(self):
|
||||
if 'deselected' in self.stats:
|
||||
self.write_sep("=", "%d tests deselected by %r" %(
|
||||
len(self.stats['deselected']), self.config.option.keyword), bold=True)
|
||||
|
||||
def summary_skips(self):
|
||||
if 'skipped' in self.stats:
|
||||
if 'failed' not in self.stats: # or self.config.option.showskipsummary:
|
||||
fskips = folded_skips(self.stats['skipped'])
|
||||
if fskips:
|
||||
self.write_sep("_", "skipped test summary")
|
||||
for num, fspath, lineno, reason in fskips:
|
||||
self._tw.line("%s:%d: [%d] %s" %(fspath, lineno, num, reason))
|
||||
|
||||
class CollectonlyReporter:
|
||||
INDENT = " "
|
||||
|
||||
def __init__(self, config, out=None):
|
||||
self.config = config
|
||||
if out is None:
|
||||
out = py.std.sys.stdout
|
||||
self.out = py.io.TerminalWriter(out)
|
||||
self.indent = ""
|
||||
self._failed = []
|
||||
|
||||
def outindent(self, line):
|
||||
self.out.line(self.indent + str(line))
|
||||
|
||||
def pytest_internalerror(self, excrepr):
|
||||
for line in str(excrepr).split("\n"):
|
||||
self.out.line("INTERNALERROR> " + line)
|
||||
|
||||
def pytest_collectstart(self, collector):
|
||||
self.outindent(collector)
|
||||
self.indent += self.INDENT
|
||||
|
||||
def pytest_itemstart(self, item, node=None):
|
||||
self.outindent(item)
|
||||
|
||||
def pytest_collectreport(self, rep):
|
||||
if not rep.passed:
|
||||
self.outindent("!!! %s !!!" % rep.longrepr.reprcrash.message)
|
||||
self._failed.append(rep)
|
||||
self.indent = self.indent[:-len(self.INDENT)]
|
||||
|
||||
def pytest_sessionfinish(self, session, exitstatus):
|
||||
if self._failed:
|
||||
self.out.sep("!", "collection failures")
|
||||
for rep in self._failed:
|
||||
rep.toterminal(self.out)
|
||||
|
||||
def folded_skips(skipped):
    """Group skip reports that share the same (path, lineno, message).

    Returns a list of ``(count, path, lineno, message)`` tuples, one per
    distinct skip location/reason, so terminal reporting can fold many
    identical skips into a single summary line.
    """
    d = {}
    for event in skipped:
        entry = event.longrepr.reprcrash
        key = entry.path, entry.lineno, entry.message
        d.setdefault(key, []).append(event)
    l = []
    # use items() instead of the Python-2-only iteritems(): identical
    # behavior on Python 2 and also valid on Python 3
    for key, events in d.items():
        l.append((len(events),) + key)
    return l
|
||||
|
||||
def repr_pythonversion(v=None):
    """Render a sys.version_info-like 5-tuple as e.g. ``2.5.2-final-0``.

    Defaults to the running interpreter's version; falls back to ``str(v)``
    when *v* cannot be formatted as a 5-tuple.
    """
    version = sys.version_info if v is None else v
    try:
        return "%s.%s.%s-%s-%s" % version
    except (TypeError, ValueError):
        return str(version)
|
||||
|
||||
.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_terminal.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,162 @@
|
|||
|
||||
pytest_unittest plugin
|
||||
======================
|
||||
|
||||
automatically discover and run traditional "unittest.py" style tests.
|
||||
|
||||
Usage
|
||||
----------------
|
||||
|
||||
This plugin collects and runs Python `unittest.py style`_ tests.
|
||||
It will automatically collect ``unittest.TestCase`` subclasses
|
||||
and their ``test`` methods from the test modules of a project
|
||||
(usually following the ``test_*.py`` pattern).
|
||||
|
||||
This plugin is enabled by default.
|
||||
|
||||
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_unittest.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_unittest.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_unittest.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
automatically discover and run traditional "unittest.py" style tests.
|
||||
|
||||
Usage
|
||||
----------------
|
||||
|
||||
This plugin collects and runs Python `unittest.py style`_ tests.
|
||||
It will automatically collect ``unittest.TestCase`` subclasses
|
||||
and their ``test`` methods from the test modules of a project
|
||||
(usually following the ``test_*.py`` pattern).
|
||||
|
||||
This plugin is enabled by default.
|
||||
|
||||
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
|
||||
"""
|
||||
import py
|
||||
import sys
|
||||
|
||||
def pytest_pycollect_makeitem(collector, name, obj):
|
||||
if 'unittest' not in sys.modules:
|
||||
return # nobody could have possibly derived a subclass
|
||||
if py.std.inspect.isclass(obj) and issubclass(obj, py.std.unittest.TestCase):
|
||||
return UnitTestCase(name, parent=collector)
|
||||
|
||||
class UnitTestCase(py.test.collect.Class):
|
||||
def collect(self):
|
||||
return [UnitTestCaseInstance("()", self)]
|
||||
|
||||
def setup(self):
|
||||
pass
|
||||
|
||||
def teardown(self):
|
||||
pass
|
||||
|
||||
_dummy = object()
|
||||
class UnitTestCaseInstance(py.test.collect.Instance):
|
||||
def collect(self):
|
||||
loader = py.std.unittest.TestLoader()
|
||||
names = loader.getTestCaseNames(self.obj.__class__)
|
||||
l = []
|
||||
for name in names:
|
||||
callobj = getattr(self.obj, name)
|
||||
if callable(callobj):
|
||||
l.append(UnitTestFunction(name, parent=self))
|
||||
return l
|
||||
|
||||
def _getobj(self):
|
||||
x = self.parent.obj
|
||||
return self.parent.obj(methodName='run')
|
||||
|
||||
class UnitTestFunction(py.test.collect.Function):
|
||||
def __init__(self, name, parent, args=(), obj=_dummy, sort_value=None):
|
||||
super(UnitTestFunction, self).__init__(name, parent)
|
||||
self._args = args
|
||||
if obj is not _dummy:
|
||||
self._obj = obj
|
||||
self._sort_value = sort_value
|
||||
|
||||
def runtest(self):
|
||||
target = self.obj
|
||||
args = self._args
|
||||
target(*args)
|
||||
|
||||
def setup(self):
|
||||
instance = self.obj.im_self
|
||||
instance.setUp()
|
||||
|
||||
def teardown(self):
|
||||
instance = self.obj.im_self
|
||||
instance.tearDown()
|
||||
|
||||
|
||||
def test_simple_unittest(testdir):
|
||||
testpath = testdir.makepyfile("""
|
||||
import unittest
|
||||
pytest_plugins = "pytest_unittest"
|
||||
class MyTestCase(unittest.TestCase):
|
||||
def testpassing(self):
|
||||
self.assertEquals('foo', 'foo')
|
||||
def test_failing(self):
|
||||
self.assertEquals('foo', 'bar')
|
||||
""")
|
||||
reprec = testdir.inline_run(testpath)
|
||||
assert reprec.matchreport("testpassing").passed
|
||||
assert reprec.matchreport("test_failing").failed
|
||||
|
||||
def test_setup(testdir):
|
||||
testpath = testdir.makepyfile(test_two="""
|
||||
import unittest
|
||||
pytest_plugins = "pytest_unittest" # XXX
|
||||
class MyTestCase(unittest.TestCase):
|
||||
def setUp(self):
|
||||
self.foo = 1
|
||||
def test_setUp(self):
|
||||
self.assertEquals(1, self.foo)
|
||||
""")
|
||||
reprec = testdir.inline_run(testpath)
|
||||
rep = reprec.matchreport("test_setUp")
|
||||
assert rep.passed
|
||||
|
||||
def test_teardown(testdir):
|
||||
testpath = testdir.makepyfile(test_three="""
|
||||
import unittest
|
||||
pytest_plugins = "pytest_unittest" # XXX
|
||||
class MyTestCase(unittest.TestCase):
|
||||
l = []
|
||||
def test_one(self):
|
||||
pass
|
||||
def tearDown(self):
|
||||
self.l.append(None)
|
||||
class Second(unittest.TestCase):
|
||||
def test_check(self):
|
||||
self.assertEquals(MyTestCase.l, [None])
|
||||
""")
|
||||
reprec = testdir.inline_run(testpath)
|
||||
passed, skipped, failed = reprec.countoutcomes()
|
||||
print "COUNTS", passed, skipped, failed
|
||||
assert failed == 0, failed
|
||||
assert passed == 2
|
||||
assert passed + skipped + failed == 2
|
||||
|
||||
.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_unittest.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -0,0 +1,134 @@
|
|||
|
||||
pytest_xfail plugin
|
||||
===================
|
||||
|
||||
mark python tests as expected-to-fail and report them separately.
|
||||
|
||||
usage
|
||||
------------
|
||||
|
||||
Use the generic mark decorator to add the 'xfail' keyword to your
|
||||
test function::
|
||||
|
||||
@py.test.mark.xfail
|
||||
def test_hello():
|
||||
...
|
||||
|
||||
This test will be executed but no traceback will be reported
|
||||
when it fails. Instead terminal reporting will list it in the
|
||||
"expected to fail" section or "unexpectedly passing" section.
|
||||
|
||||
Getting and improving this plugin
|
||||
---------------------------------
|
||||
|
||||
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `pytest_xfail.py`_ plugin source code
|
||||
2. put it somewhere as ``pytest_xfail.py`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
|
||||
For your convenience here is also an inlined version of ``pytest_xfail.py``:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
"""
|
||||
mark python tests as expected-to-fail and report them separately.
|
||||
|
||||
usage
|
||||
------------
|
||||
|
||||
Use the generic mark decorator to add the 'xfail' keyword to your
|
||||
test function::
|
||||
|
||||
@py.test.mark.xfail
|
||||
def test_hello():
|
||||
...
|
||||
|
||||
This test will be executed but no traceback will be reported
|
||||
when it fails. Instead terminal reporting will list it in the
|
||||
"expected to fail" section or "unexpectedly passing" section.
|
||||
"""
|
||||
|
||||
import py
|
||||
|
||||
pytest_plugins = ['keyword']
|
||||
|
||||
def pytest_runtest_makereport(__call__, item, call):
|
||||
if call.when != "call":
|
||||
return
|
||||
if hasattr(item, 'obj') and hasattr(item.obj, 'func_dict'):
|
||||
if 'xfail' in item.obj.func_dict:
|
||||
res = __call__.execute(firstresult=True)
|
||||
if call.excinfo:
|
||||
res.skipped = True
|
||||
res.failed = res.passed = False
|
||||
else:
|
||||
res.skipped = res.passed = False
|
||||
res.failed = True
|
||||
return res
|
||||
|
||||
def pytest_report_teststatus(rep):
|
||||
""" return shortletter and verbose word. """
|
||||
if 'xfail' in rep.keywords:
|
||||
if rep.skipped:
|
||||
return "xfailed", "x", "xfail"
|
||||
elif rep.failed:
|
||||
return "xpassed", "P", "xpass"
|
||||
|
||||
# called by the terminalreporter instance/plugin
|
||||
def pytest_terminal_summary(terminalreporter):
|
||||
tr = terminalreporter
|
||||
xfailed = tr.stats.get("xfailed")
|
||||
if xfailed:
|
||||
tr.write_sep("_", "expected failures")
|
||||
for event in xfailed:
|
||||
entry = event.longrepr.reprcrash
|
||||
key = entry.path, entry.lineno, entry.message
|
||||
reason = event.longrepr.reprcrash.message
|
||||
modpath = event.item.getmodpath(includemodule=True)
|
||||
#tr._tw.line("%s %s:%d: %s" %(modpath, entry.path, entry.lineno, entry.message))
|
||||
tr._tw.line("%s %s:%d: " %(modpath, entry.path, entry.lineno))
|
||||
|
||||
xpassed = terminalreporter.stats.get("xpassed")
|
||||
if xpassed:
|
||||
tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
|
||||
for event in xpassed:
|
||||
tr._tw.line("%s: xpassed" %(event.item,))
|
||||
|
||||
|
||||
# ===============================================================================
|
||||
#
|
||||
# plugin tests
|
||||
#
|
||||
# ===============================================================================
|
||||
|
||||
def test_xfail(testdir, linecomp):
|
||||
p = testdir.makepyfile(test_one="""
|
||||
import py
|
||||
@py.test.mark.xfail
|
||||
def test_this():
|
||||
assert 0
|
||||
|
||||
@py.test.mark.xfail
|
||||
def test_that():
|
||||
assert 1
|
||||
""")
|
||||
result = testdir.runpytest(p)
|
||||
extra = result.stdout.fnmatch_lines([
|
||||
"*expected failures*",
|
||||
"*test_one.test_this*test_one.py:4*",
|
||||
"*UNEXPECTEDLY PASSING*",
|
||||
"*test_that*",
|
||||
])
|
||||
assert result.ret == 1
|
||||
|
||||
.. _`pytest_xfail.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_xfail.py
|
||||
.. _`extend`: ../extend.html
|
||||
.. _`plugins`: index.html
|
||||
.. _`contact`: ../../contact.html
|
||||
.. _`checkout the py.test development version`: ../../download.html#checkout
|
|
@ -1,72 +0,0 @@
|
|||
|
||||
=====================================================
|
||||
Plugins related to Python test functions and programs
|
||||
=====================================================
|
||||
* `pytest_xfail`_ mark tests as expected-to-fail and report them separately.
|
||||
|
||||
* `pytest_figleaf`_ write and report coverage data with 'figleaf'.
|
||||
|
||||
* `pytest_monkeypatch`_ safely patch object attributes, dicts and environment variables.
|
||||
|
||||
* `pytest_iocapture`_ 'capsys' and 'capfd' funcargs for capturing stdout/stderror.
|
||||
|
||||
* `pytest_recwarn`_ helpers for asserting deprecation and other warnings.
|
||||
|
||||
|
||||
==============================================
|
||||
Plugins for other testing styles and languages
|
||||
==============================================
|
||||
* `pytest_unittest`_ automatically discover and run traditional "unittest.py" style tests.
|
||||
|
||||
* `pytest_doctest`_ collect and execute doctests from modules and test files.
|
||||
|
||||
* `pytest_restdoc`_ perform ReST syntax, local and remote reference tests on .rst/.txt files.
|
||||
|
||||
* `pytest_oejskit`_ Testing Javascript in real browsers
|
||||
|
||||
|
||||
=================================================
|
||||
Plugins for generic reporting and failure logging
|
||||
=================================================
|
||||
* `pytest_pocoo`_ submit failure information to paste.pocoo.org
|
||||
|
||||
* `pytest_resultlog`_ resultlog plugin for machine-readable logging of test results.
|
||||
|
||||
* `pytest_terminal`_ terminal reporting of the full testing process.
|
||||
|
||||
|
||||
=====================================
|
||||
internal plugins / core functionality
|
||||
=====================================
|
||||
* `pytest_pdb`_ interactive debugging with the Python Debugger.
|
||||
|
||||
* `pytest_keyword`_ py.test.mark / keyword plugin
|
||||
|
||||
* `pytest_hooklog`_ log invocations of extension hooks to a file.
|
||||
|
||||
* `pytest_runner`_ collect and run test items and create reports.
|
||||
|
||||
* `pytest_execnetcleanup`_ cleanup execnet gateways during test function runs.
|
||||
|
||||
* `pytest_pytester`_ funcargs and support code for testing py.test's own functionality.
|
||||
|
||||
|
||||
|
||||
.. _`pytest_xfail`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_xfail.py
|
||||
.. _`pytest_figleaf`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_figleaf.py
|
||||
.. _`pytest_monkeypatch`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_monkeypatch.py
|
||||
.. _`pytest_iocapture`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_iocapture.py
|
||||
.. _`pytest_recwarn`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_recwarn.py
|
||||
.. _`pytest_unittest`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_unittest.py
|
||||
.. _`pytest_doctest`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_doctest.py
|
||||
.. _`pytest_restdoc`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_restdoc.py
|
||||
.. _`pytest_oejskit`: http://bitbucket.org/pedronis/js-infrastructure/src/tip/pytest_jstests.py
|
||||
.. _`pytest_pocoo`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_pocoo.py
|
||||
.. _`pytest_resultlog`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_resultlog.py
|
||||
.. _`pytest_terminal`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_terminal.py
|
||||
.. _`pytest_pdb`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_pdb.py
|
||||
.. _`pytest_keyword`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_keyword.py
|
||||
.. _`pytest_hooklog`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_hooklog.py
|
||||
.. _`pytest_runner`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_runner.py
|
||||
.. _`pytest_execnetcleanup`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_execnetcleanup.py
|
||||
.. _`pytest_pytester`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_pytester.py
|
|
@ -5,54 +5,30 @@
|
|||
Quickstart
|
||||
==================
|
||||
|
||||
This document assumes basic python knowledge. If you have a
|
||||
`setuptools installation`_, install ``py.test`` by typing::
|
||||
.. _here: ../download.html#no-setuptools
|
||||
|
||||
This document assumes basic python knowledge and a working `setuptools
|
||||
installation`_ (otherwise see here_). You can install
|
||||
the py lib and py.test by typing::
|
||||
|
||||
easy_install -U py
|
||||
|
||||
For alternative installation methods please see the download_ page.
|
||||
|
||||
You should now have a ``py.test`` command line tool and can
|
||||
look at its documented cmdline options via this command::
|
||||
|
||||
py.test -h
|
||||
|
||||
Writing and running a test
|
||||
==========================
|
||||
|
||||
``py.test`` is the command line tool to run tests.
|
||||
Let's write a first test module by putting the following
|
||||
test function into a ``test_sample.py`` file::
|
||||
Now open a file ``test_sample.py`` file and put the following
|
||||
example content into it::
|
||||
|
||||
# content of test_sample.py
|
||||
def test_answer():
|
||||
assert 42 == 43
|
||||
|
||||
Now you can run the test by passing it as an argument::
|
||||
You can now run the test file like this::
|
||||
|
||||
py.test test_sample.py
|
||||
|
||||
What does happen here? ``py.test`` looks for functions and
|
||||
methods in the module that start with ``test_``. It then
|
||||
executes those tests. Assertions about test outcomes are
|
||||
done via the standard ``assert`` statement.
|
||||
|
||||
You can also use ``py.test`` to run all tests in a directory structure by
|
||||
invoking it without any arguments::
|
||||
|
||||
py.test
|
||||
|
||||
This will automatically collect and run any Python module whose filenames
|
||||
start with ``test_`` or ends with ``_test`` from the directory and any
|
||||
subdirectories, starting with the current directory, and run them. Each
|
||||
Python test module is inspected for test methods starting with ``test_``.
|
||||
|
||||
.. Organising your tests
|
||||
.. ---------------------------
|
||||
|
||||
Please refer to `features`_ for a walk through the basic features
|
||||
and will see an error report on the failing assert statement.
|
||||
For further information please refer to `features`_
|
||||
or checkout the `tutorials`_ page for more introduction material.
|
||||
|
||||
.. _`automatically collected`: features.html#autocollect
|
||||
.. _download: ../download.html
|
||||
.. _features: features.html
|
||||
.. _tutorials: talks.html
|
||||
|
|
|
@ -1,11 +1,13 @@
|
|||
=======
|
||||
py.test
|
||||
=======
|
||||
=======================================
|
||||
py.test documentation index
|
||||
=======================================
|
||||
|
||||
the project independent ``py.test`` command line tool helps you to:
|
||||
|
||||
* rapidly collect and run tests
|
||||
* use unit- or doctests, functional or integration tests
|
||||
* run unit- or doctests, functional or integration tests
|
||||
* distribute tests to multiple environments
|
||||
* local or global plugins for custom test scenarios and types
|
||||
* use local or global plugins for custom test types and setup
|
||||
|
||||
quickstart_: for getting started immediately.
|
||||
|
||||
|
@ -23,7 +25,7 @@ config_: ``conftest.py`` files and the config object
|
|||
|
||||
talks_: talk and tutorial slides
|
||||
|
||||
.. _`available plugins`: plugins.html
|
||||
.. _`available plugins`: plugin/index.html
|
||||
.. _talks: talks.html
|
||||
.. _quickstart: quickstart.html
|
||||
.. _features: features.html
|
||||
|
|
|
@ -7,7 +7,7 @@ plugins = [
|
|||
('Plugins related to Python test functions and programs',
|
||||
'xfail figleaf monkeypatch iocapture recwarn',),
|
||||
('Plugins for other testing styles and languages',
|
||||
'unittest doctest restdoc osjskit'),
|
||||
'unittest doctest oejskit restdoc'),
|
||||
('Plugins for generic reporting and failure logging',
|
||||
'pocoo resultlog terminal',),
|
||||
('internal plugins / core functionality',
|
||||
|
@ -16,91 +16,247 @@ plugins = [
|
|||
]
|
||||
|
||||
externals = {
|
||||
'osjskit': ('`pytest_oejskit`_ Testing Javascript in real browsers',
|
||||
'''
|
||||
jskit contains infrastructure and in particular a py.test plugin to enable running tests for JavaScript code inside browsers directly using py.test as the test driver. Running inside the browsers comes with some speed cost, on the other hand it means for example the code is tested against the real-word DOM implementations.
|
||||
'oejskit': 'run javascript tests in real life browsers',
|
||||
|
||||
The approach also enables to write integration tests such that the JavaScript code is tested against server-side Python code mocked as necessary. Any server-side framework that can already be exposed through WSGI (or for which a subset of WSGI can be written to accommodate the jskit own needs) can play along.
|
||||
|
||||
jskit also contains code to help modularizing JavaScript code which can be used to describe and track dependencies dynamically during development and that can help resolving them statically when deploying/packaging.
|
||||
|
||||
jskit depends on simplejson. It also uses MochiKit - of which it ships a version within itself for convenience - for its own working though in does not imposes its usage on tested code.
|
||||
|
||||
jskit was initially developed by Open End AB and is released under the MIT license.
|
||||
''', 'http://pypi.python.org/pypi/oejskit',
|
||||
('pytest_oejskit',
|
||||
'http://bitbucket.org/pedronis/js-infrastructure/src/tip/pytest_jstests.py',
|
||||
))}
|
||||
}
|
||||
|
||||
class ExternalDoc:
|
||||
def __init__(self, name):
|
||||
self.title, self.longdesc, self.url, sourcelink = externals[name]
|
||||
self.sourcelink = sourcelink
|
||||
|
||||
|
||||
def warn(*args):
|
||||
msg = " ".join(map(str, args))
|
||||
print >>sys.stderr, "WARN:", msg
|
||||
|
||||
class RestWriter:
|
||||
def __init__(self, target):
|
||||
self.target = py.path.local(target)
|
||||
self.links = []
|
||||
|
||||
def _getmsg(self, args):
|
||||
return " ".join(map(str, args))
|
||||
|
||||
def Print(self, *args, **kwargs):
|
||||
msg = self._getmsg(args)
|
||||
if 'indent' in kwargs:
|
||||
indent = kwargs['indent'] * " "
|
||||
lines = [(indent + x) for x in msg.split("\n")]
|
||||
msg = "\n".join(lines)
|
||||
self.out.write(msg)
|
||||
if not msg or msg[-1] != "\n":
|
||||
self.out.write("\n")
|
||||
self.out.flush()
|
||||
|
||||
def sourcecode(self, source):
|
||||
lines = str(source).split("\n")
|
||||
self.Print(".. sourcecode:: python")
|
||||
self.Print()
|
||||
for line in lines:
|
||||
self.Print(" ", line)
|
||||
|
||||
def _sep(self, separator, args):
|
||||
msg = self._getmsg(args)
|
||||
sep = len(msg) * separator
|
||||
self.Print()
|
||||
self.Print(msg)
|
||||
self.Print(sep)
|
||||
self.Print()
|
||||
|
||||
|
||||
def h1(self, *args):
|
||||
self._sep('=', args)
|
||||
|
||||
def h2(self, *args):
|
||||
self._sep('-', args)
|
||||
|
||||
def h3(self, *args):
|
||||
self._sep('+', args)
|
||||
|
||||
def li(self, *args):
|
||||
msg = self._getmsg(args)
|
||||
sep = "* %s" %(msg)
|
||||
self.Print(sep)
|
||||
|
||||
def dt(self, term):
|
||||
self.Print("``%s``" % term)
|
||||
|
||||
def dd(self, doc):
|
||||
self.Print(doc, indent=4)
|
||||
|
||||
def para(self, *args):
|
||||
msg = self._getmsg(args)
|
||||
self.Print(msg)
|
||||
|
||||
def add_internal_link(self, name, path):
|
||||
relpath = path.new(ext=".html").relto(self.target.dirpath())
|
||||
self.links.append((name, relpath))
|
||||
|
||||
def write_links(self):
|
||||
self.Print()
|
||||
for link in self.links:
|
||||
#warn(repr(self.link))
|
||||
self.Print(".. _`%s`: %s" % (link[0], link[1]))
|
||||
|
||||
def make(self, **kwargs):
|
||||
self.out = self.target.open("w")
|
||||
self.makerest(**kwargs)
|
||||
self.write_links()
|
||||
|
||||
self.out.close()
|
||||
print "wrote", self.target
|
||||
del self.out
|
||||
|
||||
class PluginOverview(RestWriter):
|
||||
def makerest(self, config):
|
||||
plugindir = py.path.local(py.__file__).dirpath("test", "plugin")
|
||||
for cat, specs in plugins:
|
||||
pluginlist = specs.split()
|
||||
self.h1(cat)
|
||||
for name in pluginlist:
|
||||
oneliner = externals.get(name, None)
|
||||
docpath = self.target.dirpath(name).new(ext=".txt")
|
||||
if oneliner is not None:
|
||||
htmlpath = docpath.new(ext='.html')
|
||||
self.para("%s_ %s" %(name, oneliner))
|
||||
self.add_internal_link(name, htmlpath)
|
||||
else:
|
||||
doc = PluginDoc(docpath)
|
||||
doc.make(config=config, name=name)
|
||||
self.add_internal_link(name, doc.target)
|
||||
self.para("%s_ %s" %(name, doc.oneliner))
|
||||
self.Print()
|
||||
|
||||
class HookSpec(RestWriter):
|
||||
|
||||
def makerest(self, config):
|
||||
module = config.pluginmanager.hook._hookspecs
|
||||
source = py.code.Source(module)
|
||||
self.h1("hook specification sourcecode")
|
||||
self.sourcecode(source)
|
||||
|
||||
class PluginDoc(RestWriter):
|
||||
def makerest(self, config, name):
|
||||
config.pluginmanager.import_plugin(name)
|
||||
plugin = config.pluginmanager.getplugin(name)
|
||||
assert plugin is not None, plugin
|
||||
|
||||
class PluginDoc:
|
||||
def __init__(self, plugin):
|
||||
self.plugin = plugin
|
||||
doc = plugin.__doc__.strip()
|
||||
i = doc.find("\n")
|
||||
if i == -1:
|
||||
title = doc
|
||||
longdesc = "XXX no long description available"
|
||||
oneliner = doc
|
||||
moduledoc = ""
|
||||
else:
|
||||
title = doc[:i].strip()
|
||||
longdesc = doc[i+1:].strip()
|
||||
purename = plugin.__name__.split(".")[-1].strip()
|
||||
self.title = "`%s`_ %s" %(purename, title)
|
||||
self.longdesc = longdesc
|
||||
self.sourcelink = (purename,
|
||||
"http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/" +
|
||||
purename + ".py")
|
||||
|
||||
def warn(msg):
|
||||
print >>sys.stderr, "WARNING:", msg
|
||||
oneliner = doc[:i].strip()
|
||||
moduledoc = doc[i+1:].strip()
|
||||
|
||||
|
||||
def makedoc(name):
|
||||
if name in externals:
|
||||
return ExternalDoc(name)
|
||||
config.pluginmanager.import_plugin(name)
|
||||
plugin = config.pluginmanager.getplugin(name)
|
||||
if plugin is None:
|
||||
return None
|
||||
return PluginDoc(plugin)
|
||||
self.name = plugin.__name__.split(".")[-1]
|
||||
self.oneliner = oneliner
|
||||
self.moduledoc = moduledoc
|
||||
|
||||
self.h1("%s plugin" % self.name) # : %s" %(self.name, self.oneliner))
|
||||
self.Print(self.oneliner)
|
||||
self.Print()
|
||||
|
||||
def header():
|
||||
#print "=" * WIDTH
|
||||
#print "list of available py.test plugins"
|
||||
#print "=" * WIDTH
|
||||
print
|
||||
self.Print(moduledoc)
|
||||
|
||||
self.emit_funcargs(plugin)
|
||||
self.emit_options(plugin)
|
||||
self.emit_source(plugin, config.hg_changeset)
|
||||
#self.sourcelink = (purename,
|
||||
# "http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/" +
|
||||
# purename + ".py")
|
||||
#
|
||||
def emit_source(self, plugin, hg_changeset):
|
||||
basename = py.path.local(plugin.__file__).basename
|
||||
if basename.endswith("pyc"):
|
||||
basename = basename[:-1]
|
||||
#self.para("`%s`_ source code" % basename)
|
||||
#self.links.append((basename,
|
||||
# "http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/" +
|
||||
# basename))
|
||||
self.h2("Getting and improving this plugin")
|
||||
self.para(py.code.Source("""
|
||||
Do you find the above documentation or the plugin itself lacking,
|
||||
not fit for what you need? Here is a **30 seconds guide**
|
||||
to get you started on improving the plugin:
|
||||
|
||||
1. Download `%s`_ plugin source code
|
||||
2. put it somewhere as ``%s`` into your import path
|
||||
3. a subsequent test run will now use your local version!
|
||||
|
||||
Further information: extend_ documentation, other plugins_ or contact_.
|
||||
""" % (basename, basename)))
|
||||
# your work appreciated if you offer back your version. In this case
|
||||
# it probably makes sense if you `checkout the py.test
|
||||
# development version`_ and apply your changes to the plugin
|
||||
# version in there.
|
||||
self.links.append((basename,
|
||||
"http://bitbucket.org/hpk42/py-trunk/raw/%s/"
|
||||
"py/test/plugin/%s" %(hg_changeset, basename)))
|
||||
self.links.append(('extend', '../extend.html'))
|
||||
self.links.append(('plugins', 'index.html'))
|
||||
self.links.append(('contact', '../../contact.html'))
|
||||
self.links.append(('checkout the py.test development version',
|
||||
'../../download.html#checkout'))
|
||||
|
||||
#self.h2("plugin source code")
|
||||
self.Print()
|
||||
self.para("For your convenience here is also an inlined version "
|
||||
"of ``%s``:" %basename)
|
||||
#self(or copy-paste from below)
|
||||
self.Print()
|
||||
self.sourcecode(py.code.Source(plugin))
|
||||
|
||||
def emit_funcargs(self, plugin):
|
||||
funcargfuncs = []
|
||||
prefix = "pytest_funcarg__"
|
||||
for name in vars(plugin):
|
||||
if name.startswith(prefix):
|
||||
funcargfuncs.append(getattr(plugin, name))
|
||||
if not funcargfuncs:
|
||||
return
|
||||
for func in funcargfuncs:
|
||||
argname = func.__name__[len(prefix):]
|
||||
self.Print(".. _`%s funcarg`:" % argname)
|
||||
self.Print()
|
||||
self.h2("the %r test function argument" % argname)
|
||||
if func.__doc__:
|
||||
doclines = func.__doc__.split("\n")
|
||||
source = py.code.Source("\n".join(doclines[1:]))
|
||||
source.lines.insert(0, doclines[0])
|
||||
self.para(str(source))
|
||||
else:
|
||||
self.para("XXX missing docstring")
|
||||
warn("missing docstring", func)
|
||||
|
||||
def emit_options(self, plugin):
|
||||
from py.__.test.parseopt import Parser
|
||||
options = []
|
||||
parser = Parser(processopt=options.append)
|
||||
if hasattr(plugin, 'pytest_addoption'):
|
||||
plugin.pytest_addoption(parser)
|
||||
if not options:
|
||||
return
|
||||
self.h2("command line options")
|
||||
self.Print()
|
||||
formatter = py.compat.optparse.IndentedHelpFormatter()
|
||||
for opt in options:
|
||||
switches = formatter.format_option_strings(opt)
|
||||
self.Print("``%s``" % switches)
|
||||
self.Print(opt.help, indent=4)
|
||||
|
||||
if __name__ == "__main__":
|
||||
config = py.test.config
|
||||
config.parse([])
|
||||
config.pluginmanager.do_configure(config)
|
||||
_config = py.test.config
|
||||
_config.parse([])
|
||||
_config.pluginmanager.do_configure(_config)
|
||||
|
||||
header()
|
||||
pydir = py.path.local(py.__file__).dirpath()
|
||||
|
||||
cmd = "hg tip --template '{node}'"
|
||||
old = pydir.dirpath().chdir()
|
||||
_config.hg_changeset = py.process.cmdexec(cmd).strip()
|
||||
|
||||
testdir = pydir.dirpath("doc", 'test')
|
||||
|
||||
links = []
|
||||
for cat, specs in plugins:
|
||||
pluginlist = specs.split()
|
||||
print len(cat) * "="
|
||||
print cat
|
||||
print len(cat) * "="
|
||||
for name in pluginlist:
|
||||
doc = makedoc(name)
|
||||
if doc is None:
|
||||
warn("skipping", name)
|
||||
continue
|
||||
print "* " + str(doc.title)
|
||||
#print len(doc.title) * "*"
|
||||
#print doc.longdesc
|
||||
links.append(doc.sourcelink)
|
||||
print
|
||||
print
|
||||
print
|
||||
for link in links:
|
||||
warn(repr(link))
|
||||
print ".. _`%s`: %s" % (link[0], link[1])
|
||||
ov = PluginOverview(testdir.join("plugin", "index.txt"))
|
||||
ov.make(config=_config)
|
||||
|
||||
ov = HookSpec(testdir.join("plugin", "hookspec.txt"))
|
||||
ov.make(config=_config)
|
||||
|
||||
|
|
|
@ -10,5 +10,5 @@ Generator = py.test.collect.Generator
|
|||
Function = py.test.collect.Function
|
||||
Instance = py.test.collect.Instance
|
||||
|
||||
pytest_plugins = "default runner terminal keyword xfail tmpdir execnetcleanup monkeypatch recwarn pdb".split()
|
||||
pytest_plugins = "default runner terminal keyword xfail tmpdir execnetcleanup monkeypatch recwarn pdb unittest".split()
|
||||
|
||||
|
|
|
@ -75,7 +75,7 @@ class DSession(Session):
|
|||
self.setup()
|
||||
exitstatus = self.loop(colitems)
|
||||
self.teardown()
|
||||
self.sessionfinishes()
|
||||
self.sessionfinishes(exitstatus=exitstatus)
|
||||
return exitstatus
|
||||
|
||||
def loop_once(self, loopstate):
|
||||
|
|
|
@ -103,6 +103,15 @@ class FuncargRequest:
|
|||
self._pyfuncitem.funcargs[argname] = self.getfuncargvalue(argname)
|
||||
|
||||
def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
|
||||
""" cache and return result of calling setup().
|
||||
|
||||
The scope and the ``extrakey`` determine the cache key.
|
||||
The scope also determines when teardown(result)
|
||||
will be called. valid scopes are:
|
||||
scope == 'function': when the single test function run finishes.
|
||||
scope == 'module': when tests in a different module are run
|
||||
scope == 'session': when tests of the session have run.
|
||||
"""
|
||||
if not hasattr(self.config, '_setupcache'):
|
||||
self.config._setupcache = {} # XXX weakref?
|
||||
cachekey = (self._getscopeitem(scope), extrakey)
|
||||
|
|
|
@ -1,5 +1,5 @@
|
|||
"""
|
||||
py.test plugin hooks
|
||||
hook specifications for py.test plugins
|
||||
"""
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
|
@ -9,15 +9,14 @@ py.test plugin hooks
|
|||
def pytest_addoption(parser):
|
||||
""" called before commandline parsing. """
|
||||
|
||||
def pytest_namespace():
|
||||
""" return dict of name->object which will get stored at py.test. namespace"""
|
||||
|
||||
def pytest_configure(config):
|
||||
""" called after command line options have been parsed.
|
||||
and all plugins and initial conftest files been loaded.
|
||||
``config`` provides access to all such configuration values.
|
||||
"""
|
||||
|
||||
def pytest_namespace(config):
|
||||
""" return dict of name->object to become available at py.test.*"""
|
||||
|
||||
def pytest_unconfigure(config):
|
||||
""" called before test process is exited. """
|
||||
|
||||
|
@ -94,11 +93,11 @@ def pytest_runtest_logreport(rep):
|
|||
def pytest_sessionstart(session):
|
||||
""" before session.main() is called. """
|
||||
|
||||
def pytest_sessionfinish(session, exitstatus, excrepr=None):
|
||||
def pytest_sessionfinish(session, exitstatus):
|
||||
""" whole test run finishes. """
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# generic reporting hooks (invoked from pytest_terminal)
|
||||
# hooks for influencing reporting (invoked from pytest_terminal)
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_report_teststatus(rep):
|
||||
|
@ -140,7 +139,7 @@ def pytest_looponfailinfo(failreports, rootdirs):
|
|||
|
||||
|
||||
# -------------------------------------------------------------------------
|
||||
# internal debugging hooks
|
||||
# error handling and internal debugging hooks
|
||||
# -------------------------------------------------------------------------
|
||||
|
||||
def pytest_plugin_registered(plugin):
|
||||
|
@ -152,5 +151,8 @@ def pytest_plugin_unregistered(plugin):
|
|||
def pytest_internalerror(excrepr):
|
||||
""" called for internal errors. """
|
||||
|
||||
def pytest_keyboard_interrupt(excinfo):
|
||||
""" called for keyboard interrupt. """
|
||||
|
||||
def pytest_trace(category, msg):
|
||||
""" called for debug info. """
|
||||
|
|
|
@ -57,7 +57,7 @@ class HookRecorder:
|
|||
def _makecallparser(self, method):
|
||||
name = method.__name__
|
||||
args, varargs, varkw, default = py.std.inspect.getargspec(method)
|
||||
if args[0] != "self":
|
||||
if args and args[0] != "self":
|
||||
args.insert(0, 'self')
|
||||
fspec = py.std.inspect.formatargspec(args, varargs, varkw, default)
|
||||
# we use exec because we want to have early type
|
||||
|
|
|
@ -1,5 +1,16 @@
|
|||
"""
|
||||
collect and execute doctests from modules and test files.
|
||||
|
||||
Usage
|
||||
-------------
|
||||
|
||||
By default all files matching the ``test_*.txt`` pattern will
|
||||
be run with the ``doctest`` module. If you issue::
|
||||
|
||||
py.test --doctest-modules
|
||||
|
||||
all python files in your projects will be doctest-run
|
||||
as well.
|
||||
"""
|
||||
|
||||
import py
|
||||
|
@ -9,6 +20,7 @@ def pytest_addoption(parser):
|
|||
group = parser.addgroup("doctest options")
|
||||
group.addoption("--doctest-modules",
|
||||
action="store_true", default=False,
|
||||
help="search all python files for doctests",
|
||||
dest="doctestmodules")
|
||||
|
||||
def pytest_collect_file(path, parent):
|
||||
|
|
|
@ -24,7 +24,7 @@ class Execnetcleanup:
|
|||
def pytest_sessionstart(self, session):
|
||||
self._gateways = []
|
||||
|
||||
def pytest_sessionfinish(self, session, exitstatus, excrepr=None):
|
||||
def pytest_sessionfinish(self, session, exitstatus):
|
||||
l = []
|
||||
for gw in self._gateways:
|
||||
gw.exit()
|
||||
|
|
|
@ -1,9 +1,6 @@
|
|||
"""
|
||||
write and report coverage data with 'figleaf'.
|
||||
|
||||
This plugin generates test coverage data or HTML files
|
||||
from running tests against a code base.
|
||||
|
||||
"""
|
||||
import py
|
||||
|
||||
|
@ -13,11 +10,11 @@ def pytest_addoption(parser):
|
|||
group = parser.addgroup('figleaf options')
|
||||
group.addoption('-F', action='store_true', default=False,
|
||||
dest = 'figleaf',
|
||||
help=('trace coverage with figleaf and write HTML '
|
||||
help=('trace python coverage with figleaf and write HTML '
|
||||
'for files below the current working dir'))
|
||||
group.addoption('--figleaf-data', action='store', default='.figleaf',
|
||||
dest='figleafdata',
|
||||
help='path coverage tracing file.')
|
||||
help='path to coverage tracing file.')
|
||||
group.addoption('--figleaf-html', action='store', default='html',
|
||||
dest='figleafhtml',
|
||||
help='path to the coverage html dir.')
|
||||
|
|
|
@ -1,22 +1,48 @@
|
|||
"""
|
||||
'capsys' and 'capfd' funcargs for capturing stdout/stderror.
|
||||
convenient capturing of writes to stdout/stderror streams
|
||||
and file descriptors.
|
||||
|
||||
Calling the reset() method of the capture funcargs gives
|
||||
a out/err tuple of strings representing the captured streams.
|
||||
You can call reset() multiple times each time getting
|
||||
the chunk of output that was captured between the invocations.
|
||||
Example Usage
|
||||
----------------------
|
||||
|
||||
You can use the `capsys funcarg`_ to capture writes
|
||||
to stdout and stderr streams by using it in a test
|
||||
likes this:
|
||||
|
||||
.. sourcecode:: python
|
||||
|
||||
def test_myoutput(capsys):
|
||||
print "hello"
|
||||
print >>sys.stderr, "world"
|
||||
out, err = capsys.reset()
|
||||
assert out == "hello\\n"
|
||||
assert err == "world\\n"
|
||||
print "next"
|
||||
out, err = capsys.reset()
|
||||
assert out == "next\\n"
|
||||
|
||||
The ``reset()`` call returns a tuple and will restart
|
||||
capturing so that you can successively check for output.
|
||||
After the test function finishes the original streams
|
||||
will be restored.
|
||||
"""
|
||||
|
||||
import py
|
||||
|
||||
def pytest_funcarg__capsys(request):
|
||||
""" capture writes to sys.stdout/sys.stderr. """
|
||||
"""captures writes to sys.stdout/sys.stderr and makes
|
||||
them available successively via a ``capsys.reset()`` method
|
||||
which returns a ``(out, err)`` tuple of captured strings.
|
||||
"""
|
||||
capture = Capture(py.io.StdCapture)
|
||||
request.addfinalizer(capture.finalize)
|
||||
return capture
|
||||
|
||||
def pytest_funcarg__capfd(request):
|
||||
""" capture writes to filedescriptors 1 and 2"""
|
||||
"""captures writes to file descriptors 1 and 2 and makes
|
||||
them available successively via a ``capsys.reset()`` method
|
||||
which returns a ``(out, err)`` tuple of captured strings.
|
||||
"""
|
||||
capture = Capture(py.io.StdCaptureFD)
|
||||
request.addfinalizer(capture.finalize)
|
||||
return capture
|
||||
|
|
|
@ -3,7 +3,7 @@
|
|||
"""
|
||||
import py
|
||||
|
||||
def pytest_namespace(config):
|
||||
def pytest_namespace():
|
||||
mark = KeywordDecorator({})
|
||||
return {'mark': mark}
|
||||
|
||||
|
|
|
@ -1,17 +1,43 @@
|
|||
"""
|
||||
safely patch object attributes, dicts and environment variables.
|
||||
|
||||
the "monkeypatch" funcarg has three helper functions:
|
||||
Usage
|
||||
----------------
|
||||
|
||||
monkeypatch.setattr(obj, name, value)
|
||||
monkeypatch.setitem(obj, name, value)
|
||||
monkeypatch.setenv(name, value)
|
||||
Use the `monkeypatch funcarg`_ to safely patch the environment
|
||||
variables, object attributes or dictionaries. For example, if you want
|
||||
to set the environment variable ``ENV1`` and patch the
|
||||
``os.path.abspath`` function to return a particular value during a test
|
||||
function execution you can write it down like this:
|
||||
|
||||
After the test has run modifications will be undone.
|
||||
.. sourcecode:: python
|
||||
|
||||
def test_mytest(monkeypatch):
|
||||
monkeypatch.setenv('ENV1', 'myval')
|
||||
monkeypatch.setattr(os.path, 'abspath', lambda x: '/')
|
||||
... # your test code
|
||||
|
||||
The function argument will do the modifications and memorize the
|
||||
old state. After the test function finished execution all
|
||||
modifications will be reverted. See the `monkeypatch blog post`_
|
||||
for an extensive discussion.
|
||||
|
||||
.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
|
||||
"""
|
||||
|
||||
import os
|
||||
|
||||
def pytest_funcarg__monkeypatch(request):
|
||||
"""The returned ``monkeypatch`` funcarg provides three
|
||||
helper methods to modify objects, dictionaries or os.environ::
|
||||
|
||||
monkeypatch.setattr(obj, name, value)
|
||||
monkeypatch.setitem(mapping, name, value)
|
||||
monkeypatch.setenv(name, value)
|
||||
|
||||
All such modifications will be undone when the requesting
|
||||
test function finished its execution.
|
||||
"""
|
||||
monkeypatch = MonkeyPatch()
|
||||
request.addfinalizer(monkeypatch.finalize)
|
||||
return monkeypatch
|
||||
|
|
|
@ -16,7 +16,7 @@ def pytest_funcarg__recwarn(request):
|
|||
request.addfinalizer(warnings.finalize)
|
||||
return warnings
|
||||
|
||||
def pytest_namespace(config):
|
||||
def pytest_namespace():
|
||||
return {'deprecated_call': deprecated_call}
|
||||
|
||||
def deprecated_call(func, *args, **kwargs):
|
||||
|
|
|
@ -19,11 +19,10 @@ def pytest_addoption(parser):
|
|||
def pytest_configure(config):
|
||||
config._setupstate = SetupState()
|
||||
|
||||
def pytest_sessionfinish(session, exitstatus, excrepr=None):
|
||||
def pytest_sessionfinish(session, exitstatus):
|
||||
# XXX see above
|
||||
if hasattr(session.config, '_setupstate'):
|
||||
session.config._setupstate.teardown_all()
|
||||
|
||||
# prevent logging module atexit handler from choking on
|
||||
# its attempt to close already closed streams
|
||||
# see http://bugs.python.org/issue6333
|
||||
|
|
|
@ -231,20 +231,29 @@ class TerminalReporter:
|
|||
for i, testarg in py.builtin.enumerate(self.config.args):
|
||||
self.write_line("test object %d: %s" %(i+1, testarg))
|
||||
|
||||
def pytest_sessionfinish(self, __call__, session, exitstatus, excrepr=None):
|
||||
def pytest_sessionfinish(self, __call__, session, exitstatus):
|
||||
__call__.execute()
|
||||
self._tw.line("")
|
||||
if exitstatus in (0, 1, 2):
|
||||
self.summary_failures()
|
||||
self.summary_skips()
|
||||
self.config.hook.pytest_terminal_summary(terminalreporter=self)
|
||||
if excrepr is not None:
|
||||
self.summary_final_exc(excrepr)
|
||||
if exitstatus == 2:
|
||||
self.write_sep("!", "KEYBOARD INTERRUPT")
|
||||
self._report_keyboardinterrupt()
|
||||
self.summary_deselected()
|
||||
self.summary_stats()
|
||||
|
||||
def pytest_keyboard_interrupt(self, excinfo):
|
||||
self._keyboardinterrupt_memo = excinfo.getrepr()
|
||||
|
||||
def _report_keyboardinterrupt(self):
|
||||
self.write_sep("!", "KEYBOARD INTERRUPT")
|
||||
excrepr = self._keyboardinterrupt_memo
|
||||
if self.config.option.verbose:
|
||||
excrepr.toterminal(self._tw)
|
||||
else:
|
||||
excrepr.reprcrash.toterminal(self._tw)
|
||||
|
||||
def pytest_looponfailinfo(self, failreports, rootdirs):
|
||||
if failreports:
|
||||
self.write_sep("#", "LOOPONFAILING", red=True)
|
||||
|
@ -334,14 +343,6 @@ class TerminalReporter:
|
|||
for num, fspath, lineno, reason in fskips:
|
||||
self._tw.line("%s:%d: [%d] %s" %(fspath, lineno, num, reason))
|
||||
|
||||
def summary_final_exc(self, excrepr):
|
||||
self.write_sep("!")
|
||||
if self.config.option.verbose:
|
||||
excrepr.toterminal(self._tw)
|
||||
else:
|
||||
excrepr.reprcrash.toterminal(self._tw)
|
||||
|
||||
|
||||
class CollectonlyReporter:
|
||||
INDENT = " "
|
||||
|
||||
|
@ -373,7 +374,7 @@ class CollectonlyReporter:
|
|||
self._failed.append(rep)
|
||||
self.indent = self.indent[:-len(self.INDENT)]
|
||||
|
||||
def pytest_sessionfinish(self, session, exitstatus, excrepr=None):
|
||||
def pytest_sessionfinish(self, session, exitstatus):
|
||||
if self._failed:
|
||||
self.out.sep("!", "collection failures")
|
||||
for rep in self._failed:
|
||||
|
@ -398,375 +399,3 @@ def repr_pythonversion(v=None):
|
|||
except (TypeError, ValueError):
|
||||
return str(v)
|
||||
|
||||
# ===============================================================================
|
||||
#
|
||||
# plugin tests
|
||||
#
|
||||
# ===============================================================================
|
||||
|
||||
import pytest_runner as runner # XXX
|
||||
|
||||
def basic_run_report(item):
|
||||
return runner.call_and_report(item, "call", log=False)
|
||||
|
||||
class TestTerminal:
|
||||
def test_pass_skip_fail(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("""
|
||||
import py
|
||||
def test_ok():
|
||||
pass
|
||||
def test_skip():
|
||||
py.test.skip("xx")
|
||||
def test_func():
|
||||
assert 0
|
||||
""")
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
rep.config.pluginmanager.register(rep)
|
||||
rep.config.hook.pytest_sessionstart(session=testdir.session)
|
||||
|
||||
for item in testdir.genitems([modcol]):
|
||||
ev = basic_run_report(item)
|
||||
rep.config.hook.pytest_runtest_logreport(rep=ev)
|
||||
linecomp.assert_contains_lines([
|
||||
"*test_pass_skip_fail.py .sF"
|
||||
])
|
||||
rep.config.hook.pytest_sessionfinish(session=testdir.session, exitstatus=1)
|
||||
linecomp.assert_contains_lines([
|
||||
" def test_func():",
|
||||
"> assert 0",
|
||||
"E assert 0",
|
||||
])
|
||||
|
||||
def test_pass_skip_fail_verbose(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("""
|
||||
import py
|
||||
def test_ok():
|
||||
pass
|
||||
def test_skip():
|
||||
py.test.skip("xx")
|
||||
def test_func():
|
||||
assert 0
|
||||
""", configargs=("-v",))
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
rep.config.pluginmanager.register(rep)
|
||||
rep.config.hook.pytest_sessionstart(session=testdir.session)
|
||||
items = modcol.collect()
|
||||
rep.config.option.debug = True #
|
||||
for item in items:
|
||||
rep.config.hook.pytest_itemstart(item=item, node=None)
|
||||
s = linecomp.stringio.getvalue().strip()
|
||||
assert s.endswith(item.name)
|
||||
rep.config.hook.pytest_runtest_logreport(rep=basic_run_report(item))
|
||||
|
||||
linecomp.assert_contains_lines([
|
||||
"*test_pass_skip_fail_verbose.py:2: *test_ok*PASS*",
|
||||
"*test_pass_skip_fail_verbose.py:4: *test_skip*SKIP*",
|
||||
"*test_pass_skip_fail_verbose.py:6: *test_func*FAIL*",
|
||||
])
|
||||
rep.config.hook.pytest_sessionfinish(session=testdir.session, exitstatus=1)
|
||||
linecomp.assert_contains_lines([
|
||||
" def test_func():",
|
||||
"> assert 0",
|
||||
"E assert 0",
|
||||
])
|
||||
|
||||
def test_collect_fail(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("import xyz")
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
rep.config.pluginmanager.register(rep)
|
||||
rep.config.hook.pytest_sessionstart(session=testdir.session)
|
||||
l = list(testdir.genitems([modcol]))
|
||||
assert len(l) == 0
|
||||
linecomp.assert_contains_lines([
|
||||
"*test_collect_fail.py F*"
|
||||
])
|
||||
rep.config.hook.pytest_sessionfinish(session=testdir.session, exitstatus=1)
|
||||
linecomp.assert_contains_lines([
|
||||
"> import xyz",
|
||||
"E ImportError: No module named xyz"
|
||||
])
|
||||
|
||||
def test_internalerror(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("def test_one(): pass")
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
excinfo = py.test.raises(ValueError, "raise ValueError('hello')")
|
||||
rep.pytest_internalerror(excinfo.getrepr())
|
||||
linecomp.assert_contains_lines([
|
||||
"INTERNALERROR> *raise ValueError*"
|
||||
])
|
||||
|
||||
def test_gwmanage_events(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("""
|
||||
def test_one():
|
||||
pass
|
||||
""", configargs=("-v",))
|
||||
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
class gw1:
|
||||
id = "X1"
|
||||
spec = py.execnet.XSpec("popen")
|
||||
class gw2:
|
||||
id = "X2"
|
||||
spec = py.execnet.XSpec("popen")
|
||||
class rinfo:
|
||||
version_info = (2, 5, 1, 'final', 0)
|
||||
executable = "hello"
|
||||
platform = "xyz"
|
||||
cwd = "qwe"
|
||||
|
||||
rep.pyexecnet_gwmanage_newgateway(gw1, rinfo)
|
||||
linecomp.assert_contains_lines([
|
||||
"X1*popen*xyz*2.5*"
|
||||
])
|
||||
|
||||
rep.pyexecnet_gwmanage_rsyncstart(source="hello", gateways=[gw1, gw2])
|
||||
linecomp.assert_contains_lines([
|
||||
"rsyncstart: hello -> X1, X2"
|
||||
])
|
||||
rep.pyexecnet_gwmanage_rsyncfinish(source="hello", gateways=[gw1, gw2])
|
||||
linecomp.assert_contains_lines([
|
||||
"rsyncfinish: hello -> X1, X2"
|
||||
])
|
||||
|
||||
def test_writeline(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("def test_one(): pass")
|
||||
stringio = py.std.cStringIO.StringIO()
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
rep.write_fspath_result(py.path.local("xy.py"), '.')
|
||||
rep.write_line("hello world")
|
||||
lines = linecomp.stringio.getvalue().split('\n')
|
||||
assert not lines[0]
|
||||
assert lines[1].endswith("xy.py .")
|
||||
assert lines[2] == "hello world"
|
||||
|
||||
def test_looponfailreport(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("""
|
||||
def test_fail():
|
||||
assert 0
|
||||
def test_fail2():
|
||||
raise ValueError()
|
||||
""")
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
reports = [basic_run_report(x) for x in modcol.collect()]
|
||||
rep.pytest_looponfailinfo(reports, [modcol.config.topdir])
|
||||
linecomp.assert_contains_lines([
|
||||
"*test_looponfailreport.py:2: assert 0",
|
||||
"*test_looponfailreport.py:4: ValueError*",
|
||||
"*waiting*",
|
||||
"*%s*" % (modcol.config.topdir),
|
||||
])
|
||||
|
||||
def test_tb_option(self, testdir, linecomp):
|
||||
# XXX usage of testdir
|
||||
for tbopt in ["long", "short", "no"]:
|
||||
print 'testing --tb=%s...' % tbopt
|
||||
modcol = testdir.getmodulecol("""
|
||||
import py
|
||||
def g():
|
||||
raise IndexError
|
||||
def test_func():
|
||||
print 6*7
|
||||
g() # --calling--
|
||||
""", configargs=("--tb=%s" % tbopt,))
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
rep.config.pluginmanager.register(rep)
|
||||
rep.config.hook.pytest_sessionstart(session=testdir.session)
|
||||
for item in testdir.genitems([modcol]):
|
||||
rep.config.hook.pytest_runtest_logreport(
|
||||
rep=basic_run_report(item))
|
||||
rep.config.hook.pytest_sessionfinish(session=testdir.session, exitstatus=1)
|
||||
s = linecomp.stringio.getvalue()
|
||||
if tbopt == "long":
|
||||
print s
|
||||
assert 'print 6*7' in s
|
||||
else:
|
||||
assert 'print 6*7' not in s
|
||||
if tbopt != "no":
|
||||
assert '--calling--' in s
|
||||
assert 'IndexError' in s
|
||||
else:
|
||||
assert 'FAILURES' not in s
|
||||
assert '--calling--' not in s
|
||||
assert 'IndexError' not in s
|
||||
linecomp.stringio.truncate(0)
|
||||
|
||||
def test_show_path_before_running_test(self, testdir, linecomp):
|
||||
item = testdir.getitem("def test_func(): pass")
|
||||
tr = TerminalReporter(item.config, file=linecomp.stringio)
|
||||
item.config.pluginmanager.register(tr)
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*test_show_path_before_running_test.py*"
|
||||
])
|
||||
|
||||
def test_itemreport_reportinfo(self, testdir, linecomp):
|
||||
testdir.makeconftest("""
|
||||
import py
|
||||
class Function(py.test.collect.Function):
|
||||
def reportinfo(self):
|
||||
return "ABCDE", 42, "custom"
|
||||
""")
|
||||
item = testdir.getitem("def test_func(): pass")
|
||||
tr = TerminalReporter(item.config, file=linecomp.stringio)
|
||||
item.config.pluginmanager.register(tr)
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*ABCDE "
|
||||
])
|
||||
tr.config.option.verbose = True
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*ABCDE:43: custom*"
|
||||
])
|
||||
|
||||
def test_itemreport_pytest_report_iteminfo(self, testdir, linecomp):
|
||||
item = testdir.getitem("def test_func(): pass")
|
||||
class Plugin:
|
||||
def pytest_report_iteminfo(self, item):
|
||||
return "FGHJ", 42, "custom"
|
||||
item.config.pluginmanager.register(Plugin())
|
||||
tr = TerminalReporter(item.config, file=linecomp.stringio)
|
||||
item.config.pluginmanager.register(tr)
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*FGHJ "
|
||||
])
|
||||
tr.config.option.verbose = True
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*FGHJ:43: custom*"
|
||||
])
|
||||
|
||||
|
||||
def pseudo_keyboard_interrupt(self, testdir, linecomp, verbose=False):
|
||||
modcol = testdir.getmodulecol("""
|
||||
def test_foobar():
|
||||
assert 0
|
||||
def test_spamegg():
|
||||
import py; py.test.skip('skip me please!')
|
||||
def test_interrupt_me():
|
||||
raise KeyboardInterrupt # simulating the user
|
||||
""", configargs=("-v",)*verbose)
|
||||
#""", configargs=("--showskipsummary",) + ("-v",)*verbose)
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
modcol.config.hook.pytest_sessionstart(session=testdir.session)
|
||||
try:
|
||||
for item in testdir.genitems([modcol]):
|
||||
modcol.config.hook.pytest_runtest_logreport(
|
||||
rep=basic_run_report(item))
|
||||
except KeyboardInterrupt:
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
else:
|
||||
py.test.fail("no KeyboardInterrupt??")
|
||||
s = linecomp.stringio.getvalue()
|
||||
if not verbose:
|
||||
assert s.find("_keyboard_interrupt.py Fs") != -1
|
||||
modcol.config.hook.pytest_sessionfinish(
|
||||
session=testdir.session, exitstatus=2, excrepr=excinfo.getrepr())
|
||||
text = linecomp.stringio.getvalue()
|
||||
linecomp.assert_contains_lines([
|
||||
" def test_foobar():",
|
||||
"> assert 0",
|
||||
"E assert 0",
|
||||
])
|
||||
#assert "Skipped: 'skip me please!'" in text
|
||||
assert "_keyboard_interrupt.py:6: KeyboardInterrupt" in text
|
||||
see_details = "raise KeyboardInterrupt # simulating the user" in text
|
||||
assert see_details == verbose
|
||||
|
||||
def test_keyboard_interrupt(self, testdir, linecomp):
|
||||
self.pseudo_keyboard_interrupt(testdir, linecomp)
|
||||
|
||||
def test_verbose_keyboard_interrupt(self, testdir, linecomp):
|
||||
self.pseudo_keyboard_interrupt(testdir, linecomp, verbose=True)
|
||||
|
||||
def test_skip_reasons_folding(self):
|
||||
class longrepr:
|
||||
class reprcrash:
|
||||
path = 'xyz'
|
||||
lineno = 3
|
||||
message = "justso"
|
||||
|
||||
ev1 = runner.CollectReport(None, None)
|
||||
ev1.when = "execute"
|
||||
ev1.skipped = True
|
||||
ev1.longrepr = longrepr
|
||||
|
||||
ev2 = runner.ItemTestReport(None, excinfo=longrepr)
|
||||
ev2.skipped = True
|
||||
|
||||
l = folded_skips([ev1, ev2])
|
||||
assert len(l) == 1
|
||||
num, fspath, lineno, reason = l[0]
|
||||
assert num == 2
|
||||
assert fspath == longrepr.reprcrash.path
|
||||
assert lineno == longrepr.reprcrash.lineno
|
||||
assert reason == longrepr.reprcrash.message
|
||||
|
||||
class TestCollectonly:
|
||||
def test_collectonly_basic(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
indent = rep.indent
|
||||
rep.config.hook.pytest_collectstart(collector=modcol)
|
||||
linecomp.assert_contains_lines([
|
||||
"<Module 'test_collectonly_basic.py'>"
|
||||
])
|
||||
item = modcol.join("test_func")
|
||||
rep.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
" <Function 'test_func'>",
|
||||
])
|
||||
rep.config.hook.pytest_collectreport(
|
||||
rep=runner.CollectReport(modcol, [], excinfo=None))
|
||||
assert rep.indent == indent
|
||||
|
||||
def test_collectonly_skipped_module(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
import py
|
||||
py.test.skip("nomod")
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
cols = list(testdir.genitems([modcol]))
|
||||
assert len(cols) == 0
|
||||
linecomp.assert_contains_lines("""
|
||||
<Module 'test_collectonly_skipped_module.py'>
|
||||
!!! Skipped: 'nomod' !!!
|
||||
""")
|
||||
|
||||
def test_collectonly_failed_module(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
raise ValueError(0)
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
cols = list(testdir.genitems([modcol]))
|
||||
assert len(cols) == 0
|
||||
linecomp.assert_contains_lines("""
|
||||
<Module 'test_collectonly_failed_module.py'>
|
||||
!!! ValueError: 0 !!!
|
||||
""")
|
||||
|
||||
def test_collectonly_fatal(self, testdir):
|
||||
p1 = testdir.makeconftest("""
|
||||
def pytest_collectstart(collector):
|
||||
assert 0, "urgs"
|
||||
""")
|
||||
result = testdir.runpytest("--collectonly")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*INTERNAL*args*"
|
||||
])
|
||||
assert result.ret == 3
|
||||
|
||||
def test_repr_python_version(monkeypatch):
|
||||
monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0))
|
||||
assert repr_pythonversion() == "2.5.1-final-0"
|
||||
py.std.sys.version_info = x = (2,3)
|
||||
assert repr_pythonversion() == str(x)
|
||||
|
||||
|
|
|
@ -1,19 +1,24 @@
|
|||
"""
|
||||
automatically discover and run traditional "unittest.py" style tests.
|
||||
|
||||
you can mix unittest TestCase subclasses and
|
||||
py.test style tests in one test module.
|
||||
Usage
|
||||
----------------
|
||||
|
||||
XXX consider user-specified test_suite()
|
||||
This plugin collects and runs Python `unittest.py style`_ tests.
|
||||
It will automatically collect ``unittest.TestCase`` subclasses
|
||||
and their ``test`` methods from the test modules of a project
|
||||
(usually following the ``test_*.py`` pattern).
|
||||
|
||||
this code is somewhat derived from Guido Wesdorps
|
||||
|
||||
http://johnnydebris.net/svn/projects/py_unittest
|
||||
This plugin is enabled by default.
|
||||
|
||||
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
|
||||
"""
|
||||
import py
|
||||
import sys
|
||||
|
||||
def pytest_pycollect_makeitem(collector, name, obj):
|
||||
if 'unittest' not in sys.modules:
|
||||
return # nobody could have possibly derived a subclass
|
||||
if py.std.inspect.isclass(obj) and issubclass(obj, py.std.unittest.TestCase):
|
||||
return UnitTestCase(name, parent=collector)
|
||||
|
||||
|
|
|
@ -1,13 +1,21 @@
|
|||
"""
|
||||
mark tests as expected-to-fail and report them separately.
|
||||
mark python tests as expected-to-fail and report them separately.
|
||||
|
||||
example::
|
||||
usage
|
||||
------------
|
||||
|
||||
Use the generic mark decorator to add the 'xfail' keyword to your
|
||||
test function::
|
||||
|
||||
@py.test.mark.xfail
|
||||
def test_hello():
|
||||
...
|
||||
assert 0
|
||||
|
||||
This test will be executed but no traceback will be reported
|
||||
when it fails. Instead terminal reporting will list it in the
|
||||
"expected to fail" section or "unexpectedly passing" section.
|
||||
"""
|
||||
|
||||
import py
|
||||
|
||||
pytest_plugins = ['keyword']
|
||||
|
|
|
@ -0,0 +1,331 @@
|
|||
"""
|
||||
terminal reporting of the full testing process.
|
||||
"""
|
||||
import py
|
||||
import sys
|
||||
|
||||
# ===============================================================================
|
||||
# plugin tests
|
||||
#
|
||||
# ===============================================================================
|
||||
|
||||
import pytest_runner as runner # XXX
|
||||
from pytest_terminal import TerminalReporter, CollectonlyReporter
|
||||
from pytest_terminal import repr_pythonversion, folded_skips
|
||||
|
||||
def basic_run_report(item):
|
||||
return runner.call_and_report(item, "call", log=False)
|
||||
|
||||
class Option:
|
||||
def __init__(self, verbose=False):
|
||||
self.verbose = verbose
|
||||
def _getcmdargs(self):
|
||||
l = []
|
||||
if self.verbose:
|
||||
l.append('-v')
|
||||
return l
|
||||
def _getcmdstring(self):
|
||||
return " ".join(self._getcmdargs())
|
||||
|
||||
def pytest_generate_tests(metafunc):
|
||||
if "option" in metafunc.funcargnames:
|
||||
metafunc.addcall(
|
||||
id="default",
|
||||
funcargs={'option': Option(verbose=False)}
|
||||
)
|
||||
metafunc.addcall(
|
||||
id="verbose",
|
||||
funcargs={'option': Option(verbose=True)}
|
||||
)
|
||||
|
||||
class TestTerminal:
|
||||
def test_pass_skip_fail(self, testdir, option):
|
||||
p = testdir.makepyfile("""
|
||||
import py
|
||||
def test_ok():
|
||||
pass
|
||||
def test_skip():
|
||||
py.test.skip("xx")
|
||||
def test_func():
|
||||
assert 0
|
||||
""")
|
||||
result = testdir.runpytest(option._getcmdstring())
|
||||
if option.verbose:
|
||||
result.stdout.fnmatch_lines([
|
||||
"*test_pass_skip_fail.py:2: *test_ok*PASS*",
|
||||
"*test_pass_skip_fail.py:4: *test_skip*SKIP*",
|
||||
"*test_pass_skip_fail.py:6: *test_func*FAIL*",
|
||||
])
|
||||
else:
|
||||
result.stdout.fnmatch_lines([
|
||||
"*test_pass_skip_fail.py .sF"
|
||||
])
|
||||
result.stdout.fnmatch_lines([
|
||||
" def test_func():",
|
||||
"> assert 0",
|
||||
"E assert 0",
|
||||
])
|
||||
|
||||
def test_collect_fail(self, testdir, option):
|
||||
p = testdir.makepyfile("import xyz")
|
||||
result = testdir.runpytest(option._getcmdstring())
|
||||
result.stdout.fnmatch_lines([
|
||||
"*test_collect_fail.py F*",
|
||||
"> import xyz",
|
||||
"E ImportError: No module named xyz",
|
||||
])
|
||||
|
||||
def test_internalerror(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("def test_one(): pass")
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
excinfo = py.test.raises(ValueError, "raise ValueError('hello')")
|
||||
rep.pytest_internalerror(excinfo.getrepr())
|
||||
linecomp.assert_contains_lines([
|
||||
"INTERNALERROR> *raise ValueError*"
|
||||
])
|
||||
|
||||
def test_gwmanage_events(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("""
|
||||
def test_one():
|
||||
pass
|
||||
""", configargs=("-v",))
|
||||
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
class gw1:
|
||||
id = "X1"
|
||||
spec = py.execnet.XSpec("popen")
|
||||
class gw2:
|
||||
id = "X2"
|
||||
spec = py.execnet.XSpec("popen")
|
||||
class rinfo:
|
||||
version_info = (2, 5, 1, 'final', 0)
|
||||
executable = "hello"
|
||||
platform = "xyz"
|
||||
cwd = "qwe"
|
||||
|
||||
rep.pyexecnet_gwmanage_newgateway(gw1, rinfo)
|
||||
linecomp.assert_contains_lines([
|
||||
"X1*popen*xyz*2.5*"
|
||||
])
|
||||
|
||||
rep.pyexecnet_gwmanage_rsyncstart(source="hello", gateways=[gw1, gw2])
|
||||
linecomp.assert_contains_lines([
|
||||
"rsyncstart: hello -> X1, X2"
|
||||
])
|
||||
rep.pyexecnet_gwmanage_rsyncfinish(source="hello", gateways=[gw1, gw2])
|
||||
linecomp.assert_contains_lines([
|
||||
"rsyncfinish: hello -> X1, X2"
|
||||
])
|
||||
|
||||
def test_writeline(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("def test_one(): pass")
|
||||
stringio = py.std.cStringIO.StringIO()
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
rep.write_fspath_result(py.path.local("xy.py"), '.')
|
||||
rep.write_line("hello world")
|
||||
lines = linecomp.stringio.getvalue().split('\n')
|
||||
assert not lines[0]
|
||||
assert lines[1].endswith("xy.py .")
|
||||
assert lines[2] == "hello world"
|
||||
|
||||
def test_looponfailreport(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol("""
|
||||
def test_fail():
|
||||
assert 0
|
||||
def test_fail2():
|
||||
raise ValueError()
|
||||
""")
|
||||
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
|
||||
reports = [basic_run_report(x) for x in modcol.collect()]
|
||||
rep.pytest_looponfailinfo(reports, [modcol.config.topdir])
|
||||
linecomp.assert_contains_lines([
|
||||
"*test_looponfailreport.py:2: assert 0",
|
||||
"*test_looponfailreport.py:4: ValueError*",
|
||||
"*waiting*",
|
||||
"*%s*" % (modcol.config.topdir),
|
||||
])
|
||||
|
||||
def test_tb_option(self, testdir, option):
|
||||
p = testdir.makepyfile("""
|
||||
import py
|
||||
def g():
|
||||
raise IndexError
|
||||
def test_func():
|
||||
print 6*7
|
||||
g() # --calling--
|
||||
""")
|
||||
for tbopt in ["long", "short", "no"]:
|
||||
print 'testing --tb=%s...' % tbopt
|
||||
result = testdir.runpytest('--tb=%s' % tbopt)
|
||||
s = result.stdout.str()
|
||||
if tbopt == "long":
|
||||
assert 'print 6*7' in s
|
||||
else:
|
||||
assert 'print 6*7' not in s
|
||||
if tbopt != "no":
|
||||
assert '--calling--' in s
|
||||
assert 'IndexError' in s
|
||||
else:
|
||||
assert 'FAILURES' not in s
|
||||
assert '--calling--' not in s
|
||||
assert 'IndexError' not in s
|
||||
|
||||
def test_show_path_before_running_test(self, testdir, linecomp):
|
||||
item = testdir.getitem("def test_func(): pass")
|
||||
tr = TerminalReporter(item.config, file=linecomp.stringio)
|
||||
item.config.pluginmanager.register(tr)
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*test_show_path_before_running_test.py*"
|
||||
])
|
||||
|
||||
def test_itemreport_reportinfo(self, testdir, linecomp):
|
||||
testdir.makeconftest("""
|
||||
import py
|
||||
class Function(py.test.collect.Function):
|
||||
def reportinfo(self):
|
||||
return "ABCDE", 42, "custom"
|
||||
""")
|
||||
item = testdir.getitem("def test_func(): pass")
|
||||
tr = TerminalReporter(item.config, file=linecomp.stringio)
|
||||
item.config.pluginmanager.register(tr)
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*ABCDE "
|
||||
])
|
||||
tr.config.option.verbose = True
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*ABCDE:43: custom*"
|
||||
])
|
||||
|
||||
def test_itemreport_pytest_report_iteminfo(self, testdir, linecomp):
|
||||
item = testdir.getitem("def test_func(): pass")
|
||||
class Plugin:
|
||||
def pytest_report_iteminfo(self, item):
|
||||
return "FGHJ", 42, "custom"
|
||||
item.config.pluginmanager.register(Plugin())
|
||||
tr = TerminalReporter(item.config, file=linecomp.stringio)
|
||||
item.config.pluginmanager.register(tr)
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*FGHJ "
|
||||
])
|
||||
tr.config.option.verbose = True
|
||||
tr.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
"*FGHJ:43: custom*"
|
||||
])
|
||||
|
||||
|
||||
def test_keyboard_interrupt(self, testdir, option):
|
||||
p = testdir.makepyfile("""
|
||||
def test_foobar():
|
||||
assert 0
|
||||
def test_spamegg():
|
||||
import py; py.test.skip('skip me please!')
|
||||
def test_interrupt_me():
|
||||
raise KeyboardInterrupt # simulating the user
|
||||
""")
|
||||
|
||||
result = testdir.runpytest(option._getcmdstring())
|
||||
result.stdout.fnmatch_lines([
|
||||
" def test_foobar():",
|
||||
"> assert 0",
|
||||
"E assert 0",
|
||||
"*_keyboard_interrupt.py:6: KeyboardInterrupt*",
|
||||
])
|
||||
if option.verbose:
|
||||
result.stdout.fnmatch_lines([
|
||||
"*raise KeyboardInterrupt # simulating the user*",
|
||||
])
|
||||
|
||||
def test_skip_reasons_folding(self):
|
||||
class longrepr:
|
||||
class reprcrash:
|
||||
path = 'xyz'
|
||||
lineno = 3
|
||||
message = "justso"
|
||||
|
||||
ev1 = runner.CollectReport(None, None)
|
||||
ev1.when = "execute"
|
||||
ev1.skipped = True
|
||||
ev1.longrepr = longrepr
|
||||
|
||||
ev2 = runner.ItemTestReport(None, excinfo=longrepr)
|
||||
ev2.skipped = True
|
||||
|
||||
l = folded_skips([ev1, ev2])
|
||||
assert len(l) == 1
|
||||
num, fspath, lineno, reason = l[0]
|
||||
assert num == 2
|
||||
assert fspath == longrepr.reprcrash.path
|
||||
assert lineno == longrepr.reprcrash.lineno
|
||||
assert reason == longrepr.reprcrash.message
|
||||
|
||||
class TestCollectonly:
|
||||
def test_collectonly_basic(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
def test_func():
|
||||
pass
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
indent = rep.indent
|
||||
rep.config.hook.pytest_collectstart(collector=modcol)
|
||||
linecomp.assert_contains_lines([
|
||||
"<Module 'test_collectonly_basic.py'>"
|
||||
])
|
||||
item = modcol.join("test_func")
|
||||
rep.config.hook.pytest_itemstart(item=item)
|
||||
linecomp.assert_contains_lines([
|
||||
" <Function 'test_func'>",
|
||||
])
|
||||
rep.config.hook.pytest_collectreport(
|
||||
rep=runner.CollectReport(modcol, [], excinfo=None))
|
||||
assert rep.indent == indent
|
||||
|
||||
def test_collectonly_skipped_module(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
import py
|
||||
py.test.skip("nomod")
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
cols = list(testdir.genitems([modcol]))
|
||||
assert len(cols) == 0
|
||||
linecomp.assert_contains_lines("""
|
||||
<Module 'test_collectonly_skipped_module.py'>
|
||||
!!! Skipped: 'nomod' !!!
|
||||
""")
|
||||
|
||||
def test_collectonly_failed_module(self, testdir, linecomp):
|
||||
modcol = testdir.getmodulecol(configargs=['--collectonly'], source="""
|
||||
raise ValueError(0)
|
||||
""")
|
||||
rep = CollectonlyReporter(modcol.config, out=linecomp.stringio)
|
||||
modcol.config.pluginmanager.register(rep)
|
||||
cols = list(testdir.genitems([modcol]))
|
||||
assert len(cols) == 0
|
||||
linecomp.assert_contains_lines("""
|
||||
<Module 'test_collectonly_failed_module.py'>
|
||||
!!! ValueError: 0 !!!
|
||||
""")
|
||||
|
||||
def test_collectonly_fatal(self, testdir):
|
||||
p1 = testdir.makeconftest("""
|
||||
def pytest_collectstart(collector):
|
||||
assert 0, "urgs"
|
||||
""")
|
||||
result = testdir.runpytest("--collectonly")
|
||||
result.stdout.fnmatch_lines([
|
||||
"*INTERNAL*args*"
|
||||
])
|
||||
assert result.ret == 3
|
||||
|
||||
def test_repr_python_version(monkeypatch):
|
||||
monkeypatch.setattr(sys, 'version_info', (2, 5, 1, 'final', 0))
|
||||
assert repr_pythonversion() == "2.5.1-final-0"
|
||||
py.std.sys.version_info = x = (2,3)
|
||||
assert repr_pythonversion() == str(x)
|
||||
|
|
@ -174,7 +174,7 @@ class PluginManager(object):
|
|||
if hasattr(self, '_config'):
|
||||
self.call_plugin(plugin, "pytest_addoption", parser=self._config._parser)
|
||||
self.call_plugin(plugin, "pytest_configure", config=self._config)
|
||||
#dic = self.call_plugin(plugin, "pytest_namespace", config=self._config)
|
||||
#dic = self.call_plugin(plugin, "pytest_namespace")
|
||||
#self._updateext(dic)
|
||||
|
||||
def call_plugin(self, plugin, methname, **kwargs):
|
||||
|
@ -191,7 +191,7 @@ class PluginManager(object):
|
|||
config.pluginmanager.register(self)
|
||||
self._config = config
|
||||
config.hook.pytest_configure(config=self._config)
|
||||
for dic in config.hook.pytest_namespace(config=config) or []:
|
||||
for dic in config.hook.pytest_namespace() or []:
|
||||
self._updateext(dic)
|
||||
|
||||
def do_unconfigure(self, config):
|
||||
|
|
|
@ -86,12 +86,11 @@ class Session(object):
|
|||
self.shouldstop = True
|
||||
pytest_collectreport = pytest_runtest_logreport
|
||||
|
||||
def sessionfinishes(self, exitstatus=0, excinfo=None):
|
||||
def sessionfinishes(self, exitstatus):
|
||||
""" teardown any resources after a test run. """
|
||||
self.config.hook.pytest_sessionfinish(
|
||||
session=self,
|
||||
exitstatus=exitstatus,
|
||||
excrepr=excinfo and excinfo.getrepr() or None
|
||||
)
|
||||
|
||||
def getinitialitems(self, colitems):
|
||||
|
@ -114,13 +113,14 @@ class Session(object):
|
|||
if not self.config.option.collectonly:
|
||||
item.config.hook.pytest_runtest_protocol(item=item)
|
||||
except KeyboardInterrupt:
|
||||
captured_excinfo = py.code.ExceptionInfo()
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
item.config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
|
||||
exitstatus = outcome.EXIT_INTERRUPTED
|
||||
except:
|
||||
captured_excinfo = py.code.ExceptionInfo()
|
||||
excinfo = py.code.ExceptionInfo()
|
||||
self.config.pluginmanager.notify_exception(captured_excinfo)
|
||||
exitstatus = outcome.EXIT_INTERNALERROR
|
||||
if exitstatus == 0 and self._testsfailed:
|
||||
exitstatus = outcome.EXIT_TESTSFAILED
|
||||
self.sessionfinishes(exitstatus=exitstatus, excinfo=captured_excinfo)
|
||||
self.sessionfinishes(exitstatus=exitstatus)
|
||||
return exitstatus
|
||||
|
|
|
@ -175,7 +175,7 @@ class TestPytestPluginInteractions:
|
|||
|
||||
def test_do_ext_namespace(self, testdir):
|
||||
testdir.makeconftest("""
|
||||
def pytest_namespace(config):
|
||||
def pytest_namespace():
|
||||
return {'hello': 'world'}
|
||||
""")
|
||||
p = testdir.makepyfile("""
|
||||
|
|
Loading…
Reference in New Issue