merge 1.0.x branch

--HG--
branch : trunk
holger krekel 2009-07-22 16:10:25 +02:00
commit 03970e0c4a
34 changed files with 404 additions and 3592 deletions

View File

@ -7,3 +7,8 @@
8cd6eb91eba313b012d6e568f37d844dc0751f2e 1.0.0b4
0000000000000000000000000000000000000000 1.0.0b4
2cc0507f117ffe721dff7ee026648cfce00ec92f 1.0.0b6
86f1e1b6e49bf5882a809f11edd1dbb08162cdad 1.0.0b8
86f1e1b6e49bf5882a809f11edd1dbb08162cdad 1.0.0b8
c63f35c266cbb26dad6b87b5e115d65685adf448 1.0.0b8
c63f35c266cbb26dad6b87b5e115d65685adf448 1.0.0b8
0eaa0fdf2ba0163cf534dc2eff4ba2e5fc66c261 1.0.0b8

View File

@ -1,10 +1,10 @@
Changes between 1.0.0b7 and 1.0.0b8
=====================================
* pytest_unittest-plugin are now enabled by default
* pytest_unittest-plugin is now enabled by default
* introduced pytest_keyboardinterrupt hook and
refined pytest_sessionfinish hooked.
refined pytest_sessionfinish hook, added tests.
* workaround a buggy logging module interaction ("closing already closed
files"). Thanks to Sridhar Ratnakumar for triggering.
@ -13,9 +13,8 @@ Changes between 1.0.0b7 and 1.0.0b8
a dependency only a warning will be issued instead
of exiting the testing process.
* docs:
- refined funcargs doc , use the term "factory" instead
of "provider"
* many improvements to docs:
- refined funcargs doc, use the term "factory" instead of "provider"
- added a new talk/tutorial doc page
- better download page
- better plugin docstrings

View File

@ -28,7 +28,25 @@ doc/test/examples.txt
doc/test/extend.txt
doc/test/features.txt
doc/test/funcargs.txt
doc/test/plugins.txt
doc/test/plugin/doctest.txt
doc/test/plugin/execnetcleanup.txt
doc/test/plugin/figleaf.txt
doc/test/plugin/hooklog.txt
doc/test/plugin/hookspec.txt
doc/test/plugin/index.txt
doc/test/plugin/iocapture.txt
doc/test/plugin/keyword.txt
doc/test/plugin/monkeypatch.txt
doc/test/plugin/pdb.txt
doc/test/plugin/pocoo.txt
doc/test/plugin/pytester.txt
doc/test/plugin/recwarn.txt
doc/test/plugin/restdoc.txt
doc/test/plugin/resultlog.txt
doc/test/plugin/runner.txt
doc/test/plugin/terminal.txt
doc/test/plugin/unittest.txt
doc/test/plugin/xfail.txt
doc/test/quickstart.txt
doc/test/talks.txt
doc/test/test.txt
@ -340,6 +358,7 @@ py/test/plugin/pytest_unittest.py
py/test/plugin/pytest_xfail.py
py/test/plugin/test_pytest_runner.py
py/test/plugin/test_pytest_runner_xunit.py
py/test/plugin/test_pytest_terminal.py
py/test/pluginmanager.py
py/test/pycollect.py
py/test/session.py
@ -358,6 +377,7 @@ py/test/testing/test_conftesthandle.py
py/test/testing/test_deprecated_api.py
py/test/testing/test_funcargs.py
py/test/testing/test_genitems.py
py/test/testing/test_install.py
py/test/testing/test_outcome.py
py/test/testing/test_parseopt.py
py/test/testing/test_pickling.py

View File

@ -633,6 +633,10 @@ div.heading, h1 {
border-bottom: 1px solid #8CACBB;
}
h2 {
border-bottom: 1px dotted #8CACBB;
}
h1, h2, h3, h4, h5, h6 {
color: Black;
@ -648,11 +652,10 @@ h1, h2, h3, h4, h5, h6 {
h1 { font-size: 145%; }
h2 { font-size: 135%; }
h3 { font-size: 125%; }
h4 { font-size: 120%; }
h5 { font-size: 110%; }
h6 { font-size: 80%; }
h2 { font-size: 115%; }
h3 { font-size: 105%; }
h4 { font-size: 100%; }
h5 { font-size: 100%; }
h1 a { text-decoration: None;}

View File

@ -4,6 +4,9 @@ pytest_doctest plugin
collect and execute doctests from modules and test files.
.. contents::
:local:
Usage
-------------
@ -22,218 +25,19 @@ command line options
``--doctest-modules``
search all python files for doctests
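
For example, a text file matching the default ``test_*.txt`` pattern
could contain an interactive session like this (a minimal sketch; the
file name and values are illustrative)::

    >>> x = 3
    >>> x + 4
    7

Running ``py.test`` in the containing directory collects such a file
and executes it with the ``doctest`` module; any mismatch between
expected and actual output is reported as a test failure.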
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_doctest.py`_ plugin source code
2. put it somewhere as ``pytest_doctest.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_doctest.py``:
.. sourcecode:: python
"""
collect and execute doctests from modules and test files.
Usage
-------------
By default all files matching the ``test_*.txt`` pattern will
be run with the ``doctest`` module. If you issue::
py.test --doctest-modules
all python files in your projects will be doctest-run
as well.
"""
import py
from py.__.code.excinfo import Repr, ReprFileLocation
def pytest_addoption(parser):
group = parser.addgroup("doctest options")
group.addoption("--doctest-modules",
action="store_true", default=False,
help="search all python files for doctests",
dest="doctestmodules")
def pytest_collect_file(path, parent):
if path.ext == ".py":
if parent.config.getvalue("doctestmodules"):
return DoctestModule(path, parent)
if path.check(fnmatch="test_*.txt"):
return DoctestTextfile(path, parent)
class ReprFailDoctest(Repr):
def __init__(self, reprlocation, lines):
self.reprlocation = reprlocation
self.lines = lines
def toterminal(self, tw):
for line in self.lines:
tw.line(line)
self.reprlocation.toterminal(tw)
class DoctestItem(py.test.collect.Item):
def __init__(self, path, parent):
name = self.__class__.__name__ + ":" + path.basename
super(DoctestItem, self).__init__(name=name, parent=parent)
self.fspath = path
def repr_failure(self, excinfo, outerr):
if excinfo.errisinstance(py.compat.doctest.DocTestFailure):
doctestfailure = excinfo.value
example = doctestfailure.example
test = doctestfailure.test
filename = test.filename
lineno = test.lineno + example.lineno + 1
message = excinfo.type.__name__
reprlocation = ReprFileLocation(filename, lineno, message)
checker = py.compat.doctest.OutputChecker()
REPORT_UDIFF = py.compat.doctest.REPORT_UDIFF
filelines = py.path.local(filename).readlines(cr=0)
i = max(test.lineno, max(0, lineno - 10)) # XXX?
lines = []
for line in filelines[i:lineno]:
lines.append("%03d %s" % (i+1, line))
i += 1
lines += checker.output_difference(example,
doctestfailure.got, REPORT_UDIFF).split("\n")
return ReprFailDoctest(reprlocation, lines)
elif excinfo.errisinstance(py.compat.doctest.UnexpectedException):
excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
return super(DoctestItem, self).repr_failure(excinfo, outerr)
else:
return super(DoctestItem, self).repr_failure(excinfo, outerr)
class DoctestTextfile(DoctestItem):
def runtest(self):
if not self._deprecated_testexecution():
failed, tot = py.compat.doctest.testfile(
str(self.fspath), module_relative=False,
raise_on_error=True, verbose=0)
class DoctestModule(DoctestItem):
def runtest(self):
module = self.fspath.pyimport()
failed, tot = py.compat.doctest.testmod(
module, raise_on_error=True, verbose=0)
#
# Plugin tests
#
class TestDoctests:
def test_collect_testtextfile(self, testdir):
testdir.maketxtfile(whatever="")
checkfile = testdir.maketxtfile(test_something="""
alskdjalsdk
>>> i = 5
>>> i-1
4
""")
for x in (testdir.tmpdir, checkfile):
#print "checking that %s returns custom items" % (x,)
items, reprec = testdir.inline_genitems(x)
assert len(items) == 1
assert isinstance(items[0], DoctestTextfile)
def test_collect_module(self, testdir):
path = testdir.makepyfile(whatever="#")
for p in (path, testdir.tmpdir):
items, reprec = testdir.inline_genitems(p, '--doctest-modules')
assert len(items) == 1
assert isinstance(items[0], DoctestModule)
def test_simple_doctestfile(self, testdir):
p = testdir.maketxtfile(test_doc="""
>>> x = 1
>>> x == 1
False
""")
reprec = testdir.inline_run(p)
reprec.assertoutcome(failed=1)
def test_doctest_unexpected_exception(self, testdir):
from py.__.test.outcome import Failed
p = testdir.maketxtfile("""
>>> i = 0
>>> i = 1
>>> x
2
""")
reprec = testdir.inline_run(p)
call = reprec.getcall("pytest_runtest_logreport")
assert call.rep.failed
assert call.rep.longrepr
# XXX
#testitem, = items
#excinfo = py.test.raises(Failed, "testitem.runtest()")
#repr = testitem.repr_failure(excinfo, ("", ""))
#assert repr.reprlocation
def test_doctestmodule(self, testdir):
p = testdir.makepyfile("""
'''
>>> x = 1
>>> x == 1
False
'''
""")
reprec = testdir.inline_run(p, "--doctest-modules")
reprec.assertoutcome(failed=1)
def test_doctestmodule_external(self, testdir):
p = testdir.makepyfile("""
#
def somefunc():
'''
>>> i = 0
>>> i + 1
2
'''
""")
result = testdir.runpytest(p, "--doctest-modules")
result.stdout.fnmatch_lines([
'004 *>>> i = 0',
'005 *>>> i + 1',
'*Expected:',
"* 2",
"*Got:",
"* 1",
"*:5: DocTestFailure"
])
def test_txtfile_failing(self, testdir):
p = testdir.maketxtfile("""
>>> i = 0
>>> i + 1
2
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
'001 >>> i = 0',
'002 >>> i + 1',
'Expected:',
" 2",
"Got:",
" 1",
"*test_txtfile_failing.txt:2: DocTestFailure"
])
.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_doctest.py
.. _`pytest_doctest.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_doctest.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -1,86 +0,0 @@
pytest_execnetcleanup plugin
============================
cleanup execnet gateways during test function runs.
Getting and improving this plugin
---------------------------------
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
1. Download `pytest_execnetcleanup.py`_ plugin source code
2. put it somewhere as ``pytest_execnetcleanup.py`` into your import path
3. a subsequent test run will now use your local version!
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_execnetcleanup.py``:
.. sourcecode:: python
"""
cleanup execnet gateways during test function runs.
"""
import py
pytest_plugins = "xfail"
def pytest_configure(config):
config.pluginmanager.register(Execnetcleanup())
class Execnetcleanup:
_gateways = None
def __init__(self, debug=False):
self._debug = debug
def pyexecnet_gateway_init(self, gateway):
if self._gateways is not None:
self._gateways.append(gateway)
def pyexecnet_gateway_exit(self, gateway):
if self._gateways is not None:
self._gateways.remove(gateway)
def pytest_sessionstart(self, session):
self._gateways = []
def pytest_sessionfinish(self, session, exitstatus):
l = []
for gw in self._gateways:
gw.exit()
l.append(gw)
#for gw in l:
# gw.join()
def pytest_pyfunc_call(self, __call__, pyfuncitem):
if self._gateways is not None:
gateways = self._gateways[:]
res = __call__.execute(firstresult=True)
while len(self._gateways) > len(gateways):
self._gateways[-1].exit()
return res
def test_execnetplugin(testdir):
reprec = testdir.inline_runsource("""
import py
import sys
def test_hello():
sys._gw = py.execnet.PopenGateway()
def test_world():
assert hasattr(sys, '_gw')
py.test.raises(KeyError, "sys._gw.exit()") # already closed
""", "-s", "--debug")
reprec.assertoutcome(passed=2)
.. _`pytest_execnetcleanup.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_execnetcleanup.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html
.. _`checkout the py.test development version`: ../../download.html#checkout

View File

@ -4,6 +4,9 @@ pytest_figleaf plugin
write and report coverage data with 'figleaf'.
.. contents::
:local:
command line options
@ -17,93 +20,19 @@ command line options
``--figleaf-html=FIGLEAFHTML``
path to the coverage html dir.
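
A typical invocation might combine these options as follows (the paths
are illustrative)::

    py.test -F --figleaf-data=.figleaf --figleaf-html=coverage-html

This traces coverage while the tests run, writes the raw figleaf data
to ``.figleaf`` and renders an HTML report below ``coverage-html``.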
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_figleaf.py`_ plugin source code
2. put it somewhere as ``pytest_figleaf.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_figleaf.py``:
.. sourcecode:: python
"""
write and report coverage data with 'figleaf'.
"""
import py
figleaf = py.test.importorskip("figleaf.annotate_html")
def pytest_addoption(parser):
group = parser.addgroup('figleaf options')
group.addoption('-F', action='store_true', default=False,
dest = 'figleaf',
help=('trace python coverage with figleaf and write HTML '
'for files below the current working dir'))
group.addoption('--figleaf-data', action='store', default='.figleaf',
dest='figleafdata',
help='path to coverage tracing file.')
group.addoption('--figleaf-html', action='store', default='html',
dest='figleafhtml',
help='path to the coverage html dir.')
def pytest_configure(config):
figleaf.start()
def pytest_terminal_summary(terminalreporter):
config = terminalreporter.config
datafile = py.path.local(config.getvalue('figleafdata'))
tw = terminalreporter._tw
tw.sep('-', 'figleaf')
tw.line('Writing figleaf data to %s' % (datafile))
figleaf.stop()
figleaf.write_coverage(str(datafile))
coverage = get_coverage(datafile, config)
reportdir = py.path.local(config.getvalue('figleafhtml'))
tw.line('Writing figleaf html to file://%s' % (reportdir))
figleaf.annotate_html.prepare_reportdir(str(reportdir))
exclude = []
figleaf.annotate_html.report_as_html(coverage,
str(reportdir), exclude, {})
def get_coverage(datafile, config):
# basepath = config.topdir
basepath = py.path.local()
data = figleaf.read_coverage(str(datafile))
d = {}
coverage = figleaf.combine_coverage(d, data)
for path in coverage.keys():
if not py.path.local(path).relto(basepath):
del coverage[path]
return coverage
def test_functional(testdir):
py.test.importorskip("figleaf")
testdir.plugins.append("figleaf")
testdir.makepyfile("""
def f():
x = 42
def test_whatever():
pass
""")
result = testdir.runpytest('-F')
assert result.ret == 0
assert result.stdout.fnmatch_lines([
'*figleaf html*'
])
#print result.stdout.str()
.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_figleaf.py
.. _`pytest_figleaf.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_figleaf.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -1,72 +0,0 @@
pytest_hooklog plugin
=====================
log invocations of extension hooks to a file.
command line options
--------------------
``--hooklog=HOOKLOG``
write hook calls to the given file.
Getting and improving this plugin
---------------------------------
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
1. Download `pytest_hooklog.py`_ plugin source code
2. put it somewhere as ``pytest_hooklog.py`` into your import path
3. a subsequent test run will now use your local version!
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_hooklog.py``:
.. sourcecode:: python
""" log invocations of extension hooks to a file. """
import py
def pytest_addoption(parser):
parser.addoption("--hooklog", dest="hooklog", default=None,
help="write hook calls to the given file.")
def pytest_configure(config):
hooklog = config.getvalue("hooklog")
if hooklog:
assert not config.pluginmanager.comregistry.logfile
config.pluginmanager.comregistry.logfile = open(hooklog, 'w')
def pytest_unconfigure(config):
f = config.pluginmanager.comregistry.logfile
if f:
f.close()
config.pluginmanager.comregistry.logfile = None
# ===============================================================================
# plugin tests
# ===============================================================================
def test_functional(testdir):
testdir.makepyfile("""
def test_pass():
pass
""")
testdir.runpytest("--hooklog=hook.log")
s = testdir.tmpdir.join("hook.log").read()
assert s.find("pytest_sessionstart") != -1
assert s.find("ItemTestReport") != -1
assert s.find("sessionfinish") != -1
.. _`pytest_hooklog.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_hooklog.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html
.. _`checkout the py.test development version`: ../../download.html#checkout

View File

@ -8,7 +8,7 @@ figleaf_ write and report coverage data with 'figleaf'.
monkeypatch_ safely patch object attributes, dicts and environment variables.
iocapture_ convenient capturing of writes to stdout/stderror streams
iocapture_ convenient capturing of writes to stdout/stderr streams and file descriptors.
recwarn_ helpers for asserting deprecation and other warnings.
@ -32,23 +32,7 @@ pocoo_ submit failure information to paste.pocoo.org
resultlog_ resultlog plugin for machine-readable logging of test results.
terminal_ terminal reporting of the full testing process.
internal plugins / core functionality
=====================================
pdb_ interactive debugging with the Python Debugger.
keyword_ py.test.mark / keyword plugin
hooklog_ log invocations of extension hooks to a file.
runner_ collect and run test items and create reports.
execnetcleanup_ cleanup execnet gateways during test function runs.
pytester_ funcargs and support code for testing py.test's own functionality.
terminal_ Implements terminal reporting of the full testing process.
.. _`xfail`: xfail.html
@ -63,9 +47,3 @@ pytester_ funcargs and support code for testing py.test's own functionality.
.. _`pocoo`: pocoo.html
.. _`resultlog`: resultlog.html
.. _`terminal`: terminal.html
.. _`pdb`: pdb.html
.. _`keyword`: keyword.html
.. _`hooklog`: hooklog.html
.. _`runner`: runner.html
.. _`execnetcleanup`: execnetcleanup.html
.. _`pytester`: pytester.html

View File

@ -2,9 +2,10 @@
pytest_iocapture plugin
=======================
convenient capturing of writes to stdout/stderror streams
convenient capturing of writes to stdout/stderr streams and file descriptors.
and file descriptors.
.. contents::
:local:
Example Usage
----------------------
@ -29,6 +30,7 @@ The ``reset()`` call returns a tuple and will restart
capturing so that you can successively check for output.
After the test function finishes the original streams
will be restored.
.. _`capsys funcarg`:
@ -38,6 +40,7 @@ the 'capsys' test function argument
captures writes to sys.stdout/sys.stderr and makes
them available successively via a ``capsys.reset()`` method
which returns a ``(out, err)`` tuple of captured strings.
.. _`capfd funcarg`:
@ -48,123 +51,19 @@ captures writes to file descriptors 1 and 2 and makes
them available successively via a ``capfd.reset()`` method
which returns a ``(out, err)`` tuple of captured strings.
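
A small sketch of using ``capfd`` to check output written directly to
file descriptor 1, bypassing ``sys.stdout`` (the test name is
illustrative):

.. sourcecode:: python

    import os

    def test_fd_output(capfd):
        os.write(1, "hello-fd\n")
        out, err = capfd.reset()
        assert out.startswith("hello-fd")
        assert err == ""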
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_iocapture.py`_ plugin source code
2. put it somewhere as ``pytest_iocapture.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_iocapture.py``:
.. sourcecode:: python
"""
convenient capturing of writes to stdout/stderror streams
and file descriptors.
Example Usage
----------------------
You can use the `capsys funcarg`_ to capture writes
to stdout and stderr streams by using it in a test
likes this:
.. sourcecode:: python
def test_myoutput(capsys):
print "hello"
print >>sys.stderr, "world"
out, err = capsys.reset()
assert out == "hello\\n"
assert err == "world\\n"
print "next"
out, err = capsys.reset()
assert out == "next\\n"
The ``reset()`` call returns a tuple and will restart
capturing so that you can successively check for output.
After the test function finishes the original streams
will be restored.
"""
import py
def pytest_funcarg__capsys(request):
"""captures writes to sys.stdout/sys.stderr and makes
them available successively via a ``capsys.reset()`` method
which returns a ``(out, err)`` tuple of captured strings.
"""
capture = Capture(py.io.StdCapture)
request.addfinalizer(capture.finalize)
return capture
def pytest_funcarg__capfd(request):
"""captures writes to file descriptors 1 and 2 and makes
them available successively via a ``capsys.reset()`` method
which returns a ``(out, err)`` tuple of captured strings.
"""
capture = Capture(py.io.StdCaptureFD)
request.addfinalizer(capture.finalize)
return capture
def pytest_pyfunc_call(pyfuncitem):
if hasattr(pyfuncitem, 'funcargs'):
for funcarg, value in pyfuncitem.funcargs.items():
if funcarg == "capsys" or funcarg == "capfd":
value.reset()
class Capture:
_capture = None
def __init__(self, captureclass):
self._captureclass = captureclass
def finalize(self):
if self._capture:
self._capture.reset()
def reset(self):
res = None
if self._capture:
res = self._capture.reset()
self._capture = self._captureclass()
return res
class TestCapture:
def test_std_functional(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capsys):
print 42
out, err = capsys.reset()
assert out.startswith("42")
""")
reprec.assertoutcome(passed=1)
def test_stdfd_functional(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello(capfd):
import os
os.write(1, "42")
out, err = capfd.reset()
assert out.startswith("42")
""")
reprec.assertoutcome(passed=1)
def test_funcall_yielded_no_funcargs(self, testdir):
reprec = testdir.inline_runsource("""
def test_hello():
yield lambda: None
""")
reprec.assertoutcome(passed=1)
.. _`pytest_iocapture.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_iocapture.py
.. _`pytest_iocapture.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_iocapture.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -1,110 +0,0 @@
pytest_keyword plugin
=====================
py.test.mark / keyword plugin
Getting and improving this plugin
---------------------------------
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
1. Download `pytest_keyword.py`_ plugin source code
2. put it somewhere as ``pytest_keyword.py`` into your import path
3. a subsequent test run will now use your local version!
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_keyword.py``:
.. sourcecode:: python
"""
py.test.mark / keyword plugin
"""
import py
def pytest_namespace():
mark = KeywordDecorator({})
return {'mark': mark}
class KeywordDecorator:
""" decorator for setting function attributes. """
def __init__(self, keywords, lastname=None):
self._keywords = keywords
self._lastname = lastname
def __call__(self, func=None, **kwargs):
if func is None:
kw = self._keywords.copy()
kw.update(kwargs)
return KeywordDecorator(kw)
elif not hasattr(func, 'func_dict'):
kw = self._keywords.copy()
name = self._lastname
if name is None:
name = "mark"
kw[name] = func
return KeywordDecorator(kw)
func.func_dict.update(self._keywords)
return func
def __getattr__(self, name):
if name[0] == "_":
raise AttributeError(name)
kw = self._keywords.copy()
kw[name] = True
return self.__class__(kw, lastname=name)
def test_pytest_mark_getattr():
mark = KeywordDecorator({})
def f(): pass
mark.hello(f)
assert f.hello == True
mark.hello("test")(f)
assert f.hello == "test"
py.test.raises(AttributeError, "mark._hello")
py.test.raises(AttributeError, "mark.__str__")
def test_pytest_mark_call():
mark = KeywordDecorator({})
def f(): pass
mark(x=3)(f)
assert f.x == 3
def g(): pass
mark(g)
assert not g.func_dict
mark.hello(f)
assert f.hello == True
mark.hello("test")(f)
assert f.hello == "test"
mark("x1")(f)
assert f.mark == "x1"
def test_mark_plugin(testdir):
p = testdir.makepyfile("""
import py
pytest_plugins = "keyword"
@py.test.mark.hello
def test_hello():
assert hasattr(test_hello, 'hello')
""")
result = testdir.runpytest(p)
assert result.stdout.fnmatch_lines(["*passed*"])
.. _`pytest_keyword.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_keyword.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html
.. _`checkout the py.test development version`: ../../download.html#checkout

View File

@ -4,6 +4,9 @@ pytest_monkeypatch plugin
safely patch object attributes, dicts and environment variables.
.. contents::
:local:
Usage
----------------
@ -26,6 +29,7 @@ modifications will be reverted. See the `monkeypatch blog post`_
for an extensive discussion.
.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
.. _`monkeypatch funcarg`:
@ -42,147 +46,19 @@ helper methods to modify objects, dictionaries or os.environ::
All such modifications will be undone when the requesting
test function finishes its execution.
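
For instance, a dictionary entry patched with ``setitem`` is only
visible for the duration of the test (a minimal sketch; the
module-level ``config`` dict is hypothetical):

.. sourcecode:: python

    config = {'verbose': False}

    def test_verbose_mode(monkeypatch):
        monkeypatch.setitem(config, 'verbose', True)
        assert config['verbose']
        # after the test finishes, the funcarg finalizer restores
        # the original value, so other tests see verbose == False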
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_monkeypatch.py`_ plugin source code
2. put it somewhere as ``pytest_monkeypatch.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_monkeypatch.py``:
.. sourcecode:: python
"""
safely patch object attributes, dicts and environment variables.
Usage
----------------
Use the `monkeypatch funcarg`_ to safely patch the environment
variables, object attributes or dictionaries. For example, if you want
to set the environment variable ``ENV1`` and patch the
``os.path.abspath`` function to return a particular value during a test
function execution you can write it down like this:
.. sourcecode:: python
def test_mytest(monkeypatch):
monkeypatch.setenv('ENV1', 'myval')
monkeypatch.setattr(os.path, 'abspath', lambda x: '/')
... # your test code
The function argument will do the modifications and memorize the
old state. After the test function finished execution all
modifications will be reverted. See the `monkeypatch blog post`_
for an extensive discussion.
.. _`monkeypatch blog post`: http://tetamap.wordpress.com/2009/03/03/monkeypatching-in-unit-tests-done-right/
"""
import os
def pytest_funcarg__monkeypatch(request):
"""The returned ``monkeypatch`` funcarg provides three
helper methods to modify objects, dictionaries or os.environ::
monkeypatch.setattr(obj, name, value)
monkeypatch.setitem(mapping, name, value)
monkeypatch.setenv(name, value)
All such modifications will be undone when the requesting
test function finished its execution.
"""
monkeypatch = MonkeyPatch()
request.addfinalizer(monkeypatch.finalize)
return monkeypatch
notset = object()
class MonkeyPatch:
def __init__(self):
self._setattr = []
self._setitem = []
def setattr(self, obj, name, value):
self._setattr.insert(0, (obj, name, getattr(obj, name, notset)))
setattr(obj, name, value)
def setitem(self, dictionary, name, value):
self._setitem.insert(0, (dictionary, name, dictionary.get(name, notset)))
dictionary[name] = value
def setenv(self, name, value):
self.setitem(os.environ, name, str(value))
def finalize(self):
for obj, name, value in self._setattr:
if value is not notset:
setattr(obj, name, value)
else:
delattr(obj, name)
for dictionary, name, value in self._setitem:
if value is notset:
del dictionary[name]
else:
dictionary[name] = value
def test_setattr():
class A:
x = 1
monkeypatch = MonkeyPatch()
monkeypatch.setattr(A, 'x', 2)
assert A.x == 2
monkeypatch.setattr(A, 'x', 3)
assert A.x == 3
monkeypatch.finalize()
assert A.x == 1
monkeypatch.setattr(A, 'y', 3)
assert A.y == 3
monkeypatch.finalize()
assert not hasattr(A, 'y')
def test_setitem():
d = {'x': 1}
monkeypatch = MonkeyPatch()
monkeypatch.setitem(d, 'x', 2)
monkeypatch.setitem(d, 'y', 1700)
assert d['x'] == 2
assert d['y'] == 1700
monkeypatch.setitem(d, 'x', 3)
assert d['x'] == 3
monkeypatch.finalize()
assert d['x'] == 1
assert 'y' not in d
def test_setenv():
monkeypatch = MonkeyPatch()
monkeypatch.setenv('XYZ123', 2)
import os
assert os.environ['XYZ123'] == "2"
monkeypatch.finalize()
assert 'XYZ123' not in os.environ
def test_monkeypatch_plugin(testdir):
reprec = testdir.inline_runsource("""
pytest_plugins = 'pytest_monkeypatch',
def test_method(monkeypatch):
assert monkeypatch.__class__.__name__ == "MonkeyPatch"
""")
res = reprec.countoutcomes()
assert tuple(res) == (1, 0, 0), res
.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_monkeypatch.py
.. _`pytest_monkeypatch.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_monkeypatch.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -0,0 +1,12 @@
pytest_oejskit plugin (EXTERNAL)
==========================================
The `oejskit`_ package offers a py.test plugin for running JavaScript tests in live browsers. Running inside the browsers comes at some cost in speed; on the other hand it means, for example, that the code is tested against real-world DOM implementations.
The approach makes it possible to write integration tests in which the JavaScript code is tested against server-side Python code, mocked as necessary. Any server-side framework that can already be exposed through WSGI (or for which a subset of WSGI can be written to accommodate jskit's own needs) can play along.
For more info and download please visit the `oejskit PyPI`_ page.
.. _`oejskit`:
.. _`oejskit PyPI`: http://pypi.python.org/pypi/oejskit
.. source link 'http://bitbucket.org/pedronis/js-infrastructure/src/tip/pytest_jstests.py',

View File

@ -1,192 +0,0 @@
pytest_pdb plugin
=================
interactive debugging with the Python Debugger.
command line options
--------------------
``--pdb``
start pdb (the Python debugger) on errors.
Getting and improving this plugin
---------------------------------
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
1. Download `pytest_pdb.py`_ plugin source code
2. put it somewhere as ``pytest_pdb.py`` into your import path
3. a subsequent test run will now use your local version!
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_pdb.py``:
.. sourcecode:: python
"""
interactive debugging with the Python Debugger.
"""
import py
import pdb, sys, linecache
from py.__.test.outcome import Skipped
def pytest_addoption(parser):
group = parser.getgroup("general")
group._addoption('--pdb',
action="store_true", dest="usepdb", default=False,
help="start pdb (the Python debugger) on errors.")
def pytest_configure(config):
if config.option.usepdb:
if config.getvalue("looponfail"):
raise config.Error("--pdb incompatible with --looponfail.")
if config.option.dist != "no":
raise config.Error("--pdb incomptaible with distributing tests.")
config.pluginmanager.register(PdbInvoke())
class PdbInvoke:
def pytest_runtest_makereport(self, item, call):
if call.excinfo and not call.excinfo.errisinstance(Skipped):
tw = py.io.TerminalWriter()
repr = call.excinfo.getrepr()
repr.toterminal(tw)
post_mortem(call.excinfo._excinfo[2])
class Pdb(py.std.pdb.Pdb):
def do_list(self, arg):
self.lastcmd = 'list'
last = None
if arg:
try:
x = eval(arg, {}, {})
if type(x) == type(()):
first, last = x
first = int(first)
last = int(last)
if last < first:
# Assume it's a count
last = first + last
else:
first = max(1, int(x) - 5)
except:
print '*** Error in argument:', repr(arg)
return
elif self.lineno is None:
first = max(1, self.curframe.f_lineno - 5)
else:
first = self.lineno + 1
if last is None:
last = first + 10
filename = self.curframe.f_code.co_filename
breaklist = self.get_file_breaks(filename)
try:
for lineno in range(first, last+1):
# start difference from normal do_line
line = self._getline(filename, lineno)
# end difference from normal do_line
if not line:
print '[EOF]'
break
else:
s = repr(lineno).rjust(3)
if len(s) < 4: s = s + ' '
if lineno in breaklist: s = s + 'B'
else: s = s + ' '
if lineno == self.curframe.f_lineno:
s = s + '->'
print s + '\t' + line,
self.lineno = lineno
except KeyboardInterrupt:
pass
do_l = do_list
def _getline(self, filename, lineno):
if hasattr(filename, "__source__"):
try:
return filename.__source__.lines[lineno - 1] + "\n"
except IndexError:
return None
return linecache.getline(filename, lineno)
def get_stack(self, f, t):
# Modified from bdb.py to be able to walk the stack beyond generators,
# which does not work in the normal pdb :-(
stack, i = pdb.Pdb.get_stack(self, f, t)
if f is None:
i = max(0, len(stack) - 1)
return stack, i
def post_mortem(t):
# modified from pdb.py for the new get_stack() implementation
p = Pdb()
p.reset()
p.interaction(None, t)
def set_trace():
# again, a copy of the version in pdb.py
Pdb().set_trace(sys._getframe().f_back)
class TestPDB:
def pytest_funcarg__pdblist(self, request):
monkeypatch = request.getfuncargvalue("monkeypatch")
pdblist = []
def mypdb(*args):
pdblist.append(args)
monkeypatch.setitem(globals(), 'post_mortem', mypdb)
return pdblist
def test_incompatibility_messages(self, testdir):
Error = py.test.config.Error
py.test.raises(Error, "testdir.parseconfigure('--pdb', '--looponfail')")
py.test.raises(Error, "testdir.parseconfigure('--pdb', '-n 3')")
py.test.raises(Error, "testdir.parseconfigure('--pdb', '-d')")
def test_pdb_on_fail(self, testdir, pdblist):
rep = testdir.inline_runsource1('--pdb', """
def test_func():
assert 0
""")
assert rep.failed
assert len(pdblist) == 1
tb = py.code.Traceback(pdblist[0][0])
assert tb[-1].name == "test_func"
def test_pdb_on_skip(self, testdir, pdblist):
rep = testdir.inline_runsource1('--pdb', """
import py
def test_func():
py.test.skip("hello")
""")
assert rep.skipped
assert len(pdblist) == 0
def test_pdb_interaction(self, testdir):
p1 = testdir.makepyfile("""
def test_1():
i = 0
assert i == 1
""")
child = testdir.spawn_pytest("--pdb %s" % p1)
#child.expect(".*def test_1.*")
child.expect(".*i = 0.*")
child.expect("(Pdb)")
child.sendeof()
child.expect("1 failed")
if child.isalive():
child.wait()
.. _`pytest_pdb.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_pdb.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html
.. _`checkout the py.test development version`: ../../download.html#checkout

View File

@ -4,6 +4,9 @@ pytest_pocoo plugin
submit failure information to paste.pocoo.org
.. contents::
:local:
command line options
@ -13,88 +16,19 @@ command line options
``-P, --pocoo-sendfailures``
send failures to http://paste.pocoo.org paste service
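
For example (a sketch of a typical run)::

    py.test -P

If the run has failures, the terminal summary sends the first failing
test's traceback to the paste service and prints the resulting
``http://paste.pocoo.org/show/`` URL.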
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_pocoo.py`_ plugin source code
2. put it somewhere as ``pytest_pocoo.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_pocoo.py``:
.. sourcecode:: python
"""
submit failure information to paste.pocoo.org
"""
import py
class url:
base = "http://paste.pocoo.org"
xmlrpc = base + "/xmlrpc/"
show = base + "/show/"
def pytest_addoption(parser):
group = parser.addgroup("pocoo plugin")
group.addoption('-P', '--pocoo-sendfailures',
action='store_true', dest="pocoo_sendfailures",
help="send failures to %s paste service" %(url.base,))
def getproxy():
return py.std.xmlrpclib.ServerProxy(url.xmlrpc).pastes
def pytest_terminal_summary(terminalreporter):
if terminalreporter.config.option.pocoo_sendfailures:
tr = terminalreporter
if 'failed' in tr.stats and tr.config.option.tbstyle != "no":
terminalreporter.write_sep("=", "Sending failures to %s" %(url.base,))
terminalreporter.write_line("xmlrpcurl: %s" %(url.xmlrpc,))
#print self.__class__.getproxy
#print self.__class__, id(self.__class__)
serverproxy = getproxy()
for ev in terminalreporter.stats.get('failed'):
tw = py.io.TerminalWriter(stringio=True)
ev.toterminal(tw)
s = tw.stringio.getvalue()
# XXX add failure summary
assert len(s)
terminalreporter.write_line("newpaste() ...")
proxyid = serverproxy.newPaste("python", s)
terminalreporter.write_line("%s%s\n" % (url.show, proxyid))
break
def test_toproxy(testdir, monkeypatch):
l = []
class MockProxy:
def newPaste(self, language, code):
l.append((language, code))
monkeypatch.setitem(globals(), 'getproxy', MockProxy)
testdir.plugins.insert(0, globals())
testpath = testdir.makepyfile("""
import py
def test_pass():
pass
def test_fail():
assert 0
def test_skip():
py.test.skip("")
""")
reprec = testdir.inline_run(testpath, "-P")
assert len(l) == 1
assert l[0][0] == "python"
s = l[0][1]
assert s.find("def test_fail") != -1
assert reprec.countoutcomes() == [1,1,1]
.. _`pytest_pocoo.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_pocoo.py
.. _`pytest_pocoo.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_pocoo.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -1,568 +0,0 @@
pytest_pytester plugin
======================
funcargs and support code for testing py.test's own functionality.
.. _`testdir funcarg`:
the 'testdir' test function argument
------------------------------------
XXX missing docstring
.. _`reportrecorder funcarg`:
the 'reportrecorder' test function argument
-------------------------------------------
XXX missing docstring
.. _`linecomp funcarg`:
the 'linecomp' test function argument
-------------------------------------
XXX missing docstring
.. _`LineMatcher funcarg`:
the 'LineMatcher' test function argument
----------------------------------------
XXX missing docstring
Getting and improving this plugin
---------------------------------
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
1. Download `pytest_pytester.py`_ plugin source code
2. put it somewhere as ``pytest_pytester.py`` into your import path
3. a subsequent test run will now use your local version!
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_pytester.py``:
.. sourcecode:: python
"""
funcargs and support code for testing py.test's own functionality.
"""
import py
import sys, os
import inspect
from py.__.test.config import Config as pytestConfig
import hookspec
pytest_plugins = '_pytest'
def pytest_funcarg__linecomp(request):
return LineComp()
def pytest_funcarg__LineMatcher(request):
return LineMatcher
def pytest_funcarg__testdir(request):
tmptestdir = TmpTestdir(request)
return tmptestdir
def pytest_funcarg__reportrecorder(request):
reprec = ReportRecorder(py._com.comregistry)
request.addfinalizer(lambda: reprec.comregistry.unregister(reprec))
return reprec
class RunResult:
def __init__(self, ret, outlines, errlines):
self.ret = ret
self.outlines = outlines
self.errlines = errlines
self.stdout = LineMatcher(outlines)
self.stderr = LineMatcher(errlines)
class TmpTestdir:
def __init__(self, request):
self.request = request
self._pytest = request.getfuncargvalue("_pytest")
# XXX remove duplication with tmpdir plugin
basetmp = request.config.ensuretemp("testdir")
name = request.function.__name__
for i in range(100):
try:
tmpdir = basetmp.mkdir(name + str(i))
except py.error.EEXIST:
continue
break
# we need to create another subdir
# because Directory.collect() currently loads
# conftest.py from sibling directories
self.tmpdir = tmpdir.mkdir(name)
self.plugins = []
self._syspathremove = []
self.chdir() # always chdir
assert hasattr(self, '_olddir')
self.request.addfinalizer(self.finalize)
def __repr__(self):
return "<TmpTestdir %r>" % (self.tmpdir,)
def Config(self, comregistry=None, topdir=None):
if topdir is None:
topdir = self.tmpdir.dirpath()
return pytestConfig(comregistry, topdir=topdir)
def finalize(self):
for p in self._syspathremove:
py.std.sys.path.remove(p)
if hasattr(self, '_olddir'):
self._olddir.chdir()
def getreportrecorder(self, obj):
if isinstance(obj, py._com.Registry):
registry = obj
elif hasattr(obj, 'comregistry'):
registry = obj.comregistry
elif hasattr(obj, 'pluginmanager'):
registry = obj.pluginmanager.comregistry
elif hasattr(obj, 'config'):
registry = obj.config.pluginmanager.comregistry
else:
raise ValueError("obj %r provides no comregistry" %(obj,))
assert isinstance(registry, py._com.Registry)
reprec = ReportRecorder(registry)
reprec.hookrecorder = self._pytest.gethookrecorder(hookspec, registry)
reprec.hook = reprec.hookrecorder.hook
return reprec
def chdir(self):
old = self.tmpdir.chdir()
if not hasattr(self, '_olddir'):
self._olddir = old
def _makefile(self, ext, args, kwargs):
items = kwargs.items()
if args:
source = "\n".join(map(str, args))
basename = self.request.function.__name__
items.insert(0, (basename, source))
ret = None
for name, value in items:
p = self.tmpdir.join(name).new(ext=ext)
source = py.code.Source(value)
p.write(str(py.code.Source(value)).lstrip())
if ret is None:
ret = p
return ret
def makefile(self, ext, *args, **kwargs):
return self._makefile(ext, args, kwargs)
def makeconftest(self, source):
return self.makepyfile(conftest=source)
def makepyfile(self, *args, **kwargs):
return self._makefile('.py', args, kwargs)
def maketxtfile(self, *args, **kwargs):
return self._makefile('.txt', args, kwargs)
def syspathinsert(self, path=None):
if path is None:
path = self.tmpdir
py.std.sys.path.insert(0, str(path))
self._syspathremove.append(str(path))
def mkdir(self, name):
return self.tmpdir.mkdir(name)
def genitems(self, colitems):
return list(self.session.genitems(colitems))
def inline_genitems(self, *args):
#config = self.parseconfig(*args)
config = self.parseconfig(*args)
session = config.initsession()
rec = self.getreportrecorder(config)
colitems = [config.getfsnode(arg) for arg in config.args]
items = list(session.genitems(colitems))
return items, rec
def runitem(self, source):
# used from runner functional tests
item = self.getitem(source)
# the test class where we are called from wants to provide the runner
testclassinstance = self.request.function.im_self
runner = testclassinstance.getrunner()
return runner(item)
def inline_runsource(self, source, *cmdlineargs):
p = self.makepyfile(source)
l = list(cmdlineargs) + [p]
return self.inline_run(*l)
def inline_runsource1(self, *args):
args = list(args)
source = args.pop()
p = self.makepyfile(source)
l = list(args) + [p]
reprec = self.inline_run(*l)
reports = reprec.getreports("pytest_runtest_logreport")
assert len(reports) == 1, reports
return reports[0]
def inline_run(self, *args):
config = self.parseconfig(*args)
config.pluginmanager.do_configure(config)
session = config.initsession()
reprec = self.getreportrecorder(config)
session.main()
config.pluginmanager.do_unconfigure(config)
return reprec
def config_preparse(self):
config = self.Config()
for plugin in self.plugins:
if isinstance(plugin, str):
config.pluginmanager.import_plugin(plugin)
else:
if isinstance(plugin, dict):
plugin = PseudoPlugin(plugin)
if not config.pluginmanager.isregistered(plugin):
config.pluginmanager.register(plugin)
#print "config.pluginmanager.impname2plugin", config.pluginmanager.impname2plugin
return config
def parseconfig(self, *args):
if not args:
args = (self.tmpdir,)
config = self.config_preparse()
args = list(args) + ["--basetemp=%s" % self.tmpdir.dirpath('basetemp')]
config.parse(args)
return config
def parseconfigure(self, *args):
config = self.parseconfig(*args)
config.pluginmanager.do_configure(config)
return config
def getitem(self, source, funcname="test_func"):
modcol = self.getmodulecol(source)
moditems = modcol.collect()
for item in modcol.collect():
if item.name == funcname:
return item
else:
assert 0, "%r item not found in module:\n%s" %(funcname, source)
def getitems(self, source):
modcol = self.getmodulecol(source)
return list(modcol.config.initsession().genitems([modcol]))
#assert item is not None, "%r item not found in module:\n%s" %(funcname, source)
#return item
def getfscol(self, path, configargs=()):
self.config = self.parseconfig(path, *configargs)
self.session = self.config.initsession()
return self.config.getfsnode(path)
def getmodulecol(self, source, configargs=(), withinit=False):
kw = {self.request.function.__name__: py.code.Source(source).strip()}
path = self.makepyfile(**kw)
if withinit:
self.makepyfile(__init__ = "#")
self.config = self.parseconfig(path, *configargs)
self.session = self.config.initsession()
#self.config.pluginmanager.do_configure(config=self.config)
# XXX
self.config.pluginmanager.import_plugin("runner")
plugin = self.config.pluginmanager.getplugin("runner")
plugin.pytest_configure(config=self.config)
return self.config.getfsnode(path)
def prepare(self):
p = self.tmpdir.join("conftest.py")
if not p.check():
plugins = [x for x in self.plugins if isinstance(x, str)]
if not plugins:
return
p.write("import py ; pytest_plugins = %r" % plugins)
else:
if self.plugins:
print "warning, ignoring reusing existing con", p
def popen(self, cmdargs, stdout, stderr, **kw):
if not hasattr(py.std, 'subprocess'):
py.test.skip("no subprocess module")
env = os.environ.copy()
env['PYTHONPATH'] = ":".join(filter(None, [
str(os.getcwd()), env.get('PYTHONPATH', '')]))
kw['env'] = env
#print "env", env
return py.std.subprocess.Popen(cmdargs, stdout=stdout, stderr=stderr, **kw)
def run(self, *cmdargs):
self.prepare()
old = self.tmpdir.chdir()
#print "chdir", self.tmpdir
try:
return self._run(*cmdargs)
finally:
old.chdir()
def _run(self, *cmdargs):
cmdargs = map(str, cmdargs)
p1 = py.path.local("stdout")
p2 = py.path.local("stderr")
print "running", cmdargs, "curdir=", py.path.local()
f1 = p1.open("w")
f2 = p2.open("w")
popen = self.popen(cmdargs, stdout=f1, stderr=f2,
close_fds=(sys.platform != "win32"))
ret = popen.wait()
f1.close()
f2.close()
out, err = p1.readlines(cr=0), p2.readlines(cr=0)
if err:
for line in err:
print >>py.std.sys.stderr, line
if out:
for line in out:
print >>py.std.sys.stdout, line
return RunResult(ret, out, err)
def runpybin(self, scriptname, *args):
fullargs = self._getpybinargs(scriptname) + args
return self.run(*fullargs)
def _getpybinargs(self, scriptname):
bindir = py.path.local(py.__file__).dirpath("bin")
script = bindir.join(scriptname)
assert script.check()
return py.std.sys.executable, script
def runpytest(self, *args):
p = py.path.local.make_numbered_dir(prefix="runpytest-",
keep=None, rootdir=self.tmpdir)
args = ('--basetemp=%s' % p, ) + args
return self.runpybin("py.test", *args)
def spawn_pytest(self, string, expect_timeout=10.0):
pexpect = py.test.importorskip("pexpect", "2.3")
basetemp = self.tmpdir.mkdir("pexpect")
invoke = "%s %s" % self._getpybinargs("py.test")
cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
child = pexpect.spawn(cmd, logfile=basetemp.join("spawn.out").open("w"))
child.timeout = expect_timeout
return child
class PseudoPlugin:
def __init__(self, vars):
self.__dict__.update(vars)
class ReportRecorder(object):
def __init__(self, comregistry):
self.comregistry = comregistry
comregistry.register(self)
def getcall(self, name):
return self.hookrecorder.getcall(name)
def popcall(self, name):
return self.hookrecorder.popcall(name)
def getcalls(self, names):
""" return list of ParsedCall instances matching the given eventname. """
return self.hookrecorder.getcalls(names)
# functionality for test reports
def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
return [x.rep for x in self.getcalls(names)]
def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport"):
""" return a testreport whose dotted import path matches """
l = []
for rep in self.getreports(names=names):
colitem = rep.getnode()
if not inamepart or inamepart in colitem.listnames():
l.append(rep)
if not l:
raise ValueError("could not find test report matching %r: no test reports at all!" %
(inamepart,))
if len(l) > 1:
raise ValueError("found more than one testreport matching %r: %s" %(
inamepart, l))
return l[0]
def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'):
return [rep for rep in self.getreports(names) if rep.failed]
def getfailedcollections(self):
return self.getfailures('pytest_collectreport')
def listoutcomes(self):
passed = []
skipped = []
failed = []
for rep in self.getreports("pytest_runtest_logreport"):
if rep.passed:
if rep.when == "call":
passed.append(rep)
elif rep.skipped:
skipped.append(rep)
elif rep.failed:
failed.append(rep)
return passed, skipped, failed
def countoutcomes(self):
return map(len, self.listoutcomes())
def assertoutcome(self, passed=0, skipped=0, failed=0):
realpassed, realskipped, realfailed = self.listoutcomes()
assert passed == len(realpassed)
assert skipped == len(realskipped)
assert failed == len(realfailed)
def clear(self):
self.hookrecorder.calls[:] = []
def unregister(self):
self.comregistry.unregister(self)
self.hookrecorder.finish_recording()
def test_reportrecorder(testdir):
registry = py._com.Registry()
recorder = testdir.getreportrecorder(registry)
assert not recorder.getfailures()
item = testdir.getitem("def test_func(): pass")
class rep:
excinfo = None
passed = False
failed = True
skipped = False
when = "call"
recorder.hook.pytest_runtest_logreport(rep=rep)
failures = recorder.getfailures()
assert failures == [rep]
failures = recorder.getfailures()
assert failures == [rep]
class rep:
excinfo = None
passed = False
failed = False
skipped = True
when = "call"
rep.passed = False
rep.skipped = True
recorder.hook.pytest_runtest_logreport(rep=rep)
modcol = testdir.getmodulecol("")
rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
rep.passed = False
rep.failed = True
rep.skipped = False
recorder.hook.pytest_collectreport(rep=rep)
passed, skipped, failed = recorder.listoutcomes()
assert not passed and skipped and failed
numpassed, numskipped, numfailed = recorder.countoutcomes()
assert numpassed == 0
assert numskipped == 1
assert numfailed == 1
assert len(recorder.getfailedcollections()) == 1
recorder.unregister()
recorder.clear()
recorder.hook.pytest_runtest_logreport(rep=rep)
py.test.raises(ValueError, "recorder.getfailures()")
class LineComp:
def __init__(self):
self.stringio = py.std.StringIO.StringIO()
def assert_contains_lines(self, lines2):
""" assert that lines2 are contained (linearly) in lines1.
return a list of extralines found.
"""
__tracebackhide__ = True
val = self.stringio.getvalue()
self.stringio.truncate(0) # remove what we got
lines1 = val.split("\n")
return LineMatcher(lines1).fnmatch_lines(lines2)
class LineMatcher:
def __init__(self, lines):
self.lines = lines
def str(self):
return "\n".join(self.lines)
def fnmatch_lines(self, lines2):
if isinstance(lines2, str):
lines2 = py.code.Source(lines2)
if isinstance(lines2, py.code.Source):
lines2 = lines2.strip().lines
from fnmatch import fnmatch
__tracebackhide__ = True
lines1 = self.lines[:]
nextline = None
extralines = []
for line in lines2:
nomatchprinted = False
while lines1:
nextline = lines1.pop(0)
if line == nextline:
print "exact match:", repr(line)
break
elif fnmatch(nextline, line):
print "fnmatch:", repr(line)
print " with:", repr(nextline)
break
else:
if not nomatchprinted:
print "nomatch:", repr(line)
nomatchprinted = True
print " and:", repr(nextline)
extralines.append(nextline)
else:
if line != nextline:
#__tracebackhide__ = True
raise AssertionError("expected line not found: %r" % line)
extralines.extend(lines1)
return extralines
def test_parseconfig(testdir):
config1 = testdir.parseconfig()
config2 = testdir.parseconfig()
assert config2 != config1
assert config1 != py.test.config
def test_testdir_runs_with_plugin(testdir):
testdir.makepyfile("""
pytest_plugins = "pytest_pytester"
def test_hello(testdir):
assert 1
""")
result = testdir.runpytest()
assert result.stdout.fnmatch_lines([
"*1 passed*"
])
.. _`pytest_pytester.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_pytester.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html
.. _`checkout the py.test development version`: ../../download.html#checkout

View File

@ -4,207 +4,61 @@ pytest_recwarn plugin
helpers for asserting deprecation and other warnings.
**recwarn**: function argument where one can call recwarn.pop() to get
the last warning that would have been shown.
.. contents::
:local:
Example usage
---------------------
You can use the ``recwarn`` funcarg to track
warnings within a test function:
.. sourcecode:: python
def test_hello(recwarn):
from warnings import warn
warn("hello", DeprecationWarning)
w = recwarn.pop(DeprecationWarning)
assert issubclass(w.category, DeprecationWarning)
assert 'hello' in str(w.message)
assert w.filename
assert w.lineno
You can also call a global helper for checking
that a certain function call yields a
DeprecationWarning:
.. sourcecode:: python
import py
def test_global():
py.test.deprecated_call(myfunction, 17)
**py.test.deprecated_call(func, *args, **kwargs)**: assert that the given function call triggers a deprecation warning.
.. _`recwarn funcarg`:
the 'recwarn' test function argument
------------------------------------
check that warnings have been raised.
Return a WarningsRecorder instance that provides these methods:
Getting and improving this plugin
---------------------------------
* ``pop(category=None)``: return the first recorded warning matching the category.
* ``clear()``: clear the list of warnings (see the sketch below)
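
A short sketch exercising both methods in one test:

.. sourcecode:: python

    import warnings

    def test_pop_and_clear(recwarn):
        warnings.warn("deprecated", DeprecationWarning)
        warnings.warn("something else", UserWarning)
        w = recwarn.pop(DeprecationWarning)  # match by category
        assert 'deprecated' in str(w.message)
        recwarn.clear()  # drop any remaining recorded warnings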
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_recwarn.py`_ plugin source code
2. put it somewhere as ``pytest_recwarn.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_recwarn.py``:
.. sourcecode:: python
"""
helpers for asserting deprecation and other warnings.
**recwarn**: function argument where one can call recwarn.pop() to get
the last warning that would have been shown.
**py.test.deprecated_call(func, *args, **kwargs)**: assert that the given function call triggers a deprecation warning.
"""
import py
import os
def pytest_funcarg__recwarn(request):
""" check that warnings have been raised. """
warnings = WarningsRecorder()
request.addfinalizer(warnings.finalize)
return warnings
def pytest_namespace():
return {'deprecated_call': deprecated_call}
def deprecated_call(func, *args, **kwargs):
""" assert that calling func(*args, **kwargs)
triggers a DeprecationWarning.
"""
warningmodule = py.std.warnings
l = []
oldwarn_explicit = getattr(warningmodule, 'warn_explicit')
def warn_explicit(*args, **kwargs):
l.append(args)
oldwarn_explicit(*args, **kwargs)
oldwarn = getattr(warningmodule, 'warn')
def warn(*args, **kwargs):
l.append(args)
oldwarn(*args, **kwargs)
warningmodule.warn_explicit = warn_explicit
warningmodule.warn = warn
try:
ret = func(*args, **kwargs)
finally:
warningmodule.warn_explicit = warn_explicit
warningmodule.warn = warn
if not l:
#print warningmodule
raise AssertionError("%r did not produce DeprecationWarning" %(func,))
return ret
class RecordedWarning:
def __init__(self, message, category, filename, lineno, line):
self.message = message
self.category = category
self.filename = filename
self.lineno = lineno
self.line = line
class WarningsRecorder:
def __init__(self):
warningmodule = py.std.warnings
self.list = []
def showwarning(message, category, filename, lineno, line=0):
self.list.append(RecordedWarning(
message, category, filename, lineno, line))
try:
self.old_showwarning(message, category,
filename, lineno, line=line)
except TypeError:
# < python2.6
self.old_showwarning(message, category, filename, lineno)
self.old_showwarning = warningmodule.showwarning
warningmodule.showwarning = showwarning
def pop(self, cls=Warning):
""" pop the first recorded warning, raise exception if not exists."""
for i, w in py.builtin.enumerate(self.list):
if issubclass(w.category, cls):
return self.list.pop(i)
__tracebackhide__ = True
assert 0, "%r not found in %r" %(cls, self.list)
#def resetregistry(self):
# import warnings
# warnings.onceregistry.clear()
# warnings.__warningregistry__.clear()
def clear(self):
self.list[:] = []
def finalize(self):
py.std.warnings.showwarning = self.old_showwarning
def test_WarningRecorder():
showwarning = py.std.warnings.showwarning
rec = WarningsRecorder()
assert py.std.warnings.showwarning != showwarning
assert not rec.list
py.std.warnings.warn_explicit("hello", UserWarning, "xyz", 13)
assert len(rec.list) == 1
py.std.warnings.warn(DeprecationWarning("hello"))
assert len(rec.list) == 2
warn = rec.pop()
assert str(warn.message) == "hello"
l = rec.list
rec.clear()
assert len(rec.list) == 0
assert l is rec.list
py.test.raises(AssertionError, "rec.pop()")
rec.finalize()
assert showwarning == py.std.warnings.showwarning
def test_recwarn_functional(testdir):
reprec = testdir.inline_runsource("""
pytest_plugins = 'pytest_recwarn',
import warnings
oldwarn = warnings.showwarning
def test_method(recwarn):
assert warnings.showwarning != oldwarn
warnings.warn("hello")
warn = recwarn.pop()
assert isinstance(warn.message, UserWarning)
def test_finalized():
assert warnings.showwarning == oldwarn
""")
res = reprec.countoutcomes()
assert tuple(res) == (2, 0, 0), res
#
# ============ test py.test.deprecated_call() ==============
#
def dep(i):
if i == 0:
py.std.warnings.warn("is deprecated", DeprecationWarning)
return 42
reg = {}
def dep_explicit(i):
if i == 0:
py.std.warnings.warn_explicit("dep_explicit", category=DeprecationWarning,
filename="hello", lineno=3)
def test_deprecated_call_raises():
excinfo = py.test.raises(AssertionError,
"py.test.deprecated_call(dep, 3)")
assert str(excinfo).find("did not produce") != -1
def test_deprecated_call():
py.test.deprecated_call(dep, 0)
def test_deprecated_call_ret():
ret = py.test.deprecated_call(dep, 0)
assert ret == 42
def test_deprecated_call_preserves():
r = py.std.warnings.onceregistry.copy()
f = py.std.warnings.filters[:]
test_deprecated_call_raises()
test_deprecated_call()
assert r == py.std.warnings.onceregistry
assert f == py.std.warnings.filters
def test_deprecated_explicit_call_raises():
py.test.raises(AssertionError,
"py.test.deprecated_call(dep_explicit, 3)")
def test_deprecated_explicit_call():
py.test.deprecated_call(dep_explicit, 0)
py.test.deprecated_call(dep_explicit, 0)
.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_recwarn.py
.. _`pytest_recwarn.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_recwarn.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -4,6 +4,9 @@ pytest_restdoc plugin
perform ReST syntax, local and remote reference tests on .rst/.txt files.
.. contents::
:local:
command line options
@ -17,514 +20,19 @@ command line options
``--forcegen``
force generation of html files.
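Note that the plugin only collects ``.txt``/``.rst`` files below a
directory that contains a ``confrest.py`` defining a ``Project`` class
(see ``getproject()`` in the inlined source below). A minimal sketch of
such a file, reusing the Project class shipped with the py lib:

.. sourcecode:: python

    # confrest.py -- minimal sketch: placing this next to your text
    # files makes the plugin collect and syntax-check them
    from py.__.misc.rest import Project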
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_restdoc.py`_ plugin source code
2. put it somewhere as ``pytest_restdoc.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_restdoc.py``:
.. sourcecode:: python
"""
perform ReST syntax, local and remote reference tests on .rst/.txt files.
"""
import py
def pytest_addoption(parser):
group = parser.addgroup("ReST", "ReST documentation check options")
group.addoption('-R', '--urlcheck',
action="store_true", dest="urlcheck", default=False,
help="urlopen() remote links found in ReST text files.")
group.addoption('--urltimeout', action="store", metavar="secs",
type="int", dest="urlcheck_timeout", default=5,
help="timeout in seconds for remote urlchecks")
group.addoption('--forcegen',
action="store_true", dest="forcegen", default=False,
help="force generation of html files.")
def pytest_collect_file(path, parent):
if path.ext in (".txt", ".rst"):
project = getproject(path)
if project is not None:
return ReSTFile(path, parent=parent, project=project)
def getproject(path):
for parent in path.parts(reverse=True):
confrest = parent.join("confrest.py")
if confrest.check():
Project = confrest.pyimport().Project
return Project(parent)
class ReSTFile(py.test.collect.File):
def __init__(self, fspath, parent, project=None):
super(ReSTFile, self).__init__(fspath=fspath, parent=parent)
if project is None:
project = getproject(fspath)
assert project is not None
self.project = project
def collect(self):
return [
ReSTSyntaxTest(self.project, "ReSTSyntax", parent=self),
LinkCheckerMaker("checklinks", parent=self),
DoctestText("doctest", parent=self),
]
def deindent(s, sep='\n'):
leastspaces = -1
lines = s.split(sep)
for line in lines:
if not line.strip():
continue
spaces = len(line) - len(line.lstrip())
if leastspaces == -1 or spaces < leastspaces:
leastspaces = spaces
if leastspaces == -1:
return s
for i, line in py.builtin.enumerate(lines):
if not line.strip():
lines[i] = ''
else:
lines[i] = line[leastspaces:]
return sep.join(lines)
class ReSTSyntaxTest(py.test.collect.Item):
def __init__(self, project, *args, **kwargs):
super(ReSTSyntaxTest, self).__init__(*args, **kwargs)
self.project = project
def reportinfo(self):
return self.fspath, None, "syntax check"
def runtest(self):
self.restcheck(py.path.svnwc(self.fspath))
def restcheck(self, path):
py.test.importorskip("docutils")
self.register_linkrole()
from docutils.utils import SystemMessage
try:
self._checkskip(path, self.project.get_htmloutputpath(path))
self.project.process(path)
except KeyboardInterrupt:
raise
except SystemMessage:
# we assume docutils printed info on stdout
py.test.fail("docutils processing failed, see captured stderr")
def register_linkrole(self):
from py.__.rest import directive
directive.register_linkrole('api', self.resolve_linkrole)
directive.register_linkrole('source', self.resolve_linkrole)
# XXX fake sphinx' "toctree" and refs
directive.register_linkrole('ref', self.resolve_linkrole)
from docutils.parsers.rst import directives
def toctree_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return []
toctree_directive.content = 1
toctree_directive.options = {'maxdepth': int, 'glob': directives.flag,
'hidden': directives.flag}
directives.register_directive('toctree', toctree_directive)
self.register_pygments()
def register_pygments(self):
# taken from pygments-main/external/rst-directive.py
try:
from pygments.formatters import HtmlFormatter
except ImportError:
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
return []
else:
# The default formatter
DEFAULT = HtmlFormatter(noclasses=True)
# Add name -> formatter pairs for every variant you want to use
VARIANTS = {
# 'linenos': HtmlFormatter(noclasses=INLINESTYLES, linenos=True),
}
from docutils import nodes
from docutils.parsers.rst import directives
from pygments import highlight
from pygments.lexers import get_lexer_by_name, TextLexer
def pygments_directive(name, arguments, options, content, lineno,
content_offset, block_text, state, state_machine):
try:
lexer = get_lexer_by_name(arguments[0])
except ValueError:
# no lexer found - use the text one instead of an exception
lexer = TextLexer()
# take an arbitrary option if more than one is given
formatter = options and VARIANTS[options.keys()[0]] or DEFAULT
parsed = highlight(u'\n'.join(content), lexer, formatter)
return [nodes.raw('', parsed, format='html')]
pygments_directive.arguments = (1, 0, 1)
pygments_directive.content = 1
pygments_directive.options = dict([(key, directives.flag) for key in VARIANTS])
directives.register_directive('sourcecode', pygments_directive)
def resolve_linkrole(self, name, text, check=True):
apigen_relpath = self.project.apigen_relpath
if name == 'api':
if text == 'py':
return ('py', apigen_relpath + 'api/index.html')
else:
assert text.startswith('py.'), (
'api link "%s" does not point to the py package') % (text,)
dotted_name = text
if dotted_name.find('(') > -1:
dotted_name = dotted_name[:text.find('(')]
# remove pkg root
path = dotted_name.split('.')[1:]
dotted_name = '.'.join(path)
obj = py
if check:
for chunk in path:
try:
obj = getattr(obj, chunk)
except AttributeError:
raise AssertionError(
'problem with linkrole :api:`%s`: can not resolve '
'dotted name %s' % (text, dotted_name,))
return (text, apigen_relpath + 'api/%s.html' % (dotted_name,))
elif name == 'source':
assert text.startswith('py/'), ('source link "%s" does not point '
'to the py package') % (text,)
relpath = '/'.join(text.split('/')[1:])
if check:
pkgroot = py.__pkg__.getpath()
abspath = pkgroot.join(relpath)
assert pkgroot.join(relpath).check(), (
'problem with linkrole :source:`%s`: '
'path %s does not exist' % (text, relpath))
if relpath.endswith('/') or not relpath:
relpath += 'index.html'
else:
relpath += '.html'
return (text, apigen_relpath + 'source/%s' % (relpath,))
elif name == 'ref':
return ("", "")
def _checkskip(self, lpath, htmlpath=None):
if not self.config.getvalue("forcegen"):
lpath = py.path.local(lpath)
if htmlpath is not None:
htmlpath = py.path.local(htmlpath)
if lpath.ext == '.txt':
htmlpath = htmlpath or lpath.new(ext='.html')
if htmlpath.check(file=1) and htmlpath.mtime() >= lpath.mtime():
py.test.skip("html file is up to date, use --forcegen to regenerate")
#return [] # no need to rebuild
class DoctestText(py.test.collect.Item):
def reportinfo(self):
return self.fspath, None, "doctest"
def runtest(self):
content = self._normalize_linesep()
newcontent = self.config.hook.pytest_doctest_prepare_content(content=content)
if newcontent is not None:
content = newcontent
s = content
l = []
prefix = '.. >>> '
mod = py.std.types.ModuleType(self.fspath.purebasename)
skipchunk = False
for line in deindent(s).split('\n'):
stripped = line.strip()
if skipchunk and line.startswith(skipchunk):
print "skipping", line
continue
skipchunk = False
if stripped.startswith(prefix):
try:
exec py.code.Source(stripped[len(prefix):]).compile() in \
mod.__dict__
except ValueError, e:
if e.args and e.args[0] == "skipchunk":
skipchunk = " " * (len(line) - len(line.lstrip()))
else:
raise
else:
l.append(line)
docstring = "\n".join(l)
mod.__doc__ = docstring
failed, tot = py.compat.doctest.testmod(mod, verbose=1)
if failed:
py.test.fail("doctest %s: %s failed out of %s" %(
self.fspath, failed, tot))
def _normalize_linesep(self):
# XXX quite nasty... but it works (fixes win32 issues)
s = self.fspath.read()
linesep = '\n'
if '\r' in s:
if '\n' not in s:
linesep = '\r'
else:
linesep = '\r\n'
s = s.replace(linesep, '\n')
return s
class LinkCheckerMaker(py.test.collect.Collector):
def collect(self):
return list(self.genlinkchecks())
def genlinkchecks(self):
path = self.fspath
# generating functions + args as single tests
timeout = self.config.getvalue("urlcheck_timeout")
for lineno, line in py.builtin.enumerate(path.readlines()):
line = line.strip()
if line.startswith('.. _'):
if line.startswith('.. _`'):
delim = '`:'
else:
delim = ':'
l = line.split(delim, 1)
if len(l) != 2:
continue
tryfn = l[1].strip()
name = "%s:%d" %(tryfn, lineno)
if tryfn.startswith('http:') or tryfn.startswith('https'):
if self.config.getvalue("urlcheck"):
yield CheckLink(name, parent=self,
args=(tryfn, path, lineno, timeout), checkfunc=urlcheck)
elif tryfn.startswith('webcal:'):
continue
else:
i = tryfn.find('#')
if i != -1:
checkfn = tryfn[:i]
else:
checkfn = tryfn
if checkfn.strip():
yield CheckLink(name, parent=self,
args=(tryfn, path, lineno), checkfunc=localrefcheck)
class CheckLink(py.test.collect.Item):
def __init__(self, name, parent, args, checkfunc):
super(CheckLink, self).__init__(name, parent)
self.args = args
self.checkfunc = checkfunc
def runtest(self):
return self.checkfunc(*self.args)
def reportinfo(self, basedir=None):
return (self.fspath, self.args[2], "checklink: %s" % self.args[0])
def urlcheck(tryfn, path, lineno, TIMEOUT_URLOPEN):
old = py.std.socket.getdefaulttimeout()
py.std.socket.setdefaulttimeout(TIMEOUT_URLOPEN)
try:
try:
print "trying remote", tryfn
py.std.urllib2.urlopen(tryfn)
finally:
py.std.socket.setdefaulttimeout(old)
except (py.std.urllib2.URLError, py.std.urllib2.HTTPError), e:
if getattr(e, 'code', None) in (401, 403): # authorization required, forbidden
py.test.skip("%s: %s" %(tryfn, str(e)))
else:
py.test.fail("remote reference error %r in %s:%d\n%s" %(
tryfn, path.basename, lineno+1, e))
def localrefcheck(tryfn, path, lineno):
# assume it should be a file
i = tryfn.find('#')
if tryfn.startswith('javascript:'):
return # don't check JS refs
if i != -1:
anchor = tryfn[i+1:]
tryfn = tryfn[:i]
else:
anchor = ''
fn = path.dirpath(tryfn)
ishtml = fn.ext == '.html'
fn = ishtml and fn.new(ext='.txt') or fn
print "filename is", fn
if not fn.check(): # not ishtml or not fn.check():
if not py.path.local(tryfn).check(): # the html could be there
py.test.fail("reference error %r in %s:%d" %(
tryfn, path.basename, lineno+1))
if anchor:
source = unicode(fn.read(), 'latin1')
source = source.lower().replace('-', ' ') # aehem
anchor = anchor.replace('-', ' ')
match2 = ".. _`%s`:" % anchor
match3 = ".. _%s:" % anchor
candidates = (anchor, match2, match3)
print "candidates", repr(candidates)
for line in source.split('\n'):
line = line.strip()
if line in candidates:
break
else:
py.test.fail("anchor reference error %s#%s in %s:%d" %(
tryfn, anchor, path.basename, lineno+1))
#
# PLUGIN tests
#
def test_deindent():
assert deindent('foo') == 'foo'
assert deindent('foo\n bar') == 'foo\n bar'
assert deindent(' foo\n bar\n') == 'foo\nbar\n'
assert deindent(' foo\n\n bar\n') == 'foo\n\nbar\n'
assert deindent(' foo\n bar\n') == 'foo\n bar\n'
assert deindent(' foo\n bar\n') == ' foo\nbar\n'
class TestApigenLinkRole:
disabled = True
# these tests are moved here from the former py/doc/conftest.py
def test_resolve_linkrole(self):
from py.__.doc.conftest import get_apigen_relpath
apigen_relpath = get_apigen_relpath()
assert resolve_linkrole('api', 'py.foo.bar', False) == (
'py.foo.bar', apigen_relpath + 'api/foo.bar.html')
assert resolve_linkrole('api', 'py.foo.bar()', False) == (
'py.foo.bar()', apigen_relpath + 'api/foo.bar.html')
assert resolve_linkrole('api', 'py', False) == (
'py', apigen_relpath + 'api/index.html')
py.test.raises(AssertionError, 'resolve_linkrole("api", "foo.bar")')
assert resolve_linkrole('source', 'py/foo/bar.py', False) == (
'py/foo/bar.py', apigen_relpath + 'source/foo/bar.py.html')
assert resolve_linkrole('source', 'py/foo/', False) == (
'py/foo/', apigen_relpath + 'source/foo/index.html')
assert resolve_linkrole('source', 'py/', False) == (
'py/', apigen_relpath + 'source/index.html')
py.test.raises(AssertionError, 'resolve_linkrole("source", "/foo/bar/")')
def test_resolve_linkrole_check_api(self):
assert resolve_linkrole('api', 'py.test.ensuretemp')
py.test.raises(AssertionError, "resolve_linkrole('api', 'py.foo.baz')")
def test_resolve_linkrole_check_source(self):
assert resolve_linkrole('source', 'py/path/common.py')
py.test.raises(AssertionError,
"resolve_linkrole('source', 'py/foo/bar.py')")
class TestDoctest:
def pytest_funcarg__testdir(self, request):
testdir = request.getfuncargvalue("testdir")
assert request.module.__name__ == __name__
testdir.makepyfile(confrest="from py.__.misc.rest import Project")
for p in testdir.plugins:
if p == globals():
break
else:
testdir.plugins.append(globals())
return testdir
def test_doctest_extra_exec(self, testdir):
xtxt = testdir.maketxtfile(x="""
hello::
.. >>> raise ValueError
>>> None
""")
reprec = testdir.inline_run(xtxt)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 1
def test_doctest_basic(self, testdir):
xtxt = testdir.maketxtfile(x="""
..
>>> from os.path import abspath
hello world
>>> assert abspath
>>> i=3
>>> print i
3
yes yes
>>> i
3
end
""")
reprec = testdir.inline_run(xtxt)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 0
assert passed + skipped == 2
def test_doctest_eol(self, testdir):
ytxt = testdir.maketxtfile(y=".. >>> 1 + 1\r\n 2\r\n\r\n")
reprec = testdir.inline_run(ytxt)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 0
assert passed + skipped == 2
def test_doctest_indentation(self, testdir):
footxt = testdir.maketxtfile(foo=
'..\n >>> print "foo\\n bar"\n foo\n bar\n')
reprec = testdir.inline_run(footxt)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 0
assert skipped + passed == 2
def test_js_ignore(self, testdir):
xtxt = testdir.maketxtfile(xtxt="""
`blah`_
.. _`blah`: javascript:some_function()
""")
reprec = testdir.inline_run(xtxt)
passed, skipped, failed = reprec.countoutcomes()
assert failed == 0
assert skipped + passed == 3
def test_pytest_doctest_prepare_content(self, testdir):
l = []
class MyPlugin:
def pytest_doctest_prepare_content(self, content):
l.append(content)
return content.replace("False", "True")
testdir.plugins.append(MyPlugin())
xtxt = testdir.maketxtfile(x="""
hello:
>>> 2 == 2
False
""")
reprec = testdir.inline_run(xtxt)
assert len(l) == 1
passed, skipped, failed = reprec.countoutcomes()
assert passed >= 1
assert not failed
assert skipped <= 1
.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_restdoc.py
.. _`pytest_restdoc.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_restdoc.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -4,6 +4,9 @@ pytest_resultlog plugin
resultlog plugin for machine-readable logging of test results.
.. contents::
:local:
Useful for buildbot integration code.
command line options
@ -13,268 +16,19 @@ command line options
``--resultlog=path``
path for machine-readable result log.
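Each log entry is a status letter plus a test path on one line; longer
reports follow, indented by a single space (see ``write_log_entry()``
in the inlined source below). A hedged sketch of a reader for that
format:

.. sourcecode:: python

    def parse_resultlog(path):
        """ yield (status, testpath, longrepr-lines) entries; a sketch
        based on the one-space continuation-line convention. """
        entries = []
        for line in open(path):
            if line.startswith(' '):
                # continuation line belonging to the previous entry
                entries[-1][2].append(line[1:].rstrip('\n'))
            else:
                status, testpath = line.rstrip('\n').split(' ', 1)
                entries.append([status, testpath, []])
        return entries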
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_resultlog.py`_ plugin source code
2. put it somewhere as ``pytest_resultlog.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_resultlog.py``:
.. sourcecode:: python
"""resultlog plugin for machine-readable logging of test results.
Useful for buildbot integration code.
"""
import py
def pytest_addoption(parser):
group = parser.addgroup("resultlog", "resultlog plugin options")
group.addoption('--resultlog', action="store", dest="resultlog", metavar="path", default=None,
help="path for machine-readable result log.")
def pytest_configure(config):
resultlog = config.option.resultlog
if resultlog:
logfile = open(resultlog, 'w', 1) # line buffered
config._resultlog = ResultLog(logfile)
config.pluginmanager.register(config._resultlog)
def pytest_unconfigure(config):
resultlog = getattr(config, '_resultlog', None)
if resultlog:
resultlog.logfile.close()
del config._resultlog
config.pluginmanager.unregister(resultlog)
def generic_path(item):
chain = item.listchain()
gpath = [chain[0].name]
fspath = chain[0].fspath
fspart = False
for node in chain[1:]:
newfspath = node.fspath
if newfspath == fspath:
if fspart:
gpath.append(':')
fspart = False
else:
gpath.append('.')
else:
gpath.append('/')
fspart = True
name = node.name
if name[0] in '([':
gpath.pop()
gpath.append(name)
fspath = newfspath
return ''.join(gpath)
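# example (comment added for illustration, not in the original source):
# for an item chain  proj/test -> a -> B -> () -> c -> [1]
# generic_path() returns 'test/a:B().c[1]': '/' joins filesystem parts,
# ':' marks the filesystem/python boundary and '.' joins python names
# -- see test_generic_path() below.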
class ResultLog(object):
def __init__(self, logfile):
self.logfile = logfile # preferably line buffered
def write_log_entry(self, testpath, shortrepr, longrepr):
print >>self.logfile, "%s %s" % (shortrepr, testpath)
for line in longrepr.splitlines():
print >>self.logfile, " %s" % line
def log_outcome(self, node, shortrepr, longrepr):
testpath = generic_path(node)
self.write_log_entry(testpath, shortrepr, longrepr)
def pytest_runtest_logreport(self, rep):
code = rep.shortrepr
if rep.passed:
longrepr = ""
elif rep.failed:
longrepr = str(rep.longrepr)
elif rep.skipped:
longrepr = str(rep.longrepr.reprcrash.message)
self.log_outcome(rep.item, code, longrepr)
def pytest_collectreport(self, rep):
if not rep.passed:
if rep.failed:
code = "F"
else:
assert rep.skipped
code = "S"
longrepr = str(rep.longrepr.reprcrash)
self.log_outcome(rep.collector, code, longrepr)
def pytest_internalerror(self, excrepr):
path = excrepr.reprcrash.path
self.write_log_entry(path, '!', str(excrepr))
# ===============================================================================
#
# plugin tests
#
# ===============================================================================
import os, StringIO
def test_generic_path():
from py.__.test.collect import Node, Item, FSCollector
p1 = Node('a')
assert p1.fspath is None
p2 = Node('B', parent=p1)
p3 = Node('()', parent = p2)
item = Item('c', parent = p3)
res = generic_path(item)
assert res == 'a.B().c'
p0 = FSCollector('proj/test')
p1 = FSCollector('proj/test/a', parent=p0)
p2 = Node('B', parent=p1)
p3 = Node('()', parent = p2)
p4 = Node('c', parent=p3)
item = Item('[1]', parent = p4)
res = generic_path(item)
assert res == 'test/a:B().c[1]'
def test_write_log_entry():
reslog = ResultLog(None)
reslog.logfile = StringIO.StringIO()
reslog.write_log_entry('name', '.', '')
entry = reslog.logfile.getvalue()
assert entry[-1] == '\n'
entry_lines = entry.splitlines()
assert len(entry_lines) == 1
assert entry_lines[0] == '. name'
reslog.logfile = StringIO.StringIO()
reslog.write_log_entry('name', 's', 'Skipped')
entry = reslog.logfile.getvalue()
assert entry[-1] == '\n'
entry_lines = entry.splitlines()
assert len(entry_lines) == 2
assert entry_lines[0] == 's name'
assert entry_lines[1] == ' Skipped'
reslog.logfile = StringIO.StringIO()
reslog.write_log_entry('name', 's', 'Skipped\n')
entry = reslog.logfile.getvalue()
assert entry[-1] == '\n'
entry_lines = entry.splitlines()
assert len(entry_lines) == 2
assert entry_lines[0] == 's name'
assert entry_lines[1] == ' Skipped'
reslog.logfile = StringIO.StringIO()
longrepr = ' tb1\n tb 2\nE tb3\nSome Error'
reslog.write_log_entry('name', 'F', longrepr)
entry = reslog.logfile.getvalue()
assert entry[-1] == '\n'
entry_lines = entry.splitlines()
assert len(entry_lines) == 5
assert entry_lines[0] == 'F name'
assert entry_lines[1:] == [' '+line for line in longrepr.splitlines()]
class TestWithFunctionIntegration:
# XXX (hpk) i think that the resultlog plugin should
# provide a Parser object so that one can remain
# ignorant regarding formatting details.
def getresultlog(self, testdir, arg):
resultlog = testdir.tmpdir.join("resultlog")
testdir.plugins.append("resultlog")
args = ["--resultlog=%s" % resultlog] + [arg]
testdir.runpytest(*args)
return filter(None, resultlog.readlines(cr=0))
def test_collection_report(self, testdir):
ok = testdir.makepyfile(test_collection_ok="")
skip = testdir.makepyfile(test_collection_skip="import py ; py.test.skip('hello')")
fail = testdir.makepyfile(test_collection_fail="XXX")
lines = self.getresultlog(testdir, ok)
assert not lines
lines = self.getresultlog(testdir, skip)
assert len(lines) == 2
assert lines[0].startswith("S ")
assert lines[0].endswith("test_collection_skip.py")
assert lines[1].startswith(" ")
assert lines[1].endswith("test_collection_skip.py:1: Skipped: 'hello'")
lines = self.getresultlog(testdir, fail)
assert lines
assert lines[0].startswith("F ")
assert lines[0].endswith("test_collection_fail.py"), lines[0]
for x in lines[1:]:
assert x.startswith(" ")
assert "XXX" in "".join(lines[1:])
def test_log_test_outcomes(self, testdir):
mod = testdir.makepyfile(test_mod="""
import py
def test_pass(): pass
def test_skip(): py.test.skip("hello")
def test_fail(): raise ValueError("val")
""")
lines = self.getresultlog(testdir, mod)
assert len(lines) >= 3
assert lines[0].startswith(". ")
assert lines[0].endswith("test_pass")
assert lines[1].startswith("s "), lines[1]
assert lines[1].endswith("test_skip")
assert lines[2].find("hello") != -1
assert lines[3].startswith("F ")
assert lines[3].endswith("test_fail")
tb = "".join(lines[4:])
assert tb.find("ValueError") != -1
def test_internal_exception(self):
# they are produced for example by a teardown failing
# at the end of the run
try:
raise ValueError
except ValueError:
excinfo = py.code.ExceptionInfo()
reslog = ResultLog(StringIO.StringIO())
reslog.pytest_internalerror(excinfo.getrepr())
entry = reslog.logfile.getvalue()
entry_lines = entry.splitlines()
assert entry_lines[0].startswith('! ')
assert os.path.basename(__file__)[:-1] in entry_lines[0] #.py/.pyc
assert entry_lines[-1][0] == ' '
assert 'ValueError' in entry
def test_generic(testdir, LineMatcher):
testdir.plugins.append("resultlog")
testdir.makepyfile("""
import py
def test_pass():
pass
def test_fail():
assert 0
def test_skip():
py.test.skip("")
""")
testdir.runpytest("--resultlog=result.log")
lines = testdir.tmpdir.join("result.log").readlines(cr=0)
LineMatcher(lines).fnmatch_lines([
". *:test_pass",
"F *:test_fail",
"s *:test_skip",
])
.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_resultlog.py
.. _`pytest_resultlog.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_resultlog.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -1,304 +0,0 @@
pytest_runner plugin
====================
collect and run test items and create reports.
command line options
--------------------
``--boxed``
box each test run in a separate process
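For example (a sketch; boxing forks a child process, so it is
POSIX-only): with ``--boxed`` even a hard crash becomes a failure
report instead of taking down the whole test session:

.. sourcecode:: python

    import os

    def test_hard_crash():
        # without --boxed this would kill the py.test process;
        # with --boxed it is reported as a crashed test
        os._exit(1)

    def test_still_runs():
        assert True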
Getting and improving this plugin
---------------------------------
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
1. Download `pytest_runner.py`_ plugin source code
2. put it somewhere as ``pytest_runner.py`` into your import path
3. a subsequent test run will now use your local version!
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_runner.py``:
.. sourcecode:: python
"""
collect and run test items and create reports.
"""
import py
from py.__.test.outcome import Skipped
#
# pytest plugin hooks
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--boxed',
action="store_true", dest="boxed", default=False,
help="box each test run in a separate process")
# XXX move to pytest_sessionstart and fix py.test owns tests
def pytest_configure(config):
config._setupstate = SetupState()
def pytest_sessionfinish(session, exitstatus):
# XXX see above
if hasattr(session.config, '_setupstate'):
session.config._setupstate.teardown_all()
# prevent logging module atexit handler from choking on
# its attempt to close already closed streams
# see http://bugs.python.org/issue6333
mod = py.std.sys.modules.get("logging", None)
if mod is not None:
mod.raiseExceptions = False
def pytest_make_collect_report(collector):
call = collector.config.guardedcall(
lambda: collector._memocollect()
)
result = None
if not call.excinfo:
result = call.result
return CollectReport(collector, result, call.excinfo, call.outerr)
def pytest_runtest_protocol(item):
if item.config.getvalue("boxed"):
reports = forked_run_report(item)
for rep in reports:
item.config.hook.pytest_runtest_logreport(rep=rep)
else:
runtestprotocol(item)
return True
def runtestprotocol(item, log=True):
rep = call_and_report(item, "setup", log)
reports = [rep]
if rep.passed:
reports.append(call_and_report(item, "call", log))
reports.append(call_and_report(item, "teardown", log))
return reports
def pytest_runtest_setup(item):
item.config._setupstate.prepare(item)
def pytest_runtest_call(item):
if not item._deprecated_testexecution():
item.runtest()
def pytest_runtest_makereport(item, call):
return ItemTestReport(item, call.excinfo, call.when, call.outerr)
def pytest_runtest_teardown(item):
item.config._setupstate.teardown_exact(item)
#
# Implementation
def call_and_report(item, when, log=True):
call = RuntestHookCall(item, when)
hook = item.config.hook
report = hook.pytest_runtest_makereport(item=item, call=call)
if log and (when == "call" or not report.passed):
hook.pytest_runtest_logreport(rep=report)
return report
class RuntestHookCall:
excinfo = None
_prefix = "pytest_runtest_"
def __init__(self, item, when):
self.when = when
hookname = self._prefix + when
hook = getattr(item.config.hook, hookname)
capture = item.config._getcapture()
try:
try:
self.result = hook(item=item)
except KeyboardInterrupt:
raise
except:
self.excinfo = py.code.ExceptionInfo()
finally:
self.outerr = capture.reset()
def forked_run_report(item):
# for now, we run setup/teardown in the subprocess
# XXX optionally allow sharing of setup/teardown
EXITSTATUS_TESTEXIT = 4
from py.__.test.dist.mypickle import ImmutablePickler
ipickle = ImmutablePickler(uneven=0)
ipickle.selfmemoize(item.config)
# XXX workaround the issue that 2.6 cannot pickle
# instances of classes defined in global conftest.py files
ipickle.selfmemoize(item)
def runforked():
try:
reports = runtestprotocol(item, log=False)
except KeyboardInterrupt:
py.std.os._exit(EXITSTATUS_TESTEXIT)
return ipickle.dumps(reports)
ff = py.process.ForkedFunc(runforked)
result = ff.waitfinish()
if result.retval is not None:
return ipickle.loads(result.retval)
else:
if result.exitstatus == EXITSTATUS_TESTEXIT:
py.test.exit("forked test item %s raised Exit" %(item,))
return [report_process_crash(item, result)]
def report_process_crash(item, result):
path, lineno = item._getfslineno()
info = "%s:%s: running the test CRASHED with signal %d" %(
path, lineno, result.signal)
return ItemTestReport(item, excinfo=info, when="???")
class BaseReport(object):
def __repr__(self):
l = ["%s=%s" %(key, value)
for key, value in self.__dict__.items()]
return "<%s %s>" %(self.__class__.__name__, " ".join(l),)
def toterminal(self, out):
longrepr = self.longrepr
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
else:
out.line(str(longrepr))
class ItemTestReport(BaseReport):
failed = passed = skipped = False
def __init__(self, item, excinfo=None, when=None, outerr=None):
self.item = item
self.when = when
self.outerr = outerr
if item and when != "setup":
self.keywords = item.readkeywords()
else:
# if we fail during setup it might mean
# we are not able to access the underlying object
# this might e.g. happen if we are unpickled
# and our parent collector did not collect us
# (e.g. because it was skipped for platform reasons)
self.keywords = {}
if not excinfo:
self.passed = True
self.shortrepr = "."
else:
if not isinstance(excinfo, py.code.ExceptionInfo):
self.failed = True
shortrepr = "?"
longrepr = excinfo
elif excinfo.errisinstance(Skipped):
self.skipped = True
shortrepr = "s"
longrepr = self.item._repr_failure_py(excinfo, outerr)
else:
self.failed = True
shortrepr = self.item.shortfailurerepr
if self.when == "call":
longrepr = self.item.repr_failure(excinfo, outerr)
else: # exception in setup or teardown
longrepr = self.item._repr_failure_py(excinfo, outerr)
shortrepr = shortrepr.lower()
self.shortrepr = shortrepr
self.longrepr = longrepr
def getnode(self):
return self.item
class CollectReport(BaseReport):
skipped = failed = passed = False
def __init__(self, collector, result, excinfo=None, outerr=None):
self.collector = collector
if not excinfo:
self.passed = True
self.result = result
else:
self.outerr = outerr
self.longrepr = self.collector._repr_failure_py(excinfo, outerr)
if excinfo.errisinstance(Skipped):
self.skipped = True
self.reason = str(excinfo.value)
else:
self.failed = True
def getnode(self):
return self.collector
class SetupState(object):
""" shared state for setting up/tearing down test items or collectors. """
def __init__(self):
self.stack = []
self._finalizers = {}
def addfinalizer(self, finalizer, colitem):
""" attach a finalizer to the given colitem.
if colitem is None, this will add a finalizer that
is called at the end of teardown_all().
"""
assert callable(finalizer)
#assert colitem in self.stack
self._finalizers.setdefault(colitem, []).append(finalizer)
def _pop_and_teardown(self):
colitem = self.stack.pop()
self._teardown_with_finalization(colitem)
def _callfinalizers(self, colitem):
finalizers = self._finalizers.pop(colitem, None)
while finalizers:
fin = finalizers.pop()
fin()
def _teardown_with_finalization(self, colitem):
self._callfinalizers(colitem)
if colitem:
colitem.teardown()
for colitem in self._finalizers:
assert colitem is None or colitem in self.stack
def teardown_all(self):
while self.stack:
self._pop_and_teardown()
self._teardown_with_finalization(None)
assert not self._finalizers
def teardown_exact(self, item):
if item == self.stack[-1]:
self._pop_and_teardown()
else:
self._callfinalizers(item)
def prepare(self, colitem):
""" setup objects along the collector chain to the test-method
and teardown previously setup objects."""
needed_collectors = colitem.listchain()
while self.stack:
if self.stack == needed_collectors[:len(self.stack)]:
break
self._pop_and_teardown()
for col in needed_collectors[len(self.stack):]:
col.setup()
self.stack.append(col)
.. _`pytest_runner.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_runner.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html
.. _`checkout the py.test development version`: ../../download.html#checkout

View File

@ -2,430 +2,26 @@
pytest_terminal plugin
======================
terminal reporting of the full testing process.
Implements terminal reporting of the full testing process.
.. contents::
:local:
This is a good source for looking at the various reporting hooks.
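For instance, a conftest.py sketch hooking into the summary via the
``pytest_terminal_summary`` hook which this plugin invokes at session
finish (the separator text is made up for illustration):

.. sourcecode:: python

    # conftest.py -- sketch: append a line to the terminal summary
    def pytest_terminal_summary(terminalreporter):
        numfailed = len(terminalreporter.stats.get('failed', []))
        terminalreporter.write_sep("-", "%d failure(s) recorded" % numfailed)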
Start improving this plugin in 30 seconds
=========================================
Getting and improving this plugin
---------------------------------
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_terminal.py`_ plugin source code
2. put it somewhere as ``pytest_terminal.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_terminal.py``:
.. sourcecode:: python
"""
terminal reporting of the full testing process.
"""
import py
import sys
def pytest_configure(config):
if config.option.collectonly:
reporter = CollectonlyReporter(config)
else:
reporter = TerminalReporter(config)
# XXX see remote.py's XXX
for attr in 'pytest_terminal_hasmarkup', 'pytest_terminal_fullwidth':
if hasattr(config, attr):
#print "SETTING TERMINAL OPTIONS", attr, getattr(config, attr)
name = attr.split("_")[-1]
assert hasattr(reporter._tw, name), name
setattr(reporter._tw, name, getattr(config, attr))
config.pluginmanager.register(reporter)
class TerminalReporter:
def __init__(self, config, file=None):
self.config = config
self.stats = {}
self.curdir = py.path.local()
if file is None:
file = py.std.sys.stdout
self._tw = py.io.TerminalWriter(file)
self.currentfspath = None
self.gateway2info = {}
def write_fspath_result(self, fspath, res):
fspath = self.curdir.bestrelpath(fspath)
if fspath != self.currentfspath:
self._tw.line()
self._tw.write(fspath + " ")
self.currentfspath = fspath
self._tw.write(res)
def write_ensure_prefix(self, prefix, extra="", **kwargs):
if self.currentfspath != prefix:
self._tw.line()
self.currentfspath = prefix
self._tw.write(prefix)
if extra:
self._tw.write(extra, **kwargs)
self.currentfspath = -2
def ensure_newline(self):
if self.currentfspath:
self._tw.line()
self.currentfspath = None
def write_line(self, line, **markup):
line = str(line)
self.ensure_newline()
self._tw.line(line, **markup)
def write_sep(self, sep, title=None, **markup):
self.ensure_newline()
self._tw.sep(sep, title, **markup)
def getcategoryletterword(self, rep):
res = self.config.hook.pytest_report_teststatus(rep=rep)
if res:
return res
for cat in 'skipped failed passed ???'.split():
if getattr(rep, cat, None):
break
return cat, self.getoutcomeletter(rep), self.getoutcomeword(rep)
def getoutcomeletter(self, rep):
return rep.shortrepr
def getoutcomeword(self, rep):
if rep.passed:
return "PASS", dict(green=True)
elif rep.failed:
return "FAIL", dict(red=True)
elif rep.skipped:
return "SKIP"
else:
return "???", dict(red=True)
def pytest_internalerror(self, excrepr):
for line in str(excrepr).split("\n"):
self.write_line("INTERNALERROR> " + line)
def pyexecnet_gwmanage_newgateway(self, gateway, platinfo):
#self.write_line("%s instantiated gateway from spec %r" %(gateway.id, gateway.spec._spec))
d = {}
d['version'] = repr_pythonversion(platinfo.version_info)
d['id'] = gateway.id
d['spec'] = gateway.spec._spec
d['platform'] = platinfo.platform
if self.config.option.verbose:
d['extra'] = "- " + platinfo.executable
else:
d['extra'] = ""
d['cwd'] = platinfo.cwd
infoline = ("%(id)s %(spec)s -- platform %(platform)s, "
"Python %(version)s "
"cwd: %(cwd)s"
"%(extra)s" % d)
self.write_line(infoline)
self.gateway2info[gateway] = infoline
def pyexecnet_gwmanage_rsyncstart(self, source, gateways):
targets = ", ".join([gw.id for gw in gateways])
msg = "rsyncstart: %s -> %s" %(source, targets)
if not self.config.option.verbose:
msg += " # use --verbose to see rsync progress"
self.write_line(msg)
def pyexecnet_gwmanage_rsyncfinish(self, source, gateways):
targets = ", ".join([gw.id for gw in gateways])
self.write_line("rsyncfinish: %s -> %s" %(source, targets))
def pytest_plugin_registered(self, plugin):
if self.config.option.traceconfig:
msg = "PLUGIN registered: %s" %(plugin,)
# XXX this event may happen during setup/teardown time
# which unfortunately captures our output here
# which garbles our output if we use self.write_line
self.write_line(msg)
def pytest_testnodeready(self, node):
self.write_line("%s txnode ready to receive tests" %(node.gateway.id,))
def pytest_testnodedown(self, node, error):
if error:
self.write_line("%s node down, error: %s" %(node.gateway.id, error))
def pytest_trace(self, category, msg):
if self.config.option.debug or \
self.config.option.traceconfig and category.find("config") != -1:
self.write_line("[%s] %s" %(category, msg))
def pytest_rescheduleitems(self, items):
if self.config.option.debug:
self.write_sep("!", "RESCHEDULING %s " %(items,))
def pytest_deselected(self, items):
self.stats.setdefault('deselected', []).append(items)
def pytest_itemstart(self, item, node=None):
if self.config.option.dist != "no":
# for dist-testing situations itemstart means we
# queued the item for sending, not interesting (unless debugging)
if self.config.option.debug:
line = self._reportinfoline(item)
extra = ""
if node:
extra = "-> " + str(node.gateway.id)
self.write_ensure_prefix(line, extra)
else:
if self.config.option.verbose:
line = self._reportinfoline(item)
self.write_ensure_prefix(line, "")
else:
# ensure that the path is printed before the
# 1st test of a module starts running
fspath, lineno, msg = self._getreportinfo(item)
self.write_fspath_result(fspath, "")
def pytest_runtest_logreport(self, rep):
if rep.passed and rep.when in ("setup", "teardown"):
return
fspath = rep.item.fspath
cat, letter, word = self.getcategoryletterword(rep)
if isinstance(word, tuple):
word, markup = word
else:
markup = {}
self.stats.setdefault(cat, []).append(rep)
if not self.config.option.verbose:
fspath, lineno, msg = self._getreportinfo(rep.item)
self.write_fspath_result(fspath, letter)
else:
line = self._reportinfoline(rep.item)
if not hasattr(rep, 'node'):
self.write_ensure_prefix(line, word, **markup)
else:
self.ensure_newline()
if hasattr(rep, 'node'):
self._tw.write("%s " % rep.node.gateway.id)
self._tw.write(word, **markup)
self._tw.write(" " + line)
self.currentfspath = -2
def pytest_collectreport(self, rep):
if not rep.passed:
if rep.failed:
self.stats.setdefault("failed", []).append(rep)
msg = rep.longrepr.reprcrash.message
self.write_fspath_result(rep.collector.fspath, "F")
elif rep.skipped:
self.stats.setdefault("skipped", []).append(rep)
self.write_fspath_result(rep.collector.fspath, "S")
def pytest_sessionstart(self, session):
self.write_sep("=", "test session starts", bold=True)
self._sessionstarttime = py.std.time.time()
verinfo = ".".join(map(str, sys.version_info[:3]))
msg = "python: platform %s -- Python %s" % (sys.platform, verinfo)
if self.config.option.verbose or self.config.option.debug:
msg += " -- " + str(sys.executable)
msg += " -- pytest-%s" % (py.__version__)
self.write_line(msg)
if self.config.option.debug or self.config.option.traceconfig:
rev = py.__pkg__.getrev()
self.write_line("using py lib: %s <rev %s>" % (
py.path.local(py.__file__).dirpath(), rev))
if self.config.option.traceconfig:
plugins = []
for plugin in self.config.pluginmanager.comregistry:
name = plugin.__class__.__name__
if name.endswith("Plugin"):
name = name[:-6]
#if name == "Conftest":
# XXX get filename
plugins.append(name)
else:
plugins.append(str(plugin))
plugins = ", ".join(plugins)
self.write_line("active plugins: %s" %(plugins,))
for i, testarg in py.builtin.enumerate(self.config.args):
self.write_line("test object %d: %s" %(i+1, testarg))
def pytest_sessionfinish(self, __call__, session, exitstatus):
__call__.execute()
self._tw.line("")
if exitstatus in (0, 1, 2):
self.summary_failures()
self.summary_skips()
self.config.hook.pytest_terminal_summary(terminalreporter=self)
if exitstatus == 2:
self._report_keyboardinterrupt()
self.summary_deselected()
self.summary_stats()
def pytest_keyboard_interrupt(self, excinfo):
self._keyboardinterrupt_memo = excinfo.getrepr()
def _report_keyboardinterrupt(self):
self.write_sep("!", "KEYBOARD INTERRUPT")
excrepr = self._keyboardinterrupt_memo
if self.config.option.verbose:
excrepr.toterminal(self._tw)
else:
excrepr.reprcrash.toterminal(self._tw)
def pytest_looponfailinfo(self, failreports, rootdirs):
if failreports:
self.write_sep("#", "LOOPONFAILING", red=True)
for report in failreports:
try:
loc = report.longrepr.reprcrash
except AttributeError:
loc = str(report.longrepr)[:50]
self.write_line(loc, red=True)
self.write_sep("#", "waiting for changes")
for rootdir in rootdirs:
self.write_line("### Watching: %s" %(rootdir,), bold=True)
def _reportinfoline(self, item):
fspath, lineno, msg = self._getreportinfo(item)
if fspath:
fspath = self.curdir.bestrelpath(fspath)
if lineno is not None:
lineno += 1
if fspath and lineno and msg:
line = "%(fspath)s:%(lineno)s: %(msg)s"
elif fspath and msg:
line = "%(fspath)s: %(msg)s"
elif fspath and lineno:
line = "%(fspath)s:%(lineno)s"
else:
line = "[noreportinfo]"
return line % locals() + " "
def _getfailureheadline(self, rep):
if hasattr(rep, "collector"):
return str(rep.collector.fspath)
else:
fspath, lineno, msg = self._getreportinfo(rep.item)
return msg
def _getreportinfo(self, item):
try:
return item.__reportinfo
except AttributeError:
pass
reportinfo = item.config.hook.pytest_report_iteminfo(item=item)
# cache on item
item.__reportinfo = reportinfo
return reportinfo
#
# summaries for sessionfinish
#
def summary_failures(self):
if 'failed' in self.stats and self.config.option.tbstyle != "no":
self.write_sep("=", "FAILURES")
for rep in self.stats['failed']:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
if hasattr(rep, 'node'):
self.write_line(self.gateway2info.get(
rep.node.gateway,
"node %r (platinfo not found? strange)" % (rep.node.gateway,))
[:self._tw.fullwidth-1])
rep.toterminal(self._tw)
def summary_stats(self):
session_duration = py.std.time.time() - self._sessionstarttime
keys = "failed passed skipped deselected".split()
parts = []
for key in keys:
val = self.stats.get(key, None)
if val:
parts.append("%d %s" %(len(val), key))
line = ", ".join(parts)
# XXX coloring
self.write_sep("=", "%s in %.2f seconds" %(line, session_duration))
def summary_deselected(self):
if 'deselected' in self.stats:
self.write_sep("=", "%d tests deselected by %r" %(
len(self.stats['deselected']), self.config.option.keyword), bold=True)
def summary_skips(self):
if 'skipped' in self.stats:
if 'failed' not in self.stats: # or self.config.option.showskipsummary:
fskips = folded_skips(self.stats['skipped'])
if fskips:
self.write_sep("_", "skipped test summary")
for num, fspath, lineno, reason in fskips:
self._tw.line("%s:%d: [%d] %s" %(fspath, lineno, num, reason))
class CollectonlyReporter:
INDENT = " "
def __init__(self, config, out=None):
self.config = config
if out is None:
out = py.std.sys.stdout
self.out = py.io.TerminalWriter(out)
self.indent = ""
self._failed = []
def outindent(self, line):
self.out.line(self.indent + str(line))
def pytest_internalerror(self, excrepr):
for line in str(excrepr).split("\n"):
self.out.line("INTERNALERROR> " + line)
def pytest_collectstart(self, collector):
self.outindent(collector)
self.indent += self.INDENT
def pytest_itemstart(self, item, node=None):
self.outindent(item)
def pytest_collectreport(self, rep):
if not rep.passed:
self.outindent("!!! %s !!!" % rep.longrepr.reprcrash.message)
self._failed.append(rep)
self.indent = self.indent[:-len(self.INDENT)]
def pytest_sessionfinish(self, session, exitstatus):
if self._failed:
self.out.sep("!", "collection failures")
for rep in self._failed:
rep.toterminal(self.out)
def folded_skips(skipped):
d = {}
for event in skipped:
entry = event.longrepr.reprcrash
key = entry.path, entry.lineno, entry.message
d.setdefault(key, []).append(event)
l = []
for key, events in d.iteritems():
l.append((len(events),) + key)
return l
def repr_pythonversion(v=None):
if v is None:
v = sys.version_info
try:
return "%s.%s.%s-%s-%s" % v
except (TypeError, ValueError):
return str(v)
.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_terminal.py
.. _`pytest_terminal.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_terminal.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -4,6 +4,9 @@ pytest_unittest plugin
automatically discover and run traditional "unittest.py" style tests.
.. contents::
:local:
Usage
----------------
@ -16,146 +19,19 @@ This plugin is enabled by default.
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
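A minimal example of a module the plugin picks up without any
py.test-specific code in it:

.. sourcecode:: python

    # test_legacy.py -- collected automatically by the plugin
    import unittest

    class TestExample(unittest.TestCase):
        def setUp(self):
            self.value = 3
        def test_value(self):
            self.assertEquals(3, self.value)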
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_unittest.py`_ plugin source code
2. put it somewhere as ``pytest_unittest.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_unittest.py``:
.. sourcecode:: python
"""
automatically discover and run traditional "unittest.py" style tests.
Usage
----------------
This plugin collects and runs Python `unittest.py style`_ tests.
It will automatically collect ``unittest.TestCase`` subclasses
and their ``test`` methods from the test modules of a project
(usually following the ``test_*.py`` pattern).
This plugin is enabled by default.
.. _`unittest.py style`: http://docs.python.org/library/unittest.html
"""
import py
import sys
def pytest_pycollect_makeitem(collector, name, obj):
if 'unittest' not in sys.modules:
return # nobody could have possibly derived a subclass
if py.std.inspect.isclass(obj) and issubclass(obj, py.std.unittest.TestCase):
return UnitTestCase(name, parent=collector)
class UnitTestCase(py.test.collect.Class):
def collect(self):
return [UnitTestCaseInstance("()", self)]
def setup(self):
pass
def teardown(self):
pass
_dummy = object()
class UnitTestCaseInstance(py.test.collect.Instance):
def collect(self):
loader = py.std.unittest.TestLoader()
names = loader.getTestCaseNames(self.obj.__class__)
l = []
for name in names:
callobj = getattr(self.obj, name)
if callable(callobj):
l.append(UnitTestFunction(name, parent=self))
return l
def _getobj(self):
return self.parent.obj(methodName='run')
class UnitTestFunction(py.test.collect.Function):
def __init__(self, name, parent, args=(), obj=_dummy, sort_value=None):
super(UnitTestFunction, self).__init__(name, parent)
self._args = args
if obj is not _dummy:
self._obj = obj
self._sort_value = sort_value
def runtest(self):
target = self.obj
args = self._args
target(*args)
def setup(self):
instance = self.obj.im_self
instance.setUp()
def teardown(self):
instance = self.obj.im_self
instance.tearDown()
def test_simple_unittest(testdir):
testpath = testdir.makepyfile("""
import unittest
pytest_plugins = "pytest_unittest"
class MyTestCase(unittest.TestCase):
def testpassing(self):
self.assertEquals('foo', 'foo')
def test_failing(self):
self.assertEquals('foo', 'bar')
""")
reprec = testdir.inline_run(testpath)
assert reprec.matchreport("testpassing").passed
assert reprec.matchreport("test_failing").failed
def test_setup(testdir):
testpath = testdir.makepyfile(test_two="""
import unittest
pytest_plugins = "pytest_unittest" # XXX
class MyTestCase(unittest.TestCase):
def setUp(self):
self.foo = 1
def test_setUp(self):
self.assertEquals(1, self.foo)
""")
reprec = testdir.inline_run(testpath)
rep = reprec.matchreport("test_setUp")
assert rep.passed
def test_teardown(testdir):
testpath = testdir.makepyfile(test_three="""
import unittest
pytest_plugins = "pytest_unittest" # XXX
class MyTestCase(unittest.TestCase):
l = []
def test_one(self):
pass
def tearDown(self):
self.l.append(None)
class Second(unittest.TestCase):
def test_check(self):
self.assertEquals(MyTestCase.l, [None])
""")
reprec = testdir.inline_run(testpath)
passed, skipped, failed = reprec.countoutcomes()
print "COUNTS", passed, skipped, failed
assert failed == 0, failed
assert passed == 2
assert passed + skipped + failed == 2
.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_unittest.py
.. _`pytest_unittest.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_unittest.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -4,6 +4,9 @@ pytest_xfail plugin
mark python tests as expected-to-fail and report them separately.
.. contents::
:local:
usage
------------
@ -18,116 +21,19 @@ This test will be executed but no traceback will be reported
when it fails. Instead terminal reporting will list it in the
"expected to fail" section or "unexpectedly passing" section.
Getting and improving this plugin
---------------------------------
Start improving this plugin in 30 seconds
=========================================
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30 seconds guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `pytest_xfail.py`_ plugin source code
2. put it somewhere as ``pytest_xfail.py`` into your import path
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
For your convenience here is also an inlined version of ``pytest_xfail.py``:
.. sourcecode:: python
"""
mark python tests as expected-to-fail and report them separately.
usage
------------
Use the generic mark decorator to add the 'xfail' keyword to your
test function::
@py.test.mark.xfail
def test_hello():
...
This test will be executed but no traceback will be reported
when it fails. Instead terminal reporting will list it in the
"expected to fail" section or "unexpectedly passing" section.
"""
import py
pytest_plugins = ['keyword']
def pytest_runtest_makereport(__call__, item, call):
if call.when != "call":
return
if hasattr(item, 'obj') and hasattr(item.obj, 'func_dict'):
if 'xfail' in item.obj.func_dict:
res = __call__.execute(firstresult=True)
if call.excinfo:
res.skipped = True
res.failed = res.passed = False
else:
res.skipped = res.passed = False
res.failed = True
return res
def pytest_report_teststatus(rep):
""" return shortletter and verbose word. """
if 'xfail' in rep.keywords:
if rep.skipped:
return "xfailed", "x", "xfail"
elif rep.failed:
return "xpassed", "P", "xpass"
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
tr = terminalreporter
xfailed = tr.stats.get("xfailed")
if xfailed:
tr.write_sep("_", "expected failures")
for event in xfailed:
entry = event.longrepr.reprcrash
key = entry.path, entry.lineno, entry.message
reason = event.longrepr.reprcrash.message
modpath = event.item.getmodpath(includemodule=True)
#tr._tw.line("%s %s:%d: %s" %(modpath, entry.path, entry.lineno, entry.message))
tr._tw.line("%s %s:%d: " %(modpath, entry.path, entry.lineno))
xpassed = terminalreporter.stats.get("xpassed")
if xpassed:
tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
for event in xpassed:
tr._tw.line("%s: xpassed" %(event.item,))
# ===============================================================================
#
# plugin tests
#
# ===============================================================================
def test_xfail(testdir, linecomp):
p = testdir.makepyfile(test_one="""
import py
@py.test.mark.xfail
def test_this():
assert 0
@py.test.mark.xfail
def test_that():
assert 1
""")
result = testdir.runpytest(p)
extra = result.stdout.fnmatch_lines([
"*expected failures*",
"*test_one.test_this*test_one.py:4*",
"*UNEXPECTEDLY PASSING*",
"*test_that*",
])
assert result.ret == 1
.. _`pytest_xfail.py`: http://bitbucket.org/hpk42/py-trunk/raw/c28e76a64569475dda8b92c68f9c1c0902c5049e/py/test/plugin/pytest_xfail.py
.. _`pytest_xfail.py`: http://bitbucket.org/hpk42/py-trunk/raw/85fe614ab05f301f206935d11a477df184cbbce6/py/test/plugin/pytest_xfail.py
.. _`extend`: ../extend.html
.. _`plugins`: index.html
.. _`contact`: ../../contact.html

View File

@ -10,9 +10,10 @@ plugins = [
'unittest doctest oejskit restdoc'),
('Plugins for generic reporting and failure logging',
'pocoo resultlog terminal',),
('internal plugins / core functionality',
'pdb keyword hooklog runner execnetcleanup pytester',
)
#('internal plugins / core functionality',
# #'pdb keyword hooklog runner execnetcleanup # pytester',
# 'pdb keyword hooklog runner execnetcleanup' # pytester',
#)
]
externals = {
@ -152,6 +153,9 @@ class PluginDoc(RestWriter):
self.h1("%s plugin" % self.name) # : %s" %(self.name, self.oneliner))
self.Print(self.oneliner)
self.Print()
self.Print(".. contents::")
self.Print(" :local:")
self.Print()
self.Print(moduledoc)
@ -170,15 +174,13 @@ class PluginDoc(RestWriter):
#self.links.append((basename,
# "http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/" +
# basename))
self.h2("Getting and improving this plugin")
self.h1("Start improving this plugin in 30 seconds")
self.para(py.code.Source("""
Do you find the above documentation or the plugin itself lacking,
not fit for what you need? Here is a **30-second guide**
to get you started on improving the plugin:
Do you find the above documentation or the plugin itself lacking?
1. Download `%s`_ plugin source code
2. put it somewhere on your import path as ``%s``
3. a subsequent test run will now use your local version!
3. a subsequent ``py.test`` run will use your local version
Further information: extend_ documentation, other plugins_ or contact_.
""" % (basename, basename)))
@ -194,14 +196,15 @@ class PluginDoc(RestWriter):
self.links.append(('contact', '../../contact.html'))
self.links.append(('checkout the py.test development version',
'../../download.html#checkout'))
#self.h2("plugin source code")
self.Print()
self.para("For your convenience here is also an inlined version "
"of ``%s``:" %basename)
#self(or copy-paste from below)
self.Print()
self.sourcecode(py.code.Source(plugin))
if 0: # this breaks the page layout and makes doc files large
#self.h2("plugin source code")
self.Print()
self.para("For your convenience here is also an inlined version "
"of ``%s``:" %basename)
#self(or copy-paste from below)
self.Print()
self.sourcecode(py.code.Source(plugin))
def emit_funcargs(self, plugin):
funcargfuncs = []
@ -213,6 +216,7 @@ class PluginDoc(RestWriter):
return
for func in funcargfuncs:
argname = func.__name__[len(prefix):]
self.Print()
self.Print(".. _`%s funcarg`:" % argname)
self.Print()
self.h2("the %r test function argument" % argname)

View File

@ -63,12 +63,15 @@ class PopenCmdGateway(InstallableGateway):
def __init__(self, cmd):
# on win close_fds=True does not work, not sure it's needed
#p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE, close_fds=True)
p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE)
self._popen = p = Popen(cmd, shell=True, stdin=PIPE, stdout=PIPE)
infile, outfile = p.stdin, p.stdout
self._cmd = cmd
io = inputoutput.Popen2IO(infile, outfile)
super(PopenCmdGateway, self).__init__(io=io)
def exit(self):
super(PopenCmdGateway, self).exit()
self._popen.poll()
class PopenGateway(PopenCmdGateway):
""" This Gateway provides interaction with a newly started

View File

@ -138,6 +138,8 @@ class DSession(Session):
exitstatus = loopstate.exitstatus
break
except KeyboardInterrupt:
excinfo = py.code.ExceptionInfo()
self.config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
exitstatus = outcome.EXIT_INTERRUPTED
except:
self.config.pluginmanager.notify_exception()
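# A minimal conftest.py sketch (hypothetical, not part of this commit)
# showing how a plugin could implement the new hook:
def pytest_keyboard_interrupt(excinfo):
    # excinfo is a py.code.ExceptionInfo wrapping the KeyboardInterrupt
    print("interrupted: %s" % (excinfo.exconly(),))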

View File

@ -57,7 +57,7 @@ class HookRecorder:
def _makecallparser(self, method):
name = method.__name__
args, varargs, varkw, default = py.std.inspect.getargspec(method)
if args and args[0] != "self":
if not args or args[0] != "self":
args.insert(0, 'self')
fspec = py.std.inspect.formatargspec(args, varargs, varkw, default)
# we use exec because we want to have early type
@ -113,6 +113,19 @@ def test_hookrecorder_basic():
assert call._name == "xyz"
py.test.raises(ValueError, "rec.popcall('abc')")
def test_hookrecorder_basic_no_args_hook():
import sys
comregistry = py._com.Registry()
rec = HookRecorder(comregistry)
apimod = type(sys)('api')
def xyz():
pass
apimod.xyz = xyz
rec.start_recording(apimod)
rec.hook.xyz()
call = rec.popcall("xyz")
assert call._name == "xyz"
reg = py._com.comregistry
def test_functional_default(testdir, _pytest):
assert _pytest.comregistry == py._com.comregistry

View File

@ -1,6 +1,5 @@
"""
convenient capturing of writes to stdout/stderr streams
and file descriptors.
convenient capturing of writes to stdout/stderr streams and file descriptors.
Example Usage
----------------------

View File

@ -7,6 +7,7 @@ import sys, os
import inspect
from py.__.test.config import Config as pytestConfig
import hookspec
import subprocess
pytest_plugins = '_pytest'
@ -506,3 +507,104 @@ def test_testdir_runs_with_plugin(testdir):
assert result.stdout.fnmatch_lines([
"*1 passed*"
])
#
# experimental funcargs for venv/install-tests
#
def pytest_funcarg__venv(request):
# create a fresh virtualenv in a numbered per-test temp directory
p = request.config.mktemp(request.function.__name__, numbered=True)
venv = VirtualEnv(str(p))
venv.create()
return venv
def pytest_funcarg__py_setup(request):
# locate the py package's setup.py one level above the installed package
rootdir = py.path.local(py.__file__).dirpath().dirpath()
setup = rootdir.join('setup.py')
if not setup.check():
py.test.skip("not found: %r" % setup)
return SetupBuilder(setup)
class SetupBuilder:
def __init__(self, setup_path):
self.setup_path = setup_path
def make_sdist(self, destdir=None):
temp = py.path.local.mkdtemp()
try:
args = ['python', str(self.setup_path), 'sdist',
'--dist-dir', str(temp)]
subprocess.check_call(args)
l = temp.listdir('py-*')
assert len(l) == 1
sdist = l[0]
if destdir is None:
destdir = self.setup_path.dirpath('build')
assert destdir.check()
else:
destdir = py.path.local(destdir)
target = destdir.join(sdist.basename)
sdist.copy(target)
return target
finally:
temp.remove()
# code taken from Ronny Pfannenschmidt's virtualenvmanager
class VirtualEnv(object):
def __init__(self, path):
#XXX: supply the python executable
self.path = path
def __repr__(self):
return "<VirtualEnv at %r>" %(self.path)
def _cmd(self, name):
return os.path.join(self.path, 'bin', name)
@property
def valid(self):
return os.path.exists(self._cmd('python'))
def create(self, sitepackages=False):
args = ['virtualenv', self.path]
if not sitepackages:
args.append('--no-site-packages')
subprocess.check_call(args)
def makegateway(self):
python = self._cmd('python')
return py.execnet.makegateway("popen//python=%s" %(python,))
def pcall(self, cmd, *args, **kw):
assert self.valid
return subprocess.call([
self._cmd(cmd)
] + list(args),
**kw)
def easy_install(self, *packages, **kw):
args = []
if 'index' in kw:
index = kw['index']
if isinstance(index, (list, tuple)):
for i in index:
args.extend(['-i', i])
else:
args.extend(['-i', index])
args.extend(packages)
self.pcall('easy_install', *args)
@property
def has_pip(self):
return os.path.exists(self._cmd('pip'))
def pip_install(self, *packages):
if not self.has_pip:
self.easy_install('pip')
self.pcall('pip', 'install', *packages)
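# Hypothetical usage sketch: the funcargs above wrap this up for tests,
# but the helper can also be driven directly:
def example_venv_usage(tmpdir):
    venv = VirtualEnv(str(tmpdir.join("venv")))
    venv.create()                  # runs: virtualenv --no-site-packages <path>
    venv.easy_install('py')        # install a package into the venv
    gw = venv.makegateway()        # execnet gateway using the venv's python
    ch = gw.remote_exec("import py; channel.send(py.__version__)")
    return ch.receive()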

View File

@ -1,17 +1,46 @@
"""
helpers for asserting deprecation and other warnings.
**recwarn**: function argument where one can call recwarn.pop() to get
the last warning that would have been shown.
Example usage
---------------------
**py.test.deprecated_call(func, *args, **kwargs)**: assert that the given function call triggers a deprecation warning.
You can use the ``recwarn`` funcarg to track
warnings within a test function:
.. sourcecode:: python
def test_hello(recwarn):
from warnings import warn
warn("hello", DeprecationWarning)
w = recwarn.pop(DeprecationWarning)
assert issubclass(w.category, DeprecationWarning)
assert 'hello' in str(w.message)
assert w.filename
assert w.lineno
You can also call a global helper to check
that a certain function call triggers a
DeprecationWarning:
.. sourcecode:: python
import py
def test_global():
py.test.deprecated_call(myfunction, 17)
"""
import py
import os
def pytest_funcarg__recwarn(request):
""" check that warnings have been raised. """
"""Return a WarningsRecorder instance that provides these methods:
* ``pop(category=None)``: return last warning matching the category.
* ``clear()``: clear list of warnings
"""
warnings = WarningsRecorder()
request.addfinalizer(warnings.finalize)
return warnings
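# A small hypothetical illustration (not from the plugin docs) of
# clear() alongside pop():
def test_clear_example(recwarn):
    import warnings
    warnings.warn("first", DeprecationWarning)
    recwarn.clear()                       # drop all recorded warnings
    warnings.warn("second", DeprecationWarning)
    w = recwarn.pop(DeprecationWarning)
    assert 'second' in str(w.message)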

View File

@ -1,5 +1,7 @@
"""
terminal reporting of the full testing process.
Implements terminal reporting of the full testing process.
This is a good source for looking at the various reporting hooks.
"""
import py
import sys

View File

@ -17,12 +17,16 @@ def basic_run_report(item):
return runner.call_and_report(item, "call", log=False)
class Option:
def __init__(self, verbose=False):
def __init__(self, verbose=False, dist=None):
self.verbose = verbose
self.dist = dist
def _getcmdargs(self):
l = []
if self.verbose:
l.append('-v')
if self.dist:
l.append('--dist=%s' % self.dist)
l.append('--tx=popen')
return l
def _getcmdstring(self):
return " ".join(self._getcmdargs())
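# Hypothetical illustration (not part of this commit) of the args
# built for a distributed run:
def example_dist_args():
    opt = Option(verbose=True, dist='each')
    assert opt._getcmdargs() == ['-v', '--dist=each', '--tx=popen']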
@ -37,6 +41,12 @@ def pytest_generate_tests(metafunc):
id="verbose",
funcargs={'option': Option(verbose=True)}
)
nodist = getattr(metafunc.function, 'nodist', False)
if not nodist:
metafunc.addcall(
id="verbose-dist",
funcargs={'option': Option(dist='each', verbose=True)}
)
class TestTerminal:
def test_pass_skip_fail(self, testdir, option):
@ -49,13 +59,22 @@ class TestTerminal:
def test_func():
assert 0
""")
result = testdir.runpytest(option._getcmdstring())
result = testdir.runpytest(*option._getcmdargs())
if option.verbose:
result.stdout.fnmatch_lines([
"*test_pass_skip_fail.py:2: *test_ok*PASS*",
"*test_pass_skip_fail.py:4: *test_skip*SKIP*",
"*test_pass_skip_fail.py:6: *test_func*FAIL*",
])
if not option.dist:
result.stdout.fnmatch_lines([
"*test_pass_skip_fail.py:2: *test_ok*PASS*",
"*test_pass_skip_fail.py:4: *test_skip*SKIP*",
"*test_pass_skip_fail.py:6: *test_func*FAIL*",
])
else:
expected = [
"*PASS*test_pass_skip_fail.py:2: *test_ok*",
"*SKIP*test_pass_skip_fail.py:4: *test_skip*",
"*FAIL*test_pass_skip_fail.py:6: *test_func*",
]
for line in expected:
result.stdout.fnmatch_lines([line])
else:
result.stdout.fnmatch_lines([
"*test_pass_skip_fail.py .sF"
@ -68,7 +87,7 @@ class TestTerminal:
def test_collect_fail(self, testdir, option):
p = testdir.makepyfile("import xyz")
result = testdir.runpytest(option._getcmdstring())
result = testdir.runpytest(*option._getcmdargs())
result.stdout.fnmatch_lines([
"*test_collect_fail.py F*",
"> import xyz",
@ -217,7 +236,15 @@ class TestTerminal:
"*FGHJ:43: custom*"
])
def test_keyboard_interrupt_dist(self, testdir, option):
p = testdir.makepyfile("""
raise KeyboardInterrupt
""")
result = testdir.runpytest(*option._getcmdargs())
assert result.ret == 2
result.stdout.fnmatch_lines(['*KEYBOARD INTERRUPT*'])
@py.test.mark.nodist
def test_keyboard_interrupt(self, testdir, option):
p = testdir.makepyfile("""
def test_foobar():
@ -228,7 +255,7 @@ class TestTerminal:
raise KeyboardInterrupt # simulating the user
""")
result = testdir.runpytest(option._getcmdstring())
result = testdir.runpytest(*option._getcmdargs())
result.stdout.fnmatch_lines([
" def test_foobar():",
"> assert 0",
@ -239,6 +266,7 @@ class TestTerminal:
result.stdout.fnmatch_lines([
"*raise KeyboardInterrupt # simulating the user*",
])
result.stdout.fnmatch_lines(['*KEYBOARD INTERRUPT*'])
def test_skip_reasons_folding(self):
class longrepr:

View File

@ -114,7 +114,7 @@ class Session(object):
item.config.hook.pytest_runtest_protocol(item=item)
except KeyboardInterrupt:
excinfo = py.code.ExceptionInfo()
item.config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
self.config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
exitstatus = outcome.EXIT_INTERRUPTED
except:
excinfo = py.code.ExceptionInfo()

View File

@ -0,0 +1,9 @@
import py
def test_make_sdist_and_run_it(py_setup, venv):
sdist = py_setup.make_sdist(venv.path)
venv.easy_install(str(sdist))
gw = venv.makegateway()
ch = gw.remote_exec("import py ; channel.send(py.__version__)")
version = ch.receive()
assert version == py.__version__