document and refine/rename item **runtest** hook invocations;
capture output separately for fixture and actual test run

--HG--
branch : trunk

parent d16688a1e6
commit ed216e77d0
@@ -32,10 +32,6 @@ or class with a leading ``Test`` name is collected.
 
 .. _`collection process`: ext.html#collection-process
 
-Rapidly write integration, functional, unit tests
-===================================================
-
-XXX
-
 funcargs and xUnit style setups
 ===================================================
@@ -95,17 +91,20 @@ debug with the ``print`` statement
 ----------------------------------
 
 By default, ``py.test`` catches text written to stdout/stderr during
 the execution of each individual test. This output will only be
 displayed, however, if the test fails; you will not see it
 otherwise. This allows you to put debugging print statements in your
 code without being overwhelmed by all the output that might be
 generated by tests that do not fail.
 
 Each failing test that produced output during the running of the test
-will have its output displayed in the ``recorded stdout`` section.
+function will have its output displayed in the ``recorded stdout`` section.
 
+During setup and teardown ("fixture") capturing is performed separately, so
+you will only see this output if the actual fixture functions fail.
+
 The catching of stdout/stderr output can be disabled using the
-``--nocapture`` option to the ``py.test`` tool. Any output will
+``--nocapture`` or ``-s`` option to the ``py.test`` tool. Any output will
 in this case be displayed as soon as it is generated.
 
 test execution order
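As an illustration of the capturing behaviour described in the hunk above
(a sketch; the file and test names are made up)::

    # test_capture_demo.py
    def test_fails():
        print "debug hint"       # shown in "recorded stdout" because the test fails
        assert 0

    def test_passes():
        print "never displayed"  # swallowed: the test passes

Running ``py.test test_capture_demo.py`` shows "debug hint" only in the
failure report of ``test_fails``; with ``-s`` or ``--nocapture`` all output
appears immediately as the tests run.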
@@ -2,16 +2,17 @@
 hooks and plugins
 ==========================
 
-py.test implements much of its functionality by calling so called
-**hooks**. A hook is a function with a ``pytest_`` prefix and a list of
-named arguments. Hook functions are usually defined in plugins.
-A plugin is a module or package that makes hook functions available.
+py.test implements much of its functionality by calling **hooks**.
+A hook is a function with a ``pytest_`` prefixed name. Hook functions
+are usually defined in plugins. A plugin is a regular python module or
+package that makes hook functions available.
 
 When loading a plugin module (which needs to have a ``pytest_`` prefix as well)
 py.test performs strict checking on the function signature. Function
 and argument names need to match exactly the `original definition of the hook`_.
 This allows for early mismatch reporting and minimizes version incompatibilities.
 
+.. _`original definition of the hook`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/api.py
 
 Loading plugins and specifying dependencies
 ============================================
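For illustration, a minimal plugin module matching the hook-signature
checking described in the hunk above might look like this (a sketch; the
module name and printed message are made up, but the hook name and
signature follow the linked definition)::

    # pytest_myplugin.py
    def pytest_runtest_setup(item):
        # runs before each test item; misspelling the function name or
        # its argument is reported as an error at plugin load time
        print "setting up", item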
@@ -42,7 +43,13 @@ Included plugins
 ================
 
 You can find the source code of all default plugins in
-http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/
+
+http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/
+
+Additionally you can check out some more contributed plugins here
+
+http://bitbucket.org/hpk42/py-trunk/src/tip/contrib/
 
 Overview on available hooks
 ====================================
@@ -50,16 +57,54 @@ Overview on available hooks
 "runtest" hooks
 -------------------
 
-A number of hooks allow interacting with the running of a test.
-A test can be many things - for example a python, javascript
-or prolog test function or a doctest. The following hooks are
-usually invoked on running a test item::
+Each test item is usually executed by calling the following three hooks::
 
-    pytest_runtest_protocol(item) -> True # and invokes:
-    pytest_runtest_setup(item) -> None
-    pytest_runtest_call(item) -> (excinfo, when, outerr)
-    pytest_runtest_makereport(item, excinfo, when, outerr) -> report
-    pytest_runtest_logreport(report) -> None
-    pytest_runtest_teardown(item) -> None
+    pytest_runtest_setup(item)
+    pytest_runtest_call(item)
+    pytest_runtest_teardown(item)
 
+For each of the three invocations a `call object`_ encapsulates
+information about the outcome of the call and is subsequently used
+to make a report object::
+
+    report = hook.pytest_runtest_makereport(item, call)
+
+For example, the `pytest_pdb plugin`_ uses this hook to activate
+interactive debugging on failures when ``--pdb`` is specified on the
+command line.
+
+Usually three reports will be generated for a single test item. However,
+if ``pytest_runtest_setup`` fails, the call and teardown hooks are not
+invoked and only one report is created.
+
+Each of the up to three reports is eventually fed to the logreport hook::
+
+    pytest_runtest_logreport(report)
+
+A ``report`` object contains status and reporting information::
+
+    report.longrepr = string/lines/object to print
+    report.when = "setup", "call" or "teardown"
+    report.shortrepr = letter for progress-report
+    report.passed = True or False
+    report.failed = True or False
+    report.skipped = True or False
+
+The `pytest_terminal plugin`_ uses this hook to print information
+about a test run.
+
+The protocol described here is implemented via this hook::
+
+    pytest_runtest_protocol(item) -> True
+
+.. _`call object`:
+
+The call object contains information about a performed call::
+
+    call.excinfo = ExceptionInfo object or None
+    call.when = "setup", "call" or "teardown"
+    call.outerr = None or tuple of strings representing captured stdout/stderr
+
+.. _`pytest_pdb plugin`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_pdb.py
+.. _`pytest_terminal plugin`: http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/pytest_terminal.py
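A small sketch of a hypothetical plugin consuming these reports (the
filtering mirrors what the terminal plugin in this commit does; the
counting itself is made up)::

    counts = {}

    def pytest_runtest_logreport(rep):
        if rep.passed and rep.when in ("setup", "teardown"):
            return  # uninteresting: the fixture worked
        counts[rep.when] = counts.get(rep.when, 0) + 1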
@@ -238,9 +238,9 @@ class DSession(Session):
         self.node2pending[node].remove(item)
 
     def handle_crashitem(self, item, node):
-        longrepr = "!!! Node %r crashed during running of test %r" %(node, item)
-        rep = item.config.hook.pytest_runtest_makereport(
-            item=item, when="???", excinfo=longrepr, outerr=None)
+        runner = item.config.pluginmanager.getplugin("runner")
+        info = "!!! Node %r crashed during running of test %r" %(node, item)
+        rep = runner.ItemTestReport(item=item, excinfo=info, when="???")
         rep.node = node
         self.config.hook.pytest_runtest_logreport(rep=rep)
@@ -5,8 +5,9 @@ import py
 XSpec = py.execnet.XSpec
 
 def run(item, node, excinfo=None):
-    rep = item.config.hook.pytest_runtest_makereport(
-        item=item, excinfo=excinfo, when="call", outerr=("", ""))
+    runner = item.config.pluginmanager.getplugin("runner")
+    rep = runner.ItemTestReport(item=item,
+        excinfo=excinfo, when="call", outerr=("", ""))
     rep.node = node
     return rep
@@ -72,18 +72,14 @@ class PluginHooks:
     # runtest related hooks
     # ------------------------------------------------------------------------------
     def pytest_runtest_setup(self, item):
-        """ called before pytest_runtest(). """
-
-    def pytest_runtest_teardown(self, item):
-        """ called after pytest_runtest_call. """
+        """ called before pytest_runtest_call(). """
 
     def pytest_runtest_call(self, item):
-        """ called after pytest_runtest_call. """
+        """ execute test item. """
 
-    def pytest_runtest_makereport(self, item, excinfo, when, outerr):
-        """ make ItemTestReport for the specified test outcome. """
-    pytest_runtest_makereport.firstresult = True
+    def pytest_runtest_teardown(self, item):
+        """ called after pytest_runtest_call(). """
 
     def pytest_runtest_protocol(self, item):
         """ run given test item and return test report. """
         pytest_runtest_protocol.firstresult = True
@@ -92,6 +88,9 @@ class PluginHooks:
         """ return True if we consumed/did the call to the python function item. """
     pytest_pyfunc_call.firstresult = True
 
+    def pytest_runtest_makereport(self, item, call):
+        """ make ItemTestReport for the specified test outcome. """
+    pytest_runtest_makereport.firstresult = True
 
     def pytest_runtest_logreport(self, rep):
         """ process item test report. """
@@ -2,8 +2,9 @@
 
 import py
 
-def pytest_pyfunc_call(pyfuncitem, args, kwargs):
-    pyfuncitem.obj(*args, **kwargs)
+def pytest_pyfunc_call(__call__, pyfuncitem, args, kwargs):
+    if not __call__.execute(firstresult=True):
+        pyfuncitem.obj(*args, **kwargs)
 
 def pytest_collect_file(path, parent):
     ext = path.ext
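Given the hook's docstring above ("return True if we consumed/did the call
to the python function item"), a hypothetical plugin could take over
execution for selected items (a sketch; the ``run_twice`` attribute is
made up)::

    def pytest_pyfunc_call(pyfuncitem, args, kwargs):
        if getattr(pyfuncitem.obj, 'run_twice', False):  # made-up marker
            pyfuncitem.obj(*args, **kwargs)
            pyfuncitem.obj(*args, **kwargs)
            return True  # consumed the call; default invocation is skipped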
@@ -22,16 +22,24 @@ def pytest_funcarg__capfd(request):
     request.addfinalizer(capture.finalize)
     return capture
 
+def pytest_pyfunc_call(pyfuncitem, args, kwargs):
+    for funcarg, value in kwargs.items():
+        if funcarg == "capsys" or funcarg == "capfd":
+            value.reset()
+
 class Capture:
+    _capture = None
     def __init__(self, captureclass):
         self._captureclass = captureclass
-        self._capture = self._captureclass()
 
     def finalize(self):
-        self._capture.reset()
+        if self._capture:
+            self._capture.reset()
 
     def reset(self):
-        res = self._capture.reset()
+        res = None
+        if self._capture:
+            res = self._capture.reset()
         self._capture = self._captureclass()
         return res
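Usage sketch for the funcargs handled above (assuming, as the ``Capture``
class suggests, that ``reset()`` returns the text captured since the last
reset; the test itself is made up)::

    def test_greeting(capsys):
        print "hello"
        out, err = capsys.reset()
        assert out == "hello\n"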
@@ -22,12 +22,12 @@ def pytest_configure(config):
     config.pluginmanager.register(PdbInvoke())
 
 class PdbInvoke:
-    def pytest_runtest_makereport(self, item, excinfo, when, outerr):
-        if excinfo and not excinfo.errisinstance(Skipped):
+    def pytest_runtest_makereport(self, item, call):
+        if call.excinfo and not call.excinfo.errisinstance(Skipped):
             tw = py.io.TerminalWriter()
-            repr = excinfo.getrepr()
+            repr = call.excinfo.getrepr()
             repr.toterminal(tw)
-            post_mortem(excinfo._excinfo[2])
+            post_mortem(call.excinfo._excinfo[2])
 
 class Pdb(py.std.pdb.Pdb):
     def do_list(self, arg):
@@ -269,8 +269,12 @@ class TmpTestdir:
         p1 = py.path.local("stdout")
         p2 = py.path.local("stderr")
         print "running", cmdargs, "curdir=", py.path.local()
-        popen = self.popen(cmdargs, stdout=p1.open("w"), stderr=p2.open("w"))
+        f1 = p1.open("w")
+        f2 = p2.open("w")
+        popen = self.popen(cmdargs, stdout=f1, stderr=f2, close_fds=True)
         ret = popen.wait()
+        f1.close()
+        f2.close()
         out, err = p1.readlines(cr=0), p2.readlines(cr=0)
         if err:
             for line in err:
@@ -356,7 +360,8 @@ class ReportRecorder(object):
         failed = []
         for rep in self.getreports("pytest_runtest_logreport"):
             if rep.passed:
-                passed.append(rep)
+                if rep.when == "call":
+                    passed.append(rep)
             elif rep.skipped:
                 skipped.append(rep)
             elif rep.failed:
@@ -384,19 +389,25 @@ def test_reportrecorder(testdir):
     recorder = testdir.getreportrecorder(registry)
     assert not recorder.getfailures()
     item = testdir.getitem("def test_func(): pass")
-    rep = item.config.hook.pytest_runtest_makereport(
-        item=item, excinfo=None, when="call", outerr=None)
-    rep.passed = False
-    rep.failed = True
+    class rep:
+        excinfo = None
+        passed = False
+        failed = True
+        skipped = False
+        when = "call"
+
     recorder.hook.pytest_runtest_logreport(rep=rep)
     failures = recorder.getfailures()
     assert failures == [rep]
     failures = recorder.getfailures()
     assert failures == [rep]
 
-    rep = item.config.hook.pytest_runtest_makereport(
-        item=item, excinfo=None, when="call", outerr=None)
+    class rep:
+        excinfo = None
+        passed = False
+        failed = False
+        skipped = True
+        when = "call"
     rep.passed = False
     rep.skipped = True
     recorder.hook.pytest_runtest_logreport(rep=rep)
@@ -12,7 +12,6 @@ from py.__.test.outcome import Skipped
 
 #
 # pytest plugin hooks
-#
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
@@ -38,13 +37,22 @@ def pytest_make_collect_report(collector):
     return report
 
 def pytest_runtest_protocol(item):
-    if item.config.option.boxed:
-        report = forked_run_report(item)
+    if item.config.getvalue("boxed"):
+        reports = forked_run_report(item)
+        for rep in reports:
+            item.config.hook.pytest_runtest_logreport(rep=rep)
     else:
-        report = basic_run_report(item)
-    item.config.hook.pytest_runtest_logreport(rep=report)
+        runtestprotocol(item)
     return True
 
+def runtestprotocol(item, log=True):
+    rep = call_and_report(item, "setup", log)
+    reports = [rep]
+    if rep.passed:
+        reports.append(call_and_report(item, "call", log))
+    reports.append(call_and_report(item, "teardown", log))
+    return reports
+
 def pytest_runtest_setup(item):
     item.config._setupstate.prepare(item)
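Driving the new helper directly returns the per-phase reports; a sketch of
the ordering the runner tests below rely on::

    reports = runtestprotocol(item, log=False)
    # reports[0] is the setup report; if setup failed it is the only one,
    # otherwise reports[1] is the call report and reports[2] the teardown one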
@@ -52,44 +60,45 @@ def pytest_runtest_call(item):
     if not item._deprecated_testexecution():
         item.runtest()
 
-def pytest_runtest_makereport(item, excinfo, when, outerr):
-    return ItemTestReport(item, excinfo, when, outerr)
+def pytest_runtest_makereport(item, call):
+    if isinstance(call, str):
+        # crashed item
+        return ItemTestReport(item, excinfo=call, when="???")
+    else:
+        return ItemTestReport(item, call.excinfo, call.when, call.outerr)
 
 def pytest_runtest_teardown(item):
     item.config._setupstate.teardown_exact(item)
 
 #
 # Implementation
-#
 
-class Call:
-    excinfo = None
-    def __init__(self, when, func):
-        self.when = when
-        try:
-            self.result = func()
-        except KeyboardInterrupt:
-            raise
-        except:
-            self.excinfo = py.code.ExceptionInfo()
-
-
-def basic_run_report(item):
-    """ return report about setting up and running a test item. """
-    capture = item.config._getcapture()
+def call_and_report(item, when, log=True):
+    call = RuntestHookCall(item, when)
     hook = item.config.hook
-    try:
-        call = Call("setup", lambda: hook.pytest_runtest_setup(item=item))
-        if not call.excinfo:
-            call = Call("call", lambda: hook.pytest_runtest_call(item=item))
-            # in case of an error we defer teardown to not shadow the error
-            if not call.excinfo:
-                call = Call("teardown", lambda: hook.pytest_runtest_teardown(item=item))
-    finally:
-        outerr = capture.reset()
-    return item.config.hook.pytest_runtest_makereport(
-        item=item, excinfo=call.excinfo,
-        when=call.when, outerr=outerr)
+    report = hook.pytest_runtest_makereport(item=item, call=call)
+    if log and (when == "call" or not report.passed):
+        hook.pytest_runtest_logreport(rep=report)
+    return report
+
+class RuntestHookCall:
+    excinfo = None
+    _prefix = "pytest_runtest_"
+    def __init__(self, item, when):
+        self.when = when
+        hookname = self._prefix + when
+        hook = getattr(item.config.hook, hookname)
+        capture = item.config._getcapture()
+        try:
+            try:
+                self.result = hook(item=item)
+            except KeyboardInterrupt:
+                raise
+            except:
+                self.excinfo = py.code.ExceptionInfo()
+        finally:
+            self.outerr = capture.reset()
 
 def forked_run_report(item):
     EXITSTATUS_TESTEXIT = 4
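Note that constructing a ``RuntestHookCall`` immediately performs the hook
call and captures its output; the resulting attributes line up with the
`call object`_ documented earlier (a sketch)::

    call = RuntestHookCall(item, "call")  # runs pytest_runtest_call(item=item)
    call.when      # "call"
    call.excinfo   # py.code.ExceptionInfo instance, or None if the call passed
    call.outerr    # (out, err) text captured while the hook ran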
@@ -98,10 +107,10 @@ def forked_run_report(item):
     ipickle.selfmemoize(item.config)
     def runforked():
         try:
-            testrep = basic_run_report(item)
+            reports = runtestprotocol(item, log=False)
         except KeyboardInterrupt:
             py.std.os._exit(EXITSTATUS_TESTEXIT)
-        return ipickle.dumps(testrep)
+        return ipickle.dumps(reports)
 
     ff = py.process.ForkedFunc(runforked)
     result = ff.waitfinish()
@@ -110,15 +119,13 @@ def forked_run_report(item):
     else:
         if result.exitstatus == EXITSTATUS_TESTEXIT:
             py.test.exit("forked test item %s raised Exit" %(item,))
-    return report_process_crash(item, result)
+    return [report_process_crash(item, result)]
 
 def report_process_crash(item, result):
     path, lineno = item._getfslineno()
-    longrepr = [
-        ("X", "CRASHED"),
-        ("%s:%s: CRASHED with signal %d" %(path, lineno, result.signal)),
-    ]
-    return ItemTestReport(item, excinfo=longrepr, when="???")
+    info = "%s:%s: running the test CRASHED with signal %d" %(
+        path, lineno, result.signal)
+    return ItemTestReport(item, excinfo=info, when="???")
 
 class BaseReport(object):
     def __repr__(self):
@@ -138,6 +145,8 @@ class ItemTestReport(BaseReport):
 
     def __init__(self, item, excinfo=None, when=None, outerr=None):
         self.item = item
+        self.when = when
+        self.outerr = outerr
         if item and when != "setup":
            self.keywords = item.readkeywords()
         else:
|
||||||
self.passed = True
|
self.passed = True
|
||||||
self.shortrepr = "."
|
self.shortrepr = "."
|
||||||
else:
|
else:
|
||||||
self.when = when
|
|
||||||
if not isinstance(excinfo, py.code.ExceptionInfo):
|
if not isinstance(excinfo, py.code.ExceptionInfo):
|
||||||
self.failed = True
|
self.failed = True
|
||||||
shortrepr = "?"
|
shortrepr = "?"
|
||||||
|
|
|
@@ -162,6 +162,8 @@ class TerminalReporter:
         self.write_fspath_result(fspath, "")
 
     def pytest_runtest_logreport(self, rep):
+        if rep.passed and rep.when in ("setup", "teardown"):
+            return
         fspath = rep.item.fspath
         cat, letter, word = self.getcategoryletterword(rep)
         if isinstance(word, tuple):
@@ -399,6 +401,9 @@ def repr_pythonversion(v=None):
 
 import pytest_runner as runner # XXX
 
+def basic_run_report(item):
+    return runner.call_and_report(item, "call", log=False)
+
 class TestTerminal:
     def test_pass_skip_fail(self, testdir, linecomp):
         modcol = testdir.getmodulecol("""
@@ -415,7 +420,7 @@ class TestTerminal:
         rep.config.hook.pytest_sessionstart(session=testdir.session)
 
         for item in testdir.genitems([modcol]):
-            ev = runner.basic_run_report(item)
+            ev = basic_run_report(item)
             rep.config.hook.pytest_runtest_logreport(rep=ev)
         linecomp.assert_contains_lines([
             "*test_pass_skip_fail.py .sF"
@@ -446,7 +451,7 @@ class TestTerminal:
             rep.config.hook.pytest_itemstart(item=item, node=None)
             s = linecomp.stringio.getvalue().strip()
             assert s.endswith(item.name)
-            rep.config.hook.pytest_runtest_logreport(rep=runner.basic_run_report(item))
+            rep.config.hook.pytest_runtest_logreport(rep=basic_run_report(item))
 
         linecomp.assert_contains_lines([
             "*test_pass_skip_fail_verbose.py:2: *test_ok*PASS*",
@@ -537,7 +542,7 @@ class TestTerminal:
                 raise ValueError()
         """)
         rep = TerminalReporter(modcol.config, file=linecomp.stringio)
-        reports = [runner.basic_run_report(x) for x in modcol.collect()]
+        reports = [basic_run_report(x) for x in modcol.collect()]
         rep.pytest_looponfailinfo(reports, [modcol.config.topdir])
         linecomp.assert_contains_lines([
             "*test_looponfailreport.py:2: assert 0",
@@ -563,7 +568,7 @@ class TestTerminal:
         rep.config.hook.pytest_sessionstart(session=testdir.session)
         for item in testdir.genitems([modcol]):
             rep.config.hook.pytest_runtest_logreport(
-                rep=runner.basic_run_report(item))
+                rep=basic_run_report(item))
         rep.config.hook.pytest_sessionfinish(session=testdir.session, exitstatus=1)
         s = linecomp.stringio.getvalue()
         if tbopt == "long":
@@ -644,7 +649,7 @@ class TestTerminal:
         try:
             for item in testdir.genitems([modcol]):
                 modcol.config.hook.pytest_runtest_logreport(
-                    rep=runner.basic_run_report(item))
+                    rep=basic_run_report(item))
         except KeyboardInterrupt:
             excinfo = py.code.ExceptionInfo()
         else:
@@ -10,11 +10,13 @@ example:
 """
 import py
 
-def pytest_runtest_makereport(__call__, item, excinfo, when, outerr):
+def pytest_runtest_makereport(__call__, item, call):
+    if call.when != "call":
+        return
     if hasattr(item, 'obj') and hasattr(item.obj, 'func_dict'):
         if 'xfail' in item.obj.func_dict:
             res = __call__.execute(firstresult=True)
-            if excinfo:
+            if call.excinfo:
                 res.skipped = True
                 res.failed = res.passed = False
             else:
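For reference, a test picked up by this plugin is marked the way the
runner tests below do it::

    import py
    @py.test.mark(xfail="needs refactoring")
    def test_func():
        assert 0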
@@ -30,29 +30,32 @@ class TestSetupState:
 
 class BaseFunctionalTests:
     def test_funcattr(self, testdir):
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             import py
             @py.test.mark(xfail="needs refactoring")
             def test_func():
                 raise Exit()
         """)
+        rep = reports[1]
         assert rep.keywords['xfail'] == "needs refactoring"
 
     def test_passfunction(self, testdir):
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             def test_func():
                 pass
         """)
+        rep = reports[1]
         assert rep.passed
         assert not rep.failed
         assert rep.shortrepr == "."
         assert not hasattr(rep, 'longrepr')
 
     def test_failfunction(self, testdir):
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             def test_func():
                 assert 0
         """)
+        rep = reports[1]
         assert not rep.passed
         assert not rep.skipped
         assert rep.failed
@@ -61,11 +64,12 @@ class BaseFunctionalTests:
         assert str(rep.shortrepr) == "F"
 
     def test_skipfunction(self, testdir):
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             import py
             def test_func():
                 py.test.skip("hello")
         """)
+        rep = reports[1]
         assert not rep.failed
         assert not rep.passed
         assert rep.skipped
@@ -77,44 +81,49 @@ class BaseFunctionalTests:
         #assert not rep.skipped.failurerepr
 
     def test_skip_in_setup_function(self, testdir):
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             import py
             def setup_function(func):
                 py.test.skip("hello")
             def test_func():
                 pass
         """)
-        print rep
+        print reports
+        rep = reports[0]
         assert not rep.failed
         assert not rep.passed
         assert rep.skipped
         #assert rep.skipped.reason == "hello"
         #assert rep.skipped.location.lineno == 3
         #assert rep.skipped.location.lineno == 3
+        assert len(reports) == 1
 
     def test_failure_in_setup_function(self, testdir):
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             import py
             def setup_function(func):
                 raise ValueError(42)
             def test_func():
                 pass
         """)
-        print rep
+        rep = reports[0]
         assert not rep.skipped
         assert not rep.passed
         assert rep.failed
         assert rep.when == "setup"
+        assert len(reports) == 1
 
     def test_failure_in_teardown_function(self, testdir):
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             import py
             def teardown_function(func):
                 raise ValueError(42)
             def test_func():
                 pass
         """)
-        print rep
+        print reports
+        assert len(reports) == 3
+        rep = reports[2]
         assert not rep.skipped
         assert not rep.passed
         assert rep.failed
@@ -129,11 +138,12 @@ class BaseFunctionalTests:
             def repr_failure(self, excinfo, outerr):
                 return "hello"
         """)
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             import py
             def test_func():
                 assert 0
         """)
+        rep = reports[1]
         assert not rep.skipped
         assert not rep.passed
         assert rep.failed
@@ -149,13 +159,15 @@ class BaseFunctionalTests:
             def repr_failure(self, excinfo):
                 assert 0
         """)
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             import py
             def setup_function(func):
                 raise ValueError(42)
             def test_func():
                 pass
         """)
+        assert len(reports) == 1
+        rep = reports[0]
         print rep
         assert not rep.skipped
         assert not rep.passed
@@ -166,29 +178,29 @@ class BaseFunctionalTests:
         #assert instanace(rep.failed.failurerepr, PythonFailureRepr)
 
     def test_capture_in_func(self, testdir):
-        rep = testdir.runitem("""
-            import py
+        reports = testdir.runitem("""
+            import sys
             def setup_function(func):
-                print >>py.std.sys.stderr, "in setup"
+                print "in setup"
             def test_func():
                 print "in function"
                 assert 0
-            def teardown_func(func):
+            def teardown_function(func):
                 print "in teardown"
         """)
-        assert rep.failed
-        # out, err = rep.failed.outerr
-        # assert out == ['in function\nin teardown\n']
-        # assert err == ['in setup\n']
+        assert reports[0].outerr[0] == "in setup\n"
+        assert reports[1].outerr[0] == "in function\n"
+        assert reports[2].outerr[0] == "in teardown\n"
 
     def test_systemexit_does_not_bail_out(self, testdir):
         try:
-            rep = testdir.runitem("""
+            reports = testdir.runitem("""
                 def test_func():
                     raise SystemExit(42)
             """)
         except SystemExit:
             py.test.fail("runner did not catch SystemExit")
+        rep = reports[1]
         assert rep.failed
         assert rep.when == "call"
@@ -208,7 +220,9 @@ class BaseFunctionalTests:
 
 class TestExecutionNonForked(BaseFunctionalTests):
     def getrunner(self):
-        return runner.basic_run_report
+        def f(item):
+            return runner.runtestprotocol(item, log=False)
+        return f
 
     def test_keyboardinterrupt_propagates(self, testdir):
         from py.__.test.outcome import Exit
@@ -229,11 +243,12 @@ class TestExecutionForked(BaseFunctionalTests):
         return runner.forked_run_report
 
     def test_suicide(self, testdir):
-        rep = testdir.runitem("""
+        reports = testdir.runitem("""
             def test_func():
                 import os
                 os.kill(os.getpid(), 15)
         """)
+        rep = reports[0]
         assert rep.failed
         assert rep.when == "???"
@@ -265,3 +280,18 @@ class TestCollectionReports:
         assert not rep.failed
         assert not rep.passed
         assert rep.skipped
+
+
+def test_functional_boxed(testdir):
+    if not hasattr(py.std.os, 'fork'):
+        py.test.skip("needs os.fork")
+    p1 = testdir.makepyfile("""
+        import os
+        def test_function():
+            os.kill(os.getpid(), 15)
+    """)
+    result = testdir.runpytest(p1, "--boxed")
+    assert result.stdout.fnmatch_lines([
+        "*CRASHED*",
+        "*1 failed*"
+    ])