* rename, cleanup and document runtest hooks
* factor runner code into pytest_runner plugin
* cleanup setupstate handling

--HG--
branch : trunk

parent 58eba8a9a4
commit d16688a1e6

@@ -1,39 +1,17 @@
 ==========================
-py.test plugins
+hooks and plugins
 ==========================

-Much of py.test's functionality is implemented as a plugin.
+py.test implements much of its functionality by calling so-called
+**hooks**. A hook is a function with a ``pytest_`` prefix and a list of
+named arguments. Hook functions are usually defined in plugins.
+A plugin is a module or package that makes hook functions available.

-Included plugins
-================
-
-When loading a plugin module (which needs to have a ``pytest_`` prefix as well)
-py.test performs strict checking on the function signature. Function
-and argument names need to match exactly the `original definition of the hook`_.
-This allows for early mismatch reporting and minimizes version incompatibilities.
-
-You can find the source code of all default plugins in
-http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/
-
-plugins that add reporting aspects
------------------------------------
-
-pytest_terminal: default reporter for writing info to terminals
-
-pytest_resultlog: log test results in machine-readable form to a file
-
-plugins for adding new test types
------------------------------------
-
-pytest_unittest: run traditional unittest TestCase instances
-
-pytest_doctest: run doctests in python modules or .txt files
-
-pytest_restdoc: provide RestructuredText syntax and link checking
-
-plugins for python test functions
------------------------------------
-
-pytest_xfail: provides "expected to fail" test marker
-
-pytest_tmpdir: provide temporary directories to test functions
-
-pytest_plugintester: generic plugin apichecks, support for functional plugin tests
-
 Loading plugins and specifying dependencies
 ============================================
@@ -46,26 +24,42 @@ py.test loads and configures plugins at tool startup:
 * by pre-scanning the command line for the ``-p name`` option
   and loading the specified plugin *before actual command line parsing*.

-* by loading all plugins specified via a ``pytest_plugins``
-  variable in ``conftest.py`` files or test modules.
+* by loading all plugins specified by the ``pytest_plugins``
+  variable in a ``conftest.py`` file or test modules.

 Specifying a plugin in a test module or ``conftest.py`` will
 only lead to activation when ``py.test`` actually sees the
-directory and the file during the collection process. This is
+directory and the file during the collection process. This happens
 already after command line parsing; there is no attempt to do
 a "pre-scan of all subdirs" as this would mean a potentially
 very large delay. As long as you don't add command line
 options, this detail need not worry you.

+A plugin module may specify its dependencies via
+another ``pytest_plugins`` definition.
+
-ensure a plugin is loaded
------------------------------------
+Included plugins
+================

-If you create a ``conftest.py`` file with the following content::
+You can find the source code of all default plugins in
+http://bitbucket.org/hpk42/py-trunk/src/tip/py/test/plugin/
+
+Overview on available hooks
+====================================
+
+"runtest" hooks
+-------------------
+
+A number of hooks allow interacting with the running of a test.
+A test can be many things - for example a python, javascript
+or prolog test function or a doctest. The following hooks are
+usually invoked on running a test item::
+
+    pytest_runtest_protocol(item) -> True # and invokes:
+    pytest_runtest_setup(item) -> None
+    pytest_runtest_call(item) -> (excinfo, when, outerr)
+    pytest_runtest_makereport(item, excinfo, when, outerr) -> report
+    pytest_runtest_logreport(report) -> None
+    pytest_runtest_teardown(item) -> None

-    pytest_plugins = "pytest_myextension",
-
-then all tests in and below that directory will consult the hooks
-defined in the imported ``pytest_myextension``. A plugin
-may specify its dependencies via another ``pytest_plugins``
-definition.

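The hooks listed above are what a plugin actually implements. As a purely
illustrative sketch (the plugin name ``pytest_myextension`` and the hook
bodies are made up; only the hook names and signatures come from the
documentation in this commit), a ``conftest.py`` hooking into the runtest
cycle might look like::

    # conftest.py -- illustrative sketch only
    pytest_plugins = "pytest_myextension",   # hypothetical plugin dependency

    def pytest_runtest_setup(item):
        # called before each test item runs
        print "about to run", item

    def pytest_runtest_logreport(rep):
        # called once per finished test item with its report
        if rep.failed:
            print "failed:", rep.item
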
@@ -2,7 +2,6 @@ import py, os
 from conftesthandle import Conftest

 from py.__.test import parseopt
-from py.__.test.runner import SetupState

 def ensuretemp(string, dir=1):
     """ return temporary directory path with
@@ -41,7 +40,6 @@ class Config(object):
         assert isinstance(pluginmanager, py.test._PluginManager)
         self.pluginmanager = pluginmanager
         self._conftest = Conftest(onimport=self._onimportconftest)
-        self._setupstate = SetupState()
         self.hook = pluginmanager.hook

     def _onimportconftest(self, conftestmodule):

@@ -10,5 +10,5 @@ Generator = py.test.collect.Generator
 Function = py.test.collect.Function
 Instance = py.test.collect.Instance

-pytest_plugins = "default terminal xfail tmpdir execnetcleanup monkeypatch recwarn pdb".split()
+pytest_plugins = "default runner terminal xfail tmpdir execnetcleanup monkeypatch recwarn pdb".split()

@@ -5,7 +5,6 @@
 """

 import py
-from py.__.test.runner import basic_run_report, basic_collect_report, ItemTestReport
 from py.__.test.session import Session
 from py.__.test import outcome
 from py.__.test.dist.nodemanage import NodeManager
@@ -24,7 +23,7 @@ class LoopState(object):
         self.shuttingdown = False
         self.testsfailed = False

-    def pytest_itemtestreport(self, rep):
+    def pytest_runtest_logreport(self, rep):
         if rep.item in self.dsession.item2nodes:
             self.dsession.removeitem(rep.item, rep.node)
         if rep.failed:
@@ -61,14 +60,14 @@ class DSession(Session):
         self.item2nodes = {}
         super(DSession, self).__init__(config=config)

-    def pytest_configure(self, __call__, config):
-        __call__.execute()
-        try:
-            config.getxspecs()
-        except config.Error:
-            print
-            raise config.Error("dist mode %r needs test execution environments, "
-                "none found." %(config.option.dist))
+    #def pytest_configure(self, __call__, config):
+    #    __call__.execute()
+    #    try:
+    #        config.getxspecs()
+    #    except config.Error:
+    #        print
+    #        raise config.Error("dist mode %r needs test execution environments, "
+    #            "none found." %(config.option.dist))

     def main(self, colitems=None):
         colitems = self.getinitialitems(colitems)
@@ -177,7 +176,8 @@
                 senditems.append(next)
             else:
                 self.config.hook.pytest_collectstart(collector=next)
-                self.queueevent("pytest_collectreport", rep=basic_collect_report(next))
+                colrep = self.config.hook.pytest_make_collect_report(collector=next)
+                self.queueevent("pytest_collectreport", rep=colrep)
         if self.config.option.dist == "each":
             self.senditems_each(senditems)
         else:
@@ -239,9 +239,10 @@

     def handle_crashitem(self, item, node):
         longrepr = "!!! Node %r crashed during running of test %r" %(node, item)
-        rep = ItemTestReport(item, when="???", excinfo=longrepr)
+        rep = item.config.hook.pytest_runtest_makereport(
+            item=item, when="???", excinfo=longrepr, outerr=None)
         rep.node = node
-        self.config.hook.pytest_itemtestreport(rep=rep)
+        self.config.hook.pytest_runtest_logreport(rep=rep)

     def setup(self):
         """ setup any necessary resources ahead of the test run. """

@@ -1,13 +1,12 @@
 from py.__.test.dist.dsession import DSession
-from py.__.test.runner import basic_collect_report
 from py.__.test import outcome
 import py

 XSpec = py.execnet.XSpec

-def run(item, node):
-    from py.__.test.runner import basic_run_report
-    rep = basic_run_report(item)
+def run(item, node, excinfo=None):
+    rep = item.config.hook.pytest_runtest_makereport(
+        item=item, excinfo=excinfo, when="call", outerr=("", ""))
     rep.node = node
     return rep
@@ -134,7 +133,7 @@ class TestDSession:
         session.queueevent(None)
         session.loop_once(loopstate)
         assert node.sent == [[item]]
-        session.queueevent("pytest_itemtestreport", rep=run(item, node))
+        session.queueevent("pytest_runtest_logreport", rep=run(item, node))
         session.loop_once(loopstate)
         assert loopstate.shuttingdown
         assert not loopstate.testsfailed
@@ -180,7 +179,7 @@ class TestDSession:
         session.loop_once(loopstate)

         assert loopstate.colitems == [item2] # do not reschedule crash item
-        rep = reprec.matchreport(names="pytest_itemtestreport")
+        rep = reprec.matchreport(names="pytest_runtest_logreport")
         assert rep.failed
         assert rep.item == item1
         assert str(rep.longrepr).find("crashed") != -1
@@ -198,7 +197,7 @@ class TestDSession:
         session.loop_once(loopstate)
         assert len(session.node2pending) == 1

-    def runthrough(self, item):
+    def runthrough(self, item, excinfo=None):
         session = DSession(item.config)
         node = MockNode()
         session.addnode(node)
@@ -208,8 +207,8 @@ class TestDSession:
         session.loop_once(loopstate)

         assert node.sent == [[item]]
-        ev = run(item, node)
-        session.queueevent("pytest_itemtestreport", rep=ev)
+        ev = run(item, node, excinfo=excinfo)
+        session.queueevent("pytest_runtest_logreport", rep=ev)
         session.loop_once(loopstate)
         assert loopstate.shuttingdown
         session.queueevent("pytest_testnodedown", node=node, error=None)
@@ -224,7 +223,7 @@ class TestDSession:

     def test_exit_completed_tests_fail(self, testdir):
         item = testdir.getitem("def test_func(): 0/0")
-        session, exitstatus = self.runthrough(item)
+        session, exitstatus = self.runthrough(item, excinfo="fail")
         assert exitstatus == outcome.EXIT_TESTSFAILED

     def test_exit_on_first_failing(self, testdir):
@@ -238,16 +237,16 @@ class TestDSession:
         session = DSession(modcol.config)
         node = MockNode()
         session.addnode(node)
-        items = basic_collect_report(modcol).result
+        items = modcol.config.hook.pytest_make_collect_report(collector=modcol).result

         # trigger testing - this sends tests to the node
         session.triggertesting(items)

         # run tests ourselves and produce reports
-        ev1 = run(items[0], node)
-        ev2 = run(items[1], node)
-        session.queueevent("pytest_itemtestreport", rep=ev1) # a failing one
-        session.queueevent("pytest_itemtestreport", rep=ev2)
+        ev1 = run(items[0], node, "fail")
+        ev2 = run(items[1], node, None)
+        session.queueevent("pytest_runtest_logreport", rep=ev1) # a failing one
+        session.queueevent("pytest_runtest_logreport", rep=ev2)
         # now call the loop
         loopstate = session._initloopstate(items)
         session.loop_once(loopstate)
@@ -262,7 +261,7 @@ class TestDSession:
         loopstate = session._initloopstate([])
         loopstate.shuttingdown = True
         reprec = testdir.getreportrecorder(session)
-        session.queueevent("pytest_itemtestreport", rep=run(item, node))
+        session.queueevent("pytest_runtest_logreport", rep=run(item, node))
         session.loop_once(loopstate)
         assert not reprec.getcalls("pytest_testnodedown")
         session.queueevent("pytest_testnodedown", node=node, error=None)
@@ -303,7 +302,7 @@ class TestDSession:
         node = MockNode()
         session.addnode(node)
         session.senditems_load([item])
-        session.queueevent("pytest_itemtestreport", rep=run(item, node))
+        session.queueevent("pytest_runtest_logreport", rep=run(item, node))
         loopstate = session._initloopstate([])
         session.loop_once(loopstate)
         assert node._shutdown is True
@@ -324,12 +323,12 @@ class TestDSession:
         node = MockNode()
         session.addnode(node)

-        colreport = basic_collect_report(modcol)
+        colreport = modcol.config.hook.pytest_make_collect_report(collector=modcol)
         item1, item2 = colreport.result
         session.senditems_load([item1])
         # node2pending will become empty when the loop sees the report
         rep = run(item1, node)
-        session.queueevent("pytest_itemtestreport", rep=run(item1, node))
+        session.queueevent("pytest_runtest_logreport", rep=run(item1, node))

         # but we have a collection pending
         session.queueevent("pytest_collectreport", rep=colreport)
@@ -356,11 +355,11 @@ class TestDSession:
         dsession = DSession(config)
         hookrecorder = testdir.getreportrecorder(config).hookrecorder
         dsession.main([config.getfsnode(p1)])
-        rep = hookrecorder.popcall("pytest_itemtestreport").rep
+        rep = hookrecorder.popcall("pytest_runtest_logreport").rep
         assert rep.passed
-        rep = hookrecorder.popcall("pytest_itemtestreport").rep
+        rep = hookrecorder.popcall("pytest_runtest_logreport").rep
         assert rep.skipped
-        rep = hookrecorder.popcall("pytest_itemtestreport").rep
+        rep = hookrecorder.popcall("pytest_runtest_logreport").rep
         assert rep.failed
         # see that the node is really down
         node = hookrecorder.popcall("pytest_testnodedown").node

@@ -122,6 +122,6 @@ class TestNodeManager:
         """)
         reprec = testdir.inline_run("-d", "--rsyncdir=%s" % testdir.tmpdir,
             "--tx", specssh, testdir.tmpdir)
-        rep, = reprec.getreports("pytest_itemtestreport")
+        rep, = reprec.getreports("pytest_runtest_logreport")
         assert rep.passed

@@ -111,7 +111,7 @@ class TestMasterSlaveConnection:
         item = testdir.getitem("def test_func(): pass")
         node = mysetup.makenode(item.config)
         node.send(item)
-        kwargs = mysetup.geteventargs("pytest_itemtestreport")
+        kwargs = mysetup.geteventargs("pytest_runtest_logreport")
         rep = kwargs['rep']
         assert rep.passed
         print rep
@@ -131,11 +131,11 @@ class TestMasterSlaveConnection:
         for item in items:
             node.send(item)
         for outcome in "passed failed skipped".split():
-            kwargs = mysetup.geteventargs("pytest_itemtestreport")
+            kwargs = mysetup.geteventargs("pytest_runtest_logreport")
             rep = kwargs['rep']
             assert getattr(rep, outcome)

         node.sendlist(items)
         for outcome in "passed failed skipped".split():
-            rep = mysetup.geteventargs("pytest_itemtestreport")['rep']
+            rep = mysetup.geteventargs("pytest_runtest_logreport")['rep']
             assert getattr(rep, outcome)

@@ -50,10 +50,10 @@ class TXNode(object):
             elif eventname == "slavefinished":
                 self._down = True
                 self.notify("pytest_testnodedown", error=None, node=self)
-            elif eventname == "pytest_itemtestreport":
+            elif eventname == "pytest_runtest_logreport":
                 rep = kwargs['rep']
                 rep.node = self
-                self.notify("pytest_itemtestreport", rep=rep)
+                self.notify("pytest_runtest_logreport", rep=rep)
             else:
                 self.notify(eventname, *args, **kwargs)
         except KeyboardInterrupt:
@@ -105,8 +105,8 @@ class SlaveNode(object):
     def sendevent(self, eventname, *args, **kwargs):
         self.channel.send((eventname, args, kwargs))

-    def pytest_itemtestreport(self, rep):
-        self.sendevent("pytest_itemtestreport", rep=rep)
+    def pytest_runtest_logreport(self, rep):
+        self.sendevent("pytest_runtest_logreport", rep=rep)

     def run(self):
         channel = self.channel
@@ -124,9 +124,9 @@ class SlaveNode(object):
                 break
             if isinstance(task, list):
                 for item in task:
-                    item.config.pluginmanager.do_itemrun(item)
+                    item.config.hook.pytest_runtest_protocol(item=item)
             else:
-                task.config.pluginmanager.do_itemrun(item=task)
+                task.config.hook.pytest_runtest_protocol(item=task)
         except KeyboardInterrupt:
             raise
         except:

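The master/slave traffic in this file is plain tuples of ``(eventname,
args, kwargs)`` pushed over an execnet channel by ``sendevent()`` and
dispatched again on the other side. A rough sketch of that round-trip
with a stand-in channel (``FakeChannel`` is invented here for
illustration; a real setup uses py.execnet channels)::

    # illustrative sketch only: event tuples over a fake channel
    class FakeChannel(object):
        def __init__(self):
            self.items = []
        def send(self, obj):
            self.items.append(obj)   # a real channel serializes and ships obj
        def receive(self):
            return self.items.pop(0)

    channel = FakeChannel()
    channel.send(("pytest_runtest_logreport", (), {"rep": "<report>"}))
    eventname, args, kwargs = channel.receive()
    assert eventname == "pytest_runtest_logreport"
    assert kwargs["rep"] == "<report>"
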
@@ -137,10 +137,10 @@ def slave_runsession(channel, config, fullwidth, hasmarkup):
     session.shouldclose = channel.isclosed

     class Failures(list):
-        def pytest_itemtestreport(self, rep):
+        def pytest_runtest_logreport(self, rep):
             if rep.failed:
                 self.append(rep)
-        pytest_collectreport = pytest_itemtestreport
+        pytest_collectreport = pytest_runtest_logreport

     failreports = Failures()
     session.pluginmanager.register(failreports)

@@ -35,6 +35,10 @@ class PluginHooks:
     # ------------------------------------------------------------------------------
     # collection hooks
     # ------------------------------------------------------------------------------
+    def pytest_make_collect_report(self, collector):
+        """ perform a collection and return a collection report. """
+    pytest_make_collect_report.firstresult = True
+
     def pytest_collect_file(self, path, parent):
         """ return Collection node or None. """
@@ -67,20 +71,29 @@ class PluginHooks:
     # ------------------------------------------------------------------------------
     # runtest related hooks
     # ------------------------------------------------------------------------------
+    def pytest_runtest_setup(self, item):
+        """ called before pytest_runtest_call(). """
+
+    def pytest_runtest_teardown(self, item):
+        """ called after pytest_runtest_call. """
+
+    def pytest_runtest_call(self, item):
+        """ called to execute the test item. """
+
+    def pytest_runtest_makereport(self, item, excinfo, when, outerr):
+        """ make ItemTestReport for the specified test outcome. """
+    pytest_runtest_makereport.firstresult = True
+
-    def pytest_itemrun(self, item, pdb=None):
+    def pytest_runtest_protocol(self, item):
         """ run given test item and return test report. """
-    pytest_itemrun.firstresult = True
+    pytest_runtest_protocol.firstresult = True

     def pytest_pyfunc_call(self, pyfuncitem, args, kwargs):
         """ return True if we consumed/did the call to the python function item. """
     pytest_pyfunc_call.firstresult = True

-    def pytest_item_makereport(self, item, excinfo, when, outerr):
-        """ make ItemTestReport for the specified test outcome. """
-    pytest_item_makereport.firstresult = True
-
-    def pytest_itemtestreport(self, rep):
+    def pytest_runtest_logreport(self, rep):
         """ process item test report. """

     # ------------------------------------------------------------------------------

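Several hooks above are marked ``firstresult = True``
(``pytest_make_collect_report``, ``pytest_runtest_makereport``,
``pytest_runtest_protocol``, ``pytest_pyfunc_call``): the hook call stops
at the first implementation returning a non-None value. A minimal sketch
of that dispatch rule, independent of the real hook machinery::

    # illustrative sketch only: approximate "firstresult" call semantics
    def call_firstresult(implementations, **kwargs):
        for impl in implementations:
            res = impl(**kwargs)
            if res is not None:
                return res      # first non-None result wins

    impls = [lambda item: None, lambda item: True]
    assert call_firstresult(impls, item="<item>") is True
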
@@ -2,19 +2,6 @@

 import py

-def pytest_itemrun(item):
-    from py.__.test.runner import basic_run_report, forked_run_report
-    if item.config.option.boxed:
-        report = forked_run_report(item)
-    else:
-        report = basic_run_report(item)
-    item.config.hook.pytest_itemtestreport(rep=report)
-    return True
-
-def pytest_item_makereport(item, excinfo, when, outerr):
-    from py.__.test import runner
-    return runner.ItemTestReport(item, excinfo, when, outerr)
-
 def pytest_pyfunc_call(pyfuncitem, args, kwargs):
     pyfuncitem.obj(*args, **kwargs)
@@ -76,9 +63,6 @@ def pytest_addoption(parser):
     group._addoption('-s',
         action="store_true", dest="nocapture", default=False,
         help="disable catching of stdout/stderr during test run.")
-    group.addoption('--boxed',
-        action="store_true", dest="boxed", default=False,
-        help="box each test run in a separate process")
     group._addoption('-p', action="append", dest="plugin", default = [],
         help=("load the specified plugin after command line parsing. "))
     group._addoption('-f', '--looponfail',

@@ -119,7 +119,7 @@ class TestDoctests:
             2
         """)
         reprec = testdir.inline_run(p)
-        call = reprec.getcall("pytest_itemtestreport")
+        call = reprec.getcall("pytest_runtest_logreport")
         assert call.rep.failed
         assert call.rep.longrepr
         # XXX

@@ -22,7 +22,7 @@ def pytest_configure(config):
     config.pluginmanager.register(PdbInvoke())

 class PdbInvoke:
-    def pytest_item_makereport(self, item, excinfo, when, outerr):
+    def pytest_runtest_makereport(self, item, excinfo, when, outerr):
         if excinfo and not excinfo.errisinstance(Skipped):
             tw = py.io.TerminalWriter()
             repr = excinfo.getrepr()

@@ -5,7 +5,6 @@ funcargs and support code for testing py.test functionality.
 import py
 import os
 import inspect
-from py.__.test import runner
 from py.__.test.config import Config as pytestConfig
 import api
@@ -161,7 +160,7 @@ class TmpTestdir:
         p = self.makepyfile(source)
         l = list(args) + [p]
         reprec = self.inline_run(*l)
-        reports = reprec.getreports("pytest_itemtestreport")
+        reports = reprec.getreports("pytest_runtest_logreport")
         assert len(reports) == 1, reports
         return reports[0]
@@ -227,6 +226,12 @@ class TmpTestdir:
         self.makepyfile(__init__ = "#")
         self.config = self.parseconfig(path, *configargs)
         self.session = self.config.initsession()
+        #self.config.pluginmanager.do_configure(config=self.config)
+        # XXX
+        self.config.pluginmanager.import_plugin("runner")
+        plugin = self.config.pluginmanager.getplugin("runner")
+        plugin.pytest_configure(config=self.config)
+
         return self.config.getfsnode(path)

     def prepare(self):
@@ -321,10 +326,10 @@ class ReportRecorder(object):

     # functionality for test reports

-    def getreports(self, names="pytest_itemtestreport pytest_collectreport"):
+    def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
         return [x.rep for x in self.getcalls(names)]

-    def matchreport(self, inamepart="", names="pytest_itemtestreport pytest_collectreport"):
+    def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport"):
         """ return a testreport whose dotted import path matches """
         l = []
         for rep in self.getreports(names=names):
@@ -339,7 +344,7 @@ class ReportRecorder(object):
                 inamepart, l))
         return l[0]

-    def getfailures(self, names='pytest_itemtestreport pytest_collectreport'):
+    def getfailures(self, names='pytest_runtest_logreport pytest_collectreport'):
         return [rep for rep in self.getreports(names) if rep.failed]

     def getfailedcollections(self):
@@ -349,7 +354,7 @@ class ReportRecorder(object):
         passed = []
         skipped = []
         failed = []
-        for rep in self.getreports("pytest_itemtestreport"):
+        for rep in self.getreports("pytest_runtest_logreport"):
             if rep.passed:
                 passed.append(rep)
             elif rep.skipped:
@@ -378,23 +383,29 @@ def test_reportrecorder(testdir):
     registry = py._com.Registry()
     recorder = testdir.getreportrecorder(registry)
     assert not recorder.getfailures()
-    rep = runner.ItemTestReport(None, None)
+    item = testdir.getitem("def test_func(): pass")
+    rep = item.config.hook.pytest_runtest_makereport(
+        item=item, excinfo=None, when="call", outerr=None)
+
     rep.passed = False
     rep.failed = True
-    recorder.hook.pytest_itemtestreport(rep=rep)
+    recorder.hook.pytest_runtest_logreport(rep=rep)
     failures = recorder.getfailures()
     assert failures == [rep]
     failures = recorder.getfailures()
     assert failures == [rep]

-    rep = runner.ItemTestReport(None, None)
+    rep = item.config.hook.pytest_runtest_makereport(
+        item=item, excinfo=None, when="call", outerr=None)
     rep.passed = False
     rep.skipped = True
-    recorder.hook.pytest_itemtestreport(rep=rep)
+    recorder.hook.pytest_runtest_logreport(rep=rep)

-    rep = runner.CollectReport(None, None)
+    modcol = testdir.getmodulecol("")
+    rep = modcol.config.hook.pytest_make_collect_report(collector=modcol)
     rep.passed = False
     rep.failed = True
     rep.skipped = False
     recorder.hook.pytest_collectreport(rep=rep)

     passed, skipped, failed = recorder.listoutcomes()
@@ -408,7 +419,7 @@ def test_reportrecorder(testdir):

     recorder.unregister()
     recorder.clear()
-    recorder.hook.pytest_itemtestreport(rep=rep)
+    recorder.hook.pytest_runtest_logreport(rep=rep)
     py.test.raises(ValueError, "recorder.getfailures()")

 class LineComp:

@@ -59,7 +59,7 @@ class ResultLog(object):
         testpath = generic_path(node)
         self.write_log_entry(testpath, shortrepr, longrepr)

-    def pytest_itemtestreport(self, rep):
+    def pytest_runtest_logreport(self, rep):
         code = rep.shortrepr
         if rep.passed:
             longrepr = ""

@@ -1,5 +1,5 @@
 """
-internal classes for
+collect and run test items.

 * executing test items
 * running collectors
@@ -10,6 +10,58 @@ import py

 from py.__.test.outcome import Skipped

+#
+# pytest plugin hooks
+#
+
+def pytest_addoption(parser):
+    group = parser.getgroup("general")
+    group.addoption('--boxed',
+        action="store_true", dest="boxed", default=False,
+        help="box each test run in a separate process")
+
+def pytest_configure(config):
+    config._setupstate = SetupState()
+
+def pytest_unconfigure(config):
+    config._setupstate.teardown_all()
+
+def pytest_make_collect_report(collector):
+    call = collector.config.guardedcall(
+        lambda: collector._memocollect()
+    )
+    result = None
+    if not call.excinfo:
+        result = call.result
+    return CollectReport(collector, result, call.excinfo, call.outerr)
+
+def pytest_runtest_protocol(item):
+    if item.config.option.boxed:
+        report = forked_run_report(item)
+    else:
+        report = basic_run_report(item)
+    item.config.hook.pytest_runtest_logreport(rep=report)
+    return True
+
+def pytest_runtest_setup(item):
+    item.config._setupstate.prepare(item)
+
+def pytest_runtest_call(item):
+    if not item._deprecated_testexecution():
+        item.runtest()
+
+def pytest_runtest_makereport(item, excinfo, when, outerr):
+    return ItemTestReport(item, excinfo, when, outerr)
+
+def pytest_runtest_teardown(item):
+    item.config._setupstate.teardown_exact(item)
+
+#
+# Implementation
+#

 class Call:
     excinfo = None
     def __init__(self, when, func):
@@ -21,35 +73,24 @@ class Call:
         except:
             self.excinfo = py.code.ExceptionInfo()

-def runtest_with_deprecated_check(item):
-    if not item._deprecated_testexecution():
-        item.runtest()
-
 def basic_run_report(item):
     """ return report about setting up and running a test item. """
-    setupstate = item.config._setupstate
     capture = item.config._getcapture()
+    hook = item.config.hook
     try:
-        call = Call("setup", lambda: setupstate.prepare(item))
+        call = Call("setup", lambda: hook.pytest_runtest_setup(item=item))
         if not call.excinfo:
-            call = Call("runtest", lambda: runtest_with_deprecated_check(item))
+            call = Call("call", lambda: hook.pytest_runtest_call(item=item))
             # in case of an error we defer teardown to not shadow the error
             if not call.excinfo:
-                call = Call("teardown", lambda: setupstate.teardown_exact(item))
+                call = Call("teardown", lambda: hook.pytest_runtest_teardown(item=item))
     finally:
         outerr = capture.reset()
-    return item.config.hook.pytest_item_makereport(
+    return item.config.hook.pytest_runtest_makereport(
         item=item, excinfo=call.excinfo,
         when=call.when, outerr=outerr)

-def basic_collect_report(collector):
-    call = collector.config.guardedcall(
-        lambda: collector._memocollect()
-    )
-    result = None
-    if not call.excinfo:
-        result = call.result
-    return CollectReport(collector, result, call.excinfo, call.outerr)
-
 def forked_run_report(item):
     EXITSTATUS_TESTEXIT = 4
     from py.__.test.dist.mypickle import ImmutablePickler
@@ -122,7 +163,7 @@ class ItemTestReport(BaseReport):
         else:
             self.failed = True
             shortrepr = self.item.shortfailurerepr
-            if self.when == "runtest":
+            if self.when == "call":
                 longrepr = self.item.repr_failure(excinfo, outerr)
             else: # exception in setup or teardown
                 longrepr = self.item._repr_failure_py(excinfo, outerr)

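The control flow of ``basic_run_report`` above amounts to: guard each
phase, and run a later phase only if the earlier ones raised nothing, so
teardown never shadows a setup or call failure. A standalone sketch of
that sequencing (the function names here are invented for illustration)::

    # illustrative sketch only: guarded setup/call/teardown sequencing
    import sys

    def guarded(when, func):
        try:
            func()
            return when, None
        except Exception:
            return when, sys.exc_info()

    def run_phases(setup, call, teardown):
        when, excinfo = guarded("setup", setup)
        if excinfo is None:
            when, excinfo = guarded("call", call)
            # on error, teardown is deferred so it cannot shadow the failure
            if excinfo is None:
                when, excinfo = guarded("teardown", teardown)
        return when, excinfo

    when, excinfo = run_phases(lambda: None, lambda: 1 / 0, lambda: None)
    assert when == "call" and excinfo is not None
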
@@ -161,7 +161,7 @@ class TerminalReporter:
         fspath, lineno, msg = self._getreportinfo(item)
         self.write_fspath_result(fspath, "")

-    def pytest_itemtestreport(self, rep):
+    def pytest_runtest_logreport(self, rep):
         fspath = rep.item.fspath
         cat, letter, word = self.getcategoryletterword(rep)
         if isinstance(word, tuple):
@@ -397,10 +397,9 @@ def repr_pythonversion(v=None):
 #
 # ===============================================================================

-from py.__.test import runner
+import pytest_runner as runner # XXX

 class TestTerminal:

     def test_pass_skip_fail(self, testdir, linecomp):
         modcol = testdir.getmodulecol("""
             import py
@@ -417,7 +416,7 @@ class TestTerminal:

         for item in testdir.genitems([modcol]):
             ev = runner.basic_run_report(item)
-            rep.config.hook.pytest_itemtestreport(rep=ev)
+            rep.config.hook.pytest_runtest_logreport(rep=ev)
         linecomp.assert_contains_lines([
             "*test_pass_skip_fail.py .sF"
         ])
@@ -447,7 +446,7 @@ class TestTerminal:
         rep.config.hook.pytest_itemstart(item=item, node=None)
         s = linecomp.stringio.getvalue().strip()
         assert s.endswith(item.name)
-        rep.config.hook.pytest_itemtestreport(rep=runner.basic_run_report(item))
+        rep.config.hook.pytest_runtest_logreport(rep=runner.basic_run_report(item))

         linecomp.assert_contains_lines([
             "*test_pass_skip_fail_verbose.py:2: *test_ok*PASS*",
@@ -563,7 +562,7 @@ class TestTerminal:
         rep.config.pluginmanager.register(rep)
         rep.config.hook.pytest_sessionstart(session=testdir.session)
         for item in testdir.genitems([modcol]):
-            rep.config.hook.pytest_itemtestreport(
+            rep.config.hook.pytest_runtest_logreport(
                 rep=runner.basic_run_report(item))
         rep.config.hook.pytest_sessionfinish(session=testdir.session, exitstatus=1)
         s = linecomp.stringio.getvalue()
@@ -644,7 +643,7 @@ class TestTerminal:
         modcol.config.hook.pytest_sessionstart(session=testdir.session)
         try:
             for item in testdir.genitems([modcol]):
-                modcol.config.hook.pytest_itemtestreport(
+                modcol.config.hook.pytest_runtest_logreport(
                     rep=runner.basic_run_report(item))
         except KeyboardInterrupt:
             excinfo = py.code.ExceptionInfo()

@@ -10,7 +10,7 @@ example:
 """
 import py

-def pytest_item_makereport(__call__, item, excinfo, when, outerr):
+def pytest_runtest_makereport(__call__, item, excinfo, when, outerr):
     if hasattr(item, 'obj') and hasattr(item.obj, 'func_dict'):
         if 'xfail' in item.obj.func_dict:
             res = __call__.execute(firstresult=True)

@@ -1,5 +1,5 @@
 import py
-from py.__.test import runner
+from py.__.test.plugin import pytest_runner as runner
 from py.__.code.excinfo import ReprExceptionInfo

 class TestSetupState:
@@ -56,7 +56,7 @@ class BaseFunctionalTests:
         assert not rep.passed
         assert not rep.skipped
         assert rep.failed
-        assert rep.when == "runtest"
+        assert rep.when == "call"
         assert isinstance(rep.longrepr, ReprExceptionInfo)
         assert str(rep.shortrepr) == "F"
@@ -69,8 +69,8 @@ class BaseFunctionalTests:
         assert not rep.failed
         assert not rep.passed
         assert rep.skipped
-        #assert rep.skipped.when == "runtest"
-        #assert rep.skipped.when == "runtest"
+        #assert rep.skipped.when == "call"
+        #assert rep.skipped.when == "call"
         #assert rep.skipped == "%sreason == "hello"
         #assert rep.skipped.location.lineno == 3
         #assert rep.skipped.location.path
@@ -137,7 +137,7 @@ class BaseFunctionalTests:
         assert not rep.skipped
         assert not rep.passed
         assert rep.failed
-        #assert rep.outcome.when == "runtest"
+        #assert rep.outcome.when == "call"
         #assert rep.failed.where.lineno == 3
         #assert rep.failed.where.path.basename == "test_func.py"
         #assert rep.failed.failurerepr == "hello"
@@ -190,7 +190,7 @@ class BaseFunctionalTests:
         except SystemExit:
             py.test.fail("runner did not catch SystemExit")
         assert rep.failed
-        assert rep.when == "runtest"
+        assert rep.when == "call"

     def test_exit_propagates(self, testdir):
         from py.__.test.outcome import Exit
@@ -245,7 +245,7 @@ class TestCollectionReports:
             class TestClass:
                 pass
         """)
-        rep = runner.basic_collect_report(col)
+        rep = runner.pytest_make_collect_report(col)
         assert not rep.failed
         assert not rep.skipped
         assert rep.passed
@@ -261,9 +261,7 @@ class TestCollectionReports:
             def test_func():
                 pass
         """)
-        rep = runner.basic_collect_report(col)
+        rep = runner.pytest_make_collect_report(col)
         assert not rep.failed
         assert not rep.passed
         assert rep.skipped

@@ -183,11 +183,6 @@ class PluginManager(object):
         config.hook.pytest_unconfigure(config=config)
         config.pluginmanager.unregister(self)

-    def do_itemrun(self, item):
-        res = self.hook.pytest_itemrun(item=item)
-        if res is None:
-            raise ValueError("could not run %r" %(item,))
-
     #
     # XXX old code to automatically load classes
     #

@@ -331,16 +331,13 @@ class Function(FunctionMixin, py.test.collect.Item):
         if callobj is not _dummy:
             self._obj = callobj

-    #def addfinalizer(self, func):
-    #    self.config._setupstate.addfinalizer(func, colitem=self)
-
     def readkeywords(self):
         d = super(Function, self).readkeywords()
         d.update(self.obj.func_dict)
         return d

     def runtest(self):
-        """ execute the given test function. """
+        """ execute the underlying test function. """
         kwargs = getattr(self, 'funcargs', {})
         self.config.hook.pytest_pyfunc_call(
             pyfuncitem=self, args=self._args, kwargs=kwargs)

@@ -11,7 +11,6 @@ from py.__.test import outcome
 # imports used for genitems()
 Item = py.test.collect.Item
 Collector = py.test.collect.Collector
-from runner import basic_collect_report

 class Session(object):
     """
@@ -42,7 +41,7 @@ class Session(object):
             else:
                 assert isinstance(next, Collector)
                 self.config.hook.pytest_collectstart(collector=next)
-                rep = basic_collect_report(next)
+                rep = self.config.hook.pytest_make_collect_report(collector=next)
                 if rep.passed:
                     for x in self.genitems(rep.result, keywordexpr):
                         yield x
@@ -80,12 +79,12 @@ class Session(object):
         """ setup any necessary resources ahead of the test run. """
         self.config.hook.pytest_sessionstart(session=self)

-    def pytest_itemtestreport(self, rep):
+    def pytest_runtest_logreport(self, rep):
         if rep.failed:
             self._testsfailed = True
             if self.config.option.exitfirst:
                 self.shouldstop = True
-    pytest_collectreport = pytest_itemtestreport
+    pytest_collectreport = pytest_runtest_logreport

     def sessionfinishes(self, exitstatus=0, excinfo=None):
         """ teardown any resources after a test run. """
@@ -113,8 +112,7 @@ class Session(object):
                 if self.shouldstop:
                     break
                 if not self.config.option.collectonly:
-                    item.config.pluginmanager.do_itemrun(item)
-            self.config._setupstate.teardown_all()
+                    item.config.hook.pytest_runtest_protocol(item=item)
         except KeyboardInterrupt:
             captured_excinfo = py.code.ExceptionInfo()
             exitstatus = outcome.EXIT_INTERRUPTED

@@ -185,7 +185,7 @@ class TestNewSession(SessionTests):

         itemstarted = reprec.getcalls("pytest_itemstart")
         assert len(itemstarted) == 3
-        assert not reprec.getreports("pytest_itemtestreport")
+        assert not reprec.getreports("pytest_runtest_logreport")
         started = reprec.getcalls("pytest_collectstart")
         finished = reprec.getreports("pytest_collectreport")
         assert len(started) == len(finished)

@@ -118,7 +118,7 @@ def test_func_generator_setup(testdir):
             yield check
            assert x == [1]
    """)
-    rep = reprec.matchreport("test_one", names="pytest_itemtestreport")
+    rep = reprec.matchreport("test_one", names="pytest_runtest_logreport")
    assert rep.passed

def test_method_setup_uses_fresh_instances(testdir):