adding a logxml plugin and a --xml=path option that generates a junit-xml style result log. The XML result log can be parsed nicely by Hudson.

Initial code was based on Ross Lawley's pytest_xmlresult plugin.
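
A minimal usage sketch (illustrative only: the module name test_sample.py, the
durations and the classname values are invented; the attribute layout follows
the plugin code below):

  py.test --xml=junit.xml test_sample.py

  <testsuite name="" errors="0" failures="1" skips="1" tests="3" time="0.123">
  <testcase classname="test_sample" name="test_fail" time="0.002"><failure message="test failure">(escaped traceback text)</failure></testcase>
  <testcase classname="test_sample" name="test_pass" time="0.001"></testcase>
  <testcase classname="test_sample" name="test_skip" time="0.001"><skipped/></testcase>
  </testsuite>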

--HG--
branch : trunk
holger krekel 2009-12-31 11:25:07 +01:00
parent fa0c7b18bf
commit 587951966f
7 changed files with 288 additions and 191 deletions


@@ -11,9 +11,10 @@ merlinux GmbH, Germany, office at merlinux eu
 Contributors include::
+Ross Lawley
+Ralf Schmitt
 Chris Lamb
 Harald Armin Massa
-Ralf Schmitt
 Martijn Faassen
 Ian Bicking
 Jan Balster


@@ -1,6 +1,9 @@
 Changes between 1.X and 1.1.1
 =====================================
+- new junitxml plugin: --xml=path will generate a junit style xml file
+  which is parseable e.g. by the hudson continuous integration server.
 - new option: --genscript=path will generate a standalone py.test script
   which will not need any libraries installed. thanks to Ralf Schmitt.


@@ -78,6 +78,19 @@ but a remote one fail because the tests directory
 does not contain an "__init__.py". Either give
 an error or make it work without the __init__.py
+
+introduce a "RootCollector"
+----------------------------------------------------------------
+tags: feature 1.2
+
+Currently the top collector is a Directory node and
+there also is the notion of a "topdir". See to refine
+internal handling such that there is a RootCollector
+which holds this topdir (or do away with topdirs?).
+Make sure this leads to an improvement in how
+tests are shown in Hudson, which currently sometimes
+shows "workspace" as the leading name and sometimes not.
+
 deprecate ensuretemp / introduce funcargs to setup method
 --------------------------------------------------------------
 tags: experimental-wish 1.2


@@ -8,7 +8,8 @@ from py.impl.test.outcome import Skipped
 default_plugins = (
     "default runner capture terminal mark skipping tmpdir monkeypatch "
-    "recwarn pdb pastebin unittest helpconfig nose assertion genscript").split()
+    "recwarn pdb pastebin unittest helpconfig nose assertion genscript "
+    "logxml").split()
 def check_old_use(mod, modname):
     clsname = modname[len('pytest_'):].capitalize() + "Plugin"

py/plugin/pytest_logxml.py (new file)

@@ -0,0 +1,147 @@
"""
logxml plugin for machine-readable logging of test results.
Based on initial code from Ross Lawley.
"""
import py
import time
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption('--xml', action="store", dest="xmlpath",
metavar="path", default=None,
help="create junit-xml style report file at the given path.")
def pytest_configure(config):
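    # the xml reporter is only created and registered if --xml=path was given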
xmlpath = config.option.xmlpath
if xmlpath:
config._xml = LogXML(xmlpath)
config.pluginmanager.register(config._xml)
def pytest_unconfigure(config):
xml = getattr(config, '_xml', None)
if xml:
del config._xml
config.pluginmanager.unregister(xml)
class LogXML(object):
def __init__(self, logfile):
self.logfile = logfile
self.test_logs = []
self.passed = self.skipped = 0
self.failed = self.errors = 0
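        # wall-clock duration per test item, filled in by pytest_runtest_call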
self._durations = {}
def _opentestcase(self, report):
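        # build the junit "classname" and "name" attributes from the collection
        # node's chain of names, with ".py" stripped from filename parts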
node = report.item
d = {'time': self._durations.pop(report.item, "0")}
names = [x.replace(".py", "") for x in node.listnames()]
d['classname'] = ".".join(names[:-1])
d['name'] = names[-1]
attrs = ['%s="%s"' % item for item in sorted(d.items())]
self.test_logs.append("\n<testcase %s>" % " ".join(attrs))
def _closetestcase(self):
self.test_logs.append("</testcase>")
def append_pass(self, report):
self.passed += 1
self._opentestcase(report)
self._closetestcase()
def append_failure(self, report):
self._opentestcase(report)
s = py.xml.escape(str(report.longrepr))
#msg = str(report.longrepr.reprtraceback.extraline)
self.test_logs.append(
'<failure message="test failure">%s</failure>' % (s))
self._closetestcase()
self.failed += 1
def _opentestcase_collectfailure(self, report):
node = report.collector
d = {'time': '???'}
names = [x.replace(".py", "") for x in node.listnames()]
d['classname'] = ".".join(names[:-1])
d['name'] = names[-1]
attrs = ['%s="%s"' % item for item in sorted(d.items())]
self.test_logs.append("\n<testcase %s>" % " ".join(attrs))
def append_collect_failure(self, report):
self._opentestcase_collectfailure(report)
s = py.xml.escape(str(report.longrepr))
#msg = str(report.longrepr.reprtraceback.extraline)
self.test_logs.append(
'<failure message="collection failure">%s</failure>' % (s))
self._closetestcase()
self.errors += 1
def append_error(self, report):
self._opentestcase(report)
s = py.xml.escape(str(report.longrepr))
self.test_logs.append(
'<error message="test setup failure">%s</error>' % s)
self._closetestcase()
self.errors += 1
def append_skipped(self, report):
self._opentestcase(report)
self.test_logs.append("<skipped/>")
self._closetestcase()
self.skipped += 1
def pytest_runtest_logreport(self, report):
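        # failures during setup/teardown are recorded as <error>,
        # failures of the test call itself as <failure>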
if report.passed:
self.append_pass(report)
elif report.failed:
if report.when != "call":
self.append_error(report)
else:
self.append_failure(report)
elif report.skipped:
self.append_skipped(report)
def pytest_runtest_call(self, item, __multicall__):
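        # wrap the actual test call to record its wall-clock duration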
start = time.time()
try:
return __multicall__.execute()
finally:
self._durations[item] = time.time() - start
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
self.append_collect_failure(report)
else:
self.append_collect_skipped(report)
def pytest_internalerror(self, excrepr):
self.errors += 1
data = py.xml.escape(str(excrepr))
self.test_logs.append(
'\n<testcase classname="pytest" name="internal">'
' <error message="internal error">'
'%s</error></testcase>' % data)
def pytest_sessionstart(self, session):
self.suite_start_time = time.time()
def pytest_sessionfinish(self, session, exitstatus, __multicall__):
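        # write one <testsuite> element wrapping all accumulated <testcase> entries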
logfile = open(self.logfile, 'w', 1) # line buffered
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
numtests = self.passed + self.skipped + self.failed
logfile.write('<testsuite ')
logfile.write('name="" ')
logfile.write('errors="%i" ' % self.errors)
logfile.write('failures="%i" ' % self.failed)
logfile.write('skips="%i" ' % self.skipped)
logfile.write('tests="%i" ' % numtests)
logfile.write('time="%.3f"' % suite_time_delta)
logfile.write(' >')
logfile.writelines(self.test_logs)
logfile.write('</testsuite>')
logfile.close()
tw = session.config.pluginmanager.getplugin("terminalreporter")._tw
tw.line()
tw.sep("-", "generated xml file: %s" %(self.logfile))


@@ -1,189 +0,0 @@
"""
xmlresult plugin for machine-readable logging of test results.
Useful for cruisecontrol integration code.
An adaptation of pytest_resultlog.py
"""
import time
def pytest_addoption(parser):
group = parser.getgroup("xmlresult", "xmlresult plugin options")
group.addoption('--xmlresult', action="store", dest="xmlresult", metavar="path", default=None,
help="path for machine-readable xml result log.")
def pytest_configure(config):
xmlresult = config.option.xmlresult
if xmlresult:
logfile = open(xmlresult, 'w', 1) # line buffered
config._xmlresult = XMLResult(logfile)
config.pluginmanager.register(config._xmlresult)
def pytest_unconfigure(config):
xmlresult = getattr(config, '_xmlresult', None)
if xmlresult:
xmlresult.logfile.close()
del config._xmlresult
config.pluginmanager.unregister(xmlresult)
def generic_path(item):
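    # build a path string such as 'test/a:B().c[1]' from the item's collection
    # chain (see test_generic_path below)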
chain = item.listchain()
gpath = [chain[0].name]
fspath = chain[0].fspath
fspart = False
for node in chain[1:]:
newfspath = node.fspath
if newfspath == fspath:
if fspart:
gpath.append(':')
fspart = False
else:
gpath.append('.')
else:
gpath.append('/')
fspart = True
name = node.name
if name[0] in '([':
gpath.pop()
gpath.append(name)
fspath = newfspath
return ''.join(gpath)
class XMLResult(object):
test_start_time = 0.0
test_taken_time = 0.0
test_count = 0
error_count = 0
failure_count = 0
skip_count = 0
def __init__(self, logfile):
self.logfile = logfile
self.test_logs = []
def write_log_entry(self, testpath, shortrepr, longrepr):
self.test_count += 1
# Create an xml log entry for the tests
self.test_logs.append('<testcase test_method="%s" name="%s" time="%.3f">' % (testpath.split(':')[-1], testpath, self.test_taken_time))
# Do we have any other data to capture for Errors, Fails and Skips
if shortrepr in ['E', 'F', 'S']:
if shortrepr == 'E':
self.error_count += 1
elif shortrepr == 'F':
self.failure_count += 1
elif shortrepr == 'S':
self.skip_count += 1
tag_map = {'E': 'error', 'F': 'failure', 'S': 'skipped'}
self.test_logs.append("<%s>" % tag_map[shortrepr])
# Output any more information
for line in longrepr.splitlines():
self.test_logs.append("<![CDATA[%s\n]]>" % line)
self.test_logs.append("</%s>" % tag_map[shortrepr])
self.test_logs.append("</testcase>")
def log_outcome(self, node, shortrepr, longrepr):
self.write_log_entry(node.name, shortrepr, longrepr)
def pytest_runtest_logreport(self, report):
code = report.shortrepr
if report.passed:
longrepr = ""
code = "."
elif report.failed:
longrepr = str(report.longrepr)
code = "F"
elif report.skipped:
code = "S"
longrepr = str(report.longrepr.reprcrash.message)
self.log_outcome(report.item, code, longrepr)
def pytest_runtest_setup(self, item):
self.test_start_time = time.time()
def pytest_runtest_teardown(self, item):
self.test_taken_time = time.time() - self.test_start_time
def pytest_collectreport(self, report):
if not report.passed:
if report.failed:
code = "F"
else:
assert report.skipped
code = "S"
longrepr = str(report.longrepr.reprcrash)
self.log_outcome(report.collector, code, longrepr)
def pytest_internalerror(self, excrepr):
path = excrepr.reprcrash.path
self.errors += 1
self.write_log_entry(path, '!', str(excrepr))
def pytest_sessionstart(self, session):
self.suite_start_time = time.time()
def pytest_sessionfinish(self, session, exitstatus):
"""
Write the xml output
"""
suite_stop_time = time.time()
suite_time_delta = suite_stop_time - self.suite_start_time
self.logfile.write('<testsuite ')
self.logfile.write('errors="%i" ' % self.error_count)
self.logfile.write('failures="%i" ' % self.failure_count)
self.logfile.write('skips="%i" ' % self.skip_count)
self.logfile.write('name="" ')
self.logfile.write('tests="%i" ' % self.test_count)
self.logfile.write('time="%.3f"' % suite_time_delta)
self.logfile.write(' >')
self.logfile.writelines(self.test_logs)
self.logfile.write('</testsuite>')
self.logfile.close()
# Tests
def test_generic(testdir, LineMatcher):
testdir.plugins.append("resultlog")
testdir.makepyfile("""
import py
def test_pass():
pass
def test_fail():
assert 0
def test_skip():
py.test.skip("")
""")
testdir.runpytest("--xmlresult=result.xml")
lines = testdir.tmpdir.join("result.xml").readlines(cr=0)
LineMatcher(lines).fnmatch_lines([
'*testsuite errors="0" failures="1" skips="1" name="" tests="3"*'
])
LineMatcher(lines).fnmatch_lines([
'*<failure><![CDATA[def test_fail():*'
])
LineMatcher(lines).fnmatch_lines([
'*<skipped><![CDATA[Skipped: <Skipped instance>*'
])
def test_generic_path():
from py.__.test.collect import Node, Item, FSCollector
p1 = Node('a')
assert p1.fspath is None
p2 = Node('B', parent=p1)
p3 = Node('()', parent = p2)
item = Item('c', parent = p3)
res = generic_path(item)
assert res == 'a.B().c'
p0 = FSCollector('proj/test')
p1 = FSCollector('proj/test/a', parent=p0)
p2 = Node('B', parent=p1)
p3 = Node('()', parent = p2)
p4 = Node('c', parent=p3)
item = Item('[1]', parent = p4)
res = generic_path(item)
assert res == 'test/a:B().c[1]'


@@ -0,0 +1,121 @@
from xml.dom import minidom
def runandparse(testdir, *args):
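    # run py.test with --xml pointing into the tmpdir and parse the result with minidom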
resultpath = testdir.tmpdir.join("junit.xml")
result = testdir.runpytest("--xml=%s" % resultpath, *args)
xmldoc = minidom.parse(str(resultpath))
return result, xmldoc
def assert_attr(node, **kwargs):
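    # check that the DOM node carries each given attribute with the expected value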
for name, expected in kwargs.items():
anode = node.getAttributeNode(name)
assert anode, "node %r has no attribute %r" %(node, name)
val = anode.value
assert val == str(expected)
class TestPython:
def test_summing_simple(self, testdir):
testdir.makepyfile("""
import py
def test_pass():
pass
def test_fail():
assert 0
def test_skip():
py.test.skip("")
""")
result, dom = runandparse(testdir)
assert result.ret
node = dom.getElementsByTagName("testsuite")[0]
assert_attr(node, errors=0, failures=1, skips=1, tests=3)
def test_setup_error(self, testdir):
testdir.makepyfile("""
def pytest_funcarg__arg(request):
raise ValueError()
def test_function(arg):
pass
""")
result, dom = runandparse(testdir)
assert result.ret
node = dom.getElementsByTagName("testsuite")[0]
assert_attr(node, errors=1, tests=0)
tnode = node.getElementsByTagName("testcase")[0]
assert_attr(tnode,
classname="test_setup_error.test_setup_error",
name="test_function")
fnode = tnode.getElementsByTagName("error")[0]
assert_attr(fnode, message="test setup failure")
assert "ValueError" in fnode.toxml()
def test_internal_error(self, testdir):
testdir.makeconftest("def pytest_runtest_protocol(): 0 / 0")
testdir.makepyfile("def test_function(): pass")
result, dom = runandparse(testdir)
assert result.ret
node = dom.getElementsByTagName("testsuite")[0]
assert_attr(node, errors=1, tests=0)
tnode = node.getElementsByTagName("testcase")[0]
assert_attr(tnode, classname="pytest", name="internal")
fnode = tnode.getElementsByTagName("error")[0]
assert_attr(fnode, message="internal error")
assert "Division" in fnode.toxml()
def test_failure_function(self, testdir):
testdir.makepyfile("def test_fail(): raise ValueError(42)")
result, dom = runandparse(testdir)
assert result.ret
node = dom.getElementsByTagName("testsuite")[0]
assert_attr(node, failures=1, tests=1)
tnode = node.getElementsByTagName("testcase")[0]
assert_attr(tnode,
classname="test_failure_function.test_failure_function",
name="test_fail")
fnode = tnode.getElementsByTagName("failure")[0]
assert_attr(fnode, message="test failure")
assert "ValueError" in fnode.toxml()
def test_collect_error(self, testdir):
testdir.makepyfile("syntax error")
result, dom = runandparse(testdir)
assert result.ret
node = dom.getElementsByTagName("testsuite")[0]
assert_attr(node, errors=1, tests=0)
tnode = node.getElementsByTagName("testcase")[0]
assert_attr(tnode,
#classname="test_collect_error",
name="test_collect_error")
fnode = tnode.getElementsByTagName("failure")[0]
assert_attr(fnode, message="collection failure")
assert "invalid syntax" in fnode.toxml()
class TestNonPython:
def test_summing_simple(self, testdir):
testdir.makeconftest("""
import py
def pytest_collect_file(path, parent):
if path.ext == ".xyz":
return MyItem(path, parent)
class MyItem(py.test.collect.Item):
def __init__(self, path, parent):
super(MyItem, self).__init__(path.basename, parent)
self.fspath = path
def runtest(self):
raise ValueError(42)
def repr_failure(self, excinfo):
return "custom item runtest failed"
""")
testdir.tmpdir.join("myfile.xyz").write("hello")
result, dom = runandparse(testdir)
assert result.ret
node = dom.getElementsByTagName("testsuite")[0]
assert_attr(node, errors=0, failures=1, skips=0, tests=1)
tnode = node.getElementsByTagName("testcase")[0]
assert_attr(tnode,
#classname="test_collect_error",
name="myfile.xyz")
fnode = tnode.getElementsByTagName("failure")[0]
assert_attr(fnode, message="test failure")
assert "custom item runtest failed" in fnode.toxml()