split out pytest-xdist related reporting to the plugin

--HG--
branch : trunk
This commit is contained in:
holger krekel 2010-07-07 12:41:15 +02:00
parent 2664230fad
commit 320835d43f
7 changed files with 181 additions and 248 deletions

View File

@ -66,6 +66,9 @@ Bug fixes / Maintenance
- make initial conftest discovery ignore "--" prefixed arguments
- fix resultlog plugin when used in a multicpu/multihost xdist situation
  (thanks Jakub Gustak)
- perform distributed-testing-related reporting in the xdist plugin
  rather than keeping dist-related code in the generic py.test
  distribution
Changes between 1.3.0 and 1.3.1
==================================================

View File

@ -115,12 +115,25 @@ class CallInfo:
return "<CallInfo when=%r %s>" % (self.when, status)
class BaseReport(object):
def __init__(self):
self.headerlines = []
def __repr__(self):
l = ["%s=%s" %(key, value)
for key, value in self.__dict__.items()]
return "<%s %s>" %(self.__class__.__name__, " ".join(l),)
def _getcrashline(self):
try:
return self.longrepr.reprcrash
except AttributeError:
try:
return str(self.longrepr)[:50]
except AttributeError:
return ""
def toterminal(self, out):
for line in self.headerlines:
out.line(line)
longrepr = self.longrepr
if hasattr(longrepr, 'toterminal'):
longrepr.toterminal(out)
@ -129,6 +142,7 @@ class BaseReport(object):
class CollectErrorRepr(BaseReport):
def __init__(self, msg):
super(CollectErrorRepr, self).__init__()
self.longrepr = msg
def toterminal(self, out):
out.line(str(self.longrepr), red=True)
@ -137,6 +151,7 @@ class ItemTestReport(BaseReport):
failed = passed = skipped = False
def __init__(self, item, excinfo=None, when=None):
super(ItemTestReport, self).__init__()
self.item = item
self.when = when
if item and when != "setup":
@ -189,6 +204,7 @@ class CollectReport(BaseReport):
skipped = failed = passed = False
def __init__(self, collector, result, excinfo=None):
super(CollectReport, self).__init__()
self.collector = collector
if not excinfo:
self.passed = True
@ -213,6 +229,7 @@ class TeardownErrorReport(BaseReport):
failed = True
when = "teardown"
def __init__(self, excinfo):
super(TeardownErrorReport, self).__init__()
self.longrepr = excinfo.getrepr(funcargs=True)
class SetupState(object):

View File

@ -6,8 +6,6 @@ This is a good source for looking at the various reporting hooks.
import py
import sys
optionalhook = py.test.mark.optionalhook
def pytest_addoption(parser):
group = parser.getgroup("terminal reporting", "reporting", after="general")
group._addoption('-v', '--verbose', action="count",
@ -80,7 +78,6 @@ class TerminalReporter:
file = py.std.sys.stdout
self._tw = py.io.TerminalWriter(file)
self.currentfspath = None
self.gateway2info = {}
self.reportchars = getreportopt(config)
def hasopt(self, char):
@ -167,53 +164,6 @@ class TerminalReporter:
# which garbles our output if we use self.write_line
self.write_line(msg)
@optionalhook
def pytest_gwmanage_newgateway(self, gateway, platinfo):
#self.write_line("%s instantiated gateway from spec %r" %(gateway.id, gateway.spec._spec))
d = {}
d['version'] = repr_pythonversion(platinfo.version_info)
d['id'] = gateway.id
d['spec'] = gateway.spec._spec
d['platform'] = platinfo.platform
if self.config.option.verbose:
d['extra'] = "- " + platinfo.executable
else:
d['extra'] = ""
d['cwd'] = platinfo.cwd
infoline = ("[%(id)s] %(spec)s -- platform %(platform)s, "
"Python %(version)s "
"cwd: %(cwd)s"
"%(extra)s" % d)
self.write_line(infoline)
self.gateway2info[gateway] = infoline
@optionalhook
def pytest_testnodeready(self, node):
self.write_line("[%s] txnode ready to receive tests" %(node.gateway.id,))
@optionalhook
def pytest_testnodedown(self, node, error):
if error:
self.write_line("[%s] node down, error: %s" %(node.gateway.id, error))
@optionalhook
def pytest_rescheduleitems(self, items):
if self.config.option.debug:
self.write_sep("!", "RESCHEDULING %s " %(items,))
@optionalhook
def pytest_looponfailinfo(self, failreports, rootdirs):
if failreports:
self.write_sep("#", "LOOPONFAILING", red=True)
for report in failreports:
loc = self._getcrashline(report)
if loc:
self.write_line(loc, red=True)
self.write_sep("#", "waiting for changes")
for rootdir in rootdirs:
self.write_line("### Watching: %s" %(rootdir,), bold=True)
def pytest_trace(self, category, msg):
if self.config.option.debug or \
self.config.option.traceconfig and category.find("config") != -1:
@ -223,24 +173,13 @@ class TerminalReporter:
self.stats.setdefault('deselected', []).append(items)
def pytest_itemstart(self, item, node=None):
if getattr(self.config.option, 'dist', 'no') != "no":
# for dist-testing situations itemstart means we
# queued the item for sending, not interesting (unless debugging)
if self.config.option.debug:
line = self._reportinfoline(item)
extra = ""
if node:
extra = "-> [%s]" % node.gateway.id
self.write_ensure_prefix(line, extra)
if self.config.option.verbose:
line = self._reportinfoline(item)
self.write_ensure_prefix(line, "")
else:
if self.config.option.verbose:
line = self._reportinfoline(item)
self.write_ensure_prefix(line, "")
else:
# ensure that the path is printed before the
# 1st test of a module starts running
self.write_fspath_result(self._getfspath(item), "")
# ensure that the path is printed before the
# 1st test of a module starts running
self.write_fspath_result(self._getfspath(item), "")
def pytest__teardown_final_logerror(self, report):
self.stats.setdefault("error", []).append(report)
@ -321,15 +260,6 @@ class TerminalReporter:
else:
excrepr.reprcrash.toterminal(self._tw)
def _getcrashline(self, report):
try:
return report.longrepr.reprcrash
except AttributeError:
try:
return str(report.longrepr)[:50]
except AttributeError:
return ""
def _reportinfoline(self, item):
collect_fspath = self._getfspath(item)
fspath, lineno, msg = self._getreportinfo(item)
@ -387,12 +317,11 @@ class TerminalReporter:
self.write_sep("=", "FAILURES")
for rep in self.stats['failed']:
if tbstyle == "line":
line = self._getcrashline(rep)
line = rep._getcrashline()
self.write_line(line)
else:
msg = self._getfailureheadline(rep)
self.write_sep("_", msg)
self.write_platinfo(rep)
rep.toterminal(self._tw)
def summary_errors(self):
@ -408,16 +337,8 @@ class TerminalReporter:
elif rep.when == "teardown":
msg = "ERROR at teardown of " + msg
self.write_sep("_", msg)
self.write_platinfo(rep)
rep.toterminal(self._tw)
def write_platinfo(self, rep):
if hasattr(rep, 'node'):
self.write_line(self.gateway2info.get(
rep.node.gateway,
"node %r (platinfo not found? strange)")
[:self._tw.fullwidth-1])
def summary_stats(self):
session_duration = py.std.time.time() - self._sessionstarttime

View File

@ -69,6 +69,21 @@ class BaseFunctionalTests:
assert isinstance(rep.longrepr, ReprExceptionInfo)
assert str(rep.shortrepr) == "F"
def test_failfunction_customized_report(self, testdir, LineMatcher):
reports = testdir.runitem("""
def test_func():
assert 0
""")
rep = reports[1]
rep.headerlines += ["hello world"]
tr = py.io.TerminalWriter(stringio=True)
rep.toterminal(tr)
val = tr.stringio.getvalue()
LineMatcher(val.split("\n")).fnmatch_lines([
"*hello world",
"*def test_func():*"
])
def test_skipfunction(self, testdir):
reports = testdir.runitem("""
import py
@ -435,3 +450,4 @@ def test_pytest_cmdline_main(testdir):
s = popen.stdout.read()
ret = popen.wait()
assert ret == 0

View File

@ -18,36 +18,28 @@ def basic_run_report(item):
return runner.call_and_report(item, "call", log=False)
class Option:
def __init__(self, verbose=False, dist=None, fulltrace=False):
def __init__(self, verbose=False, fulltrace=False):
self.verbose = verbose
self.dist = dist
self.fulltrace = fulltrace
def _getcmdargs(self):
@property
def args(self):
l = []
if self.verbose:
l.append('-v')
if self.dist:
l.append('--dist=%s' % self.dist)
l.append('--tx=popen')
if self.fulltrace:
l.append('--fulltrace')
return l
def _getcmdstring(self):
return " ".join(self._getcmdargs())
def pytest_generate_tests(metafunc):
if "option" in metafunc.funcargnames:
metafunc.addcall(id="default", param=Option(verbose=False))
metafunc.addcall(id="verbose", param=Option(verbose=True))
metafunc.addcall(id="fulltrace", param=Option(fulltrace=True))
if not getattr(metafunc.function, 'nodist', False):
metafunc.addcall(id="verbose-dist",
param=Option(dist='each', verbose=True))
metafunc.addcall(id="default",
funcargs={'option': Option(verbose=False)})
metafunc.addcall(id="verbose",
funcargs={'option': Option(verbose=True)})
metafunc.addcall(id="fulltrace",
funcargs={'option': Option(fulltrace=True)})
def pytest_funcarg__option(request):
if request.param.dist:
request.config.pluginmanager.skipifmissing("xdist")
return request.param
class TestTerminal:
def test_pass_skip_fail(self, testdir, option):
@ -60,22 +52,13 @@ class TestTerminal:
def test_func():
assert 0
""")
result = testdir.runpytest(*option._getcmdargs())
result = testdir.runpytest(*option.args)
if option.verbose:
if not option.dist:
result.stdout.fnmatch_lines([
"*test_pass_skip_fail.py:2: *test_ok*PASS*",
"*test_pass_skip_fail.py:4: *test_skip*SKIP*",
"*test_pass_skip_fail.py:6: *test_func*FAIL*",
])
else:
expected = [
"*PASS*test_pass_skip_fail.py:2: *test_ok*",
"*SKIP*test_pass_skip_fail.py:4: *test_skip*",
"*FAIL*test_pass_skip_fail.py:6: *test_func*",
]
for line in expected:
result.stdout.fnmatch_lines([line])
result.stdout.fnmatch_lines([
"*test_pass_skip_fail.py:2: *test_ok*PASS*",
"*test_pass_skip_fail.py:4: *test_skip*SKIP*",
"*test_pass_skip_fail.py:6: *test_func*FAIL*",
])
else:
result.stdout.fnmatch_lines([
"*test_pass_skip_fail.py .sF"
@ -86,16 +69,6 @@ class TestTerminal:
"E assert 0",
])
def test_collect_fail(self, testdir, option):
p = testdir.makepyfile("import xyz\n")
result = testdir.runpytest(*option._getcmdargs())
result.stdout.fnmatch_lines([
"*test_collect_fail.py E*",
"> import xyz",
"E ImportError: No module named xyz",
"*1 error*",
])
def test_internalerror(self, testdir, linecomp):
modcol = testdir.getmodulecol("def test_one(): pass")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
@ -132,75 +105,6 @@ class TestTerminal:
id = tr.gettestid(method)
assert id.endswith("test_testid.py::TestClass::test_method")
def test_looponfailreport(self, testdir, linecomp):
modcol = testdir.getmodulecol("""
import py
def test_fail():
assert 0
def test_fail2():
raise ValueError()
@py.test.mark.xfail
def test_xfail():
assert 0
@py.test.mark.xfail
def test_xpass():
assert 1
""")
rep = TerminalReporter(modcol.config, file=linecomp.stringio)
reports = [basic_run_report(x) for x in modcol.collect()]
rep.pytest_looponfailinfo(reports, [modcol.config.topdir])
linecomp.assert_contains_lines([
"*test_looponfailreport.py:3: assert 0",
"*test_looponfailreport.py:5: ValueError*",
"*waiting*",
"*%s*" % (modcol.config.topdir),
])
def test_tb_option(self, testdir, option):
p = testdir.makepyfile("""
import py
def g():
raise IndexError
def test_func():
print (6*7)
g() # --calling--
""")
for tbopt in ["long", "short", "no"]:
print('testing --tb=%s...' % tbopt)
result = testdir.runpytest('--tb=%s' % tbopt)
s = result.stdout.str()
if tbopt == "long":
assert 'print (6*7)' in s
else:
assert 'print (6*7)' not in s
if tbopt != "no":
assert '--calling--' in s
assert 'IndexError' in s
else:
assert 'FAILURES' not in s
assert '--calling--' not in s
assert 'IndexError' not in s
def test_tb_crashline(self, testdir, option):
p = testdir.makepyfile("""
import py
def g():
raise IndexError
def test_func1():
print (6*7)
g() # --calling--
def test_func2():
assert 0, "hello"
""")
result = testdir.runpytest("--tb=line")
bn = p.basename
result.stdout.fnmatch_lines([
"*%s:3: IndexError*" % bn,
"*%s:8: AssertionError: hello*" % bn,
])
s = result.stdout.str()
assert "def test_func2" not in s
def test_show_path_before_running_test(self, testdir, linecomp):
item = testdir.getitem("def test_func(): pass")
tr = TerminalReporter(item.config, file=linecomp.stringio)
@ -263,22 +167,6 @@ class TestTerminal:
"*test_p2.py <- *test_p1.py:2: TestMore.test_p1*",
])
def test_keyboard_interrupt_dist(self, testdir, option):
# xxx could be refined to check for return code
p = testdir.makepyfile("""
def test_sleep():
import time
time.sleep(10)
""")
child = testdir.spawn_pytest(" ".join(option._getcmdargs()))
child.expect(".*test session starts.*")
child.kill(2) # keyboard interrupt
child.expect(".*KeyboardInterrupt.*")
#child.expect(".*seconds.*")
child.close()
#assert ret == 2
@py.test.mark.nodist
def test_keyboard_interrupt(self, testdir, option):
p = testdir.makepyfile("""
def test_foobar():
@ -289,7 +177,7 @@ class TestTerminal:
raise KeyboardInterrupt # simulating the user
""")
result = testdir.runpytest(*option._getcmdargs())
result = testdir.runpytest(*option.args)
result.stdout.fnmatch_lines([
" def test_foobar():",
"> assert 0",
@ -302,37 +190,6 @@ class TestTerminal:
])
result.stdout.fnmatch_lines(['*KeyboardInterrupt*'])
def test_maxfailures(self, testdir, option):
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 0
""")
result = testdir.runpytest("--maxfail=2", *option._getcmdargs())
result.stdout.fnmatch_lines([
"*def test_1():*",
"*def test_2():*",
"*!! Interrupted: stopping after 2 failures*!!*",
"*2 failed*",
])
def test_pytest_report_header(self, testdir):
testdir.makeconftest("""
def pytest_report_header(config):
return "hello: info"
""")
testdir.mkdir("a").join("conftest.py").write("""
def pytest_report_header(config):
return ["line1", "line2"]""")
result = testdir.runpytest("a")
result.stdout.fnmatch_lines([
"*hello: info*",
"line1",
"line2",
])
class TestCollectonly:
@ -691,12 +548,103 @@ def test_trace_reporting(testdir):
])
assert result.ret == 0
@py.test.mark.nodist
def test_show_funcarg(testdir, option):
args = option._getcmdargs() + ["--funcargs"]
args = option.args + ["--funcargs"]
result = testdir.runpytest(*args)
result.stdout.fnmatch_lines([
"*tmpdir*",
"*temporary directory*",
]
)
class TestGenericReporting:
""" this test class can be subclassed with a different option
provider to run e.g. distributed tests.
"""
def test_collect_fail(self, testdir, option):
p = testdir.makepyfile("import xyz\n")
result = testdir.runpytest(*option.args)
result.stdout.fnmatch_lines([
"*test_collect_fail.py E*",
"> import xyz",
"E ImportError: No module named xyz",
"*1 error*",
])
def test_maxfailures(self, testdir, option):
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 0
""")
result = testdir.runpytest("--maxfail=2", *option.args)
result.stdout.fnmatch_lines([
"*def test_1():*",
"*def test_2():*",
"*!! Interrupted: stopping after 2 failures*!!*",
"*2 failed*",
])
def test_tb_option(self, testdir, option):
p = testdir.makepyfile("""
import py
def g():
raise IndexError
def test_func():
print (6*7)
g() # --calling--
""")
for tbopt in ["long", "short", "no"]:
print('testing --tb=%s...' % tbopt)
result = testdir.runpytest('--tb=%s' % tbopt)
s = result.stdout.str()
if tbopt == "long":
assert 'print (6*7)' in s
else:
assert 'print (6*7)' not in s
if tbopt != "no":
assert '--calling--' in s
assert 'IndexError' in s
else:
assert 'FAILURES' not in s
assert '--calling--' not in s
assert 'IndexError' not in s
def test_tb_crashline(self, testdir, option):
p = testdir.makepyfile("""
import py
def g():
raise IndexError
def test_func1():
print (6*7)
g() # --calling--
def test_func2():
assert 0, "hello"
""")
result = testdir.runpytest("--tb=line")
bn = p.basename
result.stdout.fnmatch_lines([
"*%s:3: IndexError*" % bn,
"*%s:8: AssertionError: hello*" % bn,
])
s = result.stdout.str()
assert "def test_func2" not in s
def test_pytest_report_header(self, testdir, option):
testdir.makeconftest("""
def pytest_report_header(config):
return "hello: info"
""")
testdir.mkdir("a").join("conftest.py").write("""
def pytest_report_header(config):
return ["line1", "line2"]""")
result = testdir.runpytest("a")
result.stdout.fnmatch_lines([
"*hello: info*",
"line1",
"line2",
])

View File

@ -181,6 +181,24 @@ class TestPrunetraceback:
"*hello world*",
])
def test_collect_report_postprocessing(self, testdir):
p = testdir.makepyfile("""
import not_exists
""")
testdir.makeconftest("""
import py
def pytest_make_collect_report(__multicall__):
rep = __multicall__.execute()
rep.headerlines += ["header1"]
return rep
""")
result = testdir.runpytest(p)
result.stdout.fnmatch_lines([
"*ERROR collecting*",
"*header1*",
])
class TestCustomConftests:
def test_ignore_collect_path(self, testdir):
testdir.makeconftest("""

10
tox.ini
View File

@ -1,5 +1,6 @@
[tox]
distshare={homedir}/.tox/distshare
envlist=py26,py27,py31,py27-xdist,py25,py24
[tox:hudson]
distshare={toxworkdir}/distshare
@ -14,6 +15,15 @@ deps=
pexpect
[testenv:py27]
basepython=python2.7
[testenv:py27-xdist]
basepython=python2.7
deps=
{distshare}/py-**LATEST**
{distshare}/pytest-xdist-**LATEST**
commands=
py.test -n3 --confcutdir=.. -rfsxX \
--junitxml={envlogdir}/junit-{envname}.xml --tools-on-path []
[testenv:py26]
basepython=python2.6
[testenv:doc]