make --durations also show the execution times of setup/teardown calls. This requires a slight incompatibility: pytest_runtest_logreport now also sees setup/teardown reports even when the test passed.
This commit is contained in:
parent 3b9fd3abd8
commit 29bf205f3a
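The change described in the commit message means that a ``pytest_runtest_logreport`` implementation now receives three reports per test (setup, call and teardown) instead of only call reports plus failing setup/teardown reports. A minimal conftest.py sketch of a hook under the new behaviour, using only attributes that also appear in the diff below (``report.when``, ``report.passed``, ``report.nodeid``, ``report.duration``); the printed format is purely illustrative::

    # conftest.py -- editor's sketch, not part of this commit
    def pytest_runtest_logreport(report):
        # report.when is "setup", "call" or "teardown"; since this change
        # the hook fires for all three phases even when they passed.
        if report.passed:
            print("%-8s %.2fs %s" % (report.when, report.duration, report.nodeid))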
@@ -1,8 +1,8 @@
-Changes between 2.1.3 and [next version]
+Changes between 2.1.3 and XXX 2.2.0
 ----------------------------------------
 
 - new feature to help optimizing your tests: --durations=N option for
-  displaying N slowest test calls
+  displaying N slowest test calls and setup/teardown methods.
 - fix and cleanup pytest's own test suite to not leak FDs
 - fix issue83: link to generated funcarg list
 - fix issue74: pyarg module names are now checked against imp.find_module false positives
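To see the changelog entry above in action, a throwaway test module along these lines (file name and sleep times are made up for the example) can be run with ``py.test --durations=3``; with this commit the slow ``setup_function`` phases are listed alongside the test calls::

    # test_slow_example.py -- hypothetical example module
    import time

    def setup_function(func):
        time.sleep(0.2)   # setup time is now reported as well

    def test_fast():
        time.sleep(0.1)

    def test_slow():
        time.sleep(0.5)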
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.0.dev1'
+__version__ = '2.2.0.dev2'
@@ -149,7 +149,8 @@ def pytest_runtest_makereport(item, call):
 pytest_runtest_makereport.firstresult = True
 
 def pytest_runtest_logreport(report):
-    """ process item test report. """
+    """ process a test setup/call/teardown report relating to
+    the respective phase of executing a test. """
 
 # special handling for final teardown - somewhat internal for now
 def pytest__teardown_final(session):
@@ -166,7 +166,8 @@ class LogXML(object):
 
     def pytest_runtest_logreport(self, report):
         if report.passed:
-            self.append_pass(report)
+            if report.when == "call": # ignore setup/teardown
+                self.append_pass(report)
         elif report.failed:
             if report.when != "call":
                 self.append_error(report)
@@ -334,8 +334,8 @@ class TmpTestdir:
         l = list(args) + [p]
         reprec = self.inline_run(*l)
         reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 1, reports
-        return reports[0]
+        assert len(reports) == 3, reports # setup/call/teardown
+        return reports[1]
 
     def inline_genitems(self, *args):
         return self.inprocess_run(list(args) + ['--collectonly'])
@@ -63,6 +63,8 @@ class ResultLog(object):
         self.write_log_entry(testpath, lettercode, longrepr)
 
     def pytest_runtest_logreport(self, report):
+        if report.when != "call" and report.passed:
+            return
         res = self.config.hook.pytest_report_teststatus(report=report)
         code = res[1]
         if code == 'x':
@@ -26,25 +26,36 @@ def pytest_terminal_summary(terminalreporter):
         return
     tr = terminalreporter
     duration2rep = {}
+    alldurations = 0.0
     for key, replist in tr.stats.items():
         if key == "deselected":
             continue
         for rep in replist:
             duration2rep[rep.duration] = rep
+            alldurations += rep.duration
     if not duration2rep:
         return
-    d2 = duration2rep.items()
+    d2 = remaining = duration2rep.items()
     d2.sort()
     d2.reverse()
    if not durations:
         tr.write_sep("=", "slowest test durations")
     else:
         tr.write_sep("=", "slowest %s test durations" % durations)
+        remaining = d2[durations:]
         d2 = d2[:durations]
 
+    assert (alldurations/100) > 0
     for duration, rep in d2:
         nodeid = rep.nodeid.replace("::()::", "::")
-        tr.write_line("%2.2f %s %s" % (duration, rep.when, nodeid))
+        percent = rep.duration / (alldurations / 100)
+        tr.write_line("%02.2fs %-02.2f%% %s %s" %
+            (duration, percent, rep.when, nodeid))
+    if remaining:
+        remsum = sum(map(lambda x: x[0], remaining))
+        tr.write_line("%02.2fs %-02.2f%% remaining in %d test phases" %(
+            remsum, remsum / (alldurations / 100), len(remaining)))
 
 
 def pytest_sessionstart(session):
     session._setupstate = SetupState()
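For reference, the percentage written per line in the hunk above is just the phase duration relative to the summed duration of all recorded phases; a tiny standalone check with invented values::

    alldurations = 2.0                            # e.g. 2.0s spent across all test phases
    duration = 0.5                                # one call took 0.5s
    percent = duration / (alldurations / 100)     # -> 25.0
    print("%02.2fs %-02.2f%%" % (duration, percent))   # prints "0.50s 25.00%"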
@@ -110,7 +121,7 @@ def call_and_report(item, when, log=True):
     call = call_runtest_hook(item, when)
     hook = item.ihook
     report = hook.pytest_runtest_makereport(item=item, call=call)
-    if log and (when == "call" or not report.passed):
+    if log:
         hook.pytest_runtest_logreport(report=report)
     return report
 
@@ -426,9 +426,10 @@ class TerminalReporter:
                 keys.append(key)
         parts = []
         for key in keys:
-            val = self.stats.get(key, None)
-            if val:
-                parts.append("%d %s" %(len(val), key))
+            if key: # setup/teardown reports have an empty key, ignore them
+                val = self.stats.get(key, None)
+                if val:
+                    parts.append("%d %s" %(len(val), key))
         line = ", ".join(parts)
         # XXX coloring
         msg = "%s in %.2f seconds" %(line, session_duration)
@@ -1,7 +1,7 @@
 py.test 2.2.0: new test duration profiling and bug fixes
 ===========================================================================
 
-pytest-2.2.0 is a backward compatible release of the popular
+pytest-2.2.0 is a quite backward compatible release of the popular
 py.test testing tool. It introduces the new "--duration=N" option
 showing the N slowest test execution or setup/teardown calls. The
 release also contains a few fixes and some cleanup of pytest's own test
@@ -16,6 +16,19 @@ If you want to install or upgrade pytest you might just type::
     pip install -U pytest # or
     easy_install -U pytest
 
+incompatible change
+------------------------------------
+
+* You need a new version of the pytest-xdist plugin (1.7) for distributing
+  test runs.
+
+* Also other plugins might need an upgrade if they implement
+  the ``pytest_runtest_logreport`` hook which now is called unconditionally
+  for the setup/teardown fixture phases of a test. You can just choose to
+  ignore them by inserting "if rep.when != 'call': return". Note that
+  most code probably "just" works because the hook was already called
+  for failing setup/teardown phases of a test.
+
 best,
 holger krekel
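For plugin authors, the guard suggested in the announcement text above comes down to something like the following sketch, which mirrors the guard this commit applies to the ResultLog hook; the body after the early return stands in for whatever the existing hook already does::

    def pytest_runtest_logreport(report):
        # ignore the passing setup/teardown reports that pytest-2.2 now
        # delivers unconditionally (failing ones were already seen before 2.2)
        if report.when != "call" and report.passed:
            return
        # ... existing per-test handling continues here ...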
setup.py (2 changed lines)
@@ -24,7 +24,7 @@ def main():
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.2.0.dev1',
+        version='2.2.0.dev2',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -484,12 +484,15 @@ def test_duration_test(testdir):
         "*call*test_2*",
         "*call*test_1*",
     ])
+    assert "remaining in" not in result.stdout.str()
 
     result = testdir.runpytest("--durations=2")
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*durations*",
         "*call*test_3*",
         "*call*test_2*",
+        "*s*%*remaining in 7 test phases",
     ])
     assert "test_1" not in result.stdout.str()
     result = testdir.runpytest("--durations=0")
@@ -503,6 +506,28 @@ def test_duration_test(testdir):
     else:
         raise AssertionError("not found %s %s" % (x,y))
 
+def test_duration_test_with_fixture(testdir):
+    testdir.makepyfile("""
+        import time
+        frag = 0.01
+        def setup_function(func):
+            time.sleep(frag * 3)
+
+        def test_1():
+            time.sleep(frag*2)
+        def test_2():
+            time.sleep(frag)
+    """)
+    result = testdir.runpytest("--durations=10")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*durations*",
+        "*setup*test_1*",
+        "*setup*test_2*",
+        "*call*test_1*",
+        "*call*test_2*",
+    ])
+
     result = testdir.runpytest("--durations=2", "-k test_1")
     assert result.ret == 0
     result.stdout.fnmatch_lines([
@@ -77,7 +77,7 @@ class TestFunctional:
             assert hasattr(test_hello, 'hello')
         """)
         result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines(["*passed*"])
+        result.stdout.fnmatch_lines(["*1 passed*"])
 
     def test_mark_per_module(self, testdir):
         item = testdir.getitem("""