make --durations also show the execution times of setup/teardown calls. This requires a slight incompatibility: pytest_runtest_logreport now sees setup/teardown reports even if the tests passed.
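To illustrate the changed contract for hook implementers, a minimal sketch with a hypothetical conftest.py plugin (not part of this commit): for a single passing test, the hook below now fires three times, once per phase, instead of only for the "call" phase.

    # conftest.py -- hypothetical example, not shipped with pytest
    def pytest_runtest_logreport(report):
        # report.when is "setup", "call" or "teardown"; after this commit
        # the hook is invoked for all three phases even on passing tests
        print("%s %s passed=%s" % (report.when, report.nodeid, report.passed))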
This commit is contained in:
parent 3b9fd3abd8
commit 29bf205f3a
@@ -1,8 +1,8 @@
-Changes between 2.1.3 and [next version]
+Changes between 2.1.3 and XXX 2.2.0
 ----------------------------------------

 - new feature to help optimizing your tests: --durations=N option for
-  displaying N slowest test calls
+  displaying N slowest test calls and setup/teardown methods.
 - fix and cleanup pytest's own test suite to not leak FDs
 - fix issue83: link to generated funcarg list
 - fix issue74: pyarg module names are now checked against imp.find_module false positives
@@ -1,2 +1,2 @@
 #
-__version__ = '2.2.0.dev1'
+__version__ = '2.2.0.dev2'
@@ -149,7 +149,8 @@ def pytest_runtest_makereport(item, call):
 pytest_runtest_makereport.firstresult = True

 def pytest_runtest_logreport(report):
-    """ process item test report. """
+    """ process a test setup/call/teardown report relating to
+    the respective phase of executing a test. """

 # special handling for final teardown - somewhat internal for now
 def pytest__teardown_final(session):
@@ -166,6 +166,7 @@ class LogXML(object):

     def pytest_runtest_logreport(self, report):
         if report.passed:
-            self.append_pass(report)
+            if report.when == "call": # ignore setup/teardown
+                self.append_pass(report)
         elif report.failed:
             if report.when != "call":
@@ -334,8 +334,8 @@ class TmpTestdir:
         l = list(args) + [p]
         reprec = self.inline_run(*l)
         reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 1, reports
-        return reports[0]
+        assert len(reports) == 3, reports # setup/call/teardown
+        return reports[1]

     def inline_genitems(self, *args):
         return self.inprocess_run(list(args) + ['--collectonly'])
@@ -63,6 +63,8 @@ class ResultLog(object):
         self.write_log_entry(testpath, lettercode, longrepr)

     def pytest_runtest_logreport(self, report):
+        if report.when != "call" and report.passed:
+            return
         res = self.config.hook.pytest_report_teststatus(report=report)
         code = res[1]
         if code == 'x':
@@ -26,25 +26,36 @@ def pytest_terminal_summary(terminalreporter):
         return
     tr = terminalreporter
     duration2rep = {}
+    alldurations = 0.0
     for key, replist in tr.stats.items():
+        if key == "deselected":
+            continue
         for rep in replist:
             duration2rep[rep.duration] = rep
+            alldurations += rep.duration
     if not duration2rep:
         return
-    d2 = duration2rep.items()
+    d2 = remaining = duration2rep.items()
     d2.sort()
     d2.reverse()
     if not durations:
         tr.write_sep("=", "slowest test durations")
     else:
         tr.write_sep("=", "slowest %s test durations" % durations)
+        remaining = d2[durations:]
         d2 = d2[:durations]
+
+    assert (alldurations/100) > 0
     for duration, rep in d2:
         nodeid = rep.nodeid.replace("::()::", "::")
-        tr.write_line("%2.2f %s %s" % (duration, rep.when, nodeid))
+        percent = rep.duration / (alldurations / 100)
+        tr.write_line("%02.2fs %-02.2f%% %s %s" %
+            (duration, percent, rep.when, nodeid))
+    if remaining:
+        remsum = sum(map(lambda x: x[0], remaining))
+        tr.write_line("%02.2fs %-02.2f%% remaining in %d test phases" %(
+            remsum, remsum / (alldurations / 100), len(remaining)))


 def pytest_sessionstart(session):
     session._setupstate = SetupState()
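For reference, a standalone sketch of the percentage arithmetic used in the summary above; the input values are made up, only the format strings come from the diff.

    # each report's share of the total run time is duration / (total / 100)
    total = 2.0        # stands in for alldurations
    duration = 0.5     # one report's duration
    percent = duration / (total / 100.0)              # -> 25.0
    print("%02.2fs %-02.2f%%" % (duration, percent))  # prints "0.50s 25.00%"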
@@ -110,7 +121,7 @@ def call_and_report(item, when, log=True):
     call = call_runtest_hook(item, when)
     hook = item.ihook
     report = hook.pytest_runtest_makereport(item=item, call=call)
-    if log and (when == "call" or not report.passed):
+    if log:
         hook.pytest_runtest_logreport(report=report)
     return report
@@ -426,6 +426,7 @@ class TerminalReporter:
             keys.append(key)
         parts = []
         for key in keys:
-            val = self.stats.get(key, None)
-            if val:
-                parts.append("%d %s" %(len(val), key))
+            if key: # setup/teardown reports have an empty key, ignore them
+                val = self.stats.get(key, None)
+                if val:
+                    parts.append("%d %s" %(len(val), key))
@@ -1,7 +1,7 @@
 py.test 2.2.0: new test duration profiling and bug fixes
 ===========================================================================

-pytest-2.2.0 is a backward compatible release of the popular
+pytest-2.2.0 is a quite backward compatible release of the popular
 py.test testing tool. It introduces the new "--duration=N" option
 showing the N slowest test execution or setup/teardown calls. The
 release also contains a few fixes and some cleanup of pytest's own test
@@ -16,6 +16,19 @@ If you want to install or upgrade pytest you might just type::
     pip install -U pytest # or
     easy_install -U pytest

+incompatible change
+------------------------------------
+
+* You need a new version of the pytest-xdist plugin (1.7) for distributing
+  test runs.
+
+* Also other plugins might need an upgrade if they implement
+  the ``pytest_runtest_logreport`` hook which now is called unconditionally
+  for the setup/teardown fixture phases of a test. You can just choose to
+  ignore them by inserting "if rep.when != 'call': return". Note that
+  most code probably "just" works because the hook was already called
+  for failing setup/teardown phases of a test.
+
 best,
 holger krekel
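To make that advice concrete, a sketch of an upgraded plugin hook, mirroring the guard this commit adds to pytest's own resultlog plugin (the remaining hook body is a hypothetical placeholder):

    def pytest_runtest_logreport(rep):
        # skip the newly delivered passing setup/teardown reports; failing
        # setup/teardown reports were already delivered before 2.2
        if rep.when != "call" and rep.passed:
            return
        # ... existing plugin logic, unchanged ...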
setup.py (2 changed lines)
@@ -24,7 +24,7 @@ def main():
         name='pytest',
         description='py.test: simple powerful testing with Python',
         long_description = long_description,
-        version='2.2.0.dev1',
+        version='2.2.0.dev2',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -484,12 +484,15 @@ def test_duration_test(testdir):
         "*call*test_2*",
         "*call*test_1*",
     ])
+    assert "remaining in" not in result.stdout.str()
+
     result = testdir.runpytest("--durations=2")
     assert result.ret == 0
     result.stdout.fnmatch_lines([
         "*durations*",
         "*call*test_3*",
         "*call*test_2*",
+        "*s*%*remaining in 7 test phases",
     ])
     assert "test_1" not in result.stdout.str()
     result = testdir.runpytest("--durations=0")
@@ -503,6 +506,28 @@ def test_duration_test(testdir):
         else:
             raise AssertionError("not found %s %s" % (x,y))

+def test_duration_test_with_fixture(testdir):
+    testdir.makepyfile("""
+        import time
+        frag = 0.01
+        def setup_function(func):
+            time.sleep(frag * 3)
+
+        def test_1():
+            time.sleep(frag*2)
+        def test_2():
+            time.sleep(frag)
+    """)
+    result = testdir.runpytest("--durations=10")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
+        "*durations*",
+        "*setup*test_1*",
+        "*setup*test_2*",
+        "*call*test_1*",
+        "*call*test_2*",
+    ])
+
+    result = testdir.runpytest("--durations=2", "-k test_1")
+    assert result.ret == 0
+    result.stdout.fnmatch_lines([
@@ -77,7 +77,7 @@ class TestFunctional:
             assert hasattr(test_hello, 'hello')
         """)
         result = testdir.runpytest(p)
-        result.stdout.fnmatch_lines(["*passed*"])
+        result.stdout.fnmatch_lines(["*1 passed*"])

     def test_mark_per_module(self, testdir):
         item = testdir.getitem("""