introduce --durations=N showing slowest test executions
commit 3b9fd3abd8
parent 974e4e3a9d
@@ -1,6 +1,8 @@
Changes between 2.1.3 and [next version]
----------------------------------------

- new feature to help optimize your tests: --durations=N option for
  displaying N slowest test calls
- fix and cleanup pytest's own test suite to not leak FDs
- fix issue83: link to generated funcarg list
- fix issue74: pyarg module names are now checked against imp.find_module false positives
@@ -1,2 +1,2 @@
#
-__version__ = '2.1.4.dev3'
+__version__ = '2.2.0.dev1'
@@ -549,10 +549,17 @@ class ReportRecorder(object):
    def getreports(self, names="pytest_runtest_logreport pytest_collectreport"):
        return [x.report for x in self.getcalls(names)]

-   def matchreport(self, inamepart="", names="pytest_runtest_logreport pytest_collectreport", when=None):
+   def matchreport(self, inamepart="",
+       names="pytest_runtest_logreport pytest_collectreport", when=None):
        """ return a testreport whose dotted import path matches """
        l = []
        for rep in self.getreports(names=names):
            try:
                if not when and rep.when != "call" and rep.passed:
                    # setup/teardown passing reports - let's ignore those
                    continue
            except AttributeError:
                pass
            if when and getattr(rep, 'when', None) != when:
                continue
            if not inamepart or inamepart in rep.nodeid.split("::"):
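The widened ``matchreport`` signature is what pytest's own test suite uses to pull a single report out of the recorded hook calls. A rough usage sketch (the ``testdir``/``reprec`` names and the tiny in-line test file are illustrative assumptions, not part of this diff)::

    # illustrative only: run a small test file in-process and fetch
    # the "call"-phase report of one test by name.
    reprec = testdir.inline_runsource("""
        def test_hello():
            assert 1
    """)
    rep = reprec.matchreport("test_hello", when="call")
    assert rep.passed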
@@ -14,6 +14,38 @@ def pytest_namespace():
#
# pytest plugin hooks

def pytest_addoption(parser):
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    group.addoption('--durations',
         action="store", type="int", dest="durations", default=None, metavar="N",
         help="show N slowest setup/test durations (N=0 for all)."),

def pytest_terminal_summary(terminalreporter):
    durations = terminalreporter.config.option.durations
    if durations is None:
        return
    tr = terminalreporter
    duration2rep = {}
    for key, replist in tr.stats.items():
        if key == "deselected":
            continue
        for rep in replist:
            duration2rep[rep.duration] = rep
    if not duration2rep:
        return
    d2 = duration2rep.items()
    d2.sort()
    d2.reverse()
    if not durations:
        tr.write_sep("=", "slowest test durations")
    else:
        tr.write_sep("=", "slowest %s test durations" % durations)
        d2 = d2[:durations]

    for duration, rep in d2:
        nodeid = rep.nodeid.replace("::()::", "::")
        tr.write_line("%2.2f %s %s" % (duration, rep.when, nodeid))

def pytest_sessionstart(session):
    session._setupstate = SetupState()
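The summary hook above collects a duration per report, sorts descending and prints the top ``N`` (``N=0`` meaning all). A minimal standalone sketch of the same idea, independent of pytest (all names below are made up for illustration)::

    # sketch only -- not pytest's code: order (duration, phase, nodeid)
    # tuples by duration, largest first, and print the slowest n in the
    # same "%2.2f %s %s" format used by pytest_terminal_summary above.
    def show_slowest(records, n):
        ordered = sorted(records, reverse=True)
        if n:                      # n == 0 means "show everything"
            ordered = ordered[:n]
        for duration, phase, nodeid in ordered:
            print("%2.2f %s %s" % (duration, phase, nodeid))

    records = [
        (0.01, "call", "test_x.py::test_fast"),
        (0.30, "call", "test_x.py::test_slow"),
        (0.05, "setup", "test_x.py::test_slow"),
    ]
    show_slowest(records, 2)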
@@ -185,13 +217,13 @@ class TestReport(BaseReport):
        #: a name -> value dictionary containing all keywords and
        #: markers associated with a test invocation.
        self.keywords = keywords

        #: test outcome, always one of "passed", "failed", "skipped".
        self.outcome = outcome

        #: None or a failure representation.
        self.longrepr = longrepr

        #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
        self.when = when

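Plugin hooks receive these reports directly; a minimal ``conftest.py`` sketch that reads the documented attributes might look like this (``duration`` is an assumption here, inferred from the summary code above that reads ``rep.duration``)::

    # conftest.py -- illustrative sketch, not part of this commit:
    # log outcome, phase and (assumed) duration of every finished test call.
    def pytest_runtest_logreport(report):
        if report.when == "call":
            print("%s -> %s (%.2fs)" % (
                report.nodeid, report.outcome,
                getattr(report, "duration", 0.0)))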
@@ -0,0 +1,21 @@
py.test 2.2.0: new test duration profiling and bug fixes
===========================================================================

pytest-2.2.0 is a backward compatible release of the popular
py.test testing tool. It introduces the new "--durations=N" option
showing the N slowest test executions or setup/teardown calls. The
release also contains a few fixes and some cleanup of pytest's own test
suite, allowing it to run on a wider range of environments.

For general information, see extensive docs with examples here:

    http://pytest.org/

If you want to install or upgrade pytest, you might just type::

    pip install -U pytest # or
    easy_install -U pytest

best,
holger krekel
@@ -299,3 +299,30 @@ and nothing when run plainly::
    collecting ... collected 0 items

    ============================= in 0.00 seconds =============================

profiling test duration
--------------------------

.. regendoc:wipe

.. versionadded:: 2.2

If you have a large, slow-running test suite you might want to find
out which tests are slowest. Let's make an artificial test suite::

    # content of test_some_are_slow.py

    import time

    def test_funcfast():
        pass

    def test_funcslow1():
        time.sleep(0.1)

    def test_funcslow2():
        time.sleep(0.2)

Now we can profile which test functions execute slowest::

    $ py.test --durations=3
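Based on the ``write_line`` format introduced above, the output for this example would look roughly as follows (the regendoc run produces the real report; timings below are illustrative and will vary)::

    ========================= slowest 3 test durations =========================
    0.20 call test_some_are_slow.py::test_funcslow2
    0.10 call test_some_are_slow.py::test_funcslow1
    0.00 call test_some_are_slow.py::test_funcfast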
@@ -25,6 +25,7 @@ Welcome to pytest!

- **supports functional testing and complex test setups**

  - (new in 2.2) :ref:`durations`
  - advanced :ref:`skip and xfail`
  - generic :ref:`marking and test selection <mark>`
  - can :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
@@ -98,6 +98,18 @@ can use a helper::
In previous versions you could only enter PDB tracing if
you disabled capturing on the command line via ``py.test -s``.

.. _durations:

Profiling test execution duration
-------------------------------------

.. versionadded:: 2.2

To get a list of the slowest 10 test durations::

    py.test --durations=10
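Passing ``0`` lists every recorded duration instead of only the slowest ones, matching the "N=0 for all" wording of the option help::

    py.test --durations=0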


Creating JUnitXML format files
----------------------------------------------------
setup.py

@@ -24,7 +24,7 @@ def main():
        name='pytest',
        description='py.test: simple powerful testing with Python',
        long_description = long_description,
-       version='2.1.4.dev3',
+       version='2.2.0.dev1',
        url='http://pytest.org',
        license='MIT license',
        platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -465,3 +465,48 @@ class TestInvocationVariants:
            "*1 failed*",
        ])

def test_duration_test(testdir):
    testdir.makepyfile("""
        import time
        frag = 0.01
        def test_2():
            time.sleep(frag*2)
        def test_1():
            time.sleep(frag)
        def test_3():
            time.sleep(frag*3)
    """)
    result = testdir.runpytest("--durations=10")
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*durations*",
        "*call*test_3*",
        "*call*test_2*",
        "*call*test_1*",
    ])
    result = testdir.runpytest("--durations=2")
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*durations*",
        "*call*test_3*",
        "*call*test_2*",
    ])
    assert "test_1" not in result.stdout.str()
    result = testdir.runpytest("--durations=0")
    assert result.ret == 0
    for x in "123":
        for y in 'call',: #'setup', 'call', 'teardown':
            l = []
            for line in result.stdout.lines:
                if ("test_%s" % x) in line and y in line:
                    break
            else:
                raise AssertionError("not found %s %s" % (x,y))

    result = testdir.runpytest("--durations=2", "-k test_1")
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*durations*",
        "*call*test_1*",
    ])