Merge pull request #2858 from nicoddemus/console-progress-2657
Console progress output
commit b533c2600a
@@ -1077,6 +1077,23 @@ class LineMatcher:
         return lines2
 
     def fnmatch_lines_random(self, lines2):
+        """Check lines exist in the output using ``fnmatch.fnmatch``, in any order.
+
+        The argument is a list of lines which have to occur in the
+        output, in any order.
+        """
+        self._match_lines_random(lines2, fnmatch)
+
+    def re_match_lines_random(self, lines2):
+        """Check lines exist in the output using ``re.match``, in any order.
+
+        The argument is a list of lines which have to occur in the
+        output, in any order.
+
+        """
+        self._match_lines_random(lines2, lambda name, pat: re.match(pat, name))
+
+    def _match_lines_random(self, lines2, match_func):
         """Check lines exist in the output.
 
         The argument is a list of lines which have to occur in the
@@ -1086,7 +1103,7 @@ class LineMatcher:
         lines2 = self._getlines(lines2)
         for line in lines2:
             for x in self.lines:
-                if line == x or fnmatch(x, line):
+                if line == x or match_func(x, line):
                     self._log("matched: ", repr(line))
                     break
             else:
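Why the ``lambda name, pat: re.match(pat, name)`` adapter above: ``fnmatch.fnmatch``
takes ``(name, pattern)`` while ``re.match`` takes ``(pattern, string)``, so the
lambda swaps the arguments so that both matchers share the ``(line, pattern)``
calling convention expected by ``match_func``. A standalone sketch (illustration
only, not part of the diff):

    import re
    from fnmatch import fnmatch

    # both callables now answer "does this captured line match this pattern?"
    adapter = lambda name, pat: re.match(pat, name)
    assert fnmatch('test_foo.py', 'test_*.py')
    assert adapter('test_foo.py', r'test_\w+\.py')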
@@ -1111,13 +1128,37 @@ class LineMatcher:
         return '\n'.join(self._log_output)
 
     def fnmatch_lines(self, lines2):
-        """Search the text for matching lines.
+        """Search captured text for matching lines using ``fnmatch.fnmatch``.
 
         The argument is a list of lines which have to match and can
-        use glob wildcards. If they do not match an pytest.fail() is
+        use glob wildcards. If they do not match a pytest.fail() is
         called. The matches and non-matches are also printed on
         stdout.
 
         """
+        self._match_lines(lines2, fnmatch, 'fnmatch')
+
+    def re_match_lines(self, lines2):
+        """Search captured text for matching lines using ``re.match``.
+
+        The argument is a list of lines which have to match using ``re.match``.
+        If they do not match a pytest.fail() is called.
+
+        The matches and non-matches are also printed on
+        stdout.
+        """
+        self._match_lines(lines2, lambda name, pat: re.match(pat, name), 're.match')
+
+    def _match_lines(self, lines2, match_func, match_nickname):
+        """Underlying implementation of ``fnmatch_lines`` and ``re_match_lines``.
+
+        :param list[str] lines2: list of string patterns to match. The actual format depends on
+            ``match_func``.
+        :param match_func: a callable ``match_func(line, pattern)`` where line is the captured
+            line from stdout/stderr and pattern is the matching pattern.
+
+        :param str match_nickname: the nickname for the match function that will be logged
+            to stdout when a match occurs.
+        """
         lines2 = self._getlines(lines2)
         lines1 = self.lines[:]
@@ -1131,8 +1172,8 @@ class LineMatcher:
                 if line == nextline:
                     self._log("exact match:", repr(line))
                     break
-                elif fnmatch(nextline, line):
-                    self._log("fnmatch:", repr(line))
+                elif match_func(nextline, line):
+                    self._log("%s:" % match_nickname, repr(line))
                     self._log("   with:", repr(nextline))
                     break
             else:
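A minimal usage sketch for the new matchers, assuming pytest's builtin ``testdir``
fixture; the test function below is hypothetical and not part of this diff:

    def test_output_matchers(testdir):
        testdir.makepyfile("""
            def test_a(): print('alpha')
            def test_b(): print('beta')
        """)
        result = testdir.runpytest('-s')
        # the *_random variants accept the patterns in any order
        result.stdout.fnmatch_lines_random(['*beta*', '*alpha*'])
        # re_match_lines anchors each pattern at the start of a line (re.match)
        result.stdout.re_match_lines([r'.*alpha.*', r'.*beta.*'])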
@@ -51,6 +51,10 @@ def pytest_addoption(parser):
              choices=['yes', 'no', 'auto'],
              help="color terminal output (yes/no/auto).")
 
+    parser.addini("console_output_style",
+                  help="console output: classic or with additional progress information (classic|progress).",
+                  default='progress')
+
 
 def pytest_configure(config):
     config.option.verbose -= config.option.quiet
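The ini value registered above is read back elsewhere with ``config.getini``
(that is how ``_show_progress_info`` is set in the reporter below). A hypothetical
``conftest.py`` sketch of that round trip, illustration only:

    # conftest.py
    def pytest_configure(config):
        style = config.getini('console_output_style')  # 'progress' by default
        assert style in ('classic', 'progress')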
@@ -135,16 +139,20 @@ class TerminalReporter:
         self.showfspath = self.verbosity >= 0
         self.showlongtestinfo = self.verbosity > 0
         self._numcollected = 0
+        self._session = None
 
         self.stats = {}
         self.startdir = py.path.local()
         if file is None:
             file = sys.stdout
         self._writer = _pytest.config.create_terminal_writer(config, file)
+        self._screen_width = self.writer.fullwidth
         self.currentfspath = None
         self.reportchars = getreportopt(config)
         self.hasmarkup = self.writer.hasmarkup
         self.isatty = file.isatty()
+        self._progress_items_reported = 0
+        self._show_progress_info = self.config.getini('console_output_style') == 'progress'
 
     @property
     def writer(self):
@@ -163,6 +171,8 @@ class TerminalReporter:
     def write_fspath_result(self, nodeid, res):
         fspath = self.config.rootdir.join(nodeid.split("::")[0])
         if fspath != self.currentfspath:
+            if self.currentfspath is not None:
+                self._write_progress_information_filling_space()
             self.currentfspath = fspath
             fspath = self.startdir.bestrelpath(fspath)
             self.writer.line()
@@ -177,6 +187,7 @@ class TerminalReporter:
         if extra:
             self.writer.write(extra, **kwargs)
             self.currentfspath = -2
+            self._write_progress_information_filling_space()
 
     def ensure_newline(self):
         if self.currentfspath:
@@ -203,7 +214,7 @@ class TerminalReporter:
         """
         erase = markup.pop('erase', False)
         if erase:
-            fill_count = self.writer.fullwidth - len(line)
+            fill_count = self.writer.fullwidth - len(line) - 1
             fill = ' ' * fill_count
         else:
             fill = ''
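A worked example of the padding change above, matching the updated ``test_rewrite``
assertion further down (plain Python, illustration only): with a terminal width of
10, rewriting with 'hey' now fills with 10 - len('hey') - 1 = 6 spaces instead of 7.

    fullwidth, line = 10, 'hey'
    fill_count = fullwidth - len(line) - 1
    assert fill_count == 6
    assert line + ' ' * fill_count == 'hey' + 6 * ' '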
@@ -256,20 +267,25 @@ class TerminalReporter:
         rep = report
         res = self.config.hook.pytest_report_teststatus(report=rep)
         cat, letter, word = res
+        if isinstance(word, tuple):
+            word, markup = word
+        else:
+            markup = None
         self.stats.setdefault(cat, []).append(rep)
         self._tests_ran = True
         if not letter and not word:
             # probably passed setup/teardown
             return
+        running_xdist = hasattr(rep, 'node')
+        self._progress_items_reported += 1
         if self.verbosity <= 0:
-            if not hasattr(rep, 'node') and self.showfspath:
+            if not running_xdist and self.showfspath:
                 self.write_fspath_result(rep.nodeid, letter)
             else:
                 self.writer.write(letter)
+                self._write_progress_if_past_edge()
         else:
-            if isinstance(word, tuple):
-                word, markup = word
-            else:
+            if markup is None:
                 if rep.passed:
                     markup = {'green': True}
                 elif rep.failed:
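The hoisted tuple handling reflects that ``pytest_report_teststatus`` may return
the status word either as a plain string or as a ``(word, markup)`` pair; unpacking
it once, up front, makes the markup available to both the quiet and the verbose
branches. A standalone sketch (illustration only):

    word = ('PASSED', {'green': True})
    if isinstance(word, tuple):
        word, markup = word
    else:
        markup = None  # filled in later from rep.passed/failed/skipped
    assert word == 'PASSED' and markup == {'green': True}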
@@ -279,17 +295,45 @@ class TerminalReporter:
                 else:
                     markup = {}
             line = self._locationline(rep.nodeid, *rep.location)
-            if not hasattr(rep, 'node'):
+            if not running_xdist:
                 self.write_ensure_prefix(line, word, **markup)
-                # self.writer.write(word, **markup)
             else:
                 self.ensure_newline()
-                if hasattr(rep, 'node'):
-                    self.writer.write("[%s] " % rep.node.gateway.id)
+                self.writer.write("[%s]" % rep.node.gateway.id)
+                if self._show_progress_info:
+                    self.writer.write(self._get_progress_information_message() + " ", cyan=True)
+                else:
+                    self.writer.write(' ')
                 self.writer.write(word, **markup)
                 self.writer.write(" " + line)
                 self.currentfspath = -2
 
+    def _write_progress_if_past_edge(self):
+        if not self._show_progress_info:
+            return
+        last_item = self._progress_items_reported == self._session.testscollected
+        if last_item:
+            self._write_progress_information_filling_space()
+            return
+
+        past_edge = self.writer.chars_on_current_line + self._PROGRESS_LENGTH + 1 >= self._screen_width
+        if past_edge:
+            msg = self._get_progress_information_message()
+            self.writer.write(msg + '\n', cyan=True)
+
+    _PROGRESS_LENGTH = len(' [100%]')
+
+    def _get_progress_information_message(self):
+        progress = self._progress_items_reported * 100 // self._session.testscollected
+        return ' [{:3d}%]'.format(progress)
+
+    def _write_progress_information_filling_space(self):
+        if not self._show_progress_info:
+            return
+        msg = self._get_progress_information_message()
+        fill = ' ' * (self.writer.fullwidth - self.writer.chars_on_current_line - len(msg) - 1)
+        self.write(fill + msg, cyan=True)
+
     def pytest_collection(self):
         if not self.isatty and self.config.option.verbose >= 1:
             self.write("collecting ... ", bold=True)
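A worked example of the progress formula above: integer floor division, then
right-aligned with ``{:3d}`` so the indicator is always 7 characters wide,
matching ``_PROGRESS_LENGTH`` (plain Python, illustration only):

    reported, collected = 10, 20
    progress = reported * 100 // collected
    assert ' [{:3d}%]'.format(progress) == ' [ 50%]'
    # floor division means 100% appears only once the last item is reported
    assert 19 * 100 // 20 == 95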
@@ -332,6 +376,7 @@ class TerminalReporter:
 
     @pytest.hookimpl(trylast=True)
     def pytest_sessionstart(self, session):
+        self._session = session
         self._sessionstarttime = time.time()
         if not self.showheader:
             return

@@ -0,0 +1 @@
+Now pytest displays the total progress percentage while running tests. The previous output style can be restored by setting the new ``console_output_style`` option to ``classic``.

@@ -312,3 +312,22 @@ Builtin configuration file options
    relative to :ref:`rootdir <rootdir>`. Additionally path may contain environment
    variables, that will be expanded. For more information about cache plugin
    please refer to :ref:`cache_provider`.
+
+
+.. confval:: console_output_style
+
+   .. versionadded:: 3.3
+
+   Sets the console output style while running tests:
+
+   * ``classic``: classic pytest output.
+   * ``progress``: like classic pytest output, but with a progress indicator.
+
+   The default is ``progress``, but you can fall back to ``classic`` if you prefer
+   or if the new mode causes unexpected problems:
+
+   .. code-block:: ini
+
+       # content of pytest.ini
+       [pytest]
+       console_output_style = classic

setup.py

@@ -45,7 +45,7 @@ def has_environment_marker_support():
 def main():
     extras_require = {}
     install_requires = [
-        'py>=1.4.33,<1.5',
+        'py>=1.5.0',
         'six>=1.10.0',
         'setuptools',
         'attrs>=17.2.0',

@@ -630,10 +630,10 @@ class TestInvocationVariants(object):
         testdir.chdir()
         assert result.ret == 0
         result.stdout.fnmatch_lines([
-            "*test_hello.py::test_hello*PASSED",
-            "*test_hello.py::test_other*PASSED",
-            "*test_world.py::test_world*PASSED",
-            "*test_world.py::test_other*PASSED",
+            "*test_hello.py::test_hello*PASSED*",
+            "*test_hello.py::test_other*PASSED*",
+            "*test_world.py::test_world*PASSED*",
+            "*test_world.py::test_other*PASSED*",
             "*4 passed*"
         ])
@@ -641,7 +641,7 @@ class TestInvocationVariants(object):
         result = testdir.runpytest("--pyargs", "-v", "ns_pkg.world.test_world::test_other")
         assert result.ret == 0
         result.stdout.fnmatch_lines([
-            "*test_world.py::test_other*PASSED",
+            "*test_world.py::test_other*PASSED*",
             "*1 passed*"
         ])

@@ -2119,6 +2119,10 @@ class TestFixtureMarker(object):
         assert values == [1, 1, 2, 2]
 
     def test_module_parametrized_ordering(self, testdir):
+        testdir.makeini("""
+            [pytest]
+            console_output_style=classic
+        """)
         testdir.makeconftest("""
             import pytest
@@ -2165,6 +2169,10 @@ class TestFixtureMarker(object):
         """)
 
     def test_class_ordering(self, testdir):
+        testdir.makeini("""
+            [pytest]
+            console_output_style=classic
+        """)
         testdir.makeconftest("""
             import pytest

@@ -960,6 +960,10 @@ class TestMetafuncFunctional(object):
         ])
 
     def test_parametrize_with_ids(self, testdir):
+        testdir.makeini("""
+            [pytest]
+            console_output_style=classic
+        """)
         testdir.makepyfile("""
             import pytest
             def pytest_generate_tests(metafunc):
@@ -1005,9 +1009,9 @@ class TestMetafuncFunctional(object):
         result = testdir.runpytest("-v")
         assert result.ret == 1
         result.stdout.fnmatch_lines_random([
-            "*test_function*basic*PASSED",
-            "*test_function*1-1*PASSED",
-            "*test_function*advanced*FAILED",
+            "*test_function*basic*PASSED*",
+            "*test_function*1-1*PASSED*",
+            "*test_function*advanced*FAILED*",
         ])
 
     def test_fixture_parametrized_empty_ids(self, testdir):
@@ -1062,8 +1066,8 @@ class TestMetafuncFunctional(object):
         result = testdir.runpytest("-v")
         assert result.ret == 1
         result.stdout.fnmatch_lines_random([
-            "*test_function*a0*PASSED",
-            "*test_function*a1*FAILED"
+            "*test_function*a0*PASSED*",
+            "*test_function*a1*FAILED*"
         ])
 
     @pytest.mark.parametrize(("scope", "length"),

@@ -238,6 +238,6 @@ def test_show_fixtures_and_execute_test(testdir):
 
     result.stdout.fnmatch_lines([
         '*SETUP F arg*',
-        '*test_arg (fixtures used: arg)F',
+        '*test_arg (fixtures used: arg)F*',
         '*TEARDOWN F arg*',
     ])

@@ -820,7 +820,7 @@ def test_traceback_failure(testdir):
     """)
     result = testdir.runpytest(p1, "--tb=long")
     result.stdout.fnmatch_lines([
-        "*test_traceback_failure.py F",
+        "*test_traceback_failure.py F*",
         "====* FAILURES *====",
         "____*____",
         "",
@@ -840,7 +840,7 @@ def test_traceback_failure(testdir):
 
     result = testdir.runpytest(p1)  # "auto"
     result.stdout.fnmatch_lines([
-        "*test_traceback_failure.py F",
+        "*test_traceback_failure.py F*",
         "====* FAILURES *====",
         "____*____",
         "",

@@ -266,7 +266,7 @@ class TestPerTestCapturing(object):
     """)
     result = testdir.runpytest(p1)
     result.stdout.fnmatch_lines([
-        "*test_capturing_outerr.py .F",
+        "*test_capturing_outerr.py .F*",
         "====* FAILURES *====",
         "____*____",
         "*test_capturing_outerr.py:8: ValueError",

@@ -78,7 +78,7 @@ class TestTerminal(object):
             ])
         else:
             result.stdout.fnmatch_lines([
-                "*test_pass_skip_fail.py .sF"
+                "*test_pass_skip_fail.py .sF*"
             ])
         result.stdout.fnmatch_lines([
             "    def test_func():",
@@ -142,12 +142,12 @@ class TestTerminal(object):
         """)
         result = testdir.runpytest(p2)
         result.stdout.fnmatch_lines([
-            "*test_p2.py .",
+            "*test_p2.py .*",
             "*1 passed*",
         ])
         result = testdir.runpytest("-v", p2)
         result.stdout.fnmatch_lines([
-            "*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED",
+            "*test_p2.py::TestMore::test_p1* <- *test_p1.py*PASSED*",
         ])
 
     def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
@@ -222,7 +222,7 @@ class TestTerminal(object):
         tr.writer.fullwidth = 10
         tr.write('hello')
         tr.rewrite('hey', erase=True)
-        assert f.getvalue() == 'hello' + '\r' + 'hey' + (7 * ' ')
+        assert f.getvalue() == 'hello' + '\r' + 'hey' + (6 * ' ')
 
 
 class TestCollectonly(object):
@@ -431,7 +431,7 @@ class TestTerminalFunctional(object):
         )
         result = testdir.runpytest("-k", "test_two:", testpath)
         result.stdout.fnmatch_lines([
-            "*test_deselected.py ..",
+            "*test_deselected.py ..*",
             "=* 1 test*deselected *=",
         ])
         assert result.ret == 0
@@ -464,7 +464,7 @@ class TestTerminalFunctional(object):
         finally:
             old.chdir()
         result.stdout.fnmatch_lines([
-            "test_passes.py ..",
+            "test_passes.py ..*",
             "* 2 pass*",
         ])
         assert result.ret == 0
@@ -481,7 +481,7 @@ class TestTerminalFunctional(object):
             "platform %s -- Python %s*pytest-%s*py-%s*pluggy-%s" % (
                 py.std.sys.platform, verinfo,
                 pytest.__version__, py.__version__, pluggy.__version__),
-            "*test_header_trailer_info.py .",
+            "*test_header_trailer_info.py .*",
             "=* 1 passed*in *.[0-9][0-9] seconds *=",
         ])
         if pytest.config.pluginmanager.list_plugin_distinfo():
@@ -964,3 +964,58 @@ def test_no_trailing_whitespace_after_inifile_word(testdir):
     testdir.makeini('[pytest]')
     result = testdir.runpytest('')
     assert 'inifile: tox.ini\n' in result.stdout.str()
+
+
+class TestProgress:
+
+    @pytest.fixture
+    def many_tests_file(self, testdir):
+        testdir.makepyfile(
+            test_bar="""
+                import pytest
+                @pytest.mark.parametrize('i', range(10))
+                def test_bar(i): pass
+            """,
+            test_foo="""
+                import pytest
+                @pytest.mark.parametrize('i', range(5))
+                def test_foo(i): pass
+            """,
+            test_foobar="""
+                import pytest
+                @pytest.mark.parametrize('i', range(5))
+                def test_foobar(i): pass
+            """,
+        )
+
+    def test_normal(self, many_tests_file, testdir):
+        output = testdir.runpytest()
+        output.stdout.re_match_lines([
+            r'test_bar.py \.{10} \s+ \[ 50%\]',
+            r'test_foo.py \.{5} \s+ \[ 75%\]',
+            r'test_foobar.py \.{5} \s+ \[100%\]',
+        ])
+
+    def test_verbose(self, many_tests_file, testdir):
+        output = testdir.runpytest('-v')
+        output.stdout.re_match_lines([
+            r'test_bar.py::test_bar\[0\] PASSED \s+ \[  5%\]',
+            r'test_foo.py::test_foo\[4\] PASSED \s+ \[ 75%\]',
+            r'test_foobar.py::test_foobar\[4\] PASSED \s+ \[100%\]',
+        ])
+
+    def test_xdist_normal(self, many_tests_file, testdir):
+        pytest.importorskip('xdist')
+        output = testdir.runpytest('-n2')
+        output.stdout.re_match_lines([
+            r'\.{20} \s+ \[100%\]',
+        ])
+
+    def test_xdist_verbose(self, many_tests_file, testdir):
+        pytest.importorskip('xdist')
+        output = testdir.runpytest('-n2', '-v')
+        output.stdout.re_match_lines_random([
+            r'\[gw\d\] \[\s*\d+%\] PASSED test_bar.py::test_bar\[1\]',
+            r'\[gw\d\] \[\s*\d+%\] PASSED test_foo.py::test_foo\[1\]',
+            r'\[gw\d\] \[\s*\d+%\] PASSED test_foobar.py::test_foobar\[1\]',
+        ])
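A quick sanity check of the first pattern asserted in ``test_normal`` above
(plain Python, illustration only; the sample line is a hypothetical rendering
of the non-verbose progress output):

    import re

    line = 'test_bar.py ..........' + ' ' * 20 + '[ 50%]'
    assert re.match(r'test_bar.py \.{10} \s+ \[ 50%\]', line)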