deprecate the --report option in favour of a new, shorter and easier to remember -r option: it takes a string argument consisting of any combination of the characters 'xsfX'

These letters correspond to the status characters you see during the dotted terminal progress reporting.
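A rough usage sketch (the file and test names are invented for illustration; the exact summary wording is defined by the new code in this commit):

    # test_example.py -- hypothetical example module
    import py

    def test_broken():
        assert 0                        # 'f' -> listed as FAIL

    @py.test.mark.xfail(reason="known bug")
    def test_known_bug():
        assert 0                        # 'x' -> listed as XFAIL

    def test_linux_only():
        py.test.skip("requires linux")  # 's' -> listed as SKIP

    # running "py.test -rfxs test_example.py" then ends with a
    # "short test summary info" section holding one line per such
    # test, including a test id you can paste back onto the
    # py.test command line.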

--HG--
branch : trunk
holger krekel 2010-05-05 19:50:59 +02:00
parent 325cb0aa49
commit ee036223ce
6 changed files with 154 additions and 65 deletions


@@ -1,41 +1,65 @@
Changes between 1.2.1 and 1.3.0 (release pending)
Changes between 1.2.1 and 1.3.0
==================================================
- deprecate --report option in favour of a new shorter and easier to
  remember -r option: it takes a string argument consisting of any
  combination of 'xfsX' characters. They relate to the single chars
  you see during the dotted progress printing and will print an extra line
  per test at the end of the test run. This extra line indicates the exact
  position or test ID that you can directly paste to the py.test cmdline
  in order to re-run a particular test.
- allow external plugins to register new hooks via the new
  pytest_addhooks(pluginmanager) hook. The new release of
  the pytest-xdist plugin for distributed and looponfailing
  testing requires this feature.
- add a new pytest_ignore_collect(path, config) hook to allow projects and
  plugins to define exclusion behaviour for their directory structure -
  for example you may define in a conftest.py this method:
  for example you may define in a conftest.py this method::

      def pytest_ignore_collect(path):
          return path.check(link=1)

  to prevent even a collection try of any tests in symlinked dirs.
- new pytest_pycollect_makemodule(path, parent) hook for
  allowing customization of the Module collection object for a
  matching test module.
- extend and refine xfail mechanism (see the sketch after this list):
  ``@py.test.mark.xfail(run=False)`` do not run the decorated test
  ``@py.test.mark.xfail(reason="...")`` prints the reason string in xfail summaries
  specifying ``--runxfail`` on command line virtually ignores xfail markers
- expose (previously internal) commonly useful methods:
  py.io.get_terminal_width() -> return terminal width
  py.io.ansi_print(...) -> print colored/bold text on linux/win32
  py.io.saferepr(obj) -> return limited representation string
- expose test outcome related exceptions as py.test.skip.Exception,
  py.test.raises.Exception etc., useful mostly for plugins
  doing special outcome interpretation/tweaking
- (issue85) fix junitxml plugin to handle tests with non-ascii output
- fix/refine python3 compatibility (thanks Benjamin Peterson)
- fixes for making the jython/win32 combination work, note however:
  jython2.5.1/win32 does not provide a command line launcher, see
  http://bugs.jython.org/issue1491 . See pylib install documentation
  for how to work around.
- fixes for handling of unicode exception values and unprintable objects
- (issue87) fix unboundlocal error in assertionold code
- (issue86) improve documentation for looponfailing
- refine IO capturing: stdin-redirect pseudo-file now has a NOP close() method
- ship distribute_setup.py version 0.6.10
- added links to the new capturelog and coverage plugins
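For the refined xfail mechanism above, a minimal sketch of the two decorator forms named in that entry (test bodies and the reason string are illustrative only):

    import py

    @py.test.mark.xfail(run=False)
    def test_not_run():
        assert 0   # never executed, still reported as xfailed

    @py.test.mark.xfail(reason="flaky on win32")
    def test_with_reason():
        assert 0   # "flaky on win32" appears in the xfail summary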


@@ -10,9 +10,8 @@ The need for skipping a test is usually connected to a condition.
If a test fails under all conditions then it's probably better
to mark your test as 'xfail'.
By passing ``--report=xfailed,skipped`` to the terminal reporter
you will see summary information on skips and xfail-run tests
at the end of a test run.
By passing ``-rxs`` to the terminal reporter you will see extra
summary information on skips and xfail-run tests at the end of a test run.
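For illustration, with invented test ids and reasons (the SKIP line format follows the summary code added in this commit; the exact test id layout may differ), such a summary section looks roughly like::

    ================= short test summary info =================
    XFAIL test_module.py:7: test_flaky  flaky on win32
    SKIP [2] conftest.py:3: 'not configured'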
.. _skipif:
@@ -165,7 +164,7 @@ class MarkEvaluator:
        expl = self.get('reason', None)
        if not expl:
            if not hasattr(self, 'expr'):
                return "condition: True"
                return ""
            else:
                return "condition: " + self.expr
        return expl
@@ -222,31 +221,53 @@ def pytest_report_teststatus(report):
# called by the terminalreporter instance/plugin
def pytest_terminal_summary(terminalreporter):
    show_xfailed(terminalreporter)
    show_skipped(terminalreporter)

def show_xfailed(terminalreporter):
    tr = terminalreporter
    xfailed = tr.stats.get("xfailed")
    if xfailed:
        if not tr.hasopt('xfailed'):
            tr.write_line(
                "%d expected failures, use --report=xfailed for more info" %
                len(xfailed))
    if not tr.reportchars:
        #for name in "xfailed skipped failed xpassed":
        #    if not tr.stats.get(name, 0):
        #        tr.write_line("HINT: use '-r' option to see extra "
        #            "summary info about tests")
        #        break
        return
        tr.write_sep("_", "expected failures")
    lines = []
    for char in tr.reportchars:
        if char == "x":
            show_xfailed(terminalreporter, lines)
        elif char == "X":
            show_xpassed(terminalreporter, lines)
        elif char == "f":
            show_failed(terminalreporter, lines)
        elif char == "s":
            show_skipped(terminalreporter, lines)
    if lines:
        tr._tw.sep("=", "short test summary info")
        for line in lines:
            tr._tw.line(line)
def show_failed(terminalreporter, lines):
    tw = terminalreporter._tw
    failed = terminalreporter.stats.get("failed")
    if failed:
        for rep in failed:
            pos = terminalreporter.gettestid(rep.item)
            lines.append("FAIL %s" %(pos, ))

def show_xfailed(terminalreporter, lines):
    xfailed = terminalreporter.stats.get("xfailed")
    if xfailed:
        for rep in xfailed:
            pos = terminalreporter.gettestid(rep.item)
            reason = rep.keywords['xfail']
            tr._tw.line("%s %s" %(pos, reason))
            lines.append("XFAIL %s %s" %(pos, reason))

def show_xpassed(terminalreporter, lines):
    xpassed = terminalreporter.stats.get("xpassed")
    if xpassed:
        tr.write_sep("_", "UNEXPECTEDLY PASSING TESTS")
        for rep in xpassed:
            pos = terminalreporter.gettestid(rep.item)
            reason = rep.keywords['xfail']
            tr._tw.line("%s %s" %(pos, reason))
            lines.append("XPASS %s %s" %(pos, reason))

def cached_eval(config, expr, d):
    if not hasattr(config, '_evalcache'):
@@ -271,17 +292,20 @@ def folded_skips(skipped):
        l.append((len(events),) + key)
    return l

def show_skipped(terminalreporter):
def show_skipped(terminalreporter, lines):
    tr = terminalreporter
    skipped = tr.stats.get('skipped', [])
    if skipped:
        if not tr.hasopt('skipped'):
            tr.write_line(
                "%d skipped tests, use --report=skipped for more info" %
                len(skipped))
            return
        #if not tr.hasopt('skipped'):
        #    tr.write_line(
        #        "%d skipped tests, specify -rs for more info" %
        #        len(skipped))
        #    return
        fskips = folded_skips(skipped)
        if fskips:
            tr.write_sep("_", "skipped test summary")
            #tr.write_sep("_", "skipped test summary")
            for num, fspath, lineno, reason in fskips:
                tr._tw.line("%s:%d: [%d] %s" %(fspath, lineno, num, reason))
                if reason.startswith("Skipped: "):
                    reason = reason[9:]
                lines.append("SKIP [%d] %s:%d: %s" %
                    (num, fspath, lineno, reason))


@@ -12,12 +12,16 @@ def pytest_addoption(parser):
    group = parser.getgroup("terminal reporting", "reporting", after="general")
    group._addoption('-v', '--verbose', action="count",
        dest="verbose", default=0, help="increase verbosity."),
    group._addoption('-r',
        action="store", dest="reportchars", default=None, metavar="chars",
        help="show extra test summary info as specified by chars (f)ailed, "
             "(s)kipped, (x)failed, (X)passed.")
    group._addoption('-l', '--showlocals',
        action="store_true", dest="showlocals", default=False,
        help="show locals in tracebacks (disabled by default).")
    group.addoption('--report',
    group._addoption('--report',
        action="store", dest="report", default=None, metavar="opts",
        help="show more info, valid: skipped,xfailed")
        help="(deprecated, use -r)")
    group._addoption('--tb', metavar="style",
        action="store", dest="tbstyle", default='long',
        type="choice", choices=['long', 'short', 'no', 'line'],
@@ -47,17 +51,25 @@ def pytest_configure(config):
        setattr(reporter._tw, name, getattr(config, attr))
    config.pluginmanager.register(reporter, 'terminalreporter')

def getreportopt(optvalue):
    d = {}
def getreportopt(config):
    reportopts = ""
    optvalue = config.getvalue("report")
    if optvalue:
        py.builtin.print_("DEPRECATED: use -r instead of --report option.",
            file=py.std.sys.stderr)
    if optvalue:
        for setting in optvalue.split(","):
            setting = setting.strip()
            val = True
            if setting.startswith("no"):
                val = False
                setting = setting[2:]
            d[setting] = val
    return d
            if setting == "skipped":
                reportopts += "s"
            elif setting == "xfailed":
                reportopts += "x"
    reportchars = config.getvalue("reportchars")
    if reportchars:
        for char in reportchars:
            if char not in reportopts:
                reportopts += char
    return reportopts
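# A rough illustration of the precedence implemented above (compare the
# test_getreportopt test later in this commit): the deprecated --report
# value is translated to chars first, then any -r chars are appended,
# skipping chars already present:
#
#     --report=skipped,xfailed              ->  "sx"
#     --report=skipped combined with -r sf  ->  "sf"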
class TerminalReporter:
    def __init__(self, config, file=None):

@@ -69,10 +81,11 @@ class TerminalReporter:
        self._tw = py.io.TerminalWriter(file)
        self.currentfspath = None
        self.gateway2info = {}
        self._reportopt = getreportopt(config.getvalue('report'))
        self.reportchars = getreportopt(config)

    def hasopt(self, name):
        return self._reportopt.get(name, False)
    def hasopt(self, char):
        char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
        return char in self.reportchars

    def write_fspath_result(self, fspath, res):
        fspath = self.curdir.bestrelpath(fspath)


@@ -12,7 +12,7 @@ py.test and pylib: rapid testing and development utils
- `py.code`_: dynamic code compile and traceback printing support
Platforms: Linux, Win32, OSX
Interpreters: Python versions 2.4 through to 3.1, Jython 2.5.1.
Interpreters: Python versions 2.4 through to 3.2, Jython 2.5.1 and PyPy
For questions please check out http://pylib.org/contact.html
.. _`py.test`: http://pytest.org


@@ -22,7 +22,7 @@ class TestEvaluator:
        assert ev
        assert ev.istrue()
        expl = ev.getexplanation()
        assert expl == "condition: True"
        assert expl == ""
        assert not ev.get("run", False)

    def test_marked_one_arg(self, testdir):

@@ -80,7 +80,7 @@ class TestXFail:
        callreport = reports[1]
        assert callreport.skipped
        expl = callreport.keywords['xfail']
        assert expl == "condition: True"
        assert expl == ""

    def test_xfail_xpassed(self, testdir):
        item = testdir.getitem("""

@@ -94,7 +94,7 @@ class TestXFail:
        callreport = reports[1]
        assert callreport.failed
        expl = callreport.keywords['xfail']
        assert expl == "condition: True"
        assert expl == ""

    def test_xfail_run_anyway(self, testdir):
        testdir.makepyfile("""

@@ -131,9 +131,9 @@ class TestXFail:
            assert 0
        """)
        result = testdir.runpytest(p, '-v')
        result.stdout.fnmatch_lines([
            "*1 expected failures*--report=xfailed*",
        ])
        #result.stdout.fnmatch_lines([
        #    "*HINT*use*-r*"
        #])

    def test_xfail_not_run_xfail_reporting(self, testdir):
        p = testdir.makepyfile(test_one="""

@@ -162,10 +162,9 @@ class TestXFail:
            def test_that():
                assert 1
        """)
        result = testdir.runpytest(p, '--report=xfailed')
        result = testdir.runpytest(p, '-rX')
        result.stdout.fnmatch_lines([
            "*UNEXPECTEDLY PASSING*",
            "*test_that*",
            "*XPASS*test_that*",
            "*1 xpassed*"
        ])
        assert result.ret == 1

@@ -189,9 +188,9 @@ class TestSkipif:
            def test_that():
                assert 0
        """)
        result = testdir.runpytest(p, '-s', '--report=skipped')
        result = testdir.runpytest(p, '-s', '-rs')
        result.stdout.fnmatch_lines([
            "*Skipped*platform*",
            "*SKIP*1*platform*",
            "*1 skipped*"
        ])
        assert result.ret == 0

@@ -204,7 +203,8 @@ def test_skip_not_report_default(testdir):
    """)
    result = testdir.runpytest(p, '-v')
    result.stdout.fnmatch_lines([
        "*1 skipped*--report=skipped*",
        #"*HINT*use*-r*",
        "*1 skipped*",
    ])

@@ -276,8 +276,7 @@ def test_skipped_reasons_functional(testdir):
    result.stdout.fnmatch_lines([
        "*test_one.py ss",
        "*test_two.py S",
        "___* skipped test summary *_",
        "*conftest.py:3: *3* Skipped: 'test'",
        "*SKIP*3*conftest.py:3: 'test'",
    ])
    assert result.ret == 0


@@ -575,12 +575,41 @@ class TestTerminalFunctional:
        ])
        assert result.ret == 1

def test_fail_extra_reporting(testdir):
    p = testdir.makepyfile("def test_this(): assert 0")
    result = testdir.runpytest(p)
    assert 'short test summary' not in result.stdout.str()
    result = testdir.runpytest(p, '-rf')
    result.stdout.fnmatch_lines([
        "*test summary*",
        "FAIL*test_fail_extra_reporting*",
    ])

def test_fail_reporting_on_pass(testdir):
    p = testdir.makepyfile("def test_this(): assert 1")
    result = testdir.runpytest(p, '-rf')
    assert 'short test summary' not in result.stdout.str()

def test_getreportopt():
    assert getreportopt(None) == {}
    assert getreportopt("hello") == {'hello': True}
    assert getreportopt("hello, world") == dict(hello=True, world=True)
    assert getreportopt("nohello") == dict(hello=False)
    testdict = {}
    class Config:
        def getvalue(self, name):
            return testdict.get(name, None)
    config = Config()
    testdict.update(dict(report="xfailed"))
    assert getreportopt(config) == "x"
    testdict.update(dict(report="xfailed,skipped"))
    assert getreportopt(config) == "xs"
    testdict.update(dict(report="skipped,xfailed"))
    assert getreportopt(config) == "sx"
    testdict.update(dict(report="skipped", reportchars="sf"))
    assert getreportopt(config) == "sf"
    testdict.update(dict(reportchars="sfx"))
    assert getreportopt(config) == "sfx"

def test_terminalreporter_reportopt_conftestsetting(testdir):
    testdir.makeconftest("option_report = 'skipped'")