introduce a new request.applymarker() function and refactor
internally to allow for dynamically adding keywords to test
items.

--HG--
branch : trunk
holger krekel 2010-06-08 02:34:51 +02:00
parent d00b62e0f4
commit 64388832d9
13 changed files with 159 additions and 27 deletions
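
In short, a funcarg factory can now attach a marker to the requesting test item.
A minimal sketch of the intended usage (the factory name and the failing test body
below are illustrative only; the committed examples are in the CHANGELOG and doc hunks):

    # conftest.py
    import py

    def pytest_funcarg__arg(request):
        # dynamically add the 'xfail' keyword/marker to the requesting test item
        request.applymarker(py.test.mark.xfail(reason="flaky config"))
        return 42

    # test_module.py
    def test_function(arg):
        assert arg == 0   # the failure is reported as an expected failure (xfail)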

View File

@@ -4,6 +4,17 @@ Changes between 1.3.1 and 1.3.x
New features
++++++++++++++++++
- Funcarg factories can now dynamically apply a marker to a
  test invocation. This is particularly useful if a factory
  provides parameters to a test which you expect to fail:

      def pytest_funcarg__arg(request):
          request.applymarker(py.test.mark.xfail(reason="flaky config"))
          ...

      def test_function(arg):
          ...
Bug fixes / Maintenance
++++++++++++++++++++++++++

View File

@@ -160,6 +160,25 @@ like this:
        scope="session"
    )

dynamically applying a marker
---------------------------------------------

.. sourcecode:: python

    def applymarker(self, marker):
        """ apply a marker to a test function invocation. The 'marker'
        must be created with py.test.mark.<name>, e.g. py.test.mark.xfail.
        """

``request.applymarker(marker)`` will mark the test invocation
with the given marker. For example, if your funcarg factory provides
values which may cause a test function to fail, you can call
``request.applymarker(py.test.mark.xfail(reason='flaky config'))``
and a failing test will then be reported as expected-to-fail
rather than with a traceback. See xfail_ for details.

.. _`xfail`: plugin/skipping.html#xfail
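
A minimal sketch of how this is typically wired up (the ``arg`` factory and the
failing test below are illustrative and not part of the py.test source):

.. sourcecode:: python

    # conftest.py
    import py

    def pytest_funcarg__arg(request):
        # mark the requesting test invocation as expected to fail
        request.applymarker(py.test.mark.xfail(reason="flaky config"))
        return None

    # test_example.py
    def test_function(arg):
        assert 0   # reported as xfail; no traceback is shown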
requesting values of other funcargs
---------------------------------------------

View File

@@ -81,6 +81,7 @@ apply the function will be skipped.
.. _`whole class- or module level`: mark.html#scoped-marking

.. _xfail:

mark a test function as **expected to fail**
-------------------------------------------------------

View File

@@ -171,4 +171,6 @@ def pytest_pycollect_makeitem(__multicall__, collector, name, obj):
         for mark in marker:
             if isinstance(mark, MarkDecorator):
                 mark(func)
+    item.keywords.update(py.builtin._getfuncdict(func) or {})
     return item

View File

@@ -134,7 +134,7 @@ class ItemTestReport(BaseReport):
         self.item = item
         self.when = when
         if item and when != "setup":
-            self.keywords = item.readkeywords()
+            self.keywords = item.keywords
         else:
             # if we fail during setup it might mean
             # we are not able to access the underlying object

View File

@@ -159,8 +159,10 @@ class MarkEvaluator:
     def __init__(self, item, name):
         self.item = item
         self.name = name
-        self.holder = getattr(item.obj, name, None)
+
+    @property
+    def holder(self):
+        return self.item.keywords.get(self.name, None)

     def __bool__(self):
         return bool(self.holder)
     __nonzero__ = __bool__
@@ -204,10 +206,17 @@ def pytest_runtest_setup(item):
     if evalskip.istrue():
         py.test.skip(evalskip.getexplanation())
     item._evalxfail = MarkEvaluator(item, 'xfail')
+    check_xfail_no_run(item)
+
+def pytest_pyfunc_call(pyfuncitem):
+    check_xfail_no_run(pyfuncitem)
+
+def check_xfail_no_run(item):
     if not item.config.getvalue("runxfail"):
-        if item._evalxfail.istrue():
-            if not item._evalxfail.get('run', True):
-                py.test.skip("xfail")
+        evalxfail = item._evalxfail
+        if evalxfail.istrue():
+            if not evalxfail.get('run', True):
+                py.test.xfail("[NOTRUN] " + evalxfail.getexplanation())

 def pytest_runtest_makereport(__multicall__, item, call):
     if not isinstance(item, py.test.collect.Function):
@@ -224,16 +233,9 @@ def pytest_runtest_makereport(__multicall__, item, call):
         rep.skipped = True
         rep.failed = False
         return rep
-    if call.when == "setup":
-        rep = __multicall__.execute()
-        if rep.skipped and evalxfail.istrue():
-            expl = evalxfail.getexplanation()
-            if not evalxfail.get("run", True):
-                expl = "[NOTRUN] " + expl
-            rep.keywords['xfail'] = expl
-        return rep
-    elif call.when == "call":
+    if call.when == "call":
         rep = __multicall__.execute()
-        evalxfail = getattr(item, '_evalxfail')
         if not item.config.getvalue("runxfail") and evalxfail.istrue():
             if call.excinfo:
                 rep.skipped = True

View File

@@ -31,6 +31,7 @@ class Node(object):
         self.config = config or parent.config
         self.fspath = getattr(parent, 'fspath', None)
         self.ihook = HookProxy(self)
+        self.keywords = self.readkeywords()

     def _reraiseunpicklingproblem(self):
         if hasattr(self, '_unpickle_exc'):
@@ -153,7 +154,7 @@ class Node(object):
     def _matchonekeyword(self, key, chain):
         elems = key.split(".")
         # XXX O(n^2), anyone cares?
-        chain = [item.readkeywords() for item in chain if item._keywords()]
+        chain = [item.keywords for item in chain if item.keywords]
         for start, _ in enumerate(chain):
             if start + len(elems) > len(chain):
                 return False

View File

@@ -92,6 +92,16 @@ class FuncargRequest:
         if argname not in self._pyfuncitem.funcargs:
             self._pyfuncitem.funcargs[argname] = self.getfuncargvalue(argname)

+    def applymarker(self, marker):
+        """ apply a marker to a test function invocation. The 'marker'
+        must be created with py.test.mark.<name>, e.g. py.test.mark.xfail.
+        """
+        # py.test.mark.XYZ yields a MarkDecorator instance; check against its class
+        if not isinstance(marker, py.test.mark.XYZ.__class__):
+            raise ValueError("%r is not a py.test.mark.* object" % (marker,))
+        self._pyfuncitem.keywords[marker.markname] = marker
+
     def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
         """ cache and return result of calling setup().

View File

@@ -348,6 +348,7 @@ class Function(FunctionMixin, py.test.collect.Item):
         if callobj is not _dummy:
             self._obj = callobj
         self.function = getattr(self.obj, 'im_func', self.obj)
+        self.keywords.update(py.builtin._getfuncdict(self.obj) or {})

     def _getobj(self):
         name = self.name
@@ -359,11 +360,6 @@ class Function(FunctionMixin, py.test.collect.Item):
     def _isyieldedfunction(self):
         return self._args is not None

-    def readkeywords(self):
-        d = super(Function, self).readkeywords()
-        d.update(py.builtin._getfuncdict(self.obj))
-        return d
-
     def runtest(self):
         """ execute the underlying test function. """
         self.ihook.pytest_pyfunc_call(pyfuncitem=self)

View File

@@ -65,7 +65,7 @@ class TestFunctional:
             def test_func():
                 pass
         """)
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords

     def test_marklist_per_class(self, testdir):
@@ -79,7 +79,7 @@ class TestFunctional:
         """)
         clscol = modcol.collect()[0]
         item = clscol.collect()[0].collect()[0]
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords

     def test_marklist_per_module(self, testdir):
@@ -93,7 +93,7 @@ class TestFunctional:
         """)
         clscol = modcol.collect()[0]
         item = clscol.collect()[0].collect()[0]
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords
         assert 'world' in keywords
@@ -108,7 +108,7 @@ class TestFunctional:
         """)
         clscol = modcol.collect()[0]
         item = clscol.collect()[0].collect()[0]
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords

     @py.test.mark.skipif("sys.version_info < (2,6)")
@@ -124,7 +124,7 @@ class TestFunctional:
         """)
         clscol = modcol.collect()[0]
         item = clscol.collect()[0].collect()[0]
-        keywords = item.readkeywords()
+        keywords = item.keywords
         assert 'hello' in keywords
         assert 'world' in keywords
@@ -141,7 +141,7 @@ class TestFunctional:
         """)
         items, rec = testdir.inline_genitems(p)
         item, = items
-        keywords = item.readkeywords()
+        keywords = item.keywords
         marker = keywords['hello']
         assert marker.args == ["pos0", "pos1"]
         assert marker.kwargs == {'x': 3, 'y': 2, 'z': 4}
@@ -154,4 +154,22 @@ class TestFunctional:
             def test_func():
                 pass
         """)
-        keywords = item.readkeywords()
+        keywords = item.keywords
+
+    def test_mark_dynamically_in_funcarg(self, testdir):
+        testdir.makeconftest("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.hello)
+            def pytest_terminal_summary(terminalreporter):
+                l = terminalreporter.stats['passed']
+                terminalreporter._tw.line("keyword: %s" % l[0].keywords)
+        """)
+        testdir.makepyfile("""
+            def test_func(arg):
+                pass
+        """)
+        result = testdir.runpytest()
+        result.stdout.fnmatch_lines([
+            "keyword: *hello*"
+        ])

View File

@@ -188,6 +188,21 @@ class TestXFail:
             "*1 passed*",
         ])

+    def test_xfail_not_run_no_setup_run(self, testdir):
+        p = testdir.makepyfile(test_one="""
+            import py
+            @py.test.mark.xfail(run=False, reason="hello")
+            def test_this():
+                assert 0
+            def setup_module(mod):
+                raise ValueError(42)
+        """)
+        result = testdir.runpytest(p, '--report=xfailed', )
+        result.stdout.fnmatch_lines([
+            "*test_one*test_this*NOTRUN*hello",
+            "*1 xfailed*",
+        ])
+
     def test_xfail_xpass(self, testdir):
         p = testdir.makepyfile(test_one="""
             import py
@@ -245,8 +260,47 @@ class TestXFail:
             "*py.test.xfail*",
         ])

+    def xtest_dynamic_xfail_set_during_setup(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def setup_function(function):
+                py.test.mark.xfail(function)
+            def test_this():
+                assert 0
+            def test_that():
+                assert 1
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*",
+            "*XPASS*test_that*",
+        ])
+
+    def test_dynamic_xfail_no_run(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.xfail(run=False))
+            def test_this(arg):
+                assert 0
+        """)
+        result = testdir.runpytest(p, '-rxX')
+        result.stdout.fnmatch_lines([
+            "*XFAIL*test_this*NOTRUN*",
+        ])
+
+    def test_dynamic_xfail_set_during_funcarg_setup(self, testdir):
+        p = testdir.makepyfile("""
+            import py
+            def pytest_funcarg__arg(request):
+                request.applymarker(py.test.mark.xfail)
+            def test_this2(arg):
+                assert 0
+        """)
+        result = testdir.runpytest(p)
+        result.stdout.fnmatch_lines([
+            "*1 xfailed*",
+        ])
+
 class TestSkipif:

View File

@@ -14,6 +14,7 @@ from py._plugin.pytest_terminal import TerminalReporter, \
 from py._plugin import pytest_runner as runner

 def basic_run_report(item):
+    runner.call_and_report(item, "setup", log=False)
     return runner.call_and_report(item, "call", log=False)

 class Option:

View File

@@ -211,6 +211,23 @@ class TestRequest:
         req = funcargs.FuncargRequest(item)
         assert req.fspath == modcol.fspath

+def test_applymarker(testdir):
+    item1,item2 = testdir.getitems("""
+        class TestClass:
+            def test_func1(self, something):
+                pass
+            def test_func2(self, something):
+                pass
+    """)
+    req1 = funcargs.FuncargRequest(item1)
+    assert 'xfail' not in item1.keywords
+    req1.applymarker(py.test.mark.xfail)
+    assert 'xfail' in item1.keywords
+    assert 'skipif' not in item1.keywords
+    req1.applymarker(py.test.mark.skipif)
+    assert 'skipif' in item1.keywords
+    py.test.raises(ValueError, "req1.applymarker(42)")
+
 class TestRequestCachedSetup:
     def test_request_cachedsetup(self, testdir):
         item1,item2 = testdir.getitems("""