commit bbc61c85ac
@@ -4,6 +4,7 @@ import inspect
 import sys
 import pytest
 from _pytest.main import getfslineno
+from _pytest.mark import MarkDecorator, MarkInfo
 from _pytest.monkeypatch import monkeypatch
 from py._code.code import TerminalRepr
 
@@ -565,11 +566,13 @@ class CallSpec2(object):
         self._globalid_args = set()
         self._globalparam = _notexists
         self._arg2scopenum = {} # used for sorting parametrized resources
+        self.keywords = {}
 
     def copy(self, metafunc):
         cs = CallSpec2(self.metafunc)
         cs.funcargs.update(self.funcargs)
         cs.params.update(self.params)
+        cs.keywords.update(self.keywords)
         cs._arg2scopenum.update(self._arg2scopenum)
         cs._idlist = list(self._idlist)
         cs._globalid = self._globalid
@@ -593,7 +596,7 @@ class CallSpec2(object):
     def id(self):
         return "-".join(map(str, filter(None, self._idlist)))
 
-    def setmulti(self, valtype, argnames, valset, id, scopenum=0):
+    def setmulti(self, valtype, argnames, valset, id, keywords, scopenum=0):
         for arg,val in zip(argnames, valset):
             self._checkargnotcontained(arg)
             getattr(self, valtype)[arg] = val
@@ -605,6 +608,7 @@ class CallSpec2(object):
             if val is _notexists:
                 self._emptyparamspecified = True
         self._idlist.append(id)
+        self.keywords.update(keywords)
 
     def setall(self, funcargs, id, param):
         for x in funcargs:
@@ -667,6 +671,21 @@ class Metafunc(FuncargnamesCompatAttr):
         It will also override any fixture-function defined scope, allowing
         to set a dynamic scope using test context or configuration.
         """
+        # remove any marks applied to individual tests instances
+        # these marks will be applied in Function init
+        newkeywords = {}
+        strippedargvalues = []
+        for i, argval in enumerate(argvalues):
+            if isinstance(argval, MarkDecorator):
+                # convert into a mark without the test content mixed in
+                newmark = MarkDecorator(argval.markname, argval.args[:-1], argval.kwargs)
+                newkeywords[i] = {newmark.markname: newmark}
+                strippedargvalues.append(argval.args[-1])
+            else:
+                newkeywords[i] = {}
+                strippedargvalues.append(argval)
+        argvalues = strippedargvalues
+
         if not isinstance(argnames, (tuple, list)):
             argnames = (argnames,)
             argvalues = [(val,) for val in argvalues]
@@ -691,7 +710,7 @@ class Metafunc(FuncargnamesCompatAttr):
             assert len(valset) == len(argnames)
             newcallspec = callspec.copy(self)
             newcallspec.setmulti(valtype, argnames, valset, ids[i],
-                scopenum)
+                newkeywords[i], scopenum)
             newcalls.append(newcallspec)
         self._calls = newcalls
 
@@ -908,6 +927,9 @@ class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
 
         for name, val in (py.builtin._getfuncdict(self.obj) or {}).items():
             self.keywords[name] = val
+        if callspec:
+            for name, val in callspec.keywords.items():
+                self.keywords[name] = val
         if keywords:
             for name, val in keywords.items():
                 self.keywords[name] = val
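To make the stripping logic added to ``Metafunc.parametrize`` above easier to follow, here is a minimal, self-contained sketch of the same idea. ``FakeMark`` is a hypothetical stand-in for ``MarkDecorator`` that mimics only the three attributes the diff relies on (``markname``, ``args``, ``kwargs``); it is an illustration, not pytest's own class::

    # A sketch, not pytest code: FakeMark stands in for MarkDecorator and only
    # provides the attributes used by the diff above.
    class FakeMark:
        def __init__(self, markname, args=(), kwargs=None):
            self.markname = markname
            self.args = args
            self.kwargs = kwargs or {}

    # pytest.mark.bar((1, 3)) yields a decorator whose last positional
    # argument is the wrapped value set; model that shape here.
    argvalues = [(1, 2), FakeMark("bar", args=((1, 3),)), (2, 3)]

    newkeywords = {}
    strippedargvalues = []
    for i, argval in enumerate(argvalues):
        if isinstance(argval, FakeMark):
            # keep the mark, minus the wrapped value set ...
            newmark = FakeMark(argval.markname, argval.args[:-1], argval.kwargs)
            newkeywords[i] = {newmark.markname: newmark}
            # ... and unwrap the actual parameter tuple
            strippedargvalues.append(argval.args[-1])
        else:
            newkeywords[i] = {}
            strippedargvalues.append(argval)

    print(strippedargvalues)       # [(1, 2), (1, 3), (2, 3)]
    print(list(newkeywords[1]))    # ['bar']; indexes 0 and 2 map to {}

The per-index ``newkeywords`` mapping is what ``setmulti`` copies into ``CallSpec2.keywords`` and what the ``Function`` change above finally merges into ``item.keywords``.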
@@ -185,6 +185,29 @@ You can also set a module level marker::
 in which case it will be applied to all functions and
 methods defined in the module.
 
+.. _`marking individual tests when using parametrize`:
+
+Marking individual tests when using parametrize
+-----------------------------------------------
+
+When using parametrize, applying a mark will make it apply
+to each individual test.  However, it is also possible to
+apply a marker to an individual test instance::
+
+    import pytest
+
+    @pytest.mark.foo
+    @pytest.mark.parametrize(("n", "expected"), [
+        (1, 2),
+        pytest.mark.bar((1, 3)),
+        (2, 3),
+    ])
+    def test_increment(n, expected):
+        assert n + 1 == expected
+
+In this example the mark "foo" will apply to each of the three
+tests, whereas the "bar" mark is only applied to the second test.
+Skip and xfail marks can also be applied in this way, see :ref:`skip/xfail with parametrize`.
+
 
 .. _`adding a custom marker from a plugin`:
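As a quick way to see which generated items actually carry which marks, you can inspect ``item.keywords`` during collection. The following is only a debugging sketch using the ``pytest_collection_modifyitems`` hook; the printed format is illustrative, not pytest output::

    # content of conftest.py -- debugging aid only, not part of this commit
    def pytest_collection_modifyitems(items):
        for item in items:
            # 'foo' comes from the @pytest.mark.foo decorator on the function,
            # so it is present on all three instances; 'bar' only reaches the
            # instance wrapped with pytest.mark.bar(...) in the parametrize list.
            print(item.nodeid, "foo" in item.keywords, "bar" in item.keywords)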
@@ -82,6 +82,18 @@ And as usual with test function arguments, you can see the ``input`` and ``outpu
 Note that there are various ways to mark a class or a module,
 see :ref:`mark`.
 
+It is also possible to mark individual test instances within parametrize::
+
+    # content of test_expectation.py
+    import pytest
+    @pytest.mark.parametrize(("input", "expected"), [
+        ("3+5", 8),
+        ("2+4", 6),
+        pytest.mark.xfail(("6*9", 42)),
+    ])
+    def test_eval(input, expected):
+        assert eval(input) == expected
+
 
 .. _`pytest_generate_tests`:
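For reference, only the third instance in this example is expected to fail, which is why it is the one wrapped in ``pytest.mark.xfail``; the arithmetic can be checked directly::

    assert eval("3+5") == 8       # first instance passes
    assert eval("2+4") == 6       # second instance passes
    assert eval("6*9") == 54      # 54 != 42, so the xfail-marked instance indeed fails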
@@ -176,6 +176,28 @@ Running it with the report-on-xfail option gives this output::
 
     ======================== 6 xfailed in 0.05 seconds =========================
 
+.. _`skip/xfail with parametrize`:
+
+Skip/xfail with parametrize
+---------------------------
+
+It is possible to apply markers like skip and xfail to individual
+test instances when using parametrize::
+
+    import pytest
+
+    @pytest.mark.parametrize(("n", "expected"), [
+        (1, 2),
+        pytest.mark.xfail((1, 0)),
+        pytest.mark.xfail(reason="some bug")((1, 3)),
+        (2, 3),
+        (3, 4),
+        (4, 5),
+        pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
+    ])
+    def test_increment(n, expected):
+        assert n + 1 == expected
+
+
 Imperative xfail from within a test or setup function
 ------------------------------------------------------
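The ``skipif`` entry in the example above uses a string condition. In this generation of pytest, such strings are evaluated as Python expressions in a namespace that provides modules like ``sys`` and ``os``, so the instance is skipped only when the expression is true for the running interpreter. A rough, hedged illustration of that evaluation, not pytest's actual implementation::

    import sys

    condition = "sys.version_info >= (3,0)"
    namespace = {"sys": sys}           # pytest also exposes os, platform, config, ...
    print(eval(condition, namespace))  # True on Python 3.x, so (10, 11) would be skipped there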
@@ -578,3 +578,186 @@ class TestMetafuncFunctional:
         ])
 
 
+@pytest.mark.issue308
+class TestMarkersWithParametrization:
+    def test_simple_mark(self, testdir):
+        s = """
+            import pytest
+
+            @pytest.mark.foo
+            @pytest.mark.parametrize(("n", "expected"), [
+                (1, 2),
+                pytest.mark.bar((1, 3)),
+                (2, 3),
+            ])
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        items = testdir.getitems(s)
+        assert len(items) == 3
+        for item in items:
+            assert 'foo' in item.keywords
+        assert 'bar' not in items[0].keywords
+        assert 'bar' in items[1].keywords
+        assert 'bar' not in items[2].keywords
+
+    def test_select_based_on_mark(self, testdir):
+        s = """
+            import pytest
+
+            @pytest.mark.parametrize(("n", "expected"), [
+                (1, 2),
+                pytest.mark.foo((2, 3)),
+                (3, 4),
+            ])
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        testdir.makepyfile(s)
+        rec = testdir.inline_run("-m", 'foo')
+        passed, skipped, fail = rec.listoutcomes()
+        assert len(passed) == 1
+        assert len(skipped) == 0
+        assert len(fail) == 0
+
+    @pytest.mark.xfail(reason="is this important to support??")
+    def test_nested_marks(self, testdir):
+        s = """
+            import pytest
+            mastermark = pytest.mark.foo(pytest.mark.bar)
+
+            @pytest.mark.parametrize(("n", "expected"), [
+                (1, 2),
+                mastermark((1, 3)),
+                (2, 3),
+            ])
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        items = testdir.getitems(s)
+        assert len(items) == 3
+        for mark in ['foo', 'bar']:
+            assert mark not in items[0].keywords
+            assert mark in items[1].keywords
+            assert mark not in items[2].keywords
+
+    def test_simple_xfail(self, testdir):
+        s = """
+            import pytest
+
+            @pytest.mark.parametrize(("n", "expected"), [
+                (1, 2),
+                pytest.mark.xfail((1, 3)),
+                (2, 3),
+            ])
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        testdir.makepyfile(s)
+        reprec = testdir.inline_run()
+        # xfail is skip??
+        reprec.assertoutcome(passed=2, skipped=1)
+
+    def test_simple_xfail_single_argname(self, testdir):
+        s = """
+            import pytest
+
+            @pytest.mark.parametrize("n", [
+                2,
+                pytest.mark.xfail(3),
+                4,
+            ])
+            def test_isEven(n):
+                assert n % 2 == 0
+        """
+        testdir.makepyfile(s)
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=2, skipped=1)
+
+    def test_xfail_with_arg(self, testdir):
+        s = """
+            import pytest
+
+            @pytest.mark.parametrize(("n", "expected"), [
+                (1, 2),
+                pytest.mark.xfail("sys.version > 0")((1, 3)),
+                (2, 3),
+            ])
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        testdir.makepyfile(s)
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=2, skipped=1)
+
+    def test_xfail_with_kwarg(self, testdir):
+        s = """
+            import pytest
+
+            @pytest.mark.parametrize(("n", "expected"), [
+                (1, 2),
+                pytest.mark.xfail(reason="some bug")((1, 3)),
+                (2, 3),
+            ])
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        testdir.makepyfile(s)
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=2, skipped=1)
+
+    def test_xfail_with_arg_and_kwarg(self, testdir):
+        s = """
+            import pytest
+
+            @pytest.mark.parametrize(("n", "expected"), [
+                (1, 2),
+                pytest.mark.xfail("sys.version > 0", reason="some bug")((1, 3)),
+                (2, 3),
+            ])
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        testdir.makepyfile(s)
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=2, skipped=1)
+
+    def test_xfail_passing_is_xpass(self, testdir):
+        s = """
+            import pytest
+
+            @pytest.mark.parametrize(("n", "expected"), [
+                (1, 2),
+                pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)),
+                (3, 4),
+            ])
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        testdir.makepyfile(s)
+        reprec = testdir.inline_run()
+        # xpass is fail, obviously :)
+        reprec.assertoutcome(passed=2, failed=1)
+
+    def test_parametrize_called_in_generate_tests(self, testdir):
+        s = """
+            import pytest
+
+
+            def pytest_generate_tests(metafunc):
+                passingTestData = [(1, 2),
+                                   (2, 3)]
+                failingTestData = [(1, 3),
+                                   (2, 2)]
+
+                testData = passingTestData + [pytest.mark.xfail(d)
+                                              for d in failingTestData]
+                metafunc.parametrize(("n", "expected"), testData)
+
+
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        testdir.makepyfile(s)
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=2, skipped=2)