From 5373a630086c2443adc8e3087305853194e7d88f Mon Sep 17 00:00:00 2001 From: Brianna Laugher Date: Fri, 17 May 2013 18:46:36 +1000 Subject: [PATCH 1/3] issue #308 first attempt, mark individual parametrize test instances with other marks (like xfail) --- _pytest/python.py | 23 ++++- testing/python/metafunc.py | 171 +++++++++++++++++++++++++++++++++++++ 2 files changed, 192 insertions(+), 2 deletions(-) diff --git a/_pytest/python.py b/_pytest/python.py index 529e0d688..e55fe2148 100644 --- a/_pytest/python.py +++ b/_pytest/python.py @@ -4,6 +4,7 @@ import inspect import sys import pytest from _pytest.main import getfslineno +from _pytest.mark import MarkDecorator, MarkInfo from _pytest.monkeypatch import monkeypatch from py._code.code import TerminalRepr @@ -565,11 +566,13 @@ class CallSpec2(object): self._globalid_args = set() self._globalparam = _notexists self._arg2scopenum = {} # used for sorting parametrized resources + self.keywords = {} def copy(self, metafunc): cs = CallSpec2(self.metafunc) cs.funcargs.update(self.funcargs) cs.params.update(self.params) + cs.keywords.update(self.keywords) cs._arg2scopenum.update(self._arg2scopenum) cs._idlist = list(self._idlist) cs._globalid = self._globalid @@ -593,7 +596,7 @@ class CallSpec2(object): def id(self): return "-".join(map(str, filter(None, self._idlist))) - def setmulti(self, valtype, argnames, valset, id, scopenum=0): + def setmulti(self, valtype, argnames, valset, id, keywords, scopenum=0): for arg,val in zip(argnames, valset): self._checkargnotcontained(arg) getattr(self, valtype)[arg] = val @@ -605,6 +608,7 @@ class CallSpec2(object): if val is _notexists: self._emptyparamspecified = True self._idlist.append(id) + self.keywords.update(keywords) def setall(self, funcargs, id, param): for x in funcargs: @@ -673,6 +677,18 @@ class Metafunc(FuncargnamesCompatAttr): if not argvalues: argvalues = [(_notexists,) * len(argnames)] + # these marks/keywords will be applied in Function init + newkeywords = {} + for i, argval in enumerate(argvalues): + newkeywords[i] = {} + if isinstance(argval, MarkDecorator): + # convert into a mark without the test content mixed in + newmark = MarkDecorator(argval.markname, argval.args[:-1], argval.kwargs) + newkeywords[i] = {newmark.markname: newmark} + + argvalues = [av.args[-1] if isinstance(av, MarkDecorator) else av + for av in argvalues] + if scope is None: scope = "subfunction" scopenum = scopes.index(scope) @@ -691,7 +707,7 @@ class Metafunc(FuncargnamesCompatAttr): assert len(valset) == len(argnames) newcallspec = callspec.copy(self) newcallspec.setmulti(valtype, argnames, valset, ids[i], - scopenum) + newkeywords[i], scopenum) newcalls.append(newcallspec) self._calls = newcalls @@ -908,6 +924,9 @@ class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr): for name, val in (py.builtin._getfuncdict(self.obj) or {}).items(): self.keywords[name] = val + if callspec: + for name, val in callspec.keywords.items(): + self.keywords[name] = val if keywords: for name, val in keywords.items(): self.keywords[name] = val diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 60247212f..16f2da493 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -577,4 +577,175 @@ class TestMetafuncFunctional: "*3 passed*" ]) + @pytest.mark.issue308 + def test_mark_on_individual_parametrize_instance(self, testdir): + s = """ + import pytest + + @pytest.mark.foo + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + pytest.mark.bar((1, 3)), + (2, 3), + ]) + def 
test_increment(input, expected): + assert input + 1 == expected + """ + items = testdir.getitems(s) + assert len(items) == 3 + for item in items: + assert 'foo' in item.keywords + assert 'bar' not in items[0].keywords + assert 'bar' in items[1].keywords + assert 'bar' not in items[2].keywords + + @pytest.mark.issue308 + def test_select_individual_parametrize_instance_based_on_mark(self, testdir): + s = """ + import pytest + + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + pytest.mark.foo((2, 3)), + (3, 4), + ]) + def test_increment(input, expected): + assert input + 1 == expected + """ + testdir.makepyfile(s) + rec = testdir.inline_run("-m", 'foo') + passed, skipped, fail = rec.listoutcomes() + assert len(passed) == 1 + assert len(skipped) == 0 + assert len(fail) == 0 + + @pytest.mark.xfail("is this important to support??") + @pytest.mark.issue308 + def test_nested_marks_on_individual_parametrize_instance(self, testdir): + s = """ + import pytest + + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + pytest.mark.foo(pytest.mark.bar((1, 3))), + (2, 3), + ]) + def test_increment(input, expected): + assert input + 1 == expected + """ + items = testdir.getitems(s) + assert len(items) == 3 + for mark in ['foo', 'bar']: + assert mark not in items[0].keywords + assert mark in items[1].keywords + assert mark not in items[2].keywords + + @pytest.mark.xfail(reason="is this important to support??") + @pytest.mark.issue308 + def test_nested_marks_on_individual_parametrize_instance(self, testdir): + s = """ + import pytest + mastermark = pytest.mark.foo(pytest.mark.bar) + + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + mastermark((1, 3)), + (2, 3), + ]) + def test_increment(input, expected): + assert input + 1 == expected + """ + items = testdir.getitems(s) + assert len(items) == 3 + for mark in ['foo', 'bar']: + assert mark not in items[0].keywords + assert mark in items[1].keywords + assert mark not in items[2].keywords + + @pytest.mark.issue308 + def test_simple_xfail_on_individual_parametrize_instance(self, testdir): + s = """ + import pytest + + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + pytest.mark.xfail((1, 3)), + (2, 3), + ]) + def test_increment(input, expected): + assert input + 1 == expected + """ + testdir.makepyfile(s) + reprec = testdir.inline_run() + # xfail is skip?? 
+ reprec.assertoutcome(passed=2, skipped=1) + + @pytest.mark.issue308 + def test_xfail_with_arg_on_individual_parametrize_instance(self, testdir): + s = """ + import pytest + + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + pytest.mark.xfail("sys.version > 0")((1, 3)), + (2, 3), + ]) + def test_increment(input, expected): + assert input + 1 == expected + """ + testdir.makepyfile(s) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=2, skipped=1) + + @pytest.mark.issue308 + def test_xfail_with_kwarg_on_individual_parametrize_instance(self, testdir): + s = """ + import pytest + + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + pytest.mark.xfail(reason="some bug")((1, 3)), + (2, 3), + ]) + def test_increment(input, expected): + assert input + 1 == expected + """ + testdir.makepyfile(s) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=2, skipped=1) + + @pytest.mark.issue308 + def test_xfail_with_arg_and_kwarg_on_individual_parametrize_instance(self, testdir): + s = """ + import pytest + + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + pytest.mark.xfail("sys.version > 0", reason="some bug")((1, 3)), + (2, 3), + ]) + def test_increment(input, expected): + assert input + 1 == expected + """ + testdir.makepyfile(s) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=2, skipped=1) + + @pytest.mark.issue308 + def test_xfail_is_xpass_on_individual_parametrize_instance(self, testdir): + s = """ + import pytest + + @pytest.mark.parametrize(("input", "expected"), [ + (1, 2), + pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)), + (3, 4), + ]) + def test_increment(input, expected): + assert input + 1 == expected + """ + testdir.makepyfile(s) + reprec = testdir.inline_run() + # xpass is fail, obviously :) + reprec.assertoutcome(passed=2, failed=1) From ee65ca10f479c015030c93f00f83ba2bb4c6b486 Mon Sep 17 00:00:00 2001 From: Brianna Laugher Date: Mon, 20 May 2013 12:52:20 +1000 Subject: [PATCH 2/3] issue #308 address some comments by @hpk42 on 0b9d82e : - move tests into their own class, rename - add test showing metafunc.parametrize called in pytest_generate_tests rather than as decorator - add test and fix single-argname case - convert two loops into one in parametrize() also - renamed 'input' to 'n', since 'input' is a built-in --- _pytest/python.py | 27 ++++---- testing/python/metafunc.py | 134 ++++++++++++++++++++----------------- 2 files changed, 88 insertions(+), 73 deletions(-) diff --git a/_pytest/python.py b/_pytest/python.py index e55fe2148..eab5ce8e6 100644 --- a/_pytest/python.py +++ b/_pytest/python.py @@ -671,24 +671,27 @@ class Metafunc(FuncargnamesCompatAttr): It will also override any fixture-function defined scope, allowing to set a dynamic scope using test context or configuration. 
""" + # remove any marks applied to individual tests instances + # these marks will be applied in Function init + newkeywords = {} + strippedargvalues = [] + for i, argval in enumerate(argvalues): + if isinstance(argval, MarkDecorator): + # convert into a mark without the test content mixed in + newmark = MarkDecorator(argval.markname, argval.args[:-1], argval.kwargs) + newkeywords[i] = {newmark.markname: newmark} + strippedargvalues.append(argval.args[-1]) + else: + newkeywords[i] = {} + strippedargvalues.append(argval) + argvalues = strippedargvalues + if not isinstance(argnames, (tuple, list)): argnames = (argnames,) argvalues = [(val,) for val in argvalues] if not argvalues: argvalues = [(_notexists,) * len(argnames)] - # these marks/keywords will be applied in Function init - newkeywords = {} - for i, argval in enumerate(argvalues): - newkeywords[i] = {} - if isinstance(argval, MarkDecorator): - # convert into a mark without the test content mixed in - newmark = MarkDecorator(argval.markname, argval.args[:-1], argval.kwargs) - newkeywords[i] = {newmark.markname: newmark} - - argvalues = [av.args[-1] if isinstance(av, MarkDecorator) else av - for av in argvalues] - if scope is None: scope = "subfunction" scopenum = scopes.index(scope) diff --git a/testing/python/metafunc.py b/testing/python/metafunc.py index 16f2da493..786446637 100644 --- a/testing/python/metafunc.py +++ b/testing/python/metafunc.py @@ -577,19 +577,21 @@ class TestMetafuncFunctional: "*3 passed*" ]) - @pytest.mark.issue308 - def test_mark_on_individual_parametrize_instance(self, testdir): + +@pytest.mark.issue308 +class TestMarkersWithParametrization: + def test_simple_mark(self, testdir): s = """ import pytest @pytest.mark.foo - @pytest.mark.parametrize(("input", "expected"), [ + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.bar((1, 3)), (2, 3), ]) - def test_increment(input, expected): - assert input + 1 == expected + def test_increment(n, expected): + assert n + 1 == expected """ items = testdir.getitems(s) assert len(items) == 3 @@ -599,18 +601,17 @@ class TestMetafuncFunctional: assert 'bar' in items[1].keywords assert 'bar' not in items[2].keywords - @pytest.mark.issue308 - def test_select_individual_parametrize_instance_based_on_mark(self, testdir): + def test_select_based_on_mark(self, testdir): s = """ import pytest - @pytest.mark.parametrize(("input", "expected"), [ + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.foo((2, 3)), (3, 4), ]) - def test_increment(input, expected): - assert input + 1 == expected + def test_increment(n, expected): + assert n + 1 == expected """ testdir.makepyfile(s) rec = testdir.inline_run("-m", 'foo') @@ -619,41 +620,19 @@ class TestMetafuncFunctional: assert len(skipped) == 0 assert len(fail) == 0 - @pytest.mark.xfail("is this important to support??") - @pytest.mark.issue308 - def test_nested_marks_on_individual_parametrize_instance(self, testdir): - s = """ - import pytest - - @pytest.mark.parametrize(("input", "expected"), [ - (1, 2), - pytest.mark.foo(pytest.mark.bar((1, 3))), - (2, 3), - ]) - def test_increment(input, expected): - assert input + 1 == expected - """ - items = testdir.getitems(s) - assert len(items) == 3 - for mark in ['foo', 'bar']: - assert mark not in items[0].keywords - assert mark in items[1].keywords - assert mark not in items[2].keywords - @pytest.mark.xfail(reason="is this important to support??") - @pytest.mark.issue308 - def test_nested_marks_on_individual_parametrize_instance(self, testdir): + def 
test_nested_marks(self, testdir): s = """ import pytest mastermark = pytest.mark.foo(pytest.mark.bar) - @pytest.mark.parametrize(("input", "expected"), [ + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), mastermark((1, 3)), (2, 3), ]) - def test_increment(input, expected): - assert input + 1 == expected + def test_increment(n, expected): + assert n + 1 == expected """ items = testdir.getitems(s) assert len(items) == 3 @@ -662,90 +641,123 @@ class TestMetafuncFunctional: assert mark in items[1].keywords assert mark not in items[2].keywords - @pytest.mark.issue308 - def test_simple_xfail_on_individual_parametrize_instance(self, testdir): + def test_simple_xfail(self, testdir): s = """ import pytest - @pytest.mark.parametrize(("input", "expected"), [ + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail((1, 3)), (2, 3), ]) - def test_increment(input, expected): - assert input + 1 == expected + def test_increment(n, expected): + assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() # xfail is skip?? reprec.assertoutcome(passed=2, skipped=1) - @pytest.mark.issue308 - def test_xfail_with_arg_on_individual_parametrize_instance(self, testdir): + def test_simple_xfail_single_argname(self, testdir): s = """ import pytest - @pytest.mark.parametrize(("input", "expected"), [ + @pytest.mark.parametrize("n", [ + 2, + pytest.mark.xfail(3), + 4, + ]) + def test_isEven(n): + assert n % 2 == 0 + """ + testdir.makepyfile(s) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=2, skipped=1) + + def test_xfail_with_arg(self, testdir): + s = """ + import pytest + + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail("sys.version > 0")((1, 3)), (2, 3), ]) - def test_increment(input, expected): - assert input + 1 == expected + def test_increment(n, expected): + assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=1) - @pytest.mark.issue308 - def test_xfail_with_kwarg_on_individual_parametrize_instance(self, testdir): + def test_xfail_with_kwarg(self, testdir): s = """ import pytest - @pytest.mark.parametrize(("input", "expected"), [ + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail(reason="some bug")((1, 3)), (2, 3), ]) - def test_increment(input, expected): - assert input + 1 == expected + def test_increment(n, expected): + assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=1) - @pytest.mark.issue308 - def test_xfail_with_arg_and_kwarg_on_individual_parametrize_instance(self, testdir): + def test_xfail_with_arg_and_kwarg(self, testdir): s = """ import pytest - @pytest.mark.parametrize(("input", "expected"), [ + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail("sys.version > 0", reason="some bug")((1, 3)), (2, 3), ]) - def test_increment(input, expected): - assert input + 1 == expected + def test_increment(n, expected): + assert n + 1 == expected """ testdir.makepyfile(s) reprec = testdir.inline_run() reprec.assertoutcome(passed=2, skipped=1) - @pytest.mark.issue308 - def test_xfail_is_xpass_on_individual_parametrize_instance(self, testdir): + def test_xfail_passing_is_xpass(self, testdir): s = """ import pytest - @pytest.mark.parametrize(("input", "expected"), [ + @pytest.mark.parametrize(("n", "expected"), [ (1, 2), pytest.mark.xfail("sys.version > 0", reason="some bug")((2, 3)), (3, 4), ]) - def test_increment(input, expected): - assert input + 1 == 
expected
+            def test_increment(n, expected):
+                assert n + 1 == expected
         """
         testdir.makepyfile(s)
         reprec = testdir.inline_run()
         # xpass is fail, obviously :)
         reprec.assertoutcome(passed=2, failed=1)
+
+    def test_parametrize_called_in_generate_tests(self, testdir):
+        s = """
+            import pytest
+
+
+            def pytest_generate_tests(metafunc):
+                passingTestData = [(1, 2),
+                                   (2, 3)]
+                failingTestData = [(1, 3),
+                                   (2, 2)]
+
+                testData = passingTestData + [pytest.mark.xfail(d)
+                                              for d in failingTestData]
+                metafunc.parametrize(("n", "expected"), testData)
+
+
+            def test_increment(n, expected):
+                assert n + 1 == expected
+        """
+        testdir.makepyfile(s)
+        reprec = testdir.inline_run()
+        reprec.assertoutcome(passed=2, skipped=2)

From d8bc40271a19c0a1296fd3987094463da1f37405 Mon Sep 17 00:00:00 2001
From: Brianna Laugher
Date: Tue, 21 May 2013 11:12:45 +1000
Subject: [PATCH 3/3] issue #308 + docs

---
 doc/en/example/markers.txt | 23 +++++++++++++++++++++++
 doc/en/parametrize.txt     | 12 ++++++++++++
 doc/en/skipping.txt        | 22 ++++++++++++++++++++++
 3 files changed, 57 insertions(+)

diff --git a/doc/en/example/markers.txt b/doc/en/example/markers.txt
index 309669350..afc6aa3b5 100644
--- a/doc/en/example/markers.txt
+++ b/doc/en/example/markers.txt
@@ -185,6 +185,29 @@ You can also set a module level marker::
 in which case it will be applied to all functions and
 methods defined in the module.
 
+.. _`marking individual tests when using parametrize`:
+
+Marking individual tests when using parametrize
+-----------------------------------------------
+
+When using parametrize, applying a mark to the test function
+makes it apply to each individual test. However, it is also
+possible to apply a marker to an individual test instance::
+
+    import pytest
+
+    @pytest.mark.foo
+    @pytest.mark.parametrize(("n", "expected"), [
+        (1, 2),
+        pytest.mark.bar((1, 3)),
+        (2, 3),
+    ])
+    def test_increment(n, expected):
+        assert n + 1 == expected
+
+In this example the mark "foo" applies to each of the three
+tests, whereas the "bar" mark applies only to the second test.
+Skip and xfail marks can also be applied in this way; see :ref:`skip/xfail with parametrize`.
 
 .. _`adding a custom marker from a plugin`:
 
diff --git a/doc/en/parametrize.txt b/doc/en/parametrize.txt
index 6cc59ffda..779cef60e 100644
--- a/doc/en/parametrize.txt
+++ b/doc/en/parametrize.txt
@@ -82,6 +82,18 @@ And as usual with test function arguments, you can see the ``input`` and ``outpu
 Note that there are various ways to mark a class or a module,
 see :ref:`mark`.
 
+It is also possible to mark individual test instances within parametrize::
+
+    # content of test_expectation.py
+    import pytest
+    @pytest.mark.parametrize(("input", "expected"), [
+        ("3+5", 8),
+        ("2+4", 6),
+        pytest.mark.xfail(("6*9", 42)),
+    ])
+    def test_eval(input, expected):
+        assert eval(input) == expected
+
 .. _`pytest_generate_tests`:
 
diff --git a/doc/en/skipping.txt b/doc/en/skipping.txt
index c2d738667..1c45d4219 100644
--- a/doc/en/skipping.txt
+++ b/doc/en/skipping.txt
@@ -176,6 +176,28 @@ Running it with the report-on-xfail option gives this output::
 
 ======================== 6 xfailed in 0.05 seconds =========================
 
+.. _`skip/xfail with parametrize`:
+
+Skip/xfail with parametrize
+---------------------------
+
+It is possible to apply markers like skip and xfail to individual
+test instances when using parametrize::
+
+    import pytest
+
+    @pytest.mark.parametrize(("n", "expected"), [
+        (1, 2),
+        pytest.mark.xfail((1, 0)),
+        pytest.mark.xfail(reason="some bug")((1, 3)),
+        (2, 3),
+        (3, 4),
+        (4, 5),
+        pytest.mark.skipif("sys.version_info >= (3,0)")((10, 11)),
+    ])
+    def test_increment(n, expected):
+        assert n + 1 == expected
+
 Imperative xfail from within a test or setup function
 ------------------------------------------------------
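
To see the end-to-end behaviour this series adds, consider a small test
module. This is an illustrative sketch only, not part of the patches: the
file name and values are invented, and it assumes a pytest tree with the
three patches above applied::

    # content of test_param_marks.py (hypothetical example module)
    import pytest

    @pytest.mark.parametrize(("n", "expected"), [
        (1, 2),                                        # plain instance, passes
        pytest.mark.xfail(reason="some bug")((1, 3)),  # expected to fail
        (2, 3),                                        # plain instance, passes
    ])
    def test_increment(n, expected):
        assert n + 1 == expected

Running ``py.test`` on this module should report two passed and one xfailed
test, matching the ``reprec.assertoutcome(passed=2, skipped=1)`` expectations
in the test suite above (``assertoutcome`` counts xfailed outcomes under
``skipped``).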
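
For readers tracing the implementation, the mark-stripping loop that patch 2
adds to ``Metafunc.parametrize`` can be exercised in isolation. The following
standalone sketch substitutes a minimal stand-in class for pytest's real
``MarkDecorator``; the class name and values are illustrative only::

    # Stand-in for _pytest.mark.MarkDecorator, for illustration only.
    class StubMark:
        def __init__(self, markname, args=(), kwargs=None):
            self.markname = markname
            self.args = args
            self.kwargs = kwargs or {}

    def strip_marks(argvalues):
        # Mirror of the patch-2 loop: split each marked entry into its
        # bare argument tuple plus a per-instance keywords dict.
        newkeywords = {}
        stripped = []
        for i, argval in enumerate(argvalues):
            if isinstance(argval, StubMark):
                # The wrapped value tuple is the mark's last positional arg.
                newmark = StubMark(argval.markname,
                                   argval.args[:-1], argval.kwargs)
                newkeywords[i] = {newmark.markname: newmark}
                stripped.append(argval.args[-1])
            else:
                newkeywords[i] = {}
                stripped.append(argval)
        return stripped, newkeywords

    values, keywords = strip_marks([(1, 2),
                                    StubMark("xfail", ((1, 3),)),
                                    (2, 3)])
    assert values == [(1, 2), (1, 3), (2, 3)]
    assert "xfail" in keywords[1]            # mark kept for the second instance
    assert keywords[1]["xfail"].args == ()   # value tuple stripped from the mark
    assert keywords[0] == {} == keywords[2]

The per-instance ``newkeywords`` dict is what ``CallSpec2.setmulti`` stores
and what ``Function`` init later copies into ``self.keywords``, which is how
the mark becomes visible both to ``-m`` selection and to the xfail machinery.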