diff --git a/doc/en/example/markers.rst b/doc/en/example/markers.rst
index 7b75c7900..b162c938c 100644
--- a/doc/en/example/markers.rst
+++ b/doc/en/example/markers.rst
@@ -330,11 +330,10 @@ specifies via named environments::
             "env(name): mark test to run only on named environment")
 
     def pytest_runtest_setup(item):
-        envmarker = item.get_marker("env")
-        if envmarker is not None:
-            envname = envmarker.args[0]
-            if envname != item.config.getoption("-E"):
-                pytest.skip("test requires env %r" % envname)
+        envnames = [mark.args[0] for mark in item.iter_markers() if mark.name == "env"]
+        if envnames:
+            if item.config.getoption("-E") not in envnames:
+                pytest.skip("test requires env in %r" % envnames)
 
 A test file using this local plugin::
 
@@ -403,10 +402,9 @@ Below is the config file that will be used in the next examples::
     import sys
 
     def pytest_runtest_setup(item):
-        marker = item.get_marker('my_marker')
-        if marker is not None:
-            for info in marker:
-                print('Marker info name={} args={} kwars={}'.format(info.name, info.args, info.kwargs))
+        for marker in item.iter_markers():
+            if marker.name == 'my_marker':
+                print(marker)
         sys.stdout.flush()
 
 A custom marker can have its argument set, i.e. ``args`` and ``kwargs`` properties, defined by either invoking it as a callable or using ``pytest.mark.MARKER_NAME.with_args``. These two methods achieve the same effect most of the time.
@@ -426,7 +424,7 @@ However, if there is a callable as the single positional argument with no keywor
 The output is as follows::
 
     $ pytest -q -s
-    Marker info name=my_marker args=(,) kwars={}
+    Mark(name='my_marker', args=(,), kwargs={})
     .
     1 passed in 0.12 seconds
 
@@ -460,10 +458,9 @@ test function. From a conftest file we can read it like this::
     import sys
 
    def pytest_runtest_setup(item):
-        g = item.get_marker("glob")
-        if g is not None:
-            for info in g:
-                print ("glob args=%s kwargs=%s" %(info.args, info.kwargs))
+        for mark in item.iter_markers():
+            if mark.name == 'glob':
+                print ("glob args=%s kwargs=%s" %(mark.args, mark.kwargs))
         sys.stdout.flush()
 
 Let's run this without capturing output and see what we get::
@@ -494,11 +491,10 @@ for your particular platform, you could use the following plugin::
     ALL = set("darwin linux win32".split())
 
     def pytest_runtest_setup(item):
-        if isinstance(item, item.Function):
-            plat = sys.platform
-            if not item.get_marker(plat):
-                if ALL.intersection(item.keywords):
-                    pytest.skip("cannot run on platform %s" %(plat))
+        supported_platforms = ALL.intersection(mark.name for mark in item.iter_markers())
+        plat = sys.platform
+        if supported_platforms and plat not in supported_platforms:
+            pytest.skip("cannot run on platform %s" % (plat))
 
 then tests will be skipped if they were specified for a different platform.
 Let's do a little test file to show how this looks like::
@@ -532,7 +528,7 @@ then you will see two tests skipped and two executed tests as expected::
 
     test_plat.py s.s.                                                    [100%]
     ========================= short test summary info ==========================
-    SKIP [2] $REGENDOC_TMPDIR/conftest.py:13: cannot run on platform linux
+    SKIP [2] $REGENDOC_TMPDIR/conftest.py:12: cannot run on platform linux
 
     =================== 2 passed, 2 skipped in 0.12 seconds ====================
diff --git a/doc/en/example/simple.rst b/doc/en/example/simple.rst
index 25d1225b5..3dc942018 100644
--- a/doc/en/example/simple.rst
+++ b/doc/en/example/simple.rst
@@ -389,7 +389,7 @@ Now we can profile which test functions execute the slowest::
     ========================= slowest 3 test durations =========================
     0.30s call     test_some_are_slow.py::test_funcslow2
     0.20s call     test_some_are_slow.py::test_funcslow1
-    0.16s call     test_some_are_slow.py::test_funcfast
+    0.10s call     test_some_are_slow.py::test_funcfast
     ========================= 3 passed in 0.12 seconds =========================
 
 incremental testing - test steps
diff --git a/doc/en/usage.rst b/doc/en/usage.rst
index 7274dccc9..9b6db82c5 100644
--- a/doc/en/usage.rst
+++ b/doc/en/usage.rst
@@ -260,10 +260,10 @@ Alternatively, you can integrate this functionality with custom markers:
 
     def pytest_collection_modifyitems(session, config, items):
         for item in items:
-            marker = item.get_marker('test_id')
-            if marker is not None:
-                test_id = marker.args[0]
-                item.user_properties.append(('test_id', test_id))
+            for marker in item.iter_markers():
+                if marker.name == 'test_id':
+                    test_id = marker.args[0]
+                    item.user_properties.append(('test_id', test_id))
 
 And in your tests:
 
diff --git a/doc/en/warnings.rst b/doc/en/warnings.rst
index e78a6afc0..f7b67f5f2 100644
--- a/doc/en/warnings.rst
+++ b/doc/en/warnings.rst
@@ -25,14 +25,14 @@ Running pytest now produces this output::
 
     platform linux -- Python 3.x.y, pytest-3.x.y, py-1.x.y, pluggy-0.x.y
     rootdir: $REGENDOC_TMPDIR, inifile:
     collected 1 item
-    
+
     test_show_warnings.py .                                              [100%]
-    
+
     ============================= warnings summary =============================
     test_show_warnings.py::test_one
       $REGENDOC_TMPDIR/test_show_warnings.py:4: UserWarning: api v1, should use functions from v2
         warnings.warn(UserWarning("api v1, should use functions from v2"))
-    
+
     -- Docs: http://doc.pytest.org/en/latest/warnings.html
     =================== 1 passed, 1 warnings in 0.12 seconds ===================
@@ -45,17 +45,17 @@ them into errors::
 
     F                                                                    [100%]
     ================================= FAILURES =================================
     _________________________________ test_one _________________________________
-    
+
         def test_one():
     >       assert api_v1() == 1
-    
-    test_show_warnings.py:8: 
-    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ 
-    
+
+    test_show_warnings.py:8:
+    _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _
+
         def api_v1():
     >       warnings.warn(UserWarning("api v1, should use functions from v2"))
     E       UserWarning: api v1, should use functions from v2
-    
+
     test_show_warnings.py:4: UserWarning
     1 failed in 0.12 seconds
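
Reviewer note: every hunk above migrates the docs from ``Node.get_marker()`` to ``Node.iter_markers()``. For reference, the pattern can be exercised standalone with a conftest like the following minimal sketch. It is not part of this patch; it assumes pytest >= 3.6, where ``iter_markers()`` is available, and reuses the ``-E`` option and ``env`` marker from the markers.rst example::

    # content of conftest.py -- minimal sketch, not part of this patch
    import pytest

    def pytest_addoption(parser):
        parser.addoption("-E", action="store", metavar="NAME",
            help="only run tests matching the environment NAME.")

    def pytest_runtest_setup(item):
        # iter_markers() yields marks applied to the item and, unlike the
        # old get_marker(), also those inherited from its parents (class,
        # module), so a test can carry several "env" marks at once.
        envnames = [mark.args[0] for mark in item.iter_markers() if mark.name == "env"]
        if envnames and item.config.getoption("-E") not in envnames:
            pytest.skip("test requires env in %r" % envnames)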