merge the pytest-cache plugin into core

Ronny Pfannschmidt 2015-07-11 10:13:27 +02:00
parent c7888d1d97
commit e20216a1a8
8 changed files with 797 additions and 28 deletions

1
.gitignore vendored

@@ -28,6 +28,7 @@ dist/
*.egg-info
issue/
env/
.env/
3rdparty/
.tox
.cache


@@ -42,6 +42,7 @@
- fix issue82: avoid loading conftest files from setup.cfg/pytest.ini/tox.ini
files and upwards by default (--confcutdir can still be set to override this).
Thanks Bruno Oliveira for the PR.
- merge the pytest-cache extension into core
- fix issue768: docstrings found in python modules were not setting up session
fixtures. Thanks Jason R. Coombs for reporting and Bruno Oliveira for the PR.

214
_pytest/cacheprovider.py Executable file

@@ -0,0 +1,214 @@
"""
merged implementation of the cache provider
the name "cache" was deliberately not chosen, to ensure pluggy
automatically ignores the external pytest-cache plugin
"""
import py
import pytest
import json
from os.path import sep as _sep, altsep as _altsep
class Cache:
def __init__(self, config):
self.config = config
self._cachedir = config.rootdir.join(".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("clearcache"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
self._cachedir.mkdir()
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files likes e. g. store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
        if _sep in name or (_altsep is not None and _altsep in name):
raise ValueError("name is not allowed to contain path separators")
return self._cachedir.ensure_dir("d", name)
def _getvaluepath(self, key):
return self._cachedir.join('v', *key.split('/'))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
if path.check():
try:
with path.open("r") as f:
return json.load(f)
except ValueError:
self.trace("cache-invalid at %s" % (path,))
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
        :param value: must be any combination of basic
               python types, including nested types
               like lists of dictionaries.
"""
path = self._getvaluepath(key)
path.dirpath().ensure_dir()
with path.open("w") as f:
self.trace("cache-write %s: %r" % (key, value,))
json.dump(value, f, indent=2, sort_keys=True)
class LFPlugin:
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
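        # active only when --lf or --ff was given; only then is the
        # previously recorded last-failed state loaded from the cache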
active_keys = 'lf', 'failedfirst'
self.active = any(config.getvalue(key) for key in active_keys)
if self.active:
self.lastfailed = config.cache.get("cache/lastfailed", {})
else:
self.lastfailed = {}
def pytest_report_header(self):
if self.active:
if not self.lastfailed:
mode = "run all (no recorded failures)"
else:
mode = "rerun last %d failures%s" % (
len(self.lastfailed),
" first" if self.config.getvalue("failedfirst") else "")
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
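        # record genuine (non-xfail) failures; a passing "call" phase
        # removes the test from the recorded failures again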
if report.failed and "xfail" not in report.keywords:
self.lastfailed[report.nodeid] = True
elif not report.failed:
if report.when == "call":
self.lastfailed.pop(report.nodeid, None)
def pytest_collectreport(self, report):
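        # a failing collector (e.g. a module with an import error) is
        # recorded under its own nodeid; once it collects successfully
        # again, its entry is replaced by the freshly collected children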
passed = report.outcome in ('passed', 'skipped')
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update(
(item.nodeid, True)
for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active and self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
if not previously_failed and previously_passed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
pass
elif self.config.getvalue("failedfirst"):
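                # --ff: keep all tests but move the known failures up front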
items[:] = previously_failed + previously_passed
else:
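                # --lf: run only the known failures, deselect the rest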
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
def pytest_sessionfinish(self, session):
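        # skip saving the last-failed state when merely showing the cache
        # or when running as a distributed (xdist) slave node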
config = self.config
if config.getvalue("showcache") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
'--lf', action='store_true', dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)")
group.addoption(
'--ff', action='store_true', dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
group.addoption(
'--cache', action='store_true', dest="showcache",
help="show cache contents, don't perform collection or tests")
group.addoption(
'--clearcache', action='store_true', dest="clearcache",
help="remove all cache contents at start of test run.")
def pytest_cmdline_main(config):
if config.option.showcache:
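        # --cache short-circuits the normal test run: wrap_session sets up
        # a session around showcache and returns its exit status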
from _pytest.main import wrap_session
return wrap_session(config, showcache)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
def pytest_report_header(config):
if config.option.verbose:
relpath = py.path.local().bestrelpath(config.cache._cachedir)
return "cachedir: %s" % relpath
def showcache(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.check():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir.join("v")
tw.sep("-", "cache values")
for valpath in vdir.visit(lambda x: x.isfile()):
key = valpath.relto(vdir).replace(valpath.sep, "/")
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, "
"will be ignored" % key)
else:
tw.line("%s contains:" % key)
stream = py.io.TextIO()
pprint(val, stream=stream)
for line in stream.getvalue().splitlines():
tw.line(" " + line)
ddir = basedir.join("d")
if ddir.isdir() and ddir.listdir():
tw.sep("-", "cache directories")
        for p in ddir.visit():
            if p.isfile():
key = p.relto(basedir)
tw.line("%s is a file of length %d" % (
key, p.size()))
return 0


@@ -64,7 +64,7 @@ _preinit = []
default_plugins = (
"mark main terminal runner python pdb unittest capture skipping "
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
"junitxml resultlog doctest").split()
"junitxml resultlog doctest cacheprovider").split()
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")


@@ -1,27 +0,0 @@
.. _apiref:
pytest reference documentation
================================================
.. toctree::
:maxdepth: 2
builtin
customize
assert
fixture
yieldfixture
parametrize
xunit_setup
capture
monkeypatch
xdist
tmpdir
mark
skipping
recwarn
unittest
nose
doctest

249
doc/en/cache.txt Normal file

@@ -0,0 +1,249 @@
cache: working with cross-testrun state
=======================================
Usage
---------
Plugins can access the `config.cache`_ object,
which helps share values between ``py.test`` invocations.
The plugin provides two options to rerun failures, namely:
* ``--lf`` (last failures) - to only re-run the failures.
* ``--ff`` (failures first) - to run the failures first and then the rest of
the tests.
For cleanup (usually not needed), a ``--clearcache`` option allows removing
all cross-session cache contents ahead of a test run.
Rerunning only failures or failures first
-----------------------------------------------
First, let's create 50 test invocations, of which only 2 fail::
# content of test_50.py
import pytest
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
pytest.fail("bad luck")
If you run this for the first time, you will see two failures::
$ py.test -q
.................F.......F........................
=================================== FAILURES ===================================
_________________________________ test_num[17] _________________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_________________________________ test_num[25] _________________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
If you then run it with ``--lf`` you will run only the two failing tests
from the last run::
$ py.test --lf
============================= test session starts ==============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.5
run-last-failure: rerun last 2 failures
plugins: cache
collected 50 items
test_50.py FF
=================================== FAILURES ===================================
_________________________________ test_num[17] _________________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_________________________________ test_num[25] _________________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
=================== 2 failed, 48 deselected in 0.02 seconds ====================
The last line indicates that 48 tests have not been run.
If you run with the ``--ff`` option, all tests will be run but the
previous failures will be executed first (as can be seen from the series
of ``FF`` and dots)::
$ py.test --ff
============================= test session starts ==============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.5
run-last-failure: rerun last 2 failures first
plugins: cache
collected 50 items
test_50.py FF................................................
=================================== FAILURES ===================================
_________________________________ test_num[17] _________________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_________________________________ test_num[25] _________________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
===================== 2 failed, 48 passed in 0.07 seconds ======================
.. _`config.cache`:
The new config.cache object
--------------------------------
.. regendoc:wipe
Plugins or conftest.py support code can get a cached value
using the pytest ``config`` object. Here is a basic example
plugin implementing a `funcarg <http://pytest.org/latest/funcargs.html>`_
that re-uses previously created state across py.test invocations::
# content of test_caching.py
import time
def pytest_funcarg__mydata(request):
val = request.config.cache.get("example/value", None)
if val is None:
time.sleep(9*0.6) # expensive computation :)
val = 42
request.config.cache.set("example/value", val)
return val
def test_function(mydata):
assert mydata == 23
If you run this command once, it will take a while because
of the sleep::
$ py.test -q
F
=================================== FAILURES ===================================
________________________________ test_function _________________________________
mydata = 42
def test_function(mydata):
> assert mydata == 23
E assert 42 == 23
test_caching.py:12: AssertionError
If you run it a second time, the value will be retrieved from
the cache and the run will be quick::
$ py.test -q
F
=================================== FAILURES ===================================
________________________________ test_function _________________________________
mydata = 42
def test_function(mydata):
> assert mydata == 23
E assert 42 == 23
test_caching.py:12: AssertionError
Consult the `pytest-cache API <http://packages.python.org/pytest-cache/api.html>`_
for more details.
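Besides plain values, the cache can hand out whole directories via
``config.cache.makedir``, which returns a directory path object that is
created on first use and survives across test runs. A minimal,
illustrative sketch (the ``myplugin-db`` name and ``dump.db`` file are
made up for this example)::

    # content of conftest.py
    def pytest_configure(config):
        # the directory lives below .cache/d/ and is re-used by
        # subsequent py.test invocations
        dbdir = config.cache.makedir("myplugin-db")
        dbfile = dbdir.ensure("dump.db")  # create the file if missing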
Inspecting Cache content
-------------------------------
You can always peek at the content of the cache using the
``--cache`` command line option::
$ py.test --cache
============================= test session starts ==============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.5
plugins: cache
cachedir: /tmp/doc-exec-6/.cache
--------------------------------- cache values ---------------------------------
example/value contains:
42
cache/lastfailed contains:
{'test_caching.py::test_function': True}
=============================== in 0.01 seconds ===============================
Clearing Cache content
-------------------------------
You can instruct pytest to clear all cache files and values
by adding the ``--clearcache`` option like this::
py.test --clearcache
This is recommended for invocations from Continuous Integration
servers where isolation and correctness are more important
than speed.
config.cache API
========================================
The ``config.cache`` object allows other plugins,
including ``conftest.py`` files,
to safely and flexibly store and retrieve values across
test runs because the ``config`` object is available
in many places.
Under the hood, the cache plugin uses the simple
``dumps``/``loads`` API of the ``json`` stdlib module.
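Values therefore have to be JSON-serializable; anything else is
rejected with a ``TypeError``. A short illustrative sketch with
hypothetical keys::

    # nested combinations of basic types round-trip fine
    config.cache.set("example/numbers", {"a": [1, 2, 3]})
    assert config.cache.get("example/numbers", None) == {"a": [1, 2, 3]}

    # non-JSON types such as sets are rejected by json.dump
    config.cache.set("example/ids", set([1, 2, 3]))  # raises TypeError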
.. currentmodule:: _pytest.cacheprovider
.. automethod:: Cache.get
.. automethod:: Cache.set
.. automethod:: Cache.makedir

96
testing/test_cache.py Executable file

@@ -0,0 +1,96 @@
import pytest
pytest_plugins = "pytester",
class TestNewAPI:
def test_config_cache_makedir(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
with pytest.raises(ValueError):
config.cache.makedir("key/name")
p = config.cache.makedir("name")
assert p.check()
def test_config_cache_dataerror(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
cache = config.cache
pytest.raises(TypeError, lambda: cache.set("key/name", cache))
config.cache.set("key/name", 0)
config.cache._getvaluepath("key/name").write("123invalid")
val = config.cache.get("key/name", -2)
assert val == -2
def test_config_cache(self, testdir):
testdir.makeconftest("""
def pytest_configure(config):
# see that we get cache information early on
assert hasattr(config, "cache")
""")
testdir.makepyfile("""
def test_session(pytestconfig):
assert hasattr(pytestconfig, "cache")
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def XXX_test_cachefuncarg(self, testdir):
testdir.makepyfile("""
import pytest
def test_cachefuncarg(cache):
val = cache.get("some/thing", None)
assert val is None
cache.set("some/thing", [1])
pytest.raises(TypeError, lambda: cache.get("some/thing"))
val = cache.get("some/thing", [])
assert val == [1]
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cache_reportheader(testdir):
    testdir.makepyfile("""
def test_hello():
pass
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines([
"cachedir: .cache"
])
def test_cache_show(testdir):
result = testdir.runpytest("--cache")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*cache is empty*"
])
p = testdir.makeconftest("""
def pytest_configure(config):
config.cache.set("my/name", [1,2,3])
config.cache.set("other/some", {1:2})
dp = config.cache.makedir("mydb")
dp.ensure("hello")
dp.ensure("world")
""")
result = testdir.runpytest()
assert result.ret == 0
result = testdir.runpytest("--cache")
result.stdout.fnmatch_lines_random([
"*cachedir:*",
"-*cache values*-",
"*my/name contains:",
" [1, 2, 3]",
"*other/some contains*",
" {*1*: 2}",
"-*cache directories*-",
"*mydb/hello*length 0*",
"*mydb/world*length 0*",
])

235
testing/test_lastfailed.py Executable file

@@ -0,0 +1,235 @@
import os
import pytest
import shutil
import py
pytest_plugins = "pytester",
class TestLastFailed:
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
p.write(py.code.Source("""
def test_1():
assert 1
def test_2():
assert 1
def test_3():
assert 0
"""))
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 passed*1 desel*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
result = testdir.runpytest("--lf", "--clearcache")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
# Run this again to make sure clearcache is robust
if os.path.isdir('.cache'):
shutil.rmtree('.cache')
result = testdir.runpytest("--lf", "--clearcache")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
def test_failedfirst_order(self, testdir):
        testdir.tmpdir.join('test_a.py').write(py.code.Source("""
def test_always_passes():
assert 1
"""))
        testdir.tmpdir.join('test_b.py').write(py.code.Source("""
def test_always_fails():
assert 0
"""))
result = testdir.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines([
"test_a.py*",
"test_b.py*",
])
result = testdir.runpytest("--lf", "--ff")
        # Test order will be failing tests first
result.stdout.fnmatch_lines([
"test_b.py*",
"test_a.py*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile(test_a="""
def test_a1():
assert 0
def test_a2():
assert 1
""", test_b="""
def test_b1():
assert 0
""")
p = testdir.tmpdir.join("test_a.py")
p2 = testdir.tmpdir.join("test_b.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
p2.write(py.code.Source("""
def test_b1():
assert 1
"""))
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 passed*",
])
result = testdir.runpytest("--lf", p)
result.stdout.fnmatch_lines([
"*1 failed*1 desel*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p1 = testdir.makepyfile("""
def test_1():
assert 0
""")
p2 = testdir.tmpdir.join("test_something.py")
p2.write(py.code.Source("""
def test_2():
assert 0
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 failed*",
])
def test_lastfailed_xpass(self, testdir):
        testdir.inline_runsource("""
import pytest
@pytest.mark.xfail
def test_hello():
assert 1
""")
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
assert not lastfailed
def test_lastfailed_collectfailure(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
def rlf(fail_import, fail_run):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
testdir.runpytest('-q')
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return lastfailed
lastfailed = rlf(fail_import=0, fail_run=0)
assert not lastfailed
lastfailed = rlf(fail_import=1, fail_run=0)
assert list(lastfailed) == ['test_maybe.py']
lastfailed = rlf(fail_import=0, fail_run=1)
assert list(lastfailed) == ['test_maybe.py::test_hello']
def test_lastfailed_failure_subset(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
testdir.makepyfile(test_maybe2="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
def test_pass():
pass
""")
def rlf(fail_import, fail_run, args=()):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
result = testdir.runpytest('-q', '--lf', *args)
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return result, lastfailed
result, lastfailed = rlf(fail_import=0, fail_run=0)
assert not lastfailed
result.stdout.fnmatch_lines([
'*3 passed*',
])
result, lastfailed = rlf(fail_import=1, fail_run=0)
assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py']
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
# edge case of test selection - even if we remember failures
# from other tests we still need to run all tests if no test
# matches the failures
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
result.stdout.fnmatch_lines([
'*2 passed*',
])