Merge pull request #1015 from hpk42/cache-integration

refined pytest-cache integration
This commit is contained in:
Florian Bruhin 2015-09-16 21:46:46 +02:00
commit 2575053697
14 changed files with 845 additions and 36 deletions

1
.gitignore vendored

@@ -28,6 +28,7 @@ dist/
*.egg-info
issue/
env/
.env/
3rdparty/
.tox
.cache


@@ -1,6 +1,13 @@
2.8.0.dev (compared to 2.7.X)
-----------------------------
- new ``--lf`` and ``--ff`` options to run only the last failing tests or
"failing tests first" from the last run. This functionality is provided
through porting the formerly external pytest-cache plugin into pytest core.
BACKWARD INCOMPAT: if you used pytest-cache's functionality to persist
data between test runs, be aware that we don't serialize sets anymore.
Thanks Ronny Pfannschmidt for most of the merging work.
- "-r" option now accepts "a" to include all possible reports, similar
to passing "fEsxXw" explicitly (issue960).
Thanks Abhijeet Kasurde for the PR.
@@ -66,6 +73,7 @@
- Summary bar is now colored yellow for warning
situations such as: all tests either were skipped or xpass/xfailed,
or no tests were run at all (this is a partial fix for issue500).
- fix issue812: pytest now exits with status code 5 in situations where no
tests were run at all, such as when the directory given on the command line
does not contain any tests or when a command line option filters out all tests.

219
_pytest/cacheprovider.py Executable file

@@ -0,0 +1,219 @@
"""
merged implementation of the cache provider
the name cache was not chosen to ensure pluggy automatically
ignores the external pytest-cache
"""
import py
import pytest
import json
from os.path import sep as _sep, altsep as _altsep
class Cache(object):
def __init__(self, config):
self.config = config
self._cachedir = config.rootdir.join(".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("cacheclear"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
self._cachedir.mkdir()
def makedir(self, name):
""" return a directory path object with the given name. If the
directory does not yet exist, it will be created. You can use it
to manage files, e.g. to store/retrieve database
dumps across test sessions.
:param name: must be a string not containing a ``/`` separator.
Make sure the name contains your plugin or application
identifiers to prevent clashes with other cache users.
"""
if _sep in name or _altsep is not None and _altsep in name:
raise ValueError("name is not allowed to contain path separators")
return self._cachedir.ensure_dir("d", name)
def _getvaluepath(self, key):
return self._cachedir.join('v', *key.split('/'))
def get(self, key, default):
""" return cached value for the given key. If no value
was yet cached or the value cannot be read, the specified
default is returned.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param default: must be provided in case of a cache-miss or
invalid cache values.
"""
path = self._getvaluepath(key)
if path.check():
try:
with path.open("r") as f:
return json.load(f)
except ValueError:
self.trace("cache-invalid at %s" % (path,))
return default
def set(self, key, value):
""" save value for the given key.
:param key: must be a ``/`` separated value. Usually the first
name is the name of your plugin or your application.
:param value: must be of any combination of basic
python types, including nested types
like e.g. lists of dictionaries.
"""
path = self._getvaluepath(key)
path.dirpath().ensure_dir()
with path.open("w") as f:
self.trace("cache-write %s: %r" % (key, value,))
json.dump(value, f, indent=2, sort_keys=True)
class LFPlugin:
""" Plugin which implements the --lf (run last-failing) option """
def __init__(self, config):
self.config = config
active_keys = 'lf', 'failedfirst'
self.active = any(config.getvalue(key) for key in active_keys)
if self.active:
self.lastfailed = config.cache.get("cache/lastfailed", {})
else:
self.lastfailed = {}
def pytest_report_header(self):
if self.active:
if not self.lastfailed:
mode = "run all (no recorded failures)"
else:
mode = "rerun last %d failures%s" % (
len(self.lastfailed),
" first" if self.config.getvalue("failedfirst") else "")
return "run-last-failure: %s" % mode
def pytest_runtest_logreport(self, report):
if report.failed and "xfail" not in report.keywords:
self.lastfailed[report.nodeid] = True
elif not report.failed:
if report.when == "call":
self.lastfailed.pop(report.nodeid, None)
def pytest_collectreport(self, report):
passed = report.outcome in ('passed', 'skipped')
if passed:
if report.nodeid in self.lastfailed:
self.lastfailed.pop(report.nodeid)
self.lastfailed.update(
(item.nodeid, True)
for item in report.result)
else:
self.lastfailed[report.nodeid] = True
def pytest_collection_modifyitems(self, session, config, items):
if self.active and self.lastfailed:
previously_failed = []
previously_passed = []
for item in items:
if item.nodeid in self.lastfailed:
previously_failed.append(item)
else:
previously_passed.append(item)
if not previously_failed and previously_passed:
# running a subset of all tests with recorded failures outside
# of the set of tests currently executing
pass
elif self.config.getvalue("failedfirst"):
items[:] = previously_failed + previously_passed
else:
items[:] = previously_failed
config.hook.pytest_deselected(items=previously_passed)
def pytest_sessionfinish(self, session):
config = self.config
if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/lastfailed", self.lastfailed)
def pytest_addoption(parser):
group = parser.getgroup("general")
group.addoption(
'--lf', action='store_true', dest="lf",
help="rerun only the tests that failed "
"at the last run (or all if none failed)")
group.addoption(
'--ff', action='store_true', dest="failedfirst",
help="run all tests but run the last failures first. "
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
group.addoption(
'--cache-show', action='store_true', dest="cacheshow",
help="show cache contents, don't perform collection or tests")
group.addoption(
'--cache-clear', action='store_true', dest="cacheclear",
help="remove all cache contents at start of test run.")
def pytest_cmdline_main(config):
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, cacheshow)
@pytest.hookimpl(tryfirst=True)
def pytest_configure(config):
config.cache = Cache(config)
config.pluginmanager.register(LFPlugin(config), "lfplugin")
@pytest.fixture
def cache(request):
return request.config.cache
def pytest_report_header(config):
if config.option.verbose:
relpath = py.path.local().bestrelpath(config.cache._cachedir)
return "cachedir: %s" % relpath
def cacheshow(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
if not config.cache._cachedir.check():
tw.line("cache is empty")
return 0
dummy = object()
basedir = config.cache._cachedir
vdir = basedir.join("v")
tw.sep("-", "cache values")
for valpath in vdir.visit(lambda x: x.isfile()):
key = valpath.relto(vdir).replace(valpath.sep, "/")
val = config.cache.get(key, dummy)
if val is dummy:
tw.line("%s contains unreadable content, "
"will be ignored" % key)
else:
tw.line("%s contains:" % key)
stream = py.io.TextIO()
pprint(val, stream=stream)
for line in stream.getvalue().splitlines():
tw.line(" " + line)
ddir = basedir.join("d")
if ddir.isdir() and ddir.listdir():
tw.sep("-", "cache directories")
for p in basedir.join("d").visit():
#if p.check(dir=1):
# print("%s/" % p.relto(basedir))
if p.isfile():
key = p.relto(basedir)
tw.line("%s is a file of length %d" % (
key, p.size()))
return 0
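
For orientation, a minimal sketch of how plugin or conftest.py code
would drive the ``Cache`` API above (the key ``myplugin/runs`` is an
invented example, not part of this commit)::

# content of conftest.py (illustrative sketch only)
def pytest_configure(config):
    # config.cache already exists at this point: the cacheprovider
    # registers its own pytest_configure hook with tryfirst=True
    runs = config.cache.get("myplugin/runs", 0)  # default on cache miss
    # the stored value must be json-serializable
    config.cache.set("myplugin/runs", runs + 1)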


@@ -64,7 +64,7 @@ _preinit = []
default_plugins = (
"mark main terminal runner python pdb unittest capture skipping "
"tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
"junitxml resultlog doctest").split()
"junitxml resultlog doctest cacheprovider").split()
builtin_plugins = set(default_plugins)
builtin_plugins.add("pytester")


@@ -194,12 +194,12 @@ class monkeypatch:
undo stack. Calling it a second time has no effect unless
you do more monkeypatching after the undo call.
There is generally no need to call `undo()`, since it is
called automatically during tear-down.
Note that the same `monkeypatch` fixture is used across a
single test function invocation. If `monkeypatch` is used both by
the test function itself and one of the test fixtures,
calling `undo()` will undo all of the changes made in
both functions.
"""


@@ -1,27 +0,0 @@
.. _apiref:
pytest reference documentation
================================================
.. toctree::
:maxdepth: 2
builtin
customize
assert
fixture
yieldfixture
parametrize
xunit_setup
capture
monkeypatch
xdist
tmpdir
mark
skipping
recwarn
unittest
nose
doctest

271
doc/en/cache.rst Normal file

@@ -0,0 +1,271 @@
cache: working with cross-testrun state
=======================================
.. versionadded:: 2.8
.. warning::
The functionality of this core plugin was previously distributed
as a third party plugin named ``pytest-cache``. The core plugin
is compatible regarding command line options and API usage except that you
can only store/retrieve data between test runs that is json-serializable.
Usage
---------
The plugin provides two command line options to rerun failures from the
last ``py.test`` invocation:
* ``--lf`` (last failures) - to only re-run the failures.
* ``--ff`` (failures first) - to run the failures first and then the rest of
the tests.
For cleanup (usually not needed), a ``--cache-clear`` option allows removing
all cross-session cache contents ahead of a test run.
Other plugins may access the `config.cache`_ object to set/get
**json encodable** values between ``py.test`` invocations.
Rerunning only failures or failures first
-----------------------------------------------
First, let's create 50 test invocations of which only 2 fail::
# content of test_50.py
import pytest
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
pytest.fail("bad luck")
If you run this for the first time you will see two failures::
$ py.test -q
.................F.......F........................
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_______________________________ test_num[25] _______________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
2 failed, 48 passed in 0.04 seconds
If you then run it with ``--lf``::
$ py.test --lf
=========================== test session starts ============================
platform linux2 -- Python 2.7.6, pytest-2.7.3.dev428+ng79d22bf.d20150916, py-1.4.30, pluggy-0.3.0
run-last-failure: rerun last 2 failures
rootdir: /tmp/doc-exec-94, inifile:
collected 50 items
test_50.py FF
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_______________________________ test_num[25] _______________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
================= 2 failed, 48 deselected in 0.01 seconds ==================
You have run only the two failing tests from the last run, while 48 tests have
not been run ("deselected").
Now, if you run with the ``--ff`` option, all tests will be run but the
previous failures will be executed first (as can be seen from the series
of ``FF`` and dots)::
$ py.test --ff
=========================== test session starts ============================
platform linux2 -- Python 2.7.6, pytest-2.7.3.dev428+ng79d22bf.d20150916, py-1.4.30, pluggy-0.3.0
run-last-failure: rerun last 2 failures first
rootdir: /tmp/doc-exec-94, inifile:
collected 50 items
test_50.py FF................................................
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_______________________________ test_num[25] _______________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
=================== 2 failed, 48 passed in 0.03 seconds ====================
.. _`config.cache`:
The new config.cache object
--------------------------------
.. regendoc:wipe
Plugins or conftest.py support code can get a cached value using the
pytest ``config`` object. Here is a basic example plugin which
implements a :ref:`fixture` that re-uses previously created state
across py.test invocations::
# content of test_caching.py
import pytest
import time
@pytest.fixture
def mydata(request):
val = request.config.cache.get("example/value", None)
if val is None:
time.sleep(9*0.6) # expensive computation :)
val = 42
request.config.cache.set("example/value", val)
return val
def test_function(mydata):
assert mydata == 23
If you run this command once, it will take a while because
of the sleep::
$ py.test -q
F
================================= FAILURES =================================
______________________________ test_function _______________________________
mydata = 42
def test_function(mydata):
> assert mydata == 23
E assert 42 == 23
test_caching.py:14: AssertionError
1 failed in 5.41 seconds
If you run it a second time the value will be retrieved from
the cache and this will be quick::
$ py.test -q
F
================================= FAILURES =================================
______________________________ test_function _______________________________
mydata = 42
def test_function(mydata):
> assert mydata == 23
E assert 42 == 23
test_caching.py:14: AssertionError
1 failed in 0.01 seconds
See the `cache-api`_ for more details.
Inspecting Cache content
-------------------------------
You can always peek at the content of the cache using the
``--cache-show`` command line option::
$ py.test --cache-show
=========================== test session starts ============================
platform linux2 -- Python 2.7.6, pytest-2.7.3.dev428+ng79d22bf.d20150916, py-1.4.30, pluggy-0.3.0
rootdir: /tmp/doc-exec-94, inifile:
cachedir: /tmp/doc-exec-94/.cache
------------------------------- cache values -------------------------------
cache/lastfailed contains:
  {'test_caching.py::test_function': True}
example/value contains:
  42
Clearing Cache content
-------------------------------
You can instruct pytest to clear all cache files and values
by adding the ``--cache-clear`` option like this::
py.test --cache-clear
This is recommended for invocations from Continuous Integration
servers where isolation and correctness are more important
than speed.
.. _`cache-api`:
config.cache API
========================================
The ``config.cache`` object allows other plugins,
including ``conftest.py`` files,
to safely and flexibly store and retrieve values across
test runs because the ``config`` object is available
in many places.
Under the hood, the cache plugin uses the simple
``dumps``/``loads`` API of the ``json`` stdlib module.
.. currentmodule:: _pytest.cacheprovider
.. automethod:: Cache.get
.. automethod:: Cache.set
.. automethod:: Cache.makedir
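
A small usage sketch of the serialization constraint (the key names
are invented for illustration): basic and nested Python types
round-trip through the cache, while sets raise ``TypeError``::

# inside any code that has access to a ``config`` object,
# e.g. a pytest_configure hook in conftest.py
config.cache.set("example/nested", [{"a": 1}, {"b": [2, 3]}])
assert config.cache.get("example/nested", None) == [{"a": 1}, {"b": [2, 3]}]
# sets are not json-serializable; this would raise TypeError:
# config.cache.set("example/set", {1, 2, 3})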


@@ -12,6 +12,7 @@ Full pytest documentation
overview
apiref
cache
plugins
plugins_index/index
example/index


@@ -32,6 +32,7 @@ pytest: helps you write better programs
- :ref:`skipping` (improved in 2.4)
- :ref:`distribute tests to multiple CPUs <xdistcpu>` through :ref:`xdist plugin <xdist>`
- :ref:`continuously re-run failing tests <looponfailing>`
- :doc:`cache`
- flexible :ref:`Python test discovery`
**integrates with other testing methods and tools**:


@@ -2,7 +2,14 @@
"""
pytest: unit and functional testing with Python.
"""
-__all__ = ['main']
+__all__ = [
+    'main',
+    'UsageError',
+    'cmdline',
+    'hookspec',
+    'hookimpl',
+    '__version__',
+]
if __name__ == '__main__': # if run as a script or by 'python -m pytest'
# we trigger the below "else" condition by the following import


@@ -908,7 +908,7 @@ def test_unorderable_types(testdir):
""")
result = testdir.runpytest()
assert "TypeError" not in result.stdout.str()
assert result.ret == EXIT_NOTESTSCOLLECTED
def test_collect_functools_partial(testdir):

326
testing/test_cache.py Executable file

@@ -0,0 +1,326 @@
import pytest
import os
import shutil
import py
pytest_plugins = "pytester",
class TestNewAPI:
def test_config_cache_makedir(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
with pytest.raises(ValueError):
config.cache.makedir("key/name")
p = config.cache.makedir("name")
assert p.check()
def test_config_cache_dataerror(self, testdir):
testdir.makeini("[pytest]")
config = testdir.parseconfigure()
cache = config.cache
pytest.raises(TypeError, lambda: cache.set("key/name", cache))
config.cache.set("key/name", 0)
config.cache._getvaluepath("key/name").write("123invalid")
val = config.cache.get("key/name", -2)
assert val == -2
def test_config_cache(self, testdir):
testdir.makeconftest("""
def pytest_configure(config):
# see that we get cache information early on
assert hasattr(config, "cache")
""")
testdir.makepyfile("""
def test_session(pytestconfig):
assert hasattr(pytestconfig, "cache")
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cachefuncarg(self, testdir):
testdir.makepyfile("""
import pytest
def test_cachefuncarg(cache):
val = cache.get("some/thing", None)
assert val is None
cache.set("some/thing", [1])
pytest.raises(TypeError, lambda: cache.get("some/thing"))
val = cache.get("some/thing", [])
assert val == [1]
""")
result = testdir.runpytest()
assert result.ret == 0
result.stdout.fnmatch_lines(["*1 passed*"])
def test_cache_reportheader(testdir):
testdir.makepyfile("""
def test_hello():
pass
""")
result = testdir.runpytest("-v")
result.stdout.fnmatch_lines([
"cachedir: .cache"
])
def test_cache_show(testdir):
result = testdir.runpytest("--cache-show")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*cache is empty*"
])
testdir.makeconftest("""
def pytest_configure(config):
config.cache.set("my/name", [1,2,3])
config.cache.set("other/some", {1:2})
dp = config.cache.makedir("mydb")
dp.ensure("hello")
dp.ensure("world")
""")
result = testdir.runpytest()
assert result.ret == 5 # no tests executed
result = testdir.runpytest("--cache-show")
result.stdout.fnmatch_lines_random([
"*cachedir:*",
"-*cache values*-",
"*my/name contains:",
" [1, 2, 3]",
"*other/some contains*",
" {*1*: 2}",
"-*cache directories*-",
"*mydb/hello*length 0*",
"*mydb/world*length 0*",
])
class TestLastFailed:
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
p.write(py.code.Source("""
def test_1():
assert 1
def test_2():
assert 1
def test_3():
assert 0
"""))
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 passed*1 desel*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
result = testdir.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
# Run this again to make sure clear-cache is robust
if os.path.isdir('.cache'):
shutil.rmtree('.cache')
result = testdir.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
def test_failedfirst_order(self, testdir):
testdir.tmpdir.join('test_a.py').write(py.code.Source("""
def test_always_passes():
assert 1
"""))
testdir.tmpdir.join('test_b.py').write(py.code.Source("""
def test_always_fails():
assert 0
"""))
result = testdir.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines([
"test_a.py*",
"test_b.py*",
])
result = testdir.runpytest("--lf", "--ff")
# Test order will be failing tests first
result.stdout.fnmatch_lines([
"test_b.py*",
"test_a.py*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile(test_a="""
def test_a1():
assert 0
def test_a2():
assert 1
""", test_b="""
def test_b1():
assert 0
""")
p = testdir.tmpdir.join("test_a.py")
p2 = testdir.tmpdir.join("test_b.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
p2.write(py.code.Source("""
def test_b1():
assert 1
"""))
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 passed*",
])
result = testdir.runpytest("--lf", p)
result.stdout.fnmatch_lines([
"*1 failed*1 desel*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile("""
def test_1():
assert 0
""")
p2 = testdir.tmpdir.join("test_something.py")
p2.write(py.code.Source("""
def test_2():
assert 0
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 failed*",
])
def test_lastfailed_xpass(self, testdir):
testdir.inline_runsource("""
import pytest
@pytest.mark.xfail
def test_hello():
assert 1
""")
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
assert not lastfailed
def test_lastfailed_collectfailure(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
def rlf(fail_import, fail_run):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
testdir.runpytest('-q')
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return lastfailed
lastfailed = rlf(fail_import=0, fail_run=0)
assert not lastfailed
lastfailed = rlf(fail_import=1, fail_run=0)
assert list(lastfailed) == ['test_maybe.py']
lastfailed = rlf(fail_import=0, fail_run=1)
assert list(lastfailed) == ['test_maybe.py::test_hello']
def test_lastfailed_failure_subset(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
testdir.makepyfile(test_maybe2="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
def test_pass():
pass
""")
def rlf(fail_import, fail_run, args=()):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
result = testdir.runpytest('-q', '--lf', *args)
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return result, lastfailed
result, lastfailed = rlf(fail_import=0, fail_run=0)
assert not lastfailed
result.stdout.fnmatch_lines([
'*3 passed*',
])
result, lastfailed = rlf(fail_import=1, fail_run=0)
assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py']
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
# edge case of test selection - even if we remember failures
# from other tests we still need to run all tests if no test
# matches the failures
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
result.stdout.fnmatch_lines([
'*2 passed*',
])


@@ -53,7 +53,6 @@ def pytest_generate_tests(metafunc):
DistInfo(project_name='test', version=1)
], ['test-1']),
], ids=['normal', 'prefix-strip', 'deduplicate'])
def test_plugin_nameversion(input, expected):
pluginlist = [(None, x) for x in input]
result = _plugin_nameversions(pluginlist)


@@ -33,8 +33,8 @@ commands= py.test --genscript=pytest1
[testenv:flakes]
basepython = python2.7
-deps = pytest-flakes>=0.2
-commands = py.test --flakes -m flakes _pytest testing
+deps = flake8
+commands = flake8 pytest.py _pytest testing
[testenv:py27-xdist]
deps=pytest-xdist>=1.13
@@ -148,5 +148,8 @@ rsyncdirs=tox.ini pytest.py _pytest testing
python_files=test_*.py *_test.py testing/*/*.py
python_classes=Test Acceptance
python_functions=test
pep8ignore = E401 E225 E261 E128 E124 E302
norecursedirs = .tox ja .hg cx_freeze_source
[flake8]
ignore =E401,E225,E261,E128,E124,E301,E302,E121,E303,W391,E501,E231,E126,E701,E265,E241,E251,E226,E101,W191,E131,E203,E122,E123,E271,E712,E222,E127,E125,E221,W292,E111,E113,E293,E262,W293,E129,E702,E201,E272,E202