refine command line option naming and docs

holger krekel 2015-09-16 20:41:22 +02:00
parent 9a90aaca96
commit 45065e4e2e
4 changed files with 328 additions and 319 deletions

View File

@@ -16,7 +16,7 @@ class Cache(object):
self.config = config
self._cachedir = config.rootdir.join(".cache")
self.trace = config.trace.root.get("cache")
if config.getvalue("clearcache"):
if config.getvalue("cacheclear"):
self.trace("clearing cachedir")
if self._cachedir.check():
self._cachedir.remove()
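
The clearing branch above removes the whole cache directory up front. A standalone sketch of that behaviour, using ``shutil`` in place of the ``py.path`` API the real code uses:

    import os
    import shutil

    def clear_cache(cachedir=".cache", cacheclear=True):
        # mirrors the branch above: only act when the option was given
        # and the directory actually exists
        if cacheclear and os.path.isdir(cachedir):
            shutil.rmtree(cachedir)
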
@@ -135,7 +135,7 @@ class LFPlugin:
def pytest_sessionfinish(self, session):
config = self.config
if config.getvalue("showcache") or hasattr(config, "slaveinput"):
if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
return
config.cache.set("cache/lastfailed", self.lastfailed)
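
Because the cache stores **json encodable** values (see the docs below), a set of test ids cannot be written as-is. A self-contained illustration of that round-trip constraint; the sorted-list conversion is one way to satisfy it, not necessarily what the plugin does:

    import json

    lastfailed = {"test_50.py::test_num[17]", "test_50.py::test_num[25]"}
    encoded = json.dumps(sorted(lastfailed))  # sets are not json-encodable
    restored = set(json.loads(encoded))       # decode and rebuild the set
    assert restored == lastfailed
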
@@ -152,17 +152,17 @@ def pytest_addoption(parser):
"This may re-order tests and thus lead to "
"repeated fixture setup/teardown")
group.addoption(
'--show-cache', action='store_true', dest="showcache",
'--cache-show', action='store_true', dest="cacheshow",
help="show cache contents, don't perform collection or tests")
group.addoption(
'--clearcache', action='store_true', dest="clearcache",
'--cache-clear', action='store_true', dest="cacheclear",
help="remove all cache contents at start of test run.")
def pytest_cmdline_main(config):
if config.option.showcache:
if config.option.cacheshow:
from _pytest.main import wrap_session
return wrap_session(config, showcache)
return wrap_session(config, cacheshow)
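
The renamed flags map onto the old attribute-style names through the ``dest`` argument, which is what ``config.getvalue("cacheshow")`` and friends read above. A minimal stdlib analogy (pytest's own parser builds on argparse, so this is an approximation):

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--cache-show", action="store_true", dest="cacheshow")
    parser.add_argument("--cache-clear", action="store_true", dest="cacheclear")

    opts = parser.parse_args(["--cache-clear"])
    assert opts.cacheclear and not opts.cacheshow
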
@@ -182,7 +182,7 @@ def pytest_report_header(config):
return "cachedir: %s" % relpath
def showcache(config, session):
def cacheshow(config, session):
from pprint import pprint
tw = py.io.TerminalWriter()
tw.line("cachedir: " + str(config.cache._cachedir))
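
The rest of ``cacheshow`` (outside this hunk) walks the cache directory and pretty-prints each stored value, producing the "cache values" listing shown in the docs below. A rough standalone sketch, assuming the ``.cache`` layout of one json file per key:

    import json
    import os
    from pprint import pprint

    cachedir = ".cache"
    for root, _dirs, files in os.walk(cachedir):
        for name in files:
            path = os.path.join(root, name)
            key = os.path.relpath(path, cachedir).replace(os.sep, "/")
            print("%s contains:" % key)
            try:
                with open(path) as f:
                    pprint(json.load(f))
            except ValueError:
                print("  (not json, %d bytes)" % os.path.getsize(path))
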

View File

@@ -13,18 +13,19 @@ cache: working with cross-testrun state
Usage
---------
plugins can access the `config.cache`_ object
which helps sharing **json encodable** values between ``py.test`` invocations.
The plugin provides two options to rerun failures, namely:
The plugin provides two command line options to rerun failures from the
last ``py.test`` invocation:
* ``--lf`` (last failures) - to only re-run the failures.
* ``--ff`` (failures first) - to run the failures first and then the rest of
the tests.
For cleanup (usually not needed), a ``--clearcache`` option allows to remove
For cleanup (usually not needed), a ``--cache-clear`` option lets you remove
all cross-session cache contents ahead of a test run.
Other plugins may access the `config.cache`_ object to set/get
**json encodable** values between ``py.test`` invocations.
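
The ``test_caching.py`` output quoted further down comes from an example along these lines; a sketch of the cached-fixture pattern, consistent with that output (the ``sleep`` stands in for an expensive computation):

    # content of test_caching.py
    import time

    import pytest

    @pytest.fixture
    def mydata(request):
        val = request.config.cache.get("example/value", None)
        if val is None:
            time.sleep(5)  # expensive computation, cached across runs
            val = 42
            request.config.cache.set("example/value", val)
        return val

    def test_function(mydata):
        assert mydata == 23
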
Rerunning only failures or failures first
-----------------------------------------------
@@ -43,66 +44,67 @@ If you run this for the first time you will see two failures::
$ py.test -q
.................F.......F........................
=================================== FAILURES ===================================
_________________________________ test_num[17] _________________________________
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_________________________________ test_num[25] _________________________________
_______________________________ test_num[25] _______________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
2 failed, 48 passed in 0.04 seconds
If you then run it with ``--lf`` you will run only the two failing tests
from the last run::
$ py.test --lf
============================= test session starts ==============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.5
=========================== test session starts ============================
platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0
run-last-failure: rerun last 2 failures
plugins: cache
rootdir: /tmp/doc-exec-9, inifile:
collected 50 items
test_50.py FF
=================================== FAILURES ===================================
_________________________________ test_num[17] _________________________________
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_________________________________ test_num[25] _________________________________
_______________________________ test_num[25] _______________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
=================== 2 failed, 48 deselected in 0.02 seconds ====================
================= 2 failed, 48 deselected in 0.02 seconds ==================
The last line indicates that 48 tests have not been run.
@@ -111,38 +113,38 @@ failures will be executed first (as can be seen from the series of ``FF`` and
dots)::
$ py.test --ff
============================= test session starts ==============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.5
=========================== test session starts ============================
platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0
run-last-failure: rerun last 2 failures first
plugins: cache
rootdir: /tmp/doc-exec-9, inifile:
collected 50 items
test_50.py FF................................................
=================================== FAILURES ===================================
_________________________________ test_num[17] _________________________________
================================= FAILURES =================================
_______________________________ test_num[17] _______________________________
i = 17
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
_________________________________ test_num[25] _________________________________
_______________________________ test_num[25] _______________________________
i = 25
@pytest.mark.parametrize("i", range(50))
def test_num(i):
if i in (17,25):
if i in (17, 25):
> pytest.fail("bad luck")
E Failed: bad luck
test_50.py:6: Failed
===================== 2 failed, 48 passed in 0.07 seconds ======================
=================== 2 failed, 48 passed in 0.04 seconds ====================
.. _`config.cache`:
@@ -175,32 +177,34 @@ of the sleep::
$ py.test -q
F
=================================== FAILURES ===================================
________________________________ test_function _________________________________
================================= FAILURES =================================
______________________________ test_function _______________________________
mydata = 42
def test_function(mydata):
> assert mydata == 23
E assert 42 == 23
test_caching.py:12: AssertionError
1 failed in 5.41 seconds
If you run it a second time the value will be retrieved from
the cache and this will be quick::
$ py.test -q
F
=================================== FAILURES ===================================
________________________________ test_function _________________________________
================================= FAILURES =================================
______________________________ test_function _______________________________
mydata = 42
def test_function(mydata):
> assert mydata == 23
E assert 42 == 23
test_caching.py:12: AssertionError
1 failed in 0.01 seconds
See the `cache-api`_ for more details.
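
Pending that reference, the calls exercised throughout this commit suggest the following surface; a hedged sketch wrapped in a fixture so ``config`` is available (signatures inferred from the diff and docs):

    import pytest

    @pytest.fixture
    def cached_db(request):
        cache = request.config.cache
        value = cache.get("example/value", None)  # json-decoded value, or default
        if value is None:
            cache.set("example/value", 42)        # value must be json-encodable
        return cache.makedir("mydb")              # directory for non-json files
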
@@ -209,28 +213,35 @@ Inspecting Cache content
-------------------------------
You can always peek at the content of the cache using the
``--cache`` command line option::
``--cache-show`` command line option::
$ py.test --cache
============================= test session starts ==============================
platform linux2 -- Python 2.7.3 -- pytest-2.3.5
plugins: cache
cachedir: /tmp/doc-exec-6/.cache
--------------------------------- cache values ---------------------------------
example/value contains:
42
cache/lastfailed contains:
set(['test_caching.py::test_function'])
=============================== in 0.01 seconds ===============================
$ py.test --cache-show
=========================== test session starts ============================
platform linux2 -- Python 2.7.6, pytest-2.7.3.dev426+ng9a90aac.d20150916, py-1.4.30, pluggy-0.3.0
rootdir: /tmp/doc-exec-9, inifile:
cachedir: /tmp/doc-exec-9/.cache
------------------------------- cache values -------------------------------
example/value contains:
  42
cache/lastfailed contains:
  set(['test_caching.py::test_function'])
============================ in 0.01 seconds ============================
Clearing Cache content
-------------------------------
You can instruct pytest to clear all cache files and values
by adding the ``--clearcache`` option like this::
by adding the ``--cache-clear`` option like this::
py.test --clearcache
py.test --cache-clear
This is recommended for invocations from Continuous Integration
servers where isolation and correctness are more important than speed.

View File

@@ -1,4 +1,7 @@
import pytest
import os
import shutil
import py
pytest_plugins = "pytester",
@@ -63,8 +66,9 @@ def test_cache_reportheader(testdir):
"cachedir: .cache"
])
def test_cache_show(testdir):
result = testdir.runpytest("--show-cache")
result = testdir.runpytest("--cache-show")
assert result.ret == 0
result.stdout.fnmatch_lines([
"*cache is empty*"
@@ -79,7 +83,7 @@ def test_cache_show(testdir):
""")
result = testdir.runpytest()
assert result.ret == 5 # no tests executed
result = testdir.runpytest("--show-cache")
result = testdir.runpytest("--cache-show")
result.stdout.fnmatch_lines_random([
"*cachedir:*",
"-*cache values*-",
@@ -91,3 +95,232 @@ def test_cache_show(testdir):
"*mydb/hello*length 0*",
"*mydb/world*length 0*",
])
class TestLastFailed:
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
p.write(py.code.Source("""
def test_1():
assert 1
def test_2():
assert 1
def test_3():
assert 0
"""))
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 passed*1 desel*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
result = testdir.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
# Run this again to make sure --cache-clear is robust
if os.path.isdir('.cache'):
shutil.rmtree('.cache')
result = testdir.runpytest("--lf", "--cache-clear")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
def test_failedfirst_order(self, testdir):
testdir.tmpdir.join('test_a.py').write(py.code.Source("""
def test_always_passes():
assert 1
"""))
testdir.tmpdir.join('test_b.py').write(py.code.Source("""
def test_always_fails():
assert 0
"""))
result = testdir.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines([
"test_a.py*",
"test_b.py*",
])
result = testdir.runpytest("--lf", "--ff")
# Test order will be failing tests first
result.stdout.fnmatch_lines([
"test_b.py*",
"test_a.py*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile(test_a="""
def test_a1():
assert 0
def test_a2():
assert 1
""", test_b="""
def test_b1():
assert 0
""")
p = testdir.tmpdir.join("test_a.py")
p2 = testdir.tmpdir.join("test_b.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
p2.write(py.code.Source("""
def test_b1():
assert 1
"""))
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 passed*",
])
result = testdir.runpytest("--lf", p)
result.stdout.fnmatch_lines([
"*1 failed*1 desel*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile("""
def test_1():
assert 0
""")
p2 = testdir.tmpdir.join("test_something.py")
p2.write(py.code.Source("""
def test_2():
assert 0
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 failed*",
])
def test_lastfailed_xpass(self, testdir):
testdir.inline_runsource("""
import pytest
@pytest.mark.xfail
def test_hello():
assert 1
""")
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
assert not lastfailed
def test_lastfailed_collectfailure(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
def rlf(fail_import, fail_run):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
testdir.runpytest('-q')
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return lastfailed
lastfailed = rlf(fail_import=0, fail_run=0)
assert not lastfailed
lastfailed = rlf(fail_import=1, fail_run=0)
assert list(lastfailed) == ['test_maybe.py']
lastfailed = rlf(fail_import=0, fail_run=1)
assert list(lastfailed) == ['test_maybe.py::test_hello']
def test_lastfailed_failure_subset(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
testdir.makepyfile(test_maybe2="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
def test_pass():
pass
""")
def rlf(fail_import, fail_run, args=()):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
result = testdir.runpytest('-q', '--lf', *args)
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return result, lastfailed
result, lastfailed = rlf(fail_import=0, fail_run=0)
assert not lastfailed
result.stdout.fnmatch_lines([
'*3 passed*',
])
result, lastfailed = rlf(fail_import=1, fail_run=0)
assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py']
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
# edge case of test selection - even if we remember failures
# from other tests we still need to run all tests if no test
# matches the failures
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
result.stdout.fnmatch_lines([
'*2 passed*',
])
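
All of these follow the same pytester pattern; a hypothetical extra case in that style (not part of this commit), checking that ``--cache-clear`` really forgets previous failures:

    def test_cache_clear_forgets_failures(testdir):
        testdir.makepyfile("def test_fail(): assert 0")
        testdir.runpytest()  # records the failure in .cache
        # with the cache cleared, --lf has nothing to select, so the
        # full (one-test) suite runs again
        result = testdir.runpytest("--cache-clear", "--lf")
        result.stdout.fnmatch_lines(["*1 failed*"])
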

View File

@@ -1,235 +0,0 @@
import os
import pytest
import shutil
import py
pytest_plugins = "pytester",
class TestLastFailed:
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
p = testdir.makepyfile("""
def test_1():
assert 0
def test_2():
assert 0
def test_3():
assert 1
""")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
p.write(py.code.Source("""
def test_1():
assert 1
def test_2():
assert 1
def test_3():
assert 0
"""))
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 passed*1 desel*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
result = testdir.runpytest("--lf", "--clearcache")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
# Run this again to make sure clearcache is robust
if os.path.isdir('.cache'):
shutil.rmtree('.cache')
result = testdir.runpytest("--lf", "--clearcache")
result.stdout.fnmatch_lines([
"*1 failed*2 passed*",
])
def test_failedfirst_order(self, testdir):
testdir.tmpdir.join('test_a.py').write(py.code.Source("""
def test_always_passes():
assert 1
"""))
testdir.tmpdir.join('test_b.py').write(py.code.Source("""
def test_always_fails():
assert 0
"""))
result = testdir.runpytest()
# Test order will be collection order; alphabetical
result.stdout.fnmatch_lines([
"test_a.py*",
"test_b.py*",
])
result = testdir.runpytest("--lf", "--ff")
# Test order will be failing tests first
result.stdout.fnmatch_lines([
"test_b.py*",
"test_a.py*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_difference_invocations(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile(test_a="""
def test_a1():
assert 0
def test_a2():
assert 1
""", test_b="""
def test_b1():
assert 0
""")
p = testdir.tmpdir.join("test_a.py")
p2 = testdir.tmpdir.join("test_b.py")
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
p2.write(py.code.Source("""
def test_b1():
assert 1
"""))
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 passed*",
])
result = testdir.runpytest("--lf", p)
result.stdout.fnmatch_lines([
"*1 failed*1 desel*",
])
@pytest.mark.skipif("sys.version_info < (2,6)")
def test_lastfailed_usecase_splice(self, testdir, monkeypatch):
monkeypatch.setenv("PYTHONDONTWRITEBYTECODE", 1)
testdir.makepyfile("""
def test_1():
assert 0
""")
p2 = testdir.tmpdir.join("test_something.py")
p2.write(py.code.Source("""
def test_2():
assert 0
"""))
result = testdir.runpytest()
result.stdout.fnmatch_lines([
"*2 failed*",
])
result = testdir.runpytest("--lf", p2)
result.stdout.fnmatch_lines([
"*1 failed*",
])
result = testdir.runpytest("--lf")
result.stdout.fnmatch_lines([
"*2 failed*",
])
def test_lastfailed_xpass(self, testdir):
testdir.inline_runsource("""
import pytest
@pytest.mark.xfail
def test_hello():
assert 1
""")
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
assert not lastfailed
def test_lastfailed_collectfailure(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
def rlf(fail_import, fail_run):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
testdir.runpytest('-q')
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return lastfailed
lastfailed = rlf(fail_import=0, fail_run=0)
assert not lastfailed
lastfailed = rlf(fail_import=1, fail_run=0)
assert list(lastfailed) == ['test_maybe.py']
lastfailed = rlf(fail_import=0, fail_run=1)
assert list(lastfailed) == ['test_maybe.py::test_hello']
def test_lastfailed_failure_subset(self, testdir, monkeypatch):
testdir.makepyfile(test_maybe="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
""")
testdir.makepyfile(test_maybe2="""
import py
env = py.std.os.environ
if '1' == env['FAILIMPORT']:
raise ImportError('fail')
def test_hello():
assert '0' == env['FAILTEST']
def test_pass():
pass
""")
def rlf(fail_import, fail_run, args=()):
monkeypatch.setenv('FAILIMPORT', fail_import)
monkeypatch.setenv('FAILTEST', fail_run)
result = testdir.runpytest('-q', '--lf', *args)
config = testdir.parseconfigure()
lastfailed = config.cache.get("cache/lastfailed", -1)
return result, lastfailed
result, lastfailed = rlf(fail_import=0, fail_run=0)
assert not lastfailed
result.stdout.fnmatch_lines([
'*3 passed*',
])
result, lastfailed = rlf(fail_import=1, fail_run=0)
assert sorted(list(lastfailed)) == ['test_maybe.py', 'test_maybe2.py']
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
# edge case of test selection - even if we remember failures
# from other tests we still need to run all tests if no test
# matches the failures
result, lastfailed = rlf(fail_import=0, fail_run=0,
args=('test_maybe2.py',))
assert list(lastfailed) == ['test_maybe.py']
result.stdout.fnmatch_lines([
'*2 passed*',
])