streamline pytester API majorly:

- integrate conftest into pytester plugin
- introduce runpytest() to either call runpytest_inprocess (default) or
  runpytest_subprocess (python -m pytest)
- move testdir.inline_runsource1 to pdb tests
- strike some unnecessary methods
- a new section "writing plugins" and some better pytester docs

--HG--
branch : testrefactor
This commit is contained in:
parent a8afba054a
commit db5649ec6a
@@ -43,6 +43,14 @@
 implementations. Use the ``hookwrapper`` mechanism instead already
 introduced with pytest-2.7.

+- speed up pytest's own test suite considerably by using inprocess
+  tests by default (the test run can be modified with --runpytest=subprocess
+  to create subprocesses in many places instead).  The main
+  API to run pytest in a test is "runpytest()"; use "runpytest_subprocess"
+  or "runpytest_inprocess" if you need a particular way of running
+  the test.  In all cases you get back a RunResult, but the inprocess
+  one will also have a "reprec" attribute with the recorded events/reports.
+

2.7.1.dev (compared to 2.7.0)
-----------------------------
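To make the changelog entry concrete, here is a minimal sketch of what a test
using the new API can look like. This example is editorial, not part of the
commit; the test module and the expected summary line are hypothetical, while
``runpytest()`` and ``RunResult.ret`` are the names introduced here:

    # hypothetical example, not part of this commit
    def test_demo(testdir):
        testdir.makepyfile("def test_ok(): pass")
        # in-process by default; --runpytest=subprocess switches the mode
        result = testdir.runpytest()
        assert result.ret == 0
        result.stdout.fnmatch_lines(["*1 passed*"])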
@@ -1,4 +1,5 @@
""" (disabled by default) support for testing pytest and pytest plugins. """
+import gc
import sys
import traceback
import os
@@ -16,6 +17,136 @@ from _pytest.core import TracedHookExecution

from _pytest.main import Session, EXIT_OK


+def pytest_addoption(parser):
+    # group = parser.getgroup("pytester", "pytester (self-tests) options")
+    parser.addoption('--lsof',
+           action="store_true", dest="lsof", default=False,
+           help=("run FD checks if lsof is available"))
+
+    parser.addoption('--runpytest', default="inprocess", dest="runpytest",
+           choices=("inprocess", "subprocess", ),
+           help=("run pytest sub runs in tests using an 'inprocess' "
+                 "or 'subprocess' (python -m main) method"))
+
+
+def pytest_configure(config):
+    # This might be called multiple times. Only take the first.
+    global _pytest_fullpath
+    try:
+        _pytest_fullpath
+    except NameError:
+        _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
+
+    if config.getvalue("lsof"):
+        checker = LsofFdLeakChecker()
+        if checker.matching_platform():
+            config.pluginmanager.register(checker)
+
+
+class LsofFdLeakChecker(object):
+    def get_open_files(self):
+        out = self._exec_lsof()
+        open_files = self._parse_lsof_output(out)
+        return open_files
+
+    def _exec_lsof(self):
+        pid = os.getpid()
+        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
+
+    def _parse_lsof_output(self, out):
+        def isopen(line):
+            return line.startswith('f') and ("deleted" not in line and
+                'mem' not in line and "txt" not in line and 'cwd' not in line)
+
+        open_files = []
+
+        for line in out.split("\n"):
+            if isopen(line):
+                fields = line.split('\0')
+                fd = fields[0][1:]
+                filename = fields[1][1:]
+                if filename.startswith('/'):
+                    open_files.append((fd, filename))
+
+        return open_files
+
+    def matching_platform(self):
+        try:
+            py.process.cmdexec("lsof -v")
+        except py.process.cmdexec.Error:
+            return False
+        else:
+            return True
+
+    @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True)
+    def pytest_runtest_item(self, item):
+        lines1 = self.get_open_files()
+        yield
+        if hasattr(sys, "pypy_version_info"):
+            gc.collect()
+        lines2 = self.get_open_files()
+
+        new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+        leaked_files = [t for t in lines2 if t[0] in new_fds]
+        if leaked_files:
+            error = []
+            error.append("***** %s FD leakage detected" % len(leaked_files))
+            error.extend([str(f) for f in leaked_files])
+            error.append("*** Before:")
+            error.extend([str(f) for f in lines1])
+            error.append("*** After:")
+            error.extend([str(f) for f in lines2])
+            error.append(error[0])
+            error.append("*** function %s:%s: %s " % item.location)
+            pytest.fail("\n".join(error), pytrace=False)
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+    'python2.7': r'C:\Python27\python.exe',
+    'python2.6': r'C:\Python26\python.exe',
+    'python3.1': r'C:\Python31\python.exe',
+    'python3.2': r'C:\Python32\python.exe',
+    'python3.3': r'C:\Python33\python.exe',
+    'python3.4': r'C:\Python34\python.exe',
+    'python3.5': r'C:\Python35\python.exe',
+}
+
+def getexecutable(name, cache={}):
+    try:
+        return cache[name]
+    except KeyError:
+        executable = py.path.local.sysfind(name)
+        if executable:
+            if name == "jython":
+                import subprocess
+                popen = subprocess.Popen([str(executable), "--version"],
+                    universal_newlines=True, stderr=subprocess.PIPE)
+                out, err = popen.communicate()
+                if not err or "2.5" not in err:
+                    executable = None
+                if "2.5.2" in err:
+                    executable = None  # http://bugs.jython.org/issue1790
+        cache[name] = executable
+        return executable
+
+@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
+                        'pypy', 'pypy3'])
+def anypython(request):
+    name = request.param
+    executable = getexecutable(name)
+    if executable is None:
+        if sys.platform == "win32":
+            executable = winpymap.get(name, None)
+            if executable:
+                executable = py.path.local(executable)
+                if executable.check():
+                    return executable
+        pytest.skip("no suitable %s found" % (name,))
+    return executable

# used at least by pytest-xdist plugin
@pytest.fixture
def _pytest(request):
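Editorial note: the LsofFdLeakChecker above is itself an instance of the
``hookwrapper`` mechanism that the changelog points plugin authors at. A
wrapper implementation runs up to its ``yield``, then the regular hook
implementations run, then the wrapper resumes. A minimal sketch of the pattern
(hypothetical plugin code, not part of this commit; it reuses the
``hookimpl_opts`` decorator exactly as in the class above):

    # hypothetical hookwrapper sketch, not part of this commit
    import pytest

    @pytest.hookimpl_opts(hookwrapper=True)
    def pytest_runtest_setup(item):
        # code before the yield runs before the regular implementations
        yield
        # code after the yield runs once they have all completed,
        # which is what makes before/after checks like the FD check possible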
@@ -40,23 +171,6 @@ def get_public_names(l):
    return [x for x in l if x[0] != "_"]


-def pytest_addoption(parser):
-    group = parser.getgroup("pylib")
-    group.addoption('--no-tools-on-path',
-           action="store_true", dest="notoolsonpath", default=False,
-           help=("discover tools on PATH instead of going through py.cmdline.")
-    )
-
-def pytest_configure(config):
-    # This might be called multiple times. Only take the first.
-    global _pytest_fullpath
-    try:
-        _pytest_fullpath
-    except NameError:
-        _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
-        _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")


class ParsedCall:
    def __init__(self, name, kwargs):
        self.__dict__.update(kwargs)
@@ -202,7 +316,7 @@ def pytest_funcarg__LineMatcher(request):
    return LineMatcher

def pytest_funcarg__testdir(request):
-    tmptestdir = TmpTestdir(request)
+    tmptestdir = Testdir(request)
    return tmptestdir
@@ -216,10 +330,10 @@ class RunResult:
    :ret: The return value.
    :outlines: List of lines captured from stdout.
    :errlines: List of lines captured from stderr.
-    :stdout: LineMatcher of stdout, use ``stdout.str()`` to
+    :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
       reconstruct stdout or the commonly used
       ``stdout.fnmatch_lines()`` method.
-    :stderrr: LineMatcher of stderr.
+    :stderr: :py:class:`LineMatcher` of stderr.
    :duration: Duration in seconds.

    """
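A short sketch of how these attributes are typically consumed (hypothetical
test code, not part of this commit; ``assert_outcomes`` and ``parseoutcomes``
appear on RunResult elsewhere in this diff):

    # hypothetical usage of a RunResult, not part of this commit
    result = testdir.runpytest("-v")
    assert result.ret == 0
    result.assert_outcomes(passed=1)             # parsed from the summary line
    outcomes = result.parseoutcomes()            # e.g. {"passed": 1}
    result.stdout.fnmatch_lines(["*1 passed*"])  # LineMatcher on captured stdout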
@@ -253,7 +367,7 @@ class RunResult:


-class TmpTestdir:
+class Testdir:
    """Temporary test directory with tools to test/run py.test itself.

    This is based on the ``tmpdir`` fixture but provides a number of
@@ -276,7 +390,6 @@ class TmpTestdir:

    def __init__(self, request):
        self.request = request
        self.Config = request.config.__class__
        # XXX remove duplication with tmpdir plugin
        basetmp = request.config._tmpdirhandler.ensuretemp("testdir")
        name = request.function.__name__
@@ -292,9 +405,14 @@ class TmpTestdir:
        self._savemodulekeys = set(sys.modules)
        self.chdir()  # always chdir
        self.request.addfinalizer(self.finalize)
+        method = self.request.config.getoption("--runpytest")
+        if method == "inprocess":
+            self._runpytest_method = self.runpytest_inprocess
+        elif method == "subprocess":
+            self._runpytest_method = self.runpytest_subprocess

    def __repr__(self):
-        return "<TmpTestdir %r>" % (self.tmpdir,)
+        return "<Testdir %r>" % (self.tmpdir,)

    def finalize(self):
        """Clean up global state artifacts.
@@ -315,7 +433,6 @@ class TmpTestdir:

        This allows the interpreter to catch module changes in case
        the module is re-imported.

        """
        for name in set(sys.modules).difference(self._savemodulekeys):
            # it seems zope.interfaces is keeping some state
@@ -512,43 +629,19 @@ class TmpTestdir:
        l = list(cmdlineargs) + [p]
        return self.inline_run(*l)

-    def inline_runsource1(self, *args):
-        """Run a test module in process using ``pytest.main()``.
-
-        This behaves exactly like :py:meth:`inline_runsource` and
-        takes identical arguments.  However the return value is a list
-        of the reports created by the pytest_runtest_logreport hook
-        during the run.
-
-        """
-        args = list(args)
-        source = args.pop()
-        p = self.makepyfile(source)
-        l = list(args) + [p]
-        reprec = self.inline_run(*l)
-        reports = reprec.getreports("pytest_runtest_logreport")
-        assert len(reports) == 3, reports  # setup/call/teardown
-        return reports[1]
-
    def inline_genitems(self, *args):
        """Run ``pytest.main(['--collectonly'])`` in-process.

        Returns a tuple of the collected items and a
        :py:class:`HookRecorder` instance.

        """
        return self.inprocess_run(list(args) + ['--collectonly'])

    def inprocess_run(self, args, plugins=()):
        """Run ``pytest.main()`` in-process, return Items and a HookRecorder.

        This runs the :py:func:`pytest.main` function to run all of
        py.test inside the test process itself like
        :py:meth:`inline_run`.  However the return value is a tuple of
        the collection items and a :py:class:`HookRecorder` instance.

        """
-        rec = self.inline_run(*args, plugins=plugins)
+        rec = self.inline_run("--collect-only", *args)
        items = [x.item for x in rec.getcalls("pytest_itemcollected")]
        return items, rec
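For illustration, a collection-only check might use the helpers above like
this (hypothetical test code, not part of this commit; the module and item
names are made up):

    # hypothetical usage of inline_genitems, not part of this commit
    def test_collects_two_items(testdir):
        testdir.makepyfile("""
            def test_a(): pass
            def test_b(): pass
        """)
        items, rec = testdir.inline_genitems()
        assert [item.name for item in items] == ["test_a", "test_b"]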
@@ -586,7 +679,7 @@ class TmpTestdir:
        reprec.ret = ret
        return reprec

-    def inline_runpytest(self, *args, **kwargs):
+    def runpytest_inprocess(self, *args, **kwargs):
        """ Return result of running pytest in-process, providing a similar
        interface to what self.runpytest() provides. """
        if kwargs.get("syspathinsert"):
@@ -615,7 +708,11 @@ class TmpTestdir:
        return res

    def runpytest(self, *args, **kwargs):
-        return self.inline_runpytest(*args, **kwargs)
+        """ Run pytest inline or in a subprocess, depending on the command line
+        option "--runpytest" and return a :py:class:`RunResult`.
+
+        """
+        return self._runpytest_method(*args, **kwargs)

    def parseconfig(self, *args):
        """Return a new py.test Config instance from given commandline args.
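The dispatch above means a plain ``runpytest()`` call follows the
``--runpytest`` option while the explicit variants pin the mode. A hedged
sketch (hypothetical test code, not part of this commit; per the changelog,
only the inprocess result carries a ``reprec`` attribute):

    # hypothetical example, not part of this commit
    result = testdir.runpytest("-q")          # mode chosen by --runpytest
    sub = testdir.runpytest_subprocess("-q")  # always a subprocess run
    inp = testdir.runpytest_inprocess("-q")   # always in-process
    inp.reprec.assertoutcome(passed=1)        # recorded events, inprocess only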
@@ -788,57 +885,23 @@ class TmpTestdir:
        except UnicodeEncodeError:
            print("couldn't print to %s because of encoding" % (fp,))

-    def runpybin(self, scriptname, *args):
-        """Run a py.* tool with arguments.
-
-        This can really only be used to run py.test, you probably want
-        :py:meth:`runpytest` instead.
-
-        Returns a :py:class:`RunResult`.
-
-        """
-        fullargs = self._getpybinargs(scriptname) + args
-        return self.run(*fullargs)
-
-    def _getpybinargs(self, scriptname):
-        if not self.request.config.getvalue("notoolsonpath"):
-            # XXX we rely on script referring to the correct environment
+    def _getpytestargs(self):
        # we cannot use "(sys.executable, script)"
        # because on windows the script is e.g. a py.test.exe
        return (sys.executable, _pytest_fullpath,)  # noqa
-        else:
-            pytest.skip("cannot run %r with --no-tools-on-path" % scriptname)

-    def runpython(self, script, prepend=True):
-        """Run a python script.
-
-        If ``prepend`` is True then the directory from which the py
-        package has been imported will be prepended to sys.path.
+    def runpython(self, script):
+        """Run a python script using sys.executable as interpreter.

        Returns a :py:class:`RunResult`.

        """
-        # XXX The prepend feature is probably not very useful since the
-        # split of py and pytest.
-        if prepend:
-            s = self._getsysprepend()
-            if s:
-                script.write(s + "\n" + script.read())
        return self.run(sys.executable, script)

-    def _getsysprepend(self):
-        if self.request.config.getvalue("notoolsonpath"):
-            s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath())
-        else:
-            s = ""
-        return s
-
    def runpython_c(self, command):
        """Run python -c "command", return a :py:class:`RunResult`."""
-        command = self._getsysprepend() + command
        return self.run(sys.executable, "-c", command)

-    def runpytest_subprocess(self, *args):
+    def runpytest_subprocess(self, *args, **kwargs):
        """Run py.test as a subprocess with given arguments.

        Any plugins added to the :py:attr:`plugins` list will be added
@@ -863,7 +926,8 @@ class TmpTestdir:
        plugins = [x for x in self.plugins if isinstance(x, str)]
        if plugins:
            args = ('-p', plugins[0]) + args
-        return self.runpybin("py.test", *args)
+        args = self._getpytestargs() + args
+        return self.run(*args)

    def spawn_pytest(self, string, expect_timeout=10.0):
        """Run py.test using pexpect.
@@ -874,10 +938,8 @@ class TmpTestdir:
        The pexpect child is returned.

        """
-        if self.request.config.getvalue("notoolsonpath"):
-            pytest.skip("--no-tools-on-path prevents running pexpect-spawn tests")
        basetemp = self.tmpdir.mkdir("pexpect")
-        invoke = " ".join(map(str, self._getpybinargs("py.test")))
+        invoke = " ".join(map(str, self._getpytestargs()))
        cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
        return self.spawn(cmd, expect_timeout=expect_timeout)
@@ -186,12 +186,44 @@ the plugin manager like this:
If you want to look at the names of existing plugins, use
the ``--traceconfig`` option.

+Testing plugins
+---------------
+
+pytest comes with some facilities that you can enable for testing your
+plugin.  Given that you have an installed plugin you can enable the
+:py:class:`testdir <_pytest.pytester.Testdir>` fixture by specifying a
+command line option to include the pytester plugin (``-p pytester``) or
+by putting ``pytest_plugins = "pytester"`` into your test or
+``conftest.py`` file.  You will then have a ``testdir`` fixture which you
+can use like this::
+
+    # content of test_myplugin.py
+
+    pytest_plugins = "pytester"  # to get the testdir fixture
+
+    def test_myplugin(testdir):
+        testdir.makepyfile("""
+            def test_example():
+                pass
+        """)
+        result = testdir.runpytest("--verbose")
+        result.stdout.fnmatch_lines("""
+            test_example*
+        """)
+
+Note that by default ``testdir.runpytest()`` will run pytest
+in-process.  You can pass the command line option ``--runpytest=subprocess``
+to have it happen in a subprocess.
+
+Also see the :py:class:`RunResult <_pytest.pytester.RunResult>` for more
+methods of the result object that you get from a call to ``runpytest``.
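As a further editorial sketch (hypothetical example, not part of this commit;
the option name is made up), a plugin test can combine ``makeconftest`` with
an explicit subprocess run when the plugin touches global state::

    # hypothetical example, not part of this commit
    def test_plugin_option(testdir):
        testdir.makeconftest("""
            def pytest_addoption(parser):
                parser.addoption("--myopt", action="store_true")
        """)
        testdir.makepyfile("def test_ok(): pass")
        result = testdir.runpytest_subprocess("--myopt")
        assert result.ret == 0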

.. _`writinghooks`:

Writing hook functions
======================


.. _validation:

hook function validation and execution
@@ -493,3 +525,13 @@ Reference of objects involved in hooks
.. autoclass:: _pytest.core.CallOutcome()
    :members:

+.. currentmodule:: _pytest.pytester
+
+.. autoclass:: Testdir()
+    :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile
+
+.. autoclass:: RunResult()
+    :members:
+
+.. autoclass:: LineMatcher()
+    :members:
@@ -203,7 +203,7 @@ class TestGeneralUsage:
            os.chdir(os.path.dirname(os.getcwd()))
            print (py.log)
        """))
-        result = testdir.runpython(p, prepend=False)
+        result = testdir.runpython(p)
        assert not result.ret

    def test_issue109_sibling_conftests_not_loaded(self, testdir):
@@ -1,122 +0,0 @@
-import pytest
-import sys
-import gc
-
-pytest_plugins = "pytester",
-
-import os, py
-
-class LsofFdLeakChecker(object):
-    def get_open_files(self):
-        out = self._exec_lsof()
-        open_files = self._parse_lsof_output(out)
-        return open_files
-
-    def _exec_lsof(self):
-        pid = os.getpid()
-        return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
-
-    def _parse_lsof_output(self, out):
-        def isopen(line):
-            return line.startswith('f') and ("deleted" not in line and
-                'mem' not in line and "txt" not in line and 'cwd' not in line)
-
-        open_files = []
-
-        for line in out.split("\n"):
-            if isopen(line):
-                fields = line.split('\0')
-                fd = fields[0][1:]
-                filename = fields[1][1:]
-                if filename.startswith('/'):
-                    open_files.append((fd, filename))
-
-        return open_files
-
-    def matching_platform(self):
-        try:
-            py.process.cmdexec("lsof -v")
-        except py.process.cmdexec.Error:
-            return False
-        else:
-            return True
-
-    @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True)
-    def pytest_runtest_item(self, item):
-        lines1 = self.get_open_files()
-        yield
-        if hasattr(sys, "pypy_version_info"):
-            gc.collect()
-        lines2 = self.get_open_files()
-
-        new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
-        leaked_files = [t for t in lines2 if t[0] in new_fds]
-        if leaked_files:
-            error = []
-            error.append("***** %s FD leakage detected" % len(leaked_files))
-            error.extend([str(f) for f in leaked_files])
-            error.append("*** Before:")
-            error.extend([str(f) for f in lines1])
-            error.append("*** After:")
-            error.extend([str(f) for f in lines2])
-            error.append(error[0])
-            error.append("*** function %s:%s: %s " % item.location)
-            pytest.fail("\n".join(error), pytrace=False)
-
-
-def pytest_addoption(parser):
-    parser.addoption('--lsof',
-           action="store_true", dest="lsof", default=False,
-           help=("run FD checks if lsof is available"))
-
-
-def pytest_configure(config):
-    if config.getvalue("lsof"):
-        checker = LsofFdLeakChecker()
-        if checker.matching_platform():
-            config.pluginmanager.register(checker)
-
-
-# XXX copied from execnet's conftest.py - needs to be merged
-winpymap = {
-    'python2.7': r'C:\Python27\python.exe',
-    'python2.6': r'C:\Python26\python.exe',
-    'python3.1': r'C:\Python31\python.exe',
-    'python3.2': r'C:\Python32\python.exe',
-    'python3.3': r'C:\Python33\python.exe',
-    'python3.4': r'C:\Python34\python.exe',
-    'python3.5': r'C:\Python35\python.exe',
-}
-
-def getexecutable(name, cache={}):
-    try:
-        return cache[name]
-    except KeyError:
-        executable = py.path.local.sysfind(name)
-        if executable:
-            if name == "jython":
-                import subprocess
-                popen = subprocess.Popen([str(executable), "--version"],
-                    universal_newlines=True, stderr=subprocess.PIPE)
-                out, err = popen.communicate()
-                if not err or "2.5" not in err:
-                    executable = None
-                if "2.5.2" in err:
-                    executable = None  # http://bugs.jython.org/issue1790
-        cache[name] = executable
-        return executable
-
-@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
-                        'pypy', 'pypy3'])
-def anypython(request):
-    name = request.param
-    executable = getexecutable(name)
-    if executable is None:
-        if sys.platform == "win32":
-            executable = winpymap.get(name, None)
-            if executable:
-                executable = py.path.local(executable)
-                if executable.check():
-                    return executable
-        pytest.skip("no suitable %s found" % (name,))
-    return executable
@@ -15,7 +15,7 @@ class TestModule:
        p.pyimport()
        del py.std.sys.modules['test_whatever']
        b.ensure("test_whatever.py")
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*import*mismatch*",
            "*imported*test_whatever*",

@@ -59,7 +59,7 @@ class TestClass:
                def __init__(self):
                    pass
        """)
-        result = testdir.inline_runpytest("-rw")
+        result = testdir.runpytest_inprocess("-rw")
        result.stdout.fnmatch_lines_random("""
            WC1*test_class_with_init_warning.py*__init__*
        """)

@@ -69,7 +69,7 @@ class TestClass:
            class test(object):
                pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*collected 0*",
        ])

@@ -86,7 +86,7 @@ class TestClass:
                def teardown_class(cls):
                    pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*1 passed*",
        ])

@@ -534,7 +534,7 @@ class TestConftestCustomization:
        """)
        testdir.makepyfile("def test_some(): pass")
        testdir.makepyfile(test_xyz="def test_func(): pass")
-        result = testdir.inline_runpytest("--collect-only")
+        result = testdir.runpytest_inprocess("--collect-only")
        result.stdout.fnmatch_lines([
            "*<Module*test_pytest*",
            "*<MyModule*xyz*",

@@ -590,7 +590,7 @@ class TestConftestCustomization:
                return MyFunction(name, collector)
        """)
        testdir.makepyfile("def some(): pass")
-        result = testdir.inline_runpytest("--collect-only")
+        result = testdir.runpytest_inprocess("--collect-only")
        result.stdout.fnmatch_lines([
            "*MyFunction*some*",
        ])

@@ -648,7 +648,7 @@ class TestTracebackCutting:
                raise ValueError("xyz")
        """)
        p = testdir.makepyfile("def test(hello): pass")
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
        assert result.ret != 0
        out = result.stdout.str()
        assert out.find("xyz") != -1

@@ -656,7 +656,7 @@ class TestTracebackCutting:
        numentries = out.count("_ _ _")  # separator for traceback entries
        assert numentries == 0

-        result = testdir.inline_runpytest("--fulltrace", p)
+        result = testdir.runpytest_inprocess("--fulltrace", p)
        out = result.stdout.str()
        assert out.find("conftest.py:2: ValueError") != -1
        numentries = out.count("_ _ _ _")  # separator for traceback entries

@@ -669,7 +669,7 @@ class TestTracebackCutting:
            x = 17
            asd
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret != 0
        out = result.stdout.str()
        assert "x = 1" not in out

@@ -678,7 +678,7 @@ class TestTracebackCutting:
            "  *asd*",
            "E*NameError*",
        ])
-        result = testdir.inline_runpytest("--fulltrace")
+        result = testdir.runpytest_inprocess("--fulltrace")
        out = result.stdout.str()
        assert "x = 1" in out
        assert "x = 2" in out

@@ -769,7 +769,7 @@ def test_customized_python_discovery(testdir):
    """)
    p2 = p.new(basename=p.basename.replace("test", "check"))
    p.move(p2)
-    result = testdir.inline_runpytest("--collect-only", "-s")
+    result = testdir.runpytest_inprocess("--collect-only", "-s")
    result.stdout.fnmatch_lines([
        "*check_customized*",
        "*check_simple*",

@@ -777,7 +777,7 @@ def test_customized_python_discovery(testdir):
        "*check_meth*",
    ])

-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*2 passed*",

@@ -793,12 +793,12 @@ def test_customized_python_discovery_functions(testdir):
        def _test_underscore():
            pass
    """)
-    result = testdir.inline_runpytest("--collect-only", "-s")
+    result = testdir.runpytest_inprocess("--collect-only", "-s")
    result.stdout.fnmatch_lines([
        "*_test_underscore*",
    ])

-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
    assert result.ret == 0
    result.stdout.fnmatch_lines([
        "*1 passed*",

@@ -818,7 +818,7 @@ def test_collector_attributes(testdir):
        def test_hello():
            pass
    """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
    result.stdout.fnmatch_lines([
        "*1 passed*",
    ])

@@ -842,7 +842,7 @@ def test_customize_through_attributes(testdir):
            def test_hello(self):
                pass
    """)
-    result = testdir.inline_runpytest("--collect-only")
+    result = testdir.runpytest_inprocess("--collect-only")
    result.stdout.fnmatch_lines([
        "*MyClass*",
        "*MyInstance*",

@@ -862,6 +862,6 @@ def test_unorderable_types(testdir):
            return Test
        TestFoo = make_test()
    """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
    assert "TypeError" not in result.stdout.str()
    assert result.ret == 0
@@ -33,7 +33,7 @@ class TestFillFixtures:
            def test_func(some):
                pass
        """)
-        result = testdir.inline_runpytest()  # "--collect-only")
+        result = testdir.runpytest_inprocess()  # "--collect-only")
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*def test_func(some)*",

@@ -78,7 +78,7 @@ class TestFillFixtures:
                def test_method(self, something):
                    assert something is self
        """)
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
        result.stdout.fnmatch_lines([
            "*1 passed*"
        ])

@@ -119,9 +119,9 @@ class TestFillFixtures:
            def test_spam(self, spam):
                assert spam == 'spamspam'
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])

    def test_extend_fixture_conftest_module(self, testdir):

@@ -142,9 +142,9 @@ class TestFillFixtures:
            def test_spam(spam):
                assert spam == 'spamspam'
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])

    def test_extend_fixture_conftest_conftest(self, testdir):

@@ -168,9 +168,9 @@ class TestFillFixtures:
            def test_spam(spam):
                assert spam == "spamspam"
        """))
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])

    def test_extend_fixture_conftest_plugin(self, testdir):

@@ -195,7 +195,7 @@ class TestFillFixtures:
            def test_foo(foo):
                assert foo == 14
        """)
-        result = testdir.inline_runpytest('-s')
+        result = testdir.runpytest_inprocess('-s')
        assert result.ret == 0

    def test_extend_fixture_plugin_plugin(self, testdir):

@@ -221,7 +221,7 @@ class TestFillFixtures:
            def test_foo(foo):
                assert foo == 14
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret == 0

    def test_override_parametrized_fixture_conftest_module(self, testdir):

@@ -243,9 +243,9 @@ class TestFillFixtures:
            def test_spam(spam):
                assert spam == 'spam'
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])

    def test_override_parametrized_fixture_conftest_conftest(self, testdir):

@@ -270,9 +270,9 @@ class TestFillFixtures:
            def test_spam(spam):
                assert spam == "spam"
        """))
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
        result.stdout.fnmatch_lines(["*1 passed*"])

    def test_override_non_parametrized_fixture_conftest_module(self, testdir):

@@ -297,9 +297,9 @@ class TestFillFixtures:
                assert spam == params['spam']
                params['spam'] += 1
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines(["*3 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
        result.stdout.fnmatch_lines(["*3 passed*"])

    def test_override_non_parametrized_fixture_conftest_conftest(self, testdir):

@@ -327,9 +327,9 @@ class TestFillFixtures:
                assert spam == params['spam']
                params['spam'] += 1
        """))
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines(["*3 passed*"])
-        result = testdir.inline_runpytest(testfile)
+        result = testdir.runpytest_inprocess(testfile)
        result.stdout.fnmatch_lines(["*3 passed*"])

    def test_autouse_fixture_plugin(self, testdir):

@@ -349,7 +349,7 @@ class TestFillFixtures:
            def test_foo(request):
                assert request.function.foo == 7
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret == 0

    def test_funcarg_lookup_error(self, testdir):

@@ -357,7 +357,7 @@ class TestFillFixtures:
            def test_lookup_error(unknown):
                pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*ERROR*test_lookup_error*",
            "*def test_lookup_error(unknown):*",

@@ -386,7 +386,7 @@ class TestFillFixtures:
                traceback.print_exc()
                assert sys.exc_info() == (None, None, None)
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret == 0


@@ -529,7 +529,7 @@ class TestRequestBasic:
            def test_second():
                assert len(l) == 1
        """)
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
        result.stdout.fnmatch_lines([
            "*1 error*"  # XXX the whole module collection fails
        ])

@@ -614,7 +614,7 @@ class TestRequestBasic:
        """))
        p = b.join("test_module.py")
        p.write("def test_func(arg1): pass")
-        result = testdir.inline_runpytest(p, "--fixtures")
+        result = testdir.runpytest_inprocess(p, "--fixtures")
        assert result.ret == 0
        result.stdout.fnmatch_lines("""
            *fixtures defined*conftest*

@@ -783,7 +783,7 @@ class TestRequestCachedSetup:
            def test_two_different_setups(arg1, arg2):
                assert arg1 != arg2
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        result.stdout.fnmatch_lines([
            "*1 passed*"
        ])

@@ -798,7 +798,7 @@ class TestRequestCachedSetup:
            def test_two_funcarg(arg1):
                assert arg1 == 11
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        result.stdout.fnmatch_lines([
            "*1 passed*"
        ])

@@ -825,7 +825,7 @@ class TestRequestCachedSetup:
            def test_check_test0_has_teardown_correct():
                assert test_0.l == [2]
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        result.stdout.fnmatch_lines([
            "*3 passed*"
        ])

@@ -841,7 +841,7 @@ class TestRequestCachedSetup:
            def test_func(app):
                pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*3/x*",

@@ -896,7 +896,7 @@ class TestFixtureUsages:
            def test_add(arg2):
                assert arg2 == 2
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*ScopeMismatch*involved factories*",
            "* def arg2*",

@@ -918,7 +918,7 @@ class TestFixtureUsages:
            def test_add(arg1, arg2):
                assert arg2 == 2
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*ScopeMismatch*involved factories*",
            "* def arg2*",

@@ -942,7 +942,7 @@ class TestFixtureUsages:
                assert arg2 == arg1 + 1
                assert len(l) == arg1
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*2 passed*"
        ])

@@ -962,7 +962,7 @@ class TestFixtureUsages:
            def test_missing(call_fail):
                pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines("""
            *pytest.fixture()*
            *def call_fail(fail)*

@@ -1044,7 +1044,7 @@ class TestFixtureUsages:
        reprec.assertoutcome(passed=2)

    def test_usefixtures_seen_in_showmarkers(self, testdir):
-        result = testdir.inline_runpytest("--markers")
+        result = testdir.runpytest_inprocess("--markers")
        result.stdout.fnmatch_lines("""
            *usefixtures(fixturename1*mark tests*fixtures*
        """)

@@ -1311,7 +1311,7 @@ class TestAutouseDiscovery:
        conftest.move(a.join(conftest.basename))
        a.join("test_something.py").write("def test_func(): pass")
        b.join("test_otherthing.py").write("def test_func(): pass")
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines("""
            *1 passed*1 error*
        """)

@@ -1765,7 +1765,7 @@ class TestFixtureMarker:
            def test_1(arg):
                pass
        """ % method)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*ScopeMismatch*You tried*function*session*request*",

@@ -1823,7 +1823,7 @@ class TestFixtureMarker:
            def test_mismatch(arg):
                pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*ScopeMismatch*",
            "*1 error*",

@@ -1874,7 +1874,7 @@ class TestFixtureMarker:
            def test_func4(marg):
                pass
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        result.stdout.fnmatch_lines("""
            test_mod1.py::test_func[s1] PASSED
            test_mod2.py::test_func2[s1] PASSED

@@ -1926,7 +1926,7 @@ class TestFixtureMarker:
            def test_3(self):
                pass
        """)
-        result = testdir.inline_runpytest("-vs")
+        result = testdir.runpytest_inprocess("-vs")
        result.stdout.fnmatch_lines("""
            test_class_ordering.py::TestClass2::test_1[1-a] PASSED
            test_class_ordering.py::TestClass2::test_1[2-a] PASSED

@@ -2017,7 +2017,7 @@ class TestFixtureMarker:
            def test_finish():
                assert not l
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        result.stdout.fnmatch_lines("""
            *3 passed*
        """)

@@ -2047,7 +2047,7 @@ class TestFixtureMarker:
            def test_browser(browser):
                assert browser['visited'] is True
        """))
-        reprec = testdir.inline_runpytest("-s")
+        reprec = testdir.runpytest_inprocess("-s")
        for test in ['test_browser']:
            reprec.stdout.fnmatch_lines('*Finalized*')

@@ -2258,7 +2258,7 @@ class TestFixtureMarker:
            def test_foo(fix):
                assert 1
        """)
-        res = testdir.inline_runpytest('-v')
+        res = testdir.runpytest_inprocess('-v')
        res.stdout.fnmatch_lines([
            '*test_foo*alpha*',
            '*test_foo*beta*'])

@@ -2275,7 +2275,7 @@ class TestFixtureMarker:
            def test_foo(fix):
                assert 1
        """)
-        res = testdir.inline_runpytest('-v')
+        res = testdir.runpytest_inprocess('-v')
        res.stdout.fnmatch_lines([
            '*test_foo*alpha*',
            '*test_foo*beta*'])

@@ -2335,7 +2335,7 @@ class TestErrors:
            def test_something(gen):
                pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*def gen(qwe123):*",

@@ -2361,7 +2361,7 @@ class TestErrors:
            def test_3():
                assert l[0] != l[1]
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines("""
            *ERROR*teardown*test_1*
            *KeyError*

@@ -2381,7 +2381,7 @@ class TestErrors:
            def test_something():
                pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret != 0
        result.stdout.fnmatch_lines([
            "*def gen(qwe123):*",

@@ -2395,7 +2395,7 @@ class TestShowFixtures:
        assert config.option.showfixtures

    def test_show_fixtures(self, testdir):
-        result = testdir.inline_runpytest("--fixtures")
+        result = testdir.runpytest_inprocess("--fixtures")
        result.stdout.fnmatch_lines([
            "*tmpdir*",
            "*temporary directory*",

@@ -2403,7 +2403,7 @@ class TestShowFixtures:
        )

    def test_show_fixtures_verbose(self, testdir):
-        result = testdir.inline_runpytest("--fixtures", "-v")
+        result = testdir.runpytest_inprocess("--fixtures", "-v")
        result.stdout.fnmatch_lines([
            "*tmpdir*--*tmpdir.py*",
            "*temporary directory*",

@@ -2420,7 +2420,7 @@ class TestShowFixtures:
            def arg1():
                """ hello world """
        ''')
-        result = testdir.inline_runpytest("--fixtures", p)
+        result = testdir.runpytest_inprocess("--fixtures", p)
        result.stdout.fnmatch_lines("""
            *tmpdir
            *fixtures defined from*

@@ -2442,7 +2442,7 @@ class TestShowFixtures:
            def test_hello():
                pass
        """)
-        result = testdir.inline_runpytest("--fixtures")
+        result = testdir.runpytest_inprocess("--fixtures")
        result.stdout.fnmatch_lines("""
            *tmpdir*
            *fixtures defined from*conftest*

@@ -2468,7 +2468,7 @@ class TestShowFixtures:

            """
        ''')
-        result = testdir.inline_runpytest("--fixtures", p)
+        result = testdir.runpytest_inprocess("--fixtures", p)
        result.stdout.fnmatch_lines("""
            * fixtures defined from test_show_fixtures_trimmed_doc *
            arg2

@@ -2496,7 +2496,7 @@ class TestContextManagerFixtureFuncs:
                print ("test2 %s" % arg1)
                assert 0
        """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
        result.stdout.fnmatch_lines("""
            *setup*
            *test1 1*

@@ -2519,7 +2519,7 @@ class TestContextManagerFixtureFuncs:
            def test_2(arg1):
                print ("test2 %s" % arg1)
        """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
        result.stdout.fnmatch_lines("""
            *setup*
            *test1 1*

@@ -2537,7 +2537,7 @@ class TestContextManagerFixtureFuncs:
            def test_1(arg1):
                pass
        """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
        result.stdout.fnmatch_lines("""
            *pytest.fail*setup*
            *1 error*

@@ -2553,7 +2553,7 @@ class TestContextManagerFixtureFuncs:
            def test_1(arg1):
                pass
        """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
        result.stdout.fnmatch_lines("""
            *pytest.fail*teardown*
            *1 passed*1 error*

@@ -2569,7 +2569,7 @@ class TestContextManagerFixtureFuncs:
            def test_1(arg1):
                pass
        """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
        result.stdout.fnmatch_lines("""
            *fixture function*
            *test_yields*:2*

@@ -2585,7 +2585,7 @@ class TestContextManagerFixtureFuncs:
            def test_1(arg1):
                pass
        """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
        result.stdout.fnmatch_lines("""
            *yield_fixture*requires*yield*
            *yield_fixture*

@@ -2601,7 +2601,7 @@ class TestContextManagerFixtureFuncs:
            def test_1(arg1):
                pass
        """)
-        result = testdir.inline_runpytest("-s")
+        result = testdir.runpytest_inprocess("-s")
        result.stdout.fnmatch_lines("""
            *fixture*cannot use*yield*
            *def arg1*
@@ -246,7 +246,7 @@ class TestMetafunc:
                assert x in (10, 20)
                assert y == 2
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        result.stdout.fnmatch_lines([
            "*test_simple*1-2*",
            "*test_simple*2-2*",

@@ -290,7 +290,7 @@ class TestMetafunc:
            def test_meth(self, x, y):
                assert 0, x
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret == 1
        result.assert_outcomes(failed=6)

@@ -330,7 +330,7 @@ class TestMetafunc:
            def test_3(self, arg, arg2):
                pass
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        assert result.ret == 0
        result.stdout.fnmatch_lines("""
            *test_1*1*

@@ -372,7 +372,7 @@ class TestMetafuncFunctional:
                assert metafunc.function == unbound
                assert metafunc.cls == TestClass
        """)
-        result = testdir.inline_runpytest(p, "-v")
+        result = testdir.runpytest_inprocess(p, "-v")
        result.assert_outcomes(passed=2)

    def test_addcall_with_two_funcargs_generators(self, testdir):

@@ -389,7 +389,7 @@ class TestMetafuncFunctional:
            def test_myfunc(self, arg1, arg2):
                assert arg1 == arg2
        """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
        result.stdout.fnmatch_lines([
            "*test_myfunc*0*PASS*",
            "*test_myfunc*1*FAIL*",

@@ -410,7 +410,7 @@ class TestMetafuncFunctional:
            def test_func2(arg1):
                assert arg1 in (10, 20)
        """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func1*0*PASS*",
            "*test_func1*1*FAIL*",

@@ -427,7 +427,7 @@ class TestMetafuncFunctional:
            def test_hello(xyz):
                pass
        """)
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
        result.assert_outcomes(passed=1)


@@ -450,7 +450,7 @@ class TestMetafuncFunctional:
            def test_myfunc(self, arg1, arg2):
                assert arg1 == arg2
        """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
        result.stdout.fnmatch_lines([
            "*test_myfunc*hello*PASS*",
            "*test_myfunc*world*FAIL*",

@@ -466,7 +466,7 @@ class TestMetafuncFunctional:
            def test_myfunc(self, hello):
                assert hello == "world"
        """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
        result.stdout.fnmatch_lines([
            "*test_myfunc*hello*PASS*",
            "*1 passed*"

@@ -483,7 +483,7 @@ class TestMetafuncFunctional:
                assert not hasattr(self, 'x')
                self.x = 1
        """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func*0*PASS*",
            "*test_func*1*PASS*",

@@ -501,7 +501,7 @@ class TestMetafuncFunctional:
            def setup_method(self, func):
                self.val = 1
        """)
-        result = testdir.inline_runpytest(p)
+        result = testdir.runpytest_inprocess(p)
        result.assert_outcomes(passed=1)

    def test_parametrize_functional2(self, testdir):

@@ -512,7 +512,7 @@ class TestMetafuncFunctional:
            def test_hello(arg1, arg2):
                assert 0, (arg1, arg2)
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        result.stdout.fnmatch_lines([
            "*(1, 4)*",
            "*(1, 5)*",

@@ -537,7 +537,7 @@ class TestMetafuncFunctional:
            def test_func1(arg1, arg2):
                assert arg1 == 11
        """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func1*1*PASS*",
            "*1 passed*"

@@ -558,7 +558,7 @@ class TestMetafuncFunctional:
            def test_func(arg2):
                assert arg2 == 10
        """)
-        result = testdir.inline_runpytest("-v", p)
+        result = testdir.runpytest_inprocess("-v", p)
        result.stdout.fnmatch_lines([
            "*test_func*1*PASS*",
            "*1 passed*"

@@ -574,7 +574,7 @@ class TestMetafuncFunctional:
            def test_function(a, b):
                assert a == b
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        assert result.ret == 1
        result.stdout.fnmatch_lines_random([
            "*test_function*basic*PASSED",

@@ -591,7 +591,7 @@ class TestMetafuncFunctional:
            def test_function(a, b):
                assert 1
        """)
-        result = testdir.inline_runpytest("-v")
+        result = testdir.runpytest_inprocess("-v")
        result.stdout.fnmatch_lines("""
            *test_function*1-b0*
            *test_function*1.3-b1*

@@ -647,7 +647,7 @@ class TestMetafuncFunctional:
            def test_function():
                pass
        """)
-        reprec = testdir.inline_runpytest()
+        reprec = testdir.runpytest_inprocess()
        reprec.assert_outcomes(passed=1)

    def test_generate_tests_only_done_in_subdir(self, testdir):

@@ -679,7 +679,7 @@ class TestMetafuncFunctional:
            test_x = make_tests()
            test_y = make_tests()
        """)
-        reprec = testdir.inline_runpytest()
+        reprec = testdir.runpytest_inprocess()
        reprec.assert_outcomes(passed=4)

    @pytest.mark.issue463
@@ -468,12 +468,12 @@ def test_rewritten():
        tmp = "--basetemp=%s" % p
        monkeypatch.setenv("PYTHONOPTIMIZE", "2")
        monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
-        assert testdir.runpybin("py.test", tmp).ret == 0
+        assert testdir.runpytest_subprocess(tmp).ret == 0
        tagged = "test_pyc_vs_pyo." + PYTEST_TAG
        assert tagged + ".pyo" in os.listdir("__pycache__")
        monkeypatch.undo()
        monkeypatch.delenv("PYTHONDONTWRITEBYTECODE", raising=False)
-        assert testdir.runpybin("py.test", tmp).ret == 1
+        assert testdir.runpytest_subprocess(tmp).ret == 1
        assert tagged + ".pyc" in os.listdir("__pycache__")

    def test_package(self, testdir):
@@ -121,7 +121,7 @@ def test_capturing_unicode(testdir, method):
        print (sys.stdout)
        print (%s)
    """ % obj)
-    result = testdir.runpytest_subprocess("--capture=%s" % method)
+    result = testdir.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines([
        "*1 passed*"
    ])

@@ -133,7 +133,7 @@ def test_capturing_bytes_in_utf8_encoding(testdir, method):
        def test_unicode():
            print ('b\\u00f6y')
    """)
-    result = testdir.runpytest_subprocess("--capture=%s" % method)
+    result = testdir.runpytest("--capture=%s" % method)
    result.stdout.fnmatch_lines([
        "*1 passed*"
    ])

@@ -144,7 +144,7 @@ def test_collect_capturing(testdir):
        print ("collect %s failure" % 13)
        import xyz42123
    """)
-    result = testdir.runpytest_subprocess(p)
+    result = testdir.runpytest(p)
    result.stdout.fnmatch_lines([
        "*Captured stdout*",
        "*collect 13 failure*",

@@ -165,7 +165,7 @@ class TestPerTestCapturing:
                print ("in func2")
                assert 0
        """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "setup module*",
            "setup test_func1*",

@@ -188,7 +188,7 @@ class TestPerTestCapturing:
            def teardown_function(func):
                print ("in teardown")
        """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*test_func():*",
            "*Captured stdout during setup*",

@@ -206,7 +206,7 @@ class TestPerTestCapturing:
                print ("in func2")
                assert 0
        """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
        s = result.stdout.str()
        assert "in func1" not in s
        assert "in func2" in s

@@ -222,7 +222,7 @@ class TestPerTestCapturing:
                print ("in func1")
                pass
        """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            '*teardown_function*',
            '*Captured stdout*',

@@ -240,7 +240,7 @@ class TestPerTestCapturing:
            def test_func():
                pass
        """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*def teardown_module(mod):*",
            "*Captured stdout*",

@@ -259,7 +259,7 @@ class TestPerTestCapturing:
                sys.stderr.write(str(2))
                raise ValueError
        """)
-        result = testdir.runpytest_subprocess(p1)
+        result = testdir.runpytest(p1)
        result.stdout.fnmatch_lines([
            "*test_capturing_outerr.py .F",
            "====* FAILURES *====",

@@ -410,7 +410,7 @@ class TestCaptureFixture:
            def test_two(capfd, capsys):
                pass
        """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*ERROR*setup*test_one*",
            "*capsys*capfd*same*time*",

@@ -425,7 +425,7 @@ class TestCaptureFixture:
                print ("xxx42xxx")
                assert 0
        """ % method)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "xxx42xxx",
        ])

@@ -447,7 +447,7 @@ class TestCaptureFixture:
            def test_hello(capsys, missingarg):
                pass
        """)
-        result = testdir.runpytest_subprocess(p)
+        result = testdir.runpytest(p)
        result.stdout.fnmatch_lines([
            "*test_partial_setup_failure*",
            "*1 error*",

@@ -485,7 +485,7 @@ def test_setup_failure_does_not_kill_capturing(testdir):
            raise ValueError(42)
    """))
    sub1.join("test_mod.py").write("def test_func1(): pass")
-    result = testdir.runpytest_subprocess(testdir.tmpdir, '--traceconfig')
+    result = testdir.runpytest(testdir.tmpdir, '--traceconfig')
    result.stdout.fnmatch_lines([
        "*ValueError(42)*",
        "*1 error*"

@@ -512,7 +512,7 @@ def test_capture_conftest_runtest_setup(testdir):
        print ("hello19")
    """)
    testdir.makepyfile("def test_func(): pass")
-    result = testdir.runpytest_subprocess()
+    result = testdir.runpytest()
    assert result.ret == 0
    assert 'hello19' not in result.stdout.str()

@@ -526,7 +526,7 @@ def test_capture_badoutput_issue412(testdir):
        os.write(1, omg)
        assert 0
    """)
-    result = testdir.runpytest_subprocess('--cap=fd')
+    result = testdir.runpytest('--cap=fd')
    result.stdout.fnmatch_lines('''
        *def test_func*
        *assert 0*

@@ -541,7 +541,7 @@ def test_capture_early_option_parsing(testdir):
        print ("hello19")
    """)
    testdir.makepyfile("def test_func(): pass")
-    result = testdir.runpytest_subprocess("-vs")
+    result = testdir.runpytest("-vs")
    assert result.ret == 0
    assert 'hello19' in result.stdout.str()

@@ -562,10 +562,8 @@ def test_capture_binary_output(testdir):
        if __name__ == '__main__':
            test_foo()
    """)
-    result = testdir.runpytest_subprocess('--assert=plain')
-    result.stdout.fnmatch_lines([
-        '*2 passed*',
-    ])
+    result = testdir.runpytest('--assert=plain')
+    result.assert_outcomes(passed=2)


class TestTextIO:
@@ -312,7 +312,7 @@ class TestSession:
    def test_collect_topdir(self, testdir):
        p = testdir.makepyfile("def test_func(): pass")
        id = "::".join([p.basename, "test_func"])
-        # XXX migrate to inline_genitems? (see below)
+        # XXX migrate to collectonly? (see below)
        config = testdir.parseconfig(id)
        topdir = testdir.tmpdir
        rcol = Session(config)
@@ -39,7 +39,7 @@ class TestParseIni:
            [pytest]
            minversion=9.0
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.ret != 0
        result.stderr.fnmatch_lines([
            "*tox.ini:2*requires*9.0*actual*"

@@ -320,7 +320,7 @@ def test_cmdline_processargs_simple(testdir):
        def pytest_cmdline_preparse(args):
            args.append("-h")
    """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
    result.stdout.fnmatch_lines([
        "*pytest*",
        "*-h*",

@@ -389,11 +389,11 @@ class TestWarning:
            def test_hello(fix):
                pass
        """)
-        result = testdir.inline_runpytest()
+        result = testdir.runpytest_inprocess()
        assert result.parseoutcomes()["warnings"] > 0
        assert "hello" not in result.stdout.str()

-        result = testdir.inline_runpytest("-rw")
+        result = testdir.runpytest_inprocess("-rw")
        result.stdout.fnmatch_lines("""
            ===*warning summary*===
            *WT1*test_warn_on_test_item*:5*hello*
@@ -1,5 +1,5 @@
from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile
-import py, pytest
+import py

class TestDoctests:
@@ -18,7 +18,7 @@ def test_nose_setup(testdir):
         test_hello.setup = lambda: l.append(1)
         test_hello.teardown = lambda: l.append(2)
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.assert_outcomes(passed=2)
 
 
@@ -63,7 +63,7 @@ def test_nose_setup_func(testdir):
         assert l == [1,2]
 
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.assert_outcomes(passed=2)
 
@@ -85,7 +85,7 @@ def test_nose_setup_func_failure(testdir):
         assert l == [1,2]
 
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*TypeError: <lambda>()*"
     ])
@@ -136,7 +136,7 @@ def test_nose_setup_partial(testdir):
         test_hello.setup = my_setup_partial
         test_hello.teardown = my_teardown_partial
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*"
     ])
@@ -203,7 +203,7 @@ def test_nose_test_generator_fixtures(testdir):
             #expect.append('setup')
             eq_(self.called, expect)
     """)
-    result = testdir.inline_runpytest(p, '-p', 'nose')
+    result = testdir.runpytest_inprocess(p, '-p', 'nose')
     result.stdout.fnmatch_lines([
         "*10 passed*"
     ])
@@ -234,7 +234,7 @@ def test_module_level_setup(testdir):
         assert items[2] == 2
         assert 1 not in items
     """)
-    result = testdir.inline_runpytest('-p', 'nose')
+    result = testdir.runpytest_inprocess('-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*",
     ])
@@ -256,7 +256,7 @@ def test_nose_style_setup_teardown(testdir):
         def test_world():
             assert l == [1]
     """)
-    result = testdir.inline_runpytest('-p', 'nose')
+    result = testdir.runpytest_inprocess('-p', 'nose')
     result.stdout.fnmatch_lines([
         "*2 passed*",
     ])
@@ -272,7 +272,7 @@ def test_nose_setup_ordering(testdir):
         def test_first(self):
             pass
     """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     result.stdout.fnmatch_lines([
         "*1 passed*",
     ])
@@ -297,7 +297,7 @@ def test_apiwrapper_problem_issue260(testdir):
         def test_fun(self):
             pass
     """)
-    result = testdir.inline_runpytest()
+    result = testdir.runpytest_inprocess()
     result.assert_outcomes(passed=1)
 
 @pytest.mark.skipif("sys.version_info < (2,6)")
@@ -323,7 +323,7 @@ def test_setup_teardown_linking_issue265(testdir):
             """Undoes the setup."""
             raise Exception("should not call teardown for skipped tests")
     ''')
-    reprec = testdir.inline_runpytest()
+    reprec = testdir.runpytest_inprocess()
     reprec.assert_outcomes(passed=1, skipped=1)
 
@@ -334,7 +334,7 @@ def test_SkipTest_during_collection(testdir):
         def test_failing():
             assert False
     """)
-    result = testdir.inline_runpytest(p)
+    result = testdir.runpytest_inprocess(p)
     result.assert_outcomes(skipped=1)
 
@@ -2,6 +2,13 @@
 import py
 import sys
 
+def runpdb_and_get_report(testdir, source):
+    p = testdir.makepyfile(source)
+    result = testdir.runpytest_inprocess("--pdb", p)
+    reports = result.reprec.getreports("pytest_runtest_logreport")
+    assert len(reports) == 3, reports  # setup/call/teardown
+    return reports[1]
+
 
 class TestPDB:
     def pytest_funcarg__pdblist(self, request):
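The new helper replaces testdir.inline_runsource1() and relies on the inprocess RunResult carrying the recorded reports on its reprec attribute. Usage as in the rewritten tests below (the final assertion is illustrative, not from this diff):

    rep = runpdb_and_get_report(testdir, """
        def test_func():
            assert 0
    """)
    assert rep.failed   # rep is the call-phase TestReport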
@@ -14,7 +21,7 @@ class TestPDB:
         return pdblist
 
     def test_pdb_on_fail(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             def test_func():
                 assert 0
         """)
@@ -24,7 +31,7 @@ class TestPDB:
         assert tb[-1].name == "test_func"
 
     def test_pdb_on_xfail(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import pytest
             @pytest.mark.xfail
             def test_func():
@@ -34,7 +41,7 @@ class TestPDB:
         assert not pdblist
 
     def test_pdb_on_skip(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import pytest
             def test_func():
                 pytest.skip("hello")
@@ -43,7 +50,7 @@ class TestPDB:
         assert len(pdblist) == 0
 
     def test_pdb_on_BdbQuit(self, testdir, pdblist):
-        rep = testdir.inline_runsource1('--pdb', """
+        rep = runpdb_and_get_report(testdir, """
             import bdb
             def test_func():
                 raise bdb.BdbQuit
|
|
@ -69,9 +69,7 @@ def test_testdir_runs_with_plugin(testdir):
|
|||
assert 1
|
||||
""")
|
||||
result = testdir.runpytest()
|
||||
result.stdout.fnmatch_lines([
|
||||
"*1 passed*"
|
||||
])
|
||||
result.assert_outcomes(passed=1)
|
||||
|
||||
|
||||
def make_holder():
|
||||
|
@@ -114,16 +112,6 @@ def test_makepyfile_unicode(testdir):
         unichr = chr
     testdir.makepyfile(unichr(0xfffd))
 
-def test_inprocess_plugins(testdir):
-    class Plugin(object):
-        configured = False
-        def pytest_configure(self, config):
-            self.configured = True
-    plugin = Plugin()
-    testdir.inprocess_run([], [plugin])
-
-    assert plugin.configured
-
 def test_inline_run_clean_modules(testdir):
     test_mod = testdir.makepyfile("def test_foo(): assert True")
     result = testdir.inline_run(str(test_mod))
tox.ini
@@ -1,6 +1,6 @@
 [tox]
 distshare={homedir}/.tox/distshare
-envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,py27-trial,py33-trial,doctesting,py27-cxfreeze
+envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,{py27,py33}-trial,py27-subprocess,doctesting,py27-cxfreeze
 
 [testenv]
 changedir=testing
@@ -9,6 +9,15 @@ deps=
     nose
     mock
 
+[testenv:py27-subprocess]
+changedir=.
+basepython=python2.7
+deps=pytest-xdist
+    mock
+    nose
+commands=
+    py.test -n3 -rfsxX --runpytest=subprocess {posargs:testing}
+
 [testenv:genscript]
 changedir=.
 commands= py.test --genscript=pytest1
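The new py27-subprocess env exercises the whole suite with --runpytest=subprocess. A hedged sketch of how the dispatching runpytest() in the pytester plugin presumably selects its backend (the attribute name is an assumption, not taken from this diff):

    def runpytest(self, *args, **kwargs):
        # pick the backend recorded from the --runpytest option (assumed attribute)
        if self._runpytest_method == "subprocess":
            return self.runpytest_subprocess(*args, **kwargs)
        return self.runpytest_inprocess(*args, **kwargs)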
@@ -136,7 +145,7 @@ commands=
 minversion=2.0
 plugins=pytester
 #--pyargs --doctest-modules --ignore=.tox
-addopts= -rxsX
+addopts= -rxsX -p pytester
 rsyncdirs=tox.ini pytest.py _pytest testing
 python_files=test_*.py *_test.py testing/*/*.py
 python_classes=Test Acceptance