From db5649ec6a8fcd9ef148b9f77797a22cb9f3cda1 Mon Sep 17 00:00:00 2001
From: holger krekel
Date: Tue, 28 Apr 2015 11:54:53 +0200
Subject: [PATCH] streamline pytester API majorly:

- integrate conftest into pytester plugin
- introduce runpytest() to either call runpytest_inprocess (default)
  or runpytest_subprocess (python -m pytest)
- move testdir.inline_runsource1 to pdb tests
- strike some unnecessary methods.
- a new section "writing plugins" and some better pytester docs

--HG--
branch : testrefactor
---
 CHANGELOG                     |   8 ++
 _pytest/pytester.py           | 254 +++++++++++++++++++++-------
 doc/en/writing_plugins.txt    |  42 ++++++
 testing/acceptance_test.py    |   2 +-
 testing/conftest.py           | 122 ----------------
 testing/python/collect.py     |  34 ++---
 testing/python/fixture.py     | 112 +++++++--------
 testing/python/metafunc.py    |  36 ++---
 testing/test_assertrewrite.py |   4 +-
 testing/test_capture.py       |  38 +++--
 testing/test_collection.py    |   2 +-
 testing/test_config.py        |   8 +-
 testing/test_doctest.py       |   2 +-
 testing/test_nose.py          |  22 +--
 testing/test_pdb.py           |  15 +-
 testing/test_pytester.py      |  14 +-
 tox.ini                       |  13 +-
 17 files changed, 360 insertions(+), 368 deletions(-)
 delete mode 100644 testing/conftest.py

diff --git a/CHANGELOG b/CHANGELOG
index ef773c59f..63219e02c 100644
--- a/CHANGELOG
+++ b/CHANGELOG
@@ -43,6 +43,14 @@
   implementations.  Use the ``hookwrapper`` mechanism instead
   already introduced with pytest-2.7.
 
+- speed up pytest's own test suite considerably by running tests
+  in-process by default (the test run can be switched back to
+  subprocesses with --runpytest=subprocess).  The main API for
+  running pytest in a test is "runpytest()"; use "runpytest_subprocess"
+  or "runpytest_inprocess" if you need a particular way of running
+  the test.  In all cases you get back a RunResult, but the in-process
+  one also has a "reprec" attribute with the recorded events/reports.
+
 2.7.1.dev (compared to 2.7.0)
 -----------------------------
 
diff --git a/_pytest/pytester.py b/_pytest/pytester.py
index 6e095bf52..53a077a71 100644
--- a/_pytest/pytester.py
+++ b/_pytest/pytester.py
@@ -1,4 +1,5 @@
 """ (disabled by default) support for testing pytest and pytest plugins. """
+import gc
 import sys
 import traceback
 import os
@@ -16,6 +17,136 @@
 from _pytest.core import TracedHookExecution
 
 from _pytest.main import Session, EXIT_OK
 
+
+def pytest_addoption(parser):
+    # group = parser.getgroup("pytester", "pytester (self-tests) options")
+    parser.addoption('--lsof',
+           action="store_true", dest="lsof", default=False,
+           help=("run FD checks if lsof is available"))
+
+    parser.addoption('--runpytest', default="inprocess", dest="runpytest",
+           choices=("inprocess", "subprocess", ),
+           help=("run pytest sub runs in tests using an 'inprocess' "
+                 "or 'subprocess' (python -m main) method"))
+
+
+def pytest_configure(config):
+    # This might be called multiple times. Only take the first.
+ global _pytest_fullpath + try: + _pytest_fullpath + except NameError: + _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc")) + _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py") + + if config.getvalue("lsof"): + checker = LsofFdLeakChecker() + if checker.matching_platform(): + config.pluginmanager.register(checker) + + +class LsofFdLeakChecker(object): + def get_open_files(self): + out = self._exec_lsof() + open_files = self._parse_lsof_output(out) + return open_files + + def _exec_lsof(self): + pid = os.getpid() + return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) + + def _parse_lsof_output(self, out): + def isopen(line): + return line.startswith('f') and ("deleted" not in line and + 'mem' not in line and "txt" not in line and 'cwd' not in line) + + open_files = [] + + for line in out.split("\n"): + if isopen(line): + fields = line.split('\0') + fd = fields[0][1:] + filename = fields[1][1:] + if filename.startswith('/'): + open_files.append((fd, filename)) + + return open_files + + def matching_platform(self): + try: + py.process.cmdexec("lsof -v") + except py.process.cmdexec.Error: + return False + else: + return True + + @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True) + def pytest_runtest_item(self, item): + lines1 = self.get_open_files() + yield + if hasattr(sys, "pypy_version_info"): + gc.collect() + lines2 = self.get_open_files() + + new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1]) + leaked_files = [t for t in lines2 if t[0] in new_fds] + if leaked_files: + error = [] + error.append("***** %s FD leakage detected" % len(leaked_files)) + error.extend([str(f) for f in leaked_files]) + error.append("*** Before:") + error.extend([str(f) for f in lines1]) + error.append("*** After:") + error.extend([str(f) for f in lines2]) + error.append(error[0]) + error.append("*** function %s:%s: %s " % item.location) + pytest.fail("\n".join(error), pytrace=False) + + +# XXX copied from execnet's conftest.py - needs to be merged +winpymap = { + 'python2.7': r'C:\Python27\python.exe', + 'python2.6': r'C:\Python26\python.exe', + 'python3.1': r'C:\Python31\python.exe', + 'python3.2': r'C:\Python32\python.exe', + 'python3.3': r'C:\Python33\python.exe', + 'python3.4': r'C:\Python34\python.exe', + 'python3.5': r'C:\Python35\python.exe', +} + +def getexecutable(name, cache={}): + try: + return cache[name] + except KeyError: + executable = py.path.local.sysfind(name) + if executable: + if name == "jython": + import subprocess + popen = subprocess.Popen([str(executable), "--version"], + universal_newlines=True, stderr=subprocess.PIPE) + out, err = popen.communicate() + if not err or "2.5" not in err: + executable = None + if "2.5.2" in err: + executable = None # http://bugs.jython.org/issue1790 + cache[name] = executable + return executable + +@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4", + 'pypy', 'pypy3']) +def anypython(request): + name = request.param + executable = getexecutable(name) + if executable is None: + if sys.platform == "win32": + executable = winpymap.get(name, None) + if executable: + executable = py.path.local(executable) + if executable.check(): + return executable + pytest.skip("no suitable %s found" % (name,)) + return executable + # used at least by pytest-xdist plugin @pytest.fixture def _pytest(request): @@ -40,23 +171,6 @@ def get_public_names(l): return [x for x in l if x[0] != "_"] -def pytest_addoption(parser): - group = parser.getgroup("pylib") - group.addoption('--no-tools-on-path', - 
action="store_true", dest="notoolsonpath", default=False, - help=("discover tools on PATH instead of going through py.cmdline.") - ) - -def pytest_configure(config): - # This might be called multiple times. Only take the first. - global _pytest_fullpath - try: - _pytest_fullpath - except NameError: - _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc")) - _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py") - - class ParsedCall: def __init__(self, name, kwargs): self.__dict__.update(kwargs) @@ -202,7 +316,7 @@ def pytest_funcarg__LineMatcher(request): return LineMatcher def pytest_funcarg__testdir(request): - tmptestdir = TmpTestdir(request) + tmptestdir = Testdir(request) return tmptestdir @@ -216,10 +330,10 @@ class RunResult: :ret: The return value. :outlines: List of lines captured from stdout. :errlines: List of lines captures from stderr. - :stdout: LineMatcher of stdout, use ``stdout.str()`` to + :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to reconstruct stdout or the commonly used ``stdout.fnmatch_lines()`` method. - :stderrr: LineMatcher of stderr. + :stderrr: :py:class:`LineMatcher` of stderr. :duration: Duration in seconds. """ @@ -253,7 +367,7 @@ class RunResult: -class TmpTestdir: +class Testdir: """Temporary test directory with tools to test/run py.test itself. This is based on the ``tmpdir`` fixture but provides a number of @@ -276,7 +390,6 @@ class TmpTestdir: def __init__(self, request): self.request = request - self.Config = request.config.__class__ # XXX remove duplication with tmpdir plugin basetmp = request.config._tmpdirhandler.ensuretemp("testdir") name = request.function.__name__ @@ -292,9 +405,14 @@ class TmpTestdir: self._savemodulekeys = set(sys.modules) self.chdir() # always chdir self.request.addfinalizer(self.finalize) + method = self.request.config.getoption("--runpytest") + if method == "inprocess": + self._runpytest_method = self.runpytest_inprocess + elif method == "subprocess": + self._runpytest_method = self.runpytest_subprocess def __repr__(self): - return "" % (self.tmpdir,) + return "" % (self.tmpdir,) def finalize(self): """Clean up global state artifacts. @@ -315,7 +433,6 @@ class TmpTestdir: This allows the interpreter to catch module changes in case the module is re-imported. - """ for name in set(sys.modules).difference(self._savemodulekeys): # it seems zope.interfaces is keeping some state @@ -512,43 +629,19 @@ class TmpTestdir: l = list(cmdlineargs) + [p] return self.inline_run(*l) - def inline_runsource1(self, *args): - """Run a test module in process using ``pytest.main()``. - - This behaves exactly like :py:meth:`inline_runsource` and - takes identical arguments. However the return value is a list - of the reports created by the pytest_runtest_logreport hook - during the run. - - """ - args = list(args) - source = args.pop() - p = self.makepyfile(source) - l = list(args) + [p] - reprec = self.inline_run(*l) - reports = reprec.getreports("pytest_runtest_logreport") - assert len(reports) == 3, reports # setup/call/teardown - return reports[1] - def inline_genitems(self, *args): """Run ``pytest.main(['--collectonly'])`` in-process. Retuns a tuple of the collected items and a :py:class:`HookRecorder` instance. - """ - return self.inprocess_run(list(args) + ['--collectonly']) - - def inprocess_run(self, args, plugins=()): - """Run ``pytest.main()`` in-process, return Items and a HookRecorder. 
- This runs the :py:func:`pytest.main` function to run all of py.test inside the test process itself like :py:meth:`inline_run`. However the return value is a tuple of the collection items and a :py:class:`HookRecorder` instance. """ - rec = self.inline_run(*args, plugins=plugins) + rec = self.inline_run("--collect-only", *args) items = [x.item for x in rec.getcalls("pytest_itemcollected")] return items, rec @@ -586,7 +679,7 @@ class TmpTestdir: reprec.ret = ret return reprec - def inline_runpytest(self, *args, **kwargs): + def runpytest_inprocess(self, *args, **kwargs): """ Return result of running pytest in-process, providing a similar interface to what self.runpytest() provides. """ if kwargs.get("syspathinsert"): @@ -615,7 +708,11 @@ class TmpTestdir: return res def runpytest(self, *args, **kwargs): - return self.inline_runpytest(*args, **kwargs) + """ Run pytest inline or in a subprocess, depending on the command line + option "--runpytest" and return a :py:class:`RunResult`. + + """ + return self._runpytest_method(*args, **kwargs) def parseconfig(self, *args): """Return a new py.test Config instance from given commandline args. @@ -788,57 +885,23 @@ class TmpTestdir: except UnicodeEncodeError: print("couldn't print to %s because of encoding" % (fp,)) - def runpybin(self, scriptname, *args): - """Run a py.* tool with arguments. + def _getpytestargs(self): + # we cannot use "(sys.executable,script)" + # because on windows the script is e.g. a py.test.exe + return (sys.executable, _pytest_fullpath,) # noqa - This can realy only be used to run py.test, you probably want - :py:meth:`runpytest` instead. + def runpython(self, script): + """Run a python script using sys.executable as interpreter. Returns a :py:class:`RunResult`. - """ - fullargs = self._getpybinargs(scriptname) + args - return self.run(*fullargs) - - def _getpybinargs(self, scriptname): - if not self.request.config.getvalue("notoolsonpath"): - # XXX we rely on script referring to the correct environment - # we cannot use "(sys.executable,script)" - # because on windows the script is e.g. a py.test.exe - return (sys.executable, _pytest_fullpath,) # noqa - else: - pytest.skip("cannot run %r with --no-tools-on-path" % scriptname) - - def runpython(self, script, prepend=True): - """Run a python script. - - If ``prepend`` is True then the directory from which the py - package has been imported will be prepended to sys.path. - - Returns a :py:class:`RunResult`. - - """ - # XXX The prepend feature is probably not very useful since the - # split of py and pytest. - if prepend: - s = self._getsysprepend() - if s: - script.write(s + "\n" + script.read()) return self.run(sys.executable, script) - def _getsysprepend(self): - if self.request.config.getvalue("notoolsonpath"): - s = "import sys;sys.path.insert(0,%r);" % str(py._pydir.dirpath()) - else: - s = "" - return s - def runpython_c(self, command): """Run python -c "command", return a :py:class:`RunResult`.""" - command = self._getsysprepend() + command return self.run(sys.executable, "-c", command) - def runpytest_subprocess(self, *args): + def runpytest_subprocess(self, *args, **kwargs): """Run py.test as a subprocess with given arguments. 
         Any plugins added to the :py:attr:`plugins` list will added
@@ -863,7 +926,8 @@ class TmpTestdir:
         plugins = [x for x in self.plugins if isinstance(x, str)]
         if plugins:
             args = ('-p', plugins[0]) + args
-        return self.runpybin("py.test", *args)
+        args = self._getpytestargs() + args
+        return self.run(*args)
 
     def spawn_pytest(self, string, expect_timeout=10.0):
         """Run py.test using pexpect.
@@ -874,10 +938,8 @@ class TmpTestdir:
 
         The pexpect child is returned.
 
         """
-        if self.request.config.getvalue("notoolsonpath"):
-            pytest.skip("--no-tools-on-path prevents running pexpect-spawn tests")
         basetemp = self.tmpdir.mkdir("pexpect")
-        invoke = " ".join(map(str, self._getpybinargs("py.test")))
+        invoke = " ".join(map(str, self._getpytestargs()))
         cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
         return self.spawn(cmd, expect_timeout=expect_timeout)
diff --git a/doc/en/writing_plugins.txt b/doc/en/writing_plugins.txt
index 78431c8ee..9f09fea9a 100644
--- a/doc/en/writing_plugins.txt
+++ b/doc/en/writing_plugins.txt
@@ -186,12 +186,44 @@ the plugin manager like this:
 
 If you want to look at the names of existing plugins, use
 the ``--traceconfig`` option.
 
+Testing plugins
+---------------
+
+pytest comes with some facilities that you can enable for testing your
+plugin.  Once your plugin is installed, you can enable the
+:py:class:`testdir <_pytest.pytester.Testdir>` fixture either by passing
+the command line option ``-p pytester`` to include the pytester plugin or
+by putting ``pytest_plugins = "pytester"`` into your test module or
+``conftest.py`` file.  You will then have a ``testdir`` fixture which you
+can use like this::
+
+    # content of test_myplugin.py
+
+    pytest_plugins = "pytester"    # to get the testdir fixture
+
+    def test_myplugin(testdir):
+        testdir.makepyfile("""
+            def test_example():
+                pass
+        """)
+        result = testdir.runpytest("--verbose")
+        result.stdout.fnmatch_lines("""
+            test_example*
+        """)
+
+Note that by default ``testdir.runpytest()`` will run pytest in-process.
+You can pass the command line option ``--runpytest=subprocess`` to have
+it run in a subprocess instead.
+
+See :py:class:`RunResult <_pytest.pytester.RunResult>` for the methods
+available on the result object returned by a call to ``runpytest``.
 
 .. _`writinghooks`:
 
 Writing hook functions
 ======================
+
 
 .. _validation:
 
 hook function validation and execution
@@ -493,3 +525,13 @@ Reference of objects involved in hooks
 
 .. autoclass:: _pytest.core.CallOutcome()
     :members:
+.. currentmodule:: _pytest.pytester
+
+.. autoclass:: Testdir()
+    :members: runpytest,runpytest_subprocess,runpytest_inprocess,makeconftest,makepyfile
+
+.. autoclass:: RunResult()
+    :members:
+
+.. 
autoclass:: LineMatcher() + :members: diff --git a/testing/acceptance_test.py b/testing/acceptance_test.py index 1e9291fa5..09f98a269 100644 --- a/testing/acceptance_test.py +++ b/testing/acceptance_test.py @@ -203,7 +203,7 @@ class TestGeneralUsage: os.chdir(os.path.dirname(os.getcwd())) print (py.log) """)) - result = testdir.runpython(p, prepend=False) + result = testdir.runpython(p) assert not result.ret def test_issue109_sibling_conftests_not_loaded(self, testdir): diff --git a/testing/conftest.py b/testing/conftest.py deleted file mode 100644 index 3f24c95e8..000000000 --- a/testing/conftest.py +++ /dev/null @@ -1,122 +0,0 @@ -import pytest -import sys -import gc - -pytest_plugins = "pytester", - -import os, py - -class LsofFdLeakChecker(object): - def get_open_files(self): - out = self._exec_lsof() - open_files = self._parse_lsof_output(out) - return open_files - - def _exec_lsof(self): - pid = os.getpid() - return py.process.cmdexec("lsof -Ffn0 -p %d" % pid) - - def _parse_lsof_output(self, out): - def isopen(line): - return line.startswith('f') and ("deleted" not in line and - 'mem' not in line and "txt" not in line and 'cwd' not in line) - - open_files = [] - - for line in out.split("\n"): - if isopen(line): - fields = line.split('\0') - fd = fields[0][1:] - filename = fields[1][1:] - if filename.startswith('/'): - open_files.append((fd, filename)) - - return open_files - - def matching_platform(self): - try: - py.process.cmdexec("lsof -v") - except py.process.cmdexec.Error: - return False - else: - return True - - @pytest.hookimpl_opts(hookwrapper=True, tryfirst=True) - def pytest_runtest_item(self, item): - lines1 = self.get_open_files() - yield - if hasattr(sys, "pypy_version_info"): - gc.collect() - lines2 = self.get_open_files() - - new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1]) - leaked_files = [t for t in lines2 if t[0] in new_fds] - if leaked_files: - error = [] - error.append("***** %s FD leakage detected" % len(leaked_files)) - error.extend([str(f) for f in leaked_files]) - error.append("*** Before:") - error.extend([str(f) for f in lines1]) - error.append("*** After:") - error.extend([str(f) for f in lines2]) - error.append(error[0]) - error.append("*** function %s:%s: %s " % item.location) - pytest.fail("\n".join(error), pytrace=False) - - -def pytest_addoption(parser): - parser.addoption('--lsof', - action="store_true", dest="lsof", default=False, - help=("run FD checks if lsof is available")) - - -def pytest_configure(config): - if config.getvalue("lsof"): - checker = LsofFdLeakChecker() - if checker.matching_platform(): - config.pluginmanager.register(checker) - - -# XXX copied from execnet's conftest.py - needs to be merged -winpymap = { - 'python2.7': r'C:\Python27\python.exe', - 'python2.6': r'C:\Python26\python.exe', - 'python3.1': r'C:\Python31\python.exe', - 'python3.2': r'C:\Python32\python.exe', - 'python3.3': r'C:\Python33\python.exe', - 'python3.4': r'C:\Python34\python.exe', - 'python3.5': r'C:\Python35\python.exe', -} - -def getexecutable(name, cache={}): - try: - return cache[name] - except KeyError: - executable = py.path.local.sysfind(name) - if executable: - if name == "jython": - import subprocess - popen = subprocess.Popen([str(executable), "--version"], - universal_newlines=True, stderr=subprocess.PIPE) - out, err = popen.communicate() - if not err or "2.5" not in err: - executable = None - if "2.5.2" in err: - executable = None # http://bugs.jython.org/issue1790 - cache[name] = executable - return executable - 
-@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4", - 'pypy', 'pypy3']) -def anypython(request): - name = request.param - executable = getexecutable(name) - if executable is None: - if sys.platform == "win32": - executable = winpymap.get(name, None) - if executable: - executable = py.path.local(executable) - if executable.check(): - return executable - pytest.skip("no suitable %s found" % (name,)) - return executable diff --git a/testing/python/collect.py b/testing/python/collect.py index 16b7288b6..2fbdf76f7 100644 --- a/testing/python/collect.py +++ b/testing/python/collect.py @@ -15,7 +15,7 @@ class TestModule: p.pyimport() del py.std.sys.modules['test_whatever'] b.ensure("test_whatever.py") - result = testdir.inline_runpytest() + result = testdir.runpytest_inprocess() result.stdout.fnmatch_lines([ "*import*mismatch*", "*imported*test_whatever*", @@ -59,7 +59,7 @@ class TestClass: def __init__(self): pass """) - result = testdir.inline_runpytest("-rw") + result = testdir.runpytest_inprocess("-rw") result.stdout.fnmatch_lines_random(""" WC1*test_class_with_init_warning.py*__init__* """) @@ -69,7 +69,7 @@ class TestClass: class test(object): pass """) - result = testdir.inline_runpytest() + result = testdir.runpytest_inprocess() result.stdout.fnmatch_lines([ "*collected 0*", ]) @@ -86,7 +86,7 @@ class TestClass: def teardown_class(cls): pass """) - result = testdir.inline_runpytest() + result = testdir.runpytest_inprocess() result.stdout.fnmatch_lines([ "*1 passed*", ]) @@ -534,7 +534,7 @@ class TestConftestCustomization: """) testdir.makepyfile("def test_some(): pass") testdir.makepyfile(test_xyz="def test_func(): pass") - result = testdir.inline_runpytest("--collect-only") + result = testdir.runpytest_inprocess("--collect-only") result.stdout.fnmatch_lines([ "* 0 assert "hello" not in result.stdout.str() - result = testdir.inline_runpytest("-rw") + result = testdir.runpytest_inprocess("-rw") result.stdout.fnmatch_lines(""" ===*warning summary*=== *WT1*test_warn_on_test_item*:5*hello* diff --git a/testing/test_doctest.py b/testing/test_doctest.py index 661f85841..3b0bb6868 100644 --- a/testing/test_doctest.py +++ b/testing/test_doctest.py @@ -1,5 +1,5 @@ from _pytest.doctest import DoctestItem, DoctestModule, DoctestTextfile -import py, pytest +import py class TestDoctests: diff --git a/testing/test_nose.py b/testing/test_nose.py index b4b3d8836..58ccdfcb6 100644 --- a/testing/test_nose.py +++ b/testing/test_nose.py @@ -18,7 +18,7 @@ def test_nose_setup(testdir): test_hello.setup = lambda: l.append(1) test_hello.teardown = lambda: l.append(2) """) - result = testdir.inline_runpytest(p, '-p', 'nose') + result = testdir.runpytest_inprocess(p, '-p', 'nose') result.assert_outcomes(passed=2) @@ -63,7 +63,7 @@ def test_nose_setup_func(testdir): assert l == [1,2] """) - result = testdir.inline_runpytest(p, '-p', 'nose') + result = testdir.runpytest_inprocess(p, '-p', 'nose') result.assert_outcomes(passed=2) @@ -85,7 +85,7 @@ def test_nose_setup_func_failure(testdir): assert l == [1,2] """) - result = testdir.inline_runpytest(p, '-p', 'nose') + result = testdir.runpytest_inprocess(p, '-p', 'nose') result.stdout.fnmatch_lines([ "*TypeError: ()*" ]) @@ -136,7 +136,7 @@ def test_nose_setup_partial(testdir): test_hello.setup = my_setup_partial test_hello.teardown = my_teardown_partial """) - result = testdir.inline_runpytest(p, '-p', 'nose') + result = testdir.runpytest_inprocess(p, '-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*" ]) @@ -203,7 +203,7 @@ def 
test_nose_test_generator_fixtures(testdir): #expect.append('setup') eq_(self.called, expect) """) - result = testdir.inline_runpytest(p, '-p', 'nose') + result = testdir.runpytest_inprocess(p, '-p', 'nose') result.stdout.fnmatch_lines([ "*10 passed*" ]) @@ -234,7 +234,7 @@ def test_module_level_setup(testdir): assert items[2] == 2 assert 1 not in items """) - result = testdir.inline_runpytest('-p', 'nose') + result = testdir.runpytest_inprocess('-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*", ]) @@ -256,7 +256,7 @@ def test_nose_style_setup_teardown(testdir): def test_world(): assert l == [1] """) - result = testdir.inline_runpytest('-p', 'nose') + result = testdir.runpytest_inprocess('-p', 'nose') result.stdout.fnmatch_lines([ "*2 passed*", ]) @@ -272,7 +272,7 @@ def test_nose_setup_ordering(testdir): def test_first(self): pass """) - result = testdir.inline_runpytest() + result = testdir.runpytest_inprocess() result.stdout.fnmatch_lines([ "*1 passed*", ]) @@ -297,7 +297,7 @@ def test_apiwrapper_problem_issue260(testdir): def test_fun(self): pass """) - result = testdir.inline_runpytest() + result = testdir.runpytest_inprocess() result.assert_outcomes(passed=1) @pytest.mark.skipif("sys.version_info < (2,6)") @@ -323,7 +323,7 @@ def test_setup_teardown_linking_issue265(testdir): """Undoes the setup.""" raise Exception("should not call teardown for skipped tests") ''') - reprec = testdir.inline_runpytest() + reprec = testdir.runpytest_inprocess() reprec.assert_outcomes(passed=1, skipped=1) @@ -334,7 +334,7 @@ def test_SkipTest_during_collection(testdir): def test_failing(): assert False """) - result = testdir.inline_runpytest(p) + result = testdir.runpytest_inprocess(p) result.assert_outcomes(skipped=1) diff --git a/testing/test_pdb.py b/testing/test_pdb.py index 18487d8d9..a2fd4d43d 100644 --- a/testing/test_pdb.py +++ b/testing/test_pdb.py @@ -2,6 +2,13 @@ import py import sys +def runpdb_and_get_report(testdir, source): + p = testdir.makepyfile(source) + result = testdir.runpytest_inprocess("--pdb", p) + reports = result.reprec.getreports("pytest_runtest_logreport") + assert len(reports) == 3, reports # setup/call/teardown + return reports[1] + class TestPDB: def pytest_funcarg__pdblist(self, request): @@ -14,7 +21,7 @@ class TestPDB: return pdblist def test_pdb_on_fail(self, testdir, pdblist): - rep = testdir.inline_runsource1('--pdb', """ + rep = runpdb_and_get_report(testdir, """ def test_func(): assert 0 """) @@ -24,7 +31,7 @@ class TestPDB: assert tb[-1].name == "test_func" def test_pdb_on_xfail(self, testdir, pdblist): - rep = testdir.inline_runsource1('--pdb', """ + rep = runpdb_and_get_report(testdir, """ import pytest @pytest.mark.xfail def test_func(): @@ -34,7 +41,7 @@ class TestPDB: assert not pdblist def test_pdb_on_skip(self, testdir, pdblist): - rep = testdir.inline_runsource1('--pdb', """ + rep = runpdb_and_get_report(testdir, """ import pytest def test_func(): pytest.skip("hello") @@ -43,7 +50,7 @@ class TestPDB: assert len(pdblist) == 0 def test_pdb_on_BdbQuit(self, testdir, pdblist): - rep = testdir.inline_runsource1('--pdb', """ + rep = runpdb_and_get_report(testdir, """ import bdb def test_func(): raise bdb.BdbQuit diff --git a/testing/test_pytester.py b/testing/test_pytester.py index 5478b2df8..65660afdf 100644 --- a/testing/test_pytester.py +++ b/testing/test_pytester.py @@ -69,9 +69,7 @@ def test_testdir_runs_with_plugin(testdir): assert 1 """) result = testdir.runpytest() - result.stdout.fnmatch_lines([ - "*1 passed*" - ]) + 
result.assert_outcomes(passed=1) def make_holder(): @@ -114,16 +112,6 @@ def test_makepyfile_unicode(testdir): unichr = chr testdir.makepyfile(unichr(0xfffd)) -def test_inprocess_plugins(testdir): - class Plugin(object): - configured = False - def pytest_configure(self, config): - self.configured = True - plugin = Plugin() - testdir.inprocess_run([], [plugin]) - - assert plugin.configured - def test_inline_run_clean_modules(testdir): test_mod = testdir.makepyfile("def test_foo(): assert True") result = testdir.inline_run(str(test_mod)) diff --git a/tox.ini b/tox.ini index 682289413..f8506fcdc 100644 --- a/tox.ini +++ b/tox.ini @@ -1,6 +1,6 @@ [tox] distshare={homedir}/.tox/distshare -envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,py27-trial,py33-trial,doctesting,py27-cxfreeze +envlist=flakes,py26,py27,py34,pypy,py27-pexpect,py33-pexpect,py27-nobyte,py33,py27-xdist,py33-xdist,{py27,py33}-trial,py27-subprocess,doctesting,py27-cxfreeze [testenv] changedir=testing @@ -9,6 +9,15 @@ deps= nose mock +[testenv:py27-subprocess] +changedir=. +basepython=python2.7 +deps=pytest-xdist + mock + nose +commands= + py.test -n3 -rfsxX --runpytest=subprocess {posargs:testing} + [testenv:genscript] changedir=. commands= py.test --genscript=pytest1 @@ -136,7 +145,7 @@ commands= minversion=2.0 plugins=pytester #--pyargs --doctest-modules --ignore=.tox -addopts= -rxsX +addopts= -rxsX -p pytester rsyncdirs=tox.ini pytest.py _pytest testing python_files=test_*.py *_test.py testing/*/*.py python_classes=Test Acceptance
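
The CHANGELOG entry above only describes the new ``runpytest()`` family of
methods in prose.  The following minimal sketch (file and test names are made
up for illustration, they are not part of the patch) shows how the three
flavours introduced here are meant to be used from a test::

    # content of test_runpytest_api.py -- illustrative sketch only
    pytest_plugins = "pytester"

    def test_runpytest_flavours(testdir):
        testdir.makepyfile("""
            def test_ok():
                assert 1
        """)
        # runpytest() picks in-process or subprocess mode per --runpytest
        result = testdir.runpytest()
        result.assert_outcomes(passed=1)

        # the in-process flavour additionally records hook calls/reports
        result = testdir.runpytest_inprocess()
        reports = result.reprec.getreports("pytest_runtest_logreport")
        assert len(reports) == 3  # setup/call/teardown

        # the subprocess flavour always spawns a fresh pytest process
        result = testdir.runpytest_subprocess()
        result.stdout.fnmatch_lines(["*1 passed*"])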
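
The ``Testing plugins`` section added to doc/en/writing_plugins.txt shows a
single self-contained test.  A slightly fuller sketch (the conftest content
and all names below are hypothetical, not taken from the patch) combining
``makeconftest()``, ``makepyfile()`` and ``runpytest()`` could look like
this::

    # content of test_hello_plugin.py -- hypothetical example
    pytest_plugins = "pytester"

    def test_hello_fixture(testdir):
        # a throw-away conftest.py standing in for the plugin under test
        testdir.makeconftest("""
            import pytest

            @pytest.fixture
            def hello():
                return "hello"
        """)
        testdir.makepyfile("""
            def test_greeting(hello):
                assert hello == "hello"
        """)
        result = testdir.runpytest("-v")
        result.stdout.fnmatch_lines(["*test_greeting*PASSED*"])
        assert result.ret == 0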