diff --git a/.hgtags b/.hgtags index fbc09d511..f378fae3e 100644 --- a/.hgtags +++ b/.hgtags @@ -69,3 +69,4 @@ a064ad64d167508a8e9e73766b1a4e6bd10c85db 2.5.0 60725b17a9d1af4100abb8be3f9f4ddf6262bf34 2.6.0 60725b17a9d1af4100abb8be3f9f4ddf6262bf34 2.6.0 88af949b9611494e2c65d528f9e565b00fb7e8ca 2.6.0 +a4f9639702baa3eb4f3b16e162f74f7b69f3f9e1 2.6.1 diff --git a/CHANGELOG b/CHANGELOG index c2f53fef6..9ff564f85 100644 --- a/CHANGELOG +++ b/CHANGELOG @@ -1,6 +1,21 @@ NEXT +----------- + +- fixed issue561: adapt autouse fixture example for python3. + +2.6.1 ----------------------------------- +- No longer show line numbers in the --verbose output, the output is now + purely the nodeid. The line number is still shown in failure reports. + Thanks Floris Bruynooghe. + +- fix issue437 where assertion rewriting could cause pytest-xdist slaves + to collect different tests. Thanks Bruno Oliveira. + +- fix issue555: add "errors" attribute to capture-streams to satisfy + some distutils and possibly other code accessing sys.stdout.errors. + - fix issue547 capsys/capfd also work when output capturing ("-s") is disabled. - address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via @@ -17,6 +32,9 @@ NEXT - fix issue544 by only removing "@NUM" at the end of "::" separated parts and if the part has an ".py" extension +- don't use py.std import helper, rather import things directly. + Thanks Bruno Oliveira. + 2.6 ----------------------------------- diff --git a/_pytest/__init__.py b/_pytest/__init__.py index f8d003fcf..39aed090e 100644 --- a/_pytest/__init__.py +++ b/_pytest/__init__.py @@ -1,2 +1,2 @@ # -__version__ = '2.6.1.dev1' +__version__ = '2.6.2.dev1' diff --git a/_pytest/assertion/oldinterpret.py b/_pytest/assertion/oldinterpret.py index df737b7d5..40c6d3936 100644 --- a/_pytest/assertion/oldinterpret.py +++ b/_pytest/assertion/oldinterpret.py @@ -1,3 +1,5 @@ +import traceback +import types import py import sys, inspect from compiler import parse, ast, pycodegen @@ -477,7 +479,7 @@ def check(s, frame=None): def interpret(source, frame, should_fail=False): module = Interpretable(parse(source, 'exec').node) #print "got module", module - if isinstance(frame, py.std.types.FrameType): + if isinstance(frame, types.FrameType): frame = py.code.Frame(frame) try: module.run(frame) @@ -487,7 +489,6 @@ def interpret(source, frame, should_fail=False): except passthroughex: raise except: - import traceback traceback.print_exc() if should_fail: return ("(assertion failed, but when it was re-run for " diff --git a/_pytest/assertion/rewrite.py b/_pytest/assertion/rewrite.py index 95bf4117d..523d2b2dc 100644 --- a/_pytest/assertion/rewrite.py +++ b/_pytest/assertion/rewrite.py @@ -131,7 +131,7 @@ class AssertionRewritingHook(object): pyc = os.path.join(cache_dir, cache_name) # Notice that even if we're in a read-only directory, I'm going # to check for a cached pyc. This may not be optimal... - co = _read_pyc(fn_pypath, pyc) + co = _read_pyc(fn_pypath, pyc, state.trace) if co is None: state.trace("rewriting %r" % (fn,)) co = _rewrite_test(state, fn_pypath) @@ -289,7 +289,7 @@ def _make_rewritten_pyc(state, fn, pyc, co): if _write_pyc(state, co, fn, proc_pyc): os.rename(proc_pyc, pyc) -def _read_pyc(source, pyc): +def _read_pyc(source, pyc, trace=lambda x: None): """Possibly read a pytest pyc containing rewritten code. Return rewritten code if successful or None if not. 
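    The optional ``trace`` callable added in this change is invoked with a
    short message whenever a cached pyc cannot be used (for example on an
    ``EnvironmentError`` or an invalid/out-of-date header), which makes
    cache misses debuggable; it defaults to a no-op.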
@@ -298,23 +298,27 @@ def _read_pyc(source, pyc): fp = open(pyc, "rb") except IOError: return None - try: + with fp: try: mtime = int(source.mtime()) data = fp.read(8) - except EnvironmentError: + except EnvironmentError as e: + trace('_read_pyc(%s): EnvironmentError %s' % (source, e)) return None # Check for invalid or out of date pyc file. if (len(data) != 8 or data[:4] != imp.get_magic() or struct.unpack("", "PDB set_trace (IO-capturing turned off)") - py.std.pdb.Pdb().set_trace(frame) + pdb.Pdb().set_trace(frame) class PdbInvoke: @@ -74,7 +78,8 @@ def _enter_pdb(node, excinfo, rep): def _postmortem_traceback(excinfo): # A doctest.UnexpectedException is not useful for post_mortem. # Use the underlying exception instead: - if isinstance(excinfo.value, py.std.doctest.UnexpectedException): + from doctest import UnexpectedException + if isinstance(excinfo.value, UnexpectedException): return excinfo.value.exc_info[2] else: return excinfo._excinfo[2] @@ -88,7 +93,6 @@ def _find_last_non_hidden_frame(stack): def post_mortem(t): - pdb = py.std.pdb class Pdb(pdb.Pdb): def get_stack(self, f, t): stack, i = pdb.Pdb.get_stack(self, f, t) diff --git a/_pytest/pytester.py b/_pytest/pytester.py index 36713bf2c..9e987ae03 100644 --- a/_pytest/pytester.py +++ b/_pytest/pytester.py @@ -1,15 +1,21 @@ """ (disabled by default) support for testing pytest and pytest plugins. """ - -import py, pytest -import sys, os +import inspect +import sys +import os import codecs import re import time +import platform from fnmatch import fnmatch -from _pytest.main import Session, EXIT_OK +import subprocess + +import py +import pytest from py.builtin import print_ from _pytest.core import HookRelay +from _pytest.main import Session, EXIT_OK + def get_public_names(l): """Only return names from iterator l without a leading underscore.""" @@ -87,10 +93,10 @@ class HookRecorder: def _makecallparser(self, method): name = method.__name__ - args, varargs, varkw, default = py.std.inspect.getargspec(method) + args, varargs, varkw, default = inspect.getargspec(method) if not args or args[0] != "self": args.insert(0, 'self') - fspec = py.std.inspect.formatargspec(args, varargs, varkw, default) + fspec = inspect.formatargspec(args, varargs, varkw, default) # we use exec because we want to have early type # errors on wrong input arguments, using # *args/**kwargs delays this and gives errors @@ -122,7 +128,7 @@ class HookRecorder: __tracebackhide__ = True i = 0 entries = list(entries) - backlocals = py.std.sys._getframe(1).f_locals + backlocals = sys._getframe(1).f_locals while entries: name, check = entries.pop(0) for ind, call in enumerate(self.calls[i:]): @@ -210,7 +216,7 @@ class TmpTestdir: def finalize(self): for p in self._syspathremove: - py.std.sys.path.remove(p) + sys.path.remove(p) if hasattr(self, '_olddir'): self._olddir.chdir() # delete modules that have been loaded from tmpdir @@ -283,7 +289,7 @@ class TmpTestdir: def syspathinsert(self, path=None): if path is None: path = self.tmpdir - py.std.sys.path.insert(0, str(path)) + sys.path.insert(0, str(path)) self._syspathremove.append(str(path)) def mkdir(self, name): @@ -426,9 +432,8 @@ class TmpTestdir: env['PYTHONPATH'] = os.pathsep.join(filter(None, [ str(os.getcwd()), env.get('PYTHONPATH', '')])) kw['env'] = env - #print "env", env - return py.std.subprocess.Popen(cmdargs, - stdout=stdout, stderr=stderr, **kw) + return subprocess.Popen(cmdargs, + stdout=stdout, stderr=stderr, **kw) def run(self, *cmdargs): return self._run(*cmdargs) @@ -474,9 +479,9 @@ class TmpTestdir: 
def _getpybinargs(self, scriptname): if not self.request.config.getvalue("notoolsonpath"): # XXX we rely on script referring to the correct environment - # we cannot use "(py.std.sys.executable,script)" + # we cannot use "(sys.executable,script)" # because on windows the script is e.g. a py.test.exe - return (py.std.sys.executable, _pytest_fullpath,) # noqa + return (sys.executable, _pytest_fullpath,) # noqa else: pytest.skip("cannot run %r with --no-tools-on-path" % scriptname) @@ -496,7 +501,7 @@ class TmpTestdir: def runpython_c(self, command): command = self._getsysprepend() + command - return self.run(py.std.sys.executable, "-c", command) + return self.run(sys.executable, "-c", command) def runpytest(self, *args): p = py.path.local.make_numbered_dir(prefix="runpytest-", @@ -523,7 +528,7 @@ class TmpTestdir: def spawn(self, cmd, expect_timeout=10.0): pexpect = pytest.importorskip("pexpect", "3.0") - if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine(): + if hasattr(sys, 'pypy_version_info') and '64' in platform.machine(): pytest.skip("pypy-64 bit not supported") if sys.platform == "darwin": pytest.xfail("pexpect does not work reliably on darwin?!") @@ -670,7 +675,7 @@ class LineMatcher: def fnmatch_lines(self, lines2): def show(arg1, arg2): - py.builtin.print_(arg1, arg2, file=py.std.sys.stderr) + py.builtin.print_(arg1, arg2, file=sys.stderr) lines2 = self._getlines(lines2) lines1 = self.lines[:] nextline = None diff --git a/_pytest/recwarn.py b/_pytest/recwarn.py index 987ff8f97..482b78b0e 100644 --- a/_pytest/recwarn.py +++ b/_pytest/recwarn.py @@ -1,7 +1,8 @@ """ recording warnings during test function execution. """ -import py import sys +import warnings + def pytest_funcarg__recwarn(request): """Return a WarningsRecorder instance that provides these methods: @@ -13,7 +14,6 @@ def pytest_funcarg__recwarn(request): on warning categories. """ if sys.version_info >= (2,7): - import warnings oldfilters = warnings.filters[:] warnings.simplefilter('default') def reset_filters(): @@ -30,26 +30,24 @@ def deprecated_call(func, *args, **kwargs): """ assert that calling ``func(*args, **kwargs)`` triggers a DeprecationWarning. 
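    A hypothetical usage sketch (``api_v1`` is an invented name, not part
    of pytest)::

        import warnings

        def api_v1():
            warnings.warn("use api_v2 instead", DeprecationWarning)
            return 1

        assert pytest.deprecated_call(api_v1) == 1

    The return value of ``func`` is passed through on success.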
""" - warningmodule = py.std.warnings l = [] - oldwarn_explicit = getattr(warningmodule, 'warn_explicit') + oldwarn_explicit = getattr(warnings, 'warn_explicit') def warn_explicit(*args, **kwargs): l.append(args) oldwarn_explicit(*args, **kwargs) - oldwarn = getattr(warningmodule, 'warn') + oldwarn = getattr(warnings, 'warn') def warn(*args, **kwargs): l.append(args) oldwarn(*args, **kwargs) - warningmodule.warn_explicit = warn_explicit - warningmodule.warn = warn + warnings.warn_explicit = warn_explicit + warnings.warn = warn try: ret = func(*args, **kwargs) finally: - warningmodule.warn_explicit = warn_explicit - warningmodule.warn = warn + warnings.warn_explicit = warn_explicit + warnings.warn = warn if not l: - #print warningmodule __tracebackhide__ = True raise AssertionError("%r did not produce DeprecationWarning" %(func,)) return ret @@ -65,7 +63,6 @@ class RecordedWarning: class WarningsRecorder: def __init__(self): - warningmodule = py.std.warnings self.list = [] def showwarning(message, category, filename, lineno, line=0): self.list.append(RecordedWarning( @@ -76,8 +73,8 @@ class WarningsRecorder: except TypeError: # < python2.6 self.old_showwarning(message, category, filename, lineno) - self.old_showwarning = warningmodule.showwarning - warningmodule.showwarning = showwarning + self.old_showwarning = warnings.showwarning + warnings.showwarning = showwarning def pop(self, cls=Warning): """ pop the first recorded warning, raise exception if not exists.""" @@ -88,7 +85,6 @@ class WarningsRecorder: assert 0, "%r not found in %r" %(cls, self.list) #def resetregistry(self): - # import warnings # warnings.onceregistry.clear() # warnings.__warningregistry__.clear() @@ -96,4 +92,4 @@ class WarningsRecorder: self.list[:] = [] def finalize(self): - py.std.warnings.showwarning = self.old_showwarning + warnings.showwarning = self.old_showwarning diff --git a/_pytest/runner.py b/_pytest/runner.py index 428e6ec42..2932f14c3 100644 --- a/_pytest/runner.py +++ b/_pytest/runner.py @@ -1,9 +1,10 @@ """ basic collect and runtest protocol implementations """ +import bdb +import sys +from time import time import py import pytest -import sys -from time import time from py._code.code import TerminalRepr def pytest_namespace(): @@ -118,7 +119,7 @@ def check_interactive_exception(call, report): return call.excinfo and not ( hasattr(report, "wasxfail") or call.excinfo.errisinstance(skip.Exception) or - call.excinfo.errisinstance(py.std.bdb.BdbQuit)) + call.excinfo.errisinstance(bdb.BdbQuit)) def call_runtest_hook(item, when, **kwds): hookname = "pytest_runtest_" + when diff --git a/_pytest/skipping.py b/_pytest/skipping.py index aee98ffa8..56f28b118 100644 --- a/_pytest/skipping.py +++ b/_pytest/skipping.py @@ -1,7 +1,10 @@ """ support for skip/xfail functions and markers. 
""" - -import py, pytest +import os import sys +import traceback + +import py +import pytest def pytest_addoption(parser): group = parser.getgroup("general") @@ -79,7 +82,7 @@ class MarkEvaluator: msg = [" " * (self.exc[1].offset + 4) + "^",] msg.append("SyntaxError: invalid syntax") else: - msg = py.std.traceback.format_exception_only(*self.exc[:2]) + msg = traceback.format_exception_only(*self.exc[:2]) pytest.fail("Error evaluating %r expression\n" " %s\n" "%s" @@ -87,7 +90,7 @@ class MarkEvaluator: pytrace=False) def _getglobals(self): - d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config} + d = {'os': os, 'sys': sys, 'config': self.item.config} func = self.item.obj try: d.update(func.__globals__) diff --git a/_pytest/terminal.py b/_pytest/terminal.py index abb8f1549..b73f3f113 100644 --- a/_pytest/terminal.py +++ b/_pytest/terminal.py @@ -5,6 +5,8 @@ This is a good source for looking at the various reporting hooks. import pytest import py import sys +import time + def pytest_addoption(parser): group = parser.getgroup("terminal reporting", "reporting", after="general") @@ -49,7 +51,7 @@ def getreportopt(config): optvalue = config.option.report if optvalue: py.builtin.print_("DEPRECATED: use -r instead of --report option.", - file=py.std.sys.stderr) + file=sys.stderr) if optvalue: for setting in optvalue.split(","): setting = setting.strip() @@ -95,7 +97,7 @@ class TerminalReporter: self.stats = {} self.startdir = self.curdir = py.path.local() if file is None: - file = py.std.sys.stdout + file = sys.stdout self._tw = self.writer = py.io.TerminalWriter(file) if self.config.option.color == 'yes': self._tw.hasmarkup = True @@ -265,7 +267,7 @@ class TerminalReporter: @pytest.mark.trylast def pytest_sessionstart(self, session): - self._sessionstarttime = py.std.time.time() + self._sessionstarttime = time.time() if not self.showheader: return self.write_sep("=", "test session starts", bold=True) @@ -380,9 +382,6 @@ class TerminalReporter: fspath = "%s <- %s" % (collect_fspath, fspath) if fspath: line = str(fspath) - if lineno is not None: - lineno += 1 - line += "@" + str(lineno) if domain: split = str(domain).split('[') split[0] = split[0].replace('.', '::') # don't replace '.' in params @@ -469,7 +468,7 @@ class TerminalReporter: self._tw.line(content) def summary_stats(self): - session_duration = py.std.time.time() - self._sessionstarttime + session_duration = time.time() - self._sessionstarttime keys = ("failed passed skipped deselected " "xfailed xpassed warnings").split() diff --git a/_pytest/tmpdir.py b/_pytest/tmpdir.py index 8eb0cdec0..53c396b76 100644 --- a/_pytest/tmpdir.py +++ b/_pytest/tmpdir.py @@ -1,7 +1,11 @@ """ support for providing temporary directories to test functions. """ -import pytest, py +import re + +import pytest +import py from _pytest.monkeypatch import monkeypatch + class TempdirHandler: def __init__(self, config): self.config = config @@ -63,7 +67,7 @@ def tmpdir(request): path object. """ name = request.node.name - name = py.std.re.sub("[\W]", "_", name) + name = re.sub("[\W]", "_", name) MAXVAL = 30 if len(name) > MAXVAL: name = name[:MAXVAL] diff --git a/_pytest/unittest.py b/_pytest/unittest.py index 435a93514..af2aa9357 100644 --- a/_pytest/unittest.py +++ b/_pytest/unittest.py @@ -1,27 +1,25 @@ """ discovery and running of std-library "unittest" style tests. 
""" -import pytest, py +from __future__ import absolute_import +import traceback import sys +import pytest +import py + + # for transfering markers from _pytest.python import transfer_markers -def is_unittest(obj): - """Is obj a subclass of unittest.TestCase?""" - unittest = sys.modules.get('unittest') - if unittest is None: - return # nobody can have derived unittest.TestCase - try: - return issubclass(obj, unittest.TestCase) - except KeyboardInterrupt: - raise - except: - return False - - def pytest_pycollect_makeitem(collector, name, obj): - if is_unittest(obj): - return UnitTestCase(name, parent=collector) + # has unittest been imported and is obj a subclass of its TestCase? + try: + if not issubclass(obj, sys.modules["unittest"].TestCase): + return + except Exception: + return + # yes, so let's collect it + return UnitTestCase(name, parent=collector) class UnitTestCase(pytest.Class): @@ -41,11 +39,12 @@ class UnitTestCase(pytest.Class): super(UnitTestCase, self).setup() def collect(self): + from unittest import TestLoader cls = self.obj if not getattr(cls, "__test__", True): return self.session._fixturemanager.parsefactories(self, unittest=True) - loader = py.std.unittest.TestLoader() + loader = TestLoader() module = self.getparent(pytest.Module).obj foundsomething = False for name in loader.getTestCaseNames(self.obj): @@ -90,7 +89,7 @@ class TestCaseFunction(pytest.Function): except TypeError: try: try: - l = py.std.traceback.format_exception(*rawexcinfo) + l = traceback.format_exception(*rawexcinfo) l.insert(0, "NOTE: Incompatible Exception Representation, " "displaying natively:\n\n") pytest.fail("".join(l), pytrace=False) diff --git a/doc/en/announce/release-2.6.1.txt b/doc/en/announce/release-2.6.1.txt new file mode 100644 index 000000000..6f27c5861 --- /dev/null +++ b/doc/en/announce/release-2.6.1.txt @@ -0,0 +1,59 @@ +pytest-2.6.1: fixes and new xfail feature +=========================================================================== + +pytest is a mature Python testing tool with more than a 1100 tests +against itself, passing on many different interpreters and platforms. +The 2.6.1 release is drop-in compatible to 2.5.2 and actually fixes some +regressions introduced with 2.6.0. It also brings a little feature +to the xfail marker which now recognizes expected exceptions, +see the CHANGELOG below. + +See docs at: + + http://pytest.org + +As usual, you can upgrade from pypi via:: + + pip install -U pytest + +Thanks to all who contributed, among them: + + Floris Bruynooghe + Bruno Oliveira + Nicolas Delaby + +have fun, +holger krekel + +Changes 2.6.1 +================= + +- No longer show line numbers in the --verbose output, the output is now + purely the nodeid. The line number is still shown in failure reports. + Thanks Floris Bruynooghe. + +- fix issue437 where assertion rewriting could cause pytest-xdist slaves + to collect different tests. Thanks Bruno Oliveira. + +- fix issue555: add "errors" attribute to capture-streams to satisfy + some distutils and possibly other code accessing sys.stdout.errors. + +- fix issue547 capsys/capfd also work when output capturing ("-s") is disabled. + +- address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via + an optional "raises=EXC" argument where EXC can be a single exception + or a tuple of exception classes. Thanks David Mohr for the complete + PR. + +- fix integration of pytest with unittest.mock.patch decorator when + it uses the "new" argument. Thanks Nicolas Delaby for test and PR. 
+ +- fix issue with detecting conftest files if the arguments contain + "::" node id specifications (copy pasted from "-v" output) + +- fix issue544 by only removing "@NUM" at the end of "::" separated parts + and if the part has an ".py" extension + +- don't use py.std import helper, rather import things directly. + Thanks Bruno Oliveira. + diff --git a/doc/en/assert.txt b/doc/en/assert.txt index 1dd7ba0c0..b7ea5aae1 100644 --- a/doc/en/assert.txt +++ b/doc/en/assert.txt @@ -26,7 +26,7 @@ you will see the return value of the function call:: $ py.test test_assert1.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 1 items test_assert1.py F @@ -132,7 +132,7 @@ if you run this module:: $ py.test test_assert2.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 1 items test_assert2.py F diff --git a/doc/en/attic_fixtures.txt b/doc/en/attic_fixtures.txt deleted file mode 100644 index 8b796a637..000000000 --- a/doc/en/attic_fixtures.txt +++ /dev/null @@ -1,188 +0,0 @@ - -**Test classes, modules or whole projects can make use of -one or more fixtures**. All required fixture functions will execute -before a test from the specifying context executes. As You can use this -to make tests operate from a pre-initialized directory or with -certain environment variables or with pre-configured global application -settings. - -For example, the Django_ project requires database -initialization to be able to import from and use its model objects. -For that, the `pytest-django`_ plugin provides fixtures which your -project can then easily depend or extend on, simply by referencing the -name of the particular fixture. - -Fixture functions have limited visilibity which depends on where they -are defined. If they are defined on a test class, only its test methods -may use it. A fixture defined in a module can only be used -from that test module. A fixture defined in a conftest.py file -can only be used by the tests below the directory of that file. -Lastly, plugins can define fixtures which are available across all -projects. - - - - - -Python, Java and many other languages support a so called xUnit_ style -for providing a fixed state, `test fixtures`_, for running tests. It -typically involves calling a autouse function ahead and a teardown -function after test execute. In 2005 pytest introduced a scope-specific -model of automatically detecting and calling autouse and teardown -functions on a per-module, class or function basis. The Python unittest -package and nose have subsequently incorporated them. This model -remains supported by pytest as :ref:`classic xunit`. - -One property of xunit fixture functions is that they work implicitely -by preparing global state or setting attributes on TestCase objects. -By contrast, pytest provides :ref:`funcargs` which allow to -dependency-inject application test state into test functions or -methods as function arguments. If your application is sufficiently modular -or if you are creating a new project, we recommend you now rather head over to -:ref:`funcargs` instead because many pytest users agree that using this -paradigm leads to better application and test organisation. 
- -However, not all programs and frameworks work and can be tested in -a fully modular way. They rather require preparation of global state -like database autouse on which further fixtures like preparing application -specific tables or wrapping tests in transactions can take place. For those -needs, pytest-2.3 now supports new **fixture functions** which come with -a ton of improvements over classic xunit fixture writing. Fixture functions: - -- allow to separate different autouse concerns into multiple modular functions - -- can receive and fully interoperate with :ref:`funcargs `, - -- are called multiple times if its funcargs are parametrized, - -- don't need to be defined directly in your test classes or modules, - they can also be defined in a plugin or :ref:`conftest.py ` files and get called - -- are called on a per-session, per-module, per-class or per-function basis - by means of a simple "scope" declaration. - -- can access the :ref:`request ` object which allows to - introspect and interact with the (scoped) testcontext. - -- can add cleanup functions which will be invoked when the last test - of the fixture test context has finished executing. - -All of these features are now demonstrated by little examples. - - - - - -test modules accessing a global resource -------------------------------------------------------- - -.. note:: - - Relying on `global state is considered bad programming practise `_ but when you work with an application - that relies on it you often have no choice. - -If you want test modules to access a global resource, -you can stick the resource to the module globals in -a per-module autouse function. We use a :ref:`resource factory -<@pytest.fixture>` to create our global resource:: - - # content of conftest.py - import pytest - - class GlobalResource: - def __init__(self): - pass - - @pytest.fixture(scope="session") - def globresource(): - return GlobalResource() - - @pytest.fixture(scope="module") - def setresource(request, globresource): - request.module.globresource = globresource - -Now any test module can access ``globresource`` as a module global:: - - # content of test_glob.py - - def test_1(): - print ("test_1 %s" % globresource) - def test_2(): - print ("test_2 %s" % globresource) - -Let's run this module without output-capturing:: - - $ py.test -qs test_glob.py - FF - ================================= FAILURES ================================= - __________________________________ test_1 __________________________________ - - def test_1(): - > print ("test_1 %s" % globresource) - E NameError: global name 'globresource' is not defined - - test_glob.py:3: NameError - __________________________________ test_2 __________________________________ - - def test_2(): - > print ("test_2 %s" % globresource) - E NameError: global name 'globresource' is not defined - - test_glob.py:5: NameError - 2 failed in 0.01 seconds - -The two tests see the same global ``globresource`` object. 
- -Parametrizing the global resource -+++++++++++++++++++++++++++++++++++++++++++++++++ - -We extend the previous example and add parametrization to the globresource -factory and also add a finalizer:: - - # content of conftest.py - - import pytest - - class GlobalResource: - def __init__(self, param): - self.param = param - - @pytest.fixture(scope="session", params=[1,2]) - def globresource(request): - g = GlobalResource(request.param) - def fin(): - print "finalizing", g - request.addfinalizer(fin) - return g - - @pytest.fixture(scope="module") - def setresource(request, globresource): - request.module.globresource = globresource - -And then re-run our test module:: - - $ py.test -qs test_glob.py - FF - ================================= FAILURES ================================= - __________________________________ test_1 __________________________________ - - def test_1(): - > print ("test_1 %s" % globresource) - E NameError: global name 'globresource' is not defined - - test_glob.py:3: NameError - __________________________________ test_2 __________________________________ - - def test_2(): - > print ("test_2 %s" % globresource) - E NameError: global name 'globresource' is not defined - - test_glob.py:5: NameError - 2 failed in 0.01 seconds - -We are now running the two tests twice with two different global resource -instances. Note that the tests are ordered such that only -one instance is active at any given time: the finalizer of -the first globresource instance is called before the second -instance is created and sent to the autouse functions. - diff --git a/doc/en/capture.txt b/doc/en/capture.txt index 893168ee4..aadc482ac 100644 --- a/doc/en/capture.txt +++ b/doc/en/capture.txt @@ -64,7 +64,7 @@ of the failing function and hide the other one:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items test_module.py .F @@ -78,7 +78,7 @@ of the failing function and hide the other one:: test_module.py:9: AssertionError -------------------------- Captured stdout setup --------------------------- - setting up + setting up ==================== 1 failed, 1 passed in 0.01 seconds ==================== Accessing captured output from a test function diff --git a/doc/en/conf.py b/doc/en/conf.py index 3b6091041..d6df88a33 100644 --- a/doc/en/conf.py +++ b/doc/en/conf.py @@ -18,7 +18,7 @@ # The full version, including alpha/beta/rc tags. # The short X.Y version. version = "2.6" -release = "2.6.0" +release = "2.6.1" import sys, os @@ -225,7 +225,7 @@ latex_documents = [ # The name of an image file (relative to this directory) to place at the top of # the title page. -#latex_logo = None +latex_logo = 'img/pytest1.png' # For "manual" documents, if this is true, then toplevel headings are parts, # not chapters. diff --git a/doc/en/doctest.txt b/doc/en/doctest.txt index 119cc0612..35f6b371e 100644 --- a/doc/en/doctest.txt +++ b/doc/en/doctest.txt @@ -44,12 +44,12 @@ then you can just invoke ``py.test`` without command line options:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 1 items mymodule.py . 
- ========================= 1 passed in 0.04 seconds ========================= + ========================= 1 passed in 0.06 seconds ========================= It is possible to use fixtures using the ``getfixture`` helper:: diff --git a/doc/en/example/markers.txt b/doc/en/example/markers.txt index 6eb80ea6d..74b3ef4de 100644 --- a/doc/en/example/markers.txt +++ b/doc/en/example/markers.txt @@ -31,10 +31,10 @@ You can then restrict a test run to only run tests marked with ``webtest``:: $ py.test -v -m webtest =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 4 items - test_server.py@3::test_send_http PASSED + test_server.py::test_send_http PASSED =================== 3 tests deselected by "-m 'webtest'" =================== ================== 1 passed, 3 deselected in 0.01 seconds ================== @@ -43,12 +43,12 @@ Or the inverse, running all tests except the webtest ones:: $ py.test -v -m "not webtest" =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 4 items - test_server.py@6::test_something_quick PASSED - test_server.py@8::test_another PASSED - test_server.py@11::TestClass::test_method PASSED + test_server.py::test_something_quick PASSED + test_server.py::test_another PASSED + test_server.py::TestClass::test_method PASSED ================= 1 tests deselected by "-m 'not webtest'" ================= ================== 3 passed, 1 deselected in 0.01 seconds ================== @@ -62,10 +62,10 @@ tests based on their module, class, method, or function name:: $ py.test -v test_server.py::TestClass::test_method =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 5 items - test_server.py@11::TestClass::test_method PASSED + test_server.py::TestClass::test_method PASSED ========================= 1 passed in 0.01 seconds ========================= @@ -73,10 +73,10 @@ You can also select on the class:: $ py.test -v test_server.py::TestClass =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... 
collected 4 items - test_server.py@11::TestClass::test_method PASSED + test_server.py::TestClass::test_method PASSED ========================= 1 passed in 0.01 seconds ========================= @@ -84,11 +84,11 @@ Or select multiple nodes:: $ py.test -v test_server.py::TestClass test_server.py::test_send_http =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 8 items - test_server.py@11::TestClass::test_method PASSED - test_server.py@3::test_send_http PASSED + test_server.py::TestClass::test_method PASSED + test_server.py::test_send_http PASSED ========================= 2 passed in 0.01 seconds ========================= @@ -120,10 +120,10 @@ select tests based on their names:: $ py.test -v -k http # running with the above defined example module =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 4 items - test_server.py@3::test_send_http PASSED + test_server.py::test_send_http PASSED ====================== 3 tests deselected by '-khttp' ====================== ================== 1 passed, 3 deselected in 0.01 seconds ================== @@ -132,12 +132,12 @@ And you can also run all tests except the ones that match the keyword:: $ py.test -k "not send_http" -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 4 items - test_server.py@6::test_something_quick PASSED - test_server.py@8::test_another PASSED - test_server.py@11::TestClass::test_method PASSED + test_server.py::test_something_quick PASSED + test_server.py::test_another PASSED + test_server.py::TestClass::test_method PASSED ================= 1 tests deselected by '-knot send_http' ================== ================== 3 passed, 1 deselected in 0.01 seconds ================== @@ -146,11 +146,11 @@ Or to select "http" and "quick" tests:: $ py.test -k "http or quick" -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 4 items - test_server.py@3::test_send_http PASSED - test_server.py@6::test_something_quick PASSED + test_server.py::test_send_http PASSED + test_server.py::test_something_quick PASSED ================= 2 tests deselected by '-khttp or quick' ================== ================== 2 passed, 2 deselected in 0.01 seconds ================== @@ -187,7 +187,7 @@ You can ask which markers exist for your test suite - the list includes our just @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. 
Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - @pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html + @pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. @@ -326,7 +326,7 @@ the test needs:: $ py.test -E stage2 =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 1 items test_someenv.py s @@ -337,7 +337,7 @@ and here is one that specifies exactly the environment needed:: $ py.test -E stage1 =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 1 items test_someenv.py . @@ -351,7 +351,7 @@ The ``--markers`` option always gives you a list of available markers:: @pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html - @pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html + @pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html @pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. 
Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples. @@ -455,26 +455,26 @@ then you will see two test skipped and two executed tests as expected:: $ py.test -rs # this option reports skip reasons =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 4 items - test_plat.py s.s. + test_plat.py sss. ========================= short test summary info ========================== - SKIP [2] /tmp/doc-exec-142/conftest.py:12: cannot run on platform linux2 + SKIP [3] /tmp/doc-exec-238/conftest.py:12: cannot run on platform linux - =================== 2 passed, 2 skipped in 0.01 seconds ==================== + =================== 1 passed, 3 skipped in 0.01 seconds ==================== Note that if you specify a platform via the marker-command line option like this:: $ py.test -m linux2 =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 4 items - test_plat.py . + test_plat.py s =================== 3 tests deselected by "-m 'linux2'" ==================== - ================== 1 passed, 3 deselected in 0.01 seconds ================== + ================= 1 skipped, 3 deselected in 0.01 seconds ================== then the unmarked-tests will not be run. It is thus a way to restrict the run to the specific tests. @@ -519,7 +519,7 @@ We can now use the ``-m option`` to select one set:: $ py.test -m interface --tb=short =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 4 items test_module.py FF @@ -540,7 +540,7 @@ or to select both "event" and "interface" tests:: $ py.test -m "interface or event" --tb=short =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 4 items test_module.py FFF @@ -559,4 +559,4 @@ or to select both "event" and "interface" tests:: assert 0 E assert 0 ============= 1 tests deselected by "-m 'interface or event'" ============== - ================== 3 failed, 1 deselected in 0.02 seconds ================== + ================== 3 failed, 1 deselected in 0.01 seconds ================== diff --git a/doc/en/example/nonpython.txt b/doc/en/example/nonpython.txt index 7bebe78de..9506b7689 100644 --- a/doc/en/example/nonpython.txt +++ b/doc/en/example/nonpython.txt @@ -27,7 +27,7 @@ now execute the test specification:: nonpython $ py.test test_simple.yml =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items test_simple.yml .F @@ -56,11 +56,11 @@ consulted when reporting in ``verbose`` mode:: nonpython $ py.test -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 
-- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 2 items - test_simple.yml@1::usecase: ok PASSED - test_simple.yml@1::usecase: hello FAILED + test_simple.yml::usecase: ok PASSED + test_simple.yml::usecase: hello FAILED ================================= FAILURES ================================= ______________________________ usecase: hello ______________________________ @@ -74,7 +74,7 @@ interesting to just look at the collection tree:: nonpython $ py.test --collect-only =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items diff --git a/doc/en/example/parametrize.txt b/doc/en/example/parametrize.txt index 7dd77a5b1..408559bf6 100644 --- a/doc/en/example/parametrize.txt +++ b/doc/en/example/parametrize.txt @@ -106,7 +106,7 @@ this is a fully self-contained example which you can run with:: $ py.test test_scenarios.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 4 items test_scenarios.py .... @@ -118,7 +118,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia $ py.test --collect-only test_scenarios.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 4 items @@ -182,7 +182,7 @@ Let's first see how it looks like at collection time:: $ py.test test_backends.py --collect-only =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items @@ -197,7 +197,7 @@ And then when we run the test:: ================================= FAILURES ================================= _________________________ test_db_initialized[d2] __________________________ - db = + db = def test_db_initialized(db): # a dummy test @@ -251,9 +251,9 @@ argument sets to use for each test function. Let's run it:: $ py.test -q F.. ================================= FAILURES ================================= - ________________________ TestClass.test_equals[1-2] ________________________ + ________________________ TestClass.test_equals[2-1] ________________________ - self = , a = 1, b = 2 + self = , a = 1, b = 2 def test_equals(self, a, b): > assert a == b @@ -281,10 +281,10 @@ Running it results in some skips if we don't have all the python interpreters in . 
$ py.test -rs -q multipython.py ssssssssssssssssssssssssssssssssssss......sssssssss......ssssssssssssssssss ========================= short test summary info ========================== - SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.4' not found - SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.8' not found SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.5' not found - 12 passed, 63 skipped in 0.66 seconds + SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.8' not found + SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.4' not found + 12 passed, 63 skipped in 0.65 seconds Indirect parametrization of optional implementations/imports -------------------------------------------------------------------- @@ -331,12 +331,12 @@ If you run this with reporting for skips enabled:: $ py.test -rs test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items test_module.py .s ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-144/conftest.py:10: could not import 'opt2' + SKIP [1] /tmp/doc-exec-240/conftest.py:10: could not import 'opt2' =================== 1 passed, 1 skipped in 0.01 seconds ==================== diff --git a/doc/en/example/pythoncollection.txt b/doc/en/example/pythoncollection.txt index bde0183d9..c74f0abdc 100644 --- a/doc/en/example/pythoncollection.txt +++ b/doc/en/example/pythoncollection.txt @@ -43,7 +43,7 @@ then the test collection looks like this:: $ py.test --collect-only =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items @@ -88,7 +88,7 @@ You can always peek at the collection tree without running tests like this:: . 
$ py.test --collect-only pythoncollection.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 3 items @@ -141,10 +141,8 @@ interpreters and will leave out the setup.py file:: $ py.test --collect-only =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 - collected 1 items - - + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 + collected 0 items ============================= in 0.01 seconds ============================= diff --git a/doc/en/example/reportingdemo.txt b/doc/en/example/reportingdemo.txt index 1837e25d5..4905b6778 100644 --- a/doc/en/example/reportingdemo.txt +++ b/doc/en/example/reportingdemo.txt @@ -13,7 +13,7 @@ get on the terminal - we are working on that): assertion $ py.test failure_demo.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 39 items failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF @@ -30,7 +30,7 @@ get on the terminal - we are working on that): failure_demo.py:15: AssertionError _________________________ TestFailing.test_simple __________________________ - self = + self = def test_simple(self): def f(): @@ -40,13 +40,13 @@ get on the terminal - we are working on that): > assert f() == g() E assert 42 == 43 - E + where 42 = () - E + and 43 = () + E + where 42 = .f at 0x2aec3e47b158>() + E + and 43 = .g at 0x2aec3e47b268>() failure_demo.py:28: AssertionError ____________________ TestFailing.test_simple_multiline _____________________ - self = + self = def test_simple_multiline(self): otherfunc_multi( @@ -66,19 +66,19 @@ get on the terminal - we are working on that): failure_demo.py:11: AssertionError ___________________________ TestFailing.test_not ___________________________ - self = + self = def test_not(self): def f(): return 42 > assert not f() E assert not 42 - E + where 42 = () + E + where 42 = .f at 0x2aec3e47e620>() failure_demo.py:38: AssertionError _________________ TestSpecialisedExplanations.test_eq_text _________________ - self = + self = def test_eq_text(self): > assert 'spam' == 'eggs' @@ -89,7 +89,7 @@ get on the terminal - we are working on that): failure_demo.py:42: AssertionError _____________ TestSpecialisedExplanations.test_eq_similar_text _____________ - self = + self = def test_eq_similar_text(self): > assert 'foo 1 bar' == 'foo 2 bar' @@ -102,7 +102,7 @@ get on the terminal - we are working on that): failure_demo.py:45: AssertionError ____________ TestSpecialisedExplanations.test_eq_multiline_text ____________ - self = + self = def test_eq_multiline_text(self): > assert 'foo\nspam\nbar' == 'foo\neggs\nbar' @@ -115,7 +115,7 @@ get on the terminal - we are working on that): failure_demo.py:48: AssertionError ______________ TestSpecialisedExplanations.test_eq_long_text _______________ - self = + self = def test_eq_long_text(self): a = '1'*100 + 'a' + '2'*100 @@ -132,7 +132,7 @@ get on the terminal - we are working on that): failure_demo.py:53: AssertionError _________ TestSpecialisedExplanations.test_eq_long_text_multiline __________ - self = + self = def test_eq_long_text_multiline(self): a = '1\n'*100 + 'a' + '2\n'*100 @@ -156,7 +156,7 @@ get on the terminal - we are working on that): failure_demo.py:58: 
AssertionError _________________ TestSpecialisedExplanations.test_eq_list _________________ - self = + self = def test_eq_list(self): > assert [0, 1, 2] == [0, 1, 3] @@ -166,7 +166,7 @@ get on the terminal - we are working on that): failure_demo.py:61: AssertionError ______________ TestSpecialisedExplanations.test_eq_list_long _______________ - self = + self = def test_eq_list_long(self): a = [0]*100 + [1] + [3]*100 @@ -178,7 +178,7 @@ get on the terminal - we are working on that): failure_demo.py:66: AssertionError _________________ TestSpecialisedExplanations.test_eq_dict _________________ - self = + self = def test_eq_dict(self): > assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0} @@ -194,7 +194,7 @@ get on the terminal - we are working on that): failure_demo.py:69: AssertionError _________________ TestSpecialisedExplanations.test_eq_set __________________ - self = + self = def test_eq_set(self): > assert set([0, 10, 11, 12]) == set([0, 20, 21]) @@ -210,7 +210,7 @@ get on the terminal - we are working on that): failure_demo.py:72: AssertionError _____________ TestSpecialisedExplanations.test_eq_longer_list ______________ - self = + self = def test_eq_longer_list(self): > assert [1,2] == [1,2,3] @@ -220,7 +220,7 @@ get on the terminal - we are working on that): failure_demo.py:75: AssertionError _________________ TestSpecialisedExplanations.test_in_list _________________ - self = + self = def test_in_list(self): > assert 1 in [0, 2, 3, 4, 5] @@ -229,7 +229,7 @@ get on the terminal - we are working on that): failure_demo.py:78: AssertionError __________ TestSpecialisedExplanations.test_not_in_text_multiline __________ - self = + self = def test_not_in_text_multiline(self): text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail' @@ -247,7 +247,7 @@ get on the terminal - we are working on that): failure_demo.py:82: AssertionError ___________ TestSpecialisedExplanations.test_not_in_text_single ____________ - self = + self = def test_not_in_text_single(self): text = 'single foo line' @@ -260,7 +260,7 @@ get on the terminal - we are working on that): failure_demo.py:86: AssertionError _________ TestSpecialisedExplanations.test_not_in_text_single_long _________ - self = + self = def test_not_in_text_single_long(self): text = 'head ' * 50 + 'foo ' + 'tail ' * 20 @@ -273,7 +273,7 @@ get on the terminal - we are working on that): failure_demo.py:90: AssertionError ______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______ - self = + self = def test_not_in_text_single_long_term(self): text = 'head ' * 50 + 'f'*70 + 'tail ' * 20 @@ -292,7 +292,7 @@ get on the terminal - we are working on that): i = Foo() > assert i.b == 2 E assert 1 == 2 - E + where 1 = .b + E + where 1 = .Foo object at 0x2aec3e519c18>.b failure_demo.py:101: AssertionError _________________________ test_attribute_instance __________________________ @@ -302,8 +302,8 @@ get on the terminal - we are working on that): b = 1 > assert Foo().b == 2 E assert 1 == 2 - E + where 1 = .b - E + where = () + E + where 1 = .Foo object at 0x2aec3e52d898>.b + E + where .Foo object at 0x2aec3e52d898> = .Foo'>() failure_demo.py:107: AssertionError __________________________ test_attribute_failure __________________________ @@ -319,7 +319,7 @@ get on the terminal - we are working on that): failure_demo.py:116: _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ - self = + self = .Foo object at 0x2aec3e4e0b38> def _get_b(self): > raise Exception('Failed to get attrib') @@ -335,15 +335,15 @@ get 
on the terminal - we are working on that): b = 2 > assert Foo().b == Bar().b E assert 1 == 2 - E + where 1 = .b - E + where = () - E + and 2 = .b - E + where = () + E + where 1 = .Foo object at 0x2aec3e4a5748>.b + E + where .Foo object at 0x2aec3e4a5748> = .Foo'>() + E + and 2 = .Bar object at 0x2aec3e4a51d0>.b + E + where .Bar object at 0x2aec3e4a51d0> = .Bar'>() failure_demo.py:124: AssertionError __________________________ TestRaises.test_raises __________________________ - self = + self = def test_raises(self): s = 'qwe' @@ -355,10 +355,10 @@ get on the terminal - we are working on that): > int(s) E ValueError: invalid literal for int() with base 10: 'qwe' - <0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:1028>:1: ValueError + <0-codegen /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1028>:1: ValueError ______________________ TestRaises.test_raises_doesnt _______________________ - self = + self = def test_raises_doesnt(self): > raises(IOError, "int('3')") @@ -367,7 +367,7 @@ get on the terminal - we are working on that): failure_demo.py:136: Failed __________________________ TestRaises.test_raise ___________________________ - self = + self = def test_raise(self): > raise ValueError("demo error") @@ -376,7 +376,7 @@ get on the terminal - we are working on that): failure_demo.py:139: ValueError ________________________ TestRaises.test_tupleerror ________________________ - self = + self = def test_tupleerror(self): > a,b = [1] @@ -385,7 +385,7 @@ get on the terminal - we are working on that): failure_demo.py:142: ValueError ______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______ - self = + self = def test_reinterpret_fails_with_print_for_the_fun_of_it(self): l = [1,2,3] @@ -398,11 +398,11 @@ get on the terminal - we are working on that): l is [1, 2, 3] ________________________ TestRaises.test_some_error ________________________ - self = + self = def test_some_error(self): > if namenotexi: - E NameError: global name 'namenotexi' is not defined + E NameError: name 'namenotexi' is not defined failure_demo.py:150: NameError ____________________ test_dynamic_compile_shows_nicely _____________________ @@ -426,7 +426,7 @@ get on the terminal - we are working on that): <2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError ____________________ TestMoreErrors.test_complex_error _____________________ - self = + self = def test_complex_error(self): def f(): @@ -450,7 +450,7 @@ get on the terminal - we are working on that): failure_demo.py:5: AssertionError ___________________ TestMoreErrors.test_z1_unpack_error ____________________ - self = + self = def test_z1_unpack_error(self): l = [] @@ -460,7 +460,7 @@ get on the terminal - we are working on that): failure_demo.py:179: ValueError ____________________ TestMoreErrors.test_z2_type_error _____________________ - self = + self = def test_z2_type_error(self): l = 3 @@ -470,19 +470,19 @@ get on the terminal - we are working on that): failure_demo.py:183: TypeError ______________________ TestMoreErrors.test_startswith ______________________ - self = + self = def test_startswith(self): s = "123" g = "456" > assert s.startswith(g) - E assert ('456') - E + where = '123'.startswith + E assert ('456') + E + where = '123'.startswith failure_demo.py:188: AssertionError __________________ TestMoreErrors.test_startswith_nested ___________________ - self = + self = def test_startswith_nested(self): def f(): @@ -490,15 
+490,15 @@ get on the terminal - we are working on that): def g(): return "456" > assert f().startswith(g()) - E assert ('456') - E + where = '123'.startswith - E + where '123' = () - E + and '456' = () + E assert ('456') + E + where = '123'.startswith + E + where '123' = .f at 0x2aec3e5572f0>() + E + and '456' = .g at 0x2aec3e557268>() failure_demo.py:195: AssertionError _____________________ TestMoreErrors.test_global_func ______________________ - self = + self = def test_global_func(self): > assert isinstance(globf(42), float) @@ -508,18 +508,18 @@ get on the terminal - we are working on that): failure_demo.py:198: AssertionError _______________________ TestMoreErrors.test_instance _______________________ - self = + self = def test_instance(self): self.x = 6*7 > assert self.x != 42 E assert 42 != 42 - E + where 42 = .x + E + where 42 = .x failure_demo.py:202: AssertionError _______________________ TestMoreErrors.test_compare ________________________ - self = + self = def test_compare(self): > assert globf(10) < 5 @@ -529,7 +529,7 @@ get on the terminal - we are working on that): failure_demo.py:205: AssertionError _____________________ TestMoreErrors.test_try_finally ______________________ - self = + self = def test_try_finally(self): x = 1 @@ -538,4 +538,4 @@ get on the terminal - we are working on that): E assert 1 == 0 failure_demo.py:210: AssertionError - ======================== 39 failed in 0.21 seconds ========================= + ======================== 39 failed in 0.22 seconds ========================= diff --git a/doc/en/example/simple.txt b/doc/en/example/simple.txt index e42d81eec..1f353d2c6 100644 --- a/doc/en/example/simple.txt +++ b/doc/en/example/simple.txt @@ -108,7 +108,7 @@ directory with the above conftest.py:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 0 items ============================= in 0.00 seconds ============================= @@ -152,12 +152,12 @@ and when running it will see a skipped "slow" test:: $ py.test -rs # "-rs" means report details on the little 's' =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items test_module.py .s ========================= short test summary info ========================== - SKIP [1] /tmp/doc-exec-147/conftest.py:9: need --runslow option to run + SKIP [1] /tmp/doc-exec-243/conftest.py:9: need --runslow option to run =================== 1 passed, 1 skipped in 0.01 seconds ==================== @@ -165,7 +165,7 @@ Or run it including the ``slow`` marked test:: $ py.test --runslow =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items test_module.py .. 
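The skipping shown in the two transcripts above is driven by a small pair of conftest.py hooks; a minimal sketch of the documented ``--runslow`` pattern (reconstructed from the example output, not quoted verbatim from the file)::

    # content of conftest.py - sketch of the --runslow skipping pattern
    import pytest

    def pytest_addoption(parser):
        parser.addoption("--runslow", action="store_true",
                         help="run slow tests")

    def pytest_runtest_setup(item):
        # skip tests carrying @pytest.mark.slow unless --runslow was given
        if "slow" in item.keywords and not item.config.getoption("--runslow"):
            pytest.skip("need --runslow option to run")

Plain ``py.test`` then reports the marked test as skipped (the ``SKIP [1] ... need --runslow option to run`` summary line), while ``py.test --runslow`` runs both tests.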
@@ -256,7 +256,7 @@ which will add the string to the test header accordingly:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 project deps: mylib-1.1 collected 0 items @@ -279,7 +279,7 @@ which will add info only when run with "--v":: $ py.test -v =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 info1: did you know that ... did you? collecting ... collected 0 items @@ -290,7 +290,7 @@ and nothing when run plainly:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 0 items ============================= in 0.00 seconds ============================= @@ -322,7 +322,7 @@ Now we can profile which test functions execute the slowest:: $ py.test --durations=3 =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 3 items test_some_are_slow.py ... @@ -383,7 +383,7 @@ If we run this:: $ py.test -rx =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 4 items test_step.py .Fx. @@ -391,7 +391,7 @@ If we run this:: ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - self = + self = def test_modification(self): > assert 0 @@ -453,7 +453,7 @@ We can run this:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 7 items test_step.py .Fx. @@ -463,17 +463,17 @@ We can run this:: ================================== ERRORS ================================== _______________________ ERROR at setup of test_root ________________________ - file /tmp/doc-exec-147/b/test_error.py, line 1 + file /tmp/doc-exec-243/b/test_error.py, line 1 def test_root(db): # no db here, will error out fixture 'db' not found - available fixtures: tmpdir, monkeypatch, pytestconfig, recwarn, capsys, capfd + available fixtures: tmpdir, monkeypatch, capsys, capfd, pytestconfig, recwarn use 'py.test --fixtures [testpath]' for help on them. 
- /tmp/doc-exec-147/b/test_error.py:1 + /tmp/doc-exec-243/b/test_error.py:1 ================================= FAILURES ================================= ____________________ TestUserHandling.test_modification ____________________ - self = + self = def test_modification(self): > assert 0 @@ -482,20 +482,20 @@ We can run this:: test_step.py:9: AssertionError _________________________________ test_a1 __________________________________ - db = + db = def test_a1(db): > assert 0, db # to show value - E AssertionError: + E AssertionError: a/test_db.py:2: AssertionError _________________________________ test_a2 __________________________________ - db = + db = def test_a2(db): > assert 0, db # to show value - E AssertionError: + E AssertionError: a/test_db2.py:2: AssertionError ========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.03 seconds ========== @@ -553,7 +553,7 @@ and run them:: $ py.test test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items test_module.py FF @@ -561,7 +561,7 @@ and run them:: ================================= FAILURES ================================= ________________________________ test_fail1 ________________________________ - tmpdir = local('/tmp/pytest-28/test_fail10') + tmpdir = local('/tmp/pytest-509/test_fail10') def test_fail1(tmpdir): > assert 0 @@ -575,12 +575,12 @@ and run them:: E assert 0 test_module.py:4: AssertionError - ========================= 2 failed in 0.01 seconds ========================= + ========================= 2 failed in 0.02 seconds ========================= you will have a "failures" file which contains the failing test ids:: $ cat failures - test_module.py::test_fail1 (/tmp/pytest-28/test_fail10) + test_module.py::test_fail1 (/tmp/pytest-509/test_fail10) test_module.py::test_fail2 Making test result information available in fixtures @@ -642,41 +642,29 @@ if you then have failing tests:: and run it:: $ py.test -s test_module.py - =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 - collected 3 items + Traceback (most recent call last): + File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 513, in getconftestmodules + return self._path2confmods[path] + KeyError: local('/tmp/doc-exec-243/test_module.py') - test_module.py Esetting up a test failed! 
test_module.py::test_setup_fails - Fexecuting test failed test_module.py::test_call_fails - F + During handling of the above exception, another exception occurred: + Traceback (most recent call last): + File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 537, in importconftest + return self._conftestpath2mod[conftestpath] + KeyError: local('/tmp/doc-exec-243/conftest.py') - ================================== ERRORS ================================== - ____________________ ERROR at setup of test_setup_fails ____________________ + During handling of the above exception, another exception occurred: + Traceback (most recent call last): + File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 543, in importconftest + mod = conftestpath.pyimport() + File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py", line 620, in pyimport + __import__(modname) + File "/tmp/doc-exec-243/conftest.py", line 22 + print "setting up a test failed!", request.node.nodeid + ^ + SyntaxError: invalid syntax + ERROR: could not load /tmp/doc-exec-243/conftest.py - @pytest.fixture - def other(): - > assert 0 - E assert 0 - - test_module.py:6: AssertionError - ================================= FAILURES ================================= - _____________________________ test_call_fails ______________________________ - - something = None - - def test_call_fails(something): - > assert 0 - E assert 0 - - test_module.py:12: AssertionError - ________________________________ test_fail2 ________________________________ - - def test_fail2(): - > assert 0 - E assert 0 - - test_module.py:15: AssertionError - ==================== 2 failed, 1 error in 0.01 seconds ===================== You'll see that the fixture finalizers could use the precise reporting information. @@ -730,4 +718,5 @@ over to ``pytest`` instead. For example:: This makes it convenient to execute your tests from within your frozen application, using standard ``py.test`` command-line options:: - $ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/ \ No newline at end of file + $ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/ /bin/sh: 1: ./app_main: not found + /bin/sh: 1: ./app_main: not found diff --git a/doc/en/example/special.txt b/doc/en/example/special.txt index 411f14955..b5028c220 100644 --- a/doc/en/example/special.txt +++ b/doc/en/example/special.txt @@ -60,13 +60,26 @@ will be called ahead of running any tests:: If you run this without output capturing:: $ py.test -q -s test_module.py - callattr_ahead_of_alltests called - callme called! - callme other called - SomeTest callme called - test_method1 called - .test_method1 called - .test other - .test_unit1 method called - . 
- 4 passed in 0.03 seconds + Traceback (most recent call last): + File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 513, in getconftestmodules + return self._path2confmods[path] + KeyError: local('/tmp/doc-exec-244/test_module.py') + + During handling of the above exception, another exception occurred: + Traceback (most recent call last): + File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 537, in importconftest + return self._conftestpath2mod[conftestpath] + KeyError: local('/tmp/doc-exec-244/conftest.py') + + During handling of the above exception, another exception occurred: + Traceback (most recent call last): + File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 543, in importconftest + mod = conftestpath.pyimport() + File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py", line 620, in pyimport + __import__(modname) + File "/tmp/doc-exec-244/conftest.py", line 6 + print "callattr_ahead_of_alltests called" + ^ + SyntaxError: invalid syntax + ERROR: could not load /tmp/doc-exec-244/conftest.py + diff --git a/doc/en/example/xfail_demo.py b/doc/en/example/xfail_demo.py index c6e147e4d..5648575e8 100644 --- a/doc/en/example/xfail_demo.py +++ b/doc/en/example/xfail_demo.py @@ -25,6 +25,6 @@ def test_hello6(): pytest.xfail("reason") @xfail(raises=IndexError) -def test_hello7() +def test_hello7(): x = [] x[1] = 1 diff --git a/doc/en/fixture.txt b/doc/en/fixture.txt index c4dfd74fb..ba956f8db 100644 --- a/doc/en/fixture.txt +++ b/doc/en/fixture.txt @@ -76,7 +76,7 @@ marked ``smtp`` fixture function. Running the test looks like this:: $ py.test test_smtpsimple.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1 collected 1 items test_smtpsimple.py F @@ -84,17 +84,16 @@ marked ``smtp`` fixture function. 
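For reference, the documented ``test_smtpsimple.py`` is essentially the following (a sketch, not a verbatim copy of the file)::

    # content of test_smtpsimple.py - sketch of the documented example
    import pytest
    import smtplib

    @pytest.fixture
    def smtp():
        return smtplib.SMTP("merlinux.eu")

    def test_ehlo(smtp):
        response, msg = smtp.ehlo()
        assert response == 250
        assert "merlinux" in msg  # on Python 3, msg is bytes

Under Python 3 ``smtplib`` hands back the EHLO message as ``bytes``, so the ``str``-in-``bytes`` containment check raises ``TypeError``; that is why the regenerated failure below is a ``TypeError`` at the ``assert "merlinux" in msg`` line instead of the former ``assert 0`` demo failure. A py3-clean check would use ``b"merlinux" in msg``.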
Running the test looks like this:: ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - smtp = + smtp = def test_ehlo(smtp): response, msg = smtp.ehlo() assert response == 250 - assert "merlinux" in msg - > assert 0 # for demo purposes - E assert 0 + > assert "merlinux" in msg + E TypeError: Type str doesn't support the buffer API - test_smtpsimple.py:12: AssertionError - ========================= 1 failed in 0.15 seconds ========================= + test_smtpsimple.py:11: TypeError + ========================= 1 failed in 0.18 seconds ========================= In the failure traceback we see that the test function was called with a ``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture @@ -194,7 +193,7 @@ inspect what is going on and can now run the tests:: $ py.test test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1 collected 2 items test_module.py FF @@ -202,19 +201,18 @@ inspect what is going on and can now run the tests:: ================================= FAILURES ================================= ________________________________ test_ehlo _________________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - assert "merlinux" in response[1] - > assert 0 # for demo purposes - E assert 0 + > assert "merlinux" in response[1] + E TypeError: Type str doesn't support the buffer API - test_module.py:6: AssertionError + test_module.py:5: TypeError ________________________________ test_noop _________________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -223,7 +221,7 @@ inspect what is going on and can now run the tests:: E assert 0 test_module.py:11: AssertionError - ========================= 2 failed in 0.16 seconds ========================= + ========================= 2 failed in 0.18 seconds ========================= You see the two ``assert 0`` failing and more importantly you can also see that the same (module-scoped) ``smtp`` object was passed into the two @@ -332,7 +330,7 @@ Running it:: ______________________________ test_showhelo _______________________________ test_anothersmtp.py:5: in test_showhelo assert 0, smtp.helo() - E AssertionError: (250, 'mail.python.org') + E AssertionError: (250, b'mail.python.org') voila! The ``smtp`` fixture function picked up our mail server name from the module namespace. 
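The namespace pickup works because the fixture consults the requesting test module before falling back to a default; a sketch of that pattern (the ``getattr`` fallback value is an assumption, not quoted from the actual conftest.py)::

    # content of conftest.py - sketch of a module-namespace aware fixture
    import pytest
    import smtplib

    @pytest.fixture(scope="module")
    def smtp(request):
        # prefer an ``smtpserver`` attribute set in the requesting module
        server = getattr(request.module, "smtpserver", "merlinux.eu")
        smtp = smtplib.SMTP(server)
        def fin():
            print("finalizing %s" % smtp)
            smtp.close()
        request.addfinalizer(fin)
        return smtp

Note also that on Python 3 ``smtp.helo()`` returns the server name as ``bytes``, hence the ``(250, b'mail.python.org')`` in the regenerated assertion message above.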
@@ -379,19 +377,18 @@ So let's just do another run:: ================================= FAILURES ================================= __________________________ test_ehlo[merlinux.eu] __________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 - assert "merlinux" in response[1] - > assert 0 # for demo purposes - E assert 0 + > assert "merlinux" in response[1] + E TypeError: Type str doesn't support the buffer API - test_module.py:6: AssertionError + test_module.py:5: TypeError __________________________ test_noop[merlinux.eu] __________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -402,20 +399,20 @@ So let's just do another run:: test_module.py:11: AssertionError ________________________ test_ehlo[mail.python.org] ________________________ - smtp = + smtp = def test_ehlo(smtp): response = smtp.ehlo() assert response[0] == 250 > assert "merlinux" in response[1] - E assert 'merlinux' in 'mail.python.org\nSIZE 25600000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8' + E TypeError: Type str doesn't support the buffer API - test_module.py:5: AssertionError + test_module.py:5: TypeError -------------------------- Captured stdout setup --------------------------- - finalizing + finalizing ________________________ test_noop[mail.python.org] ________________________ - smtp = + smtp = def test_noop(smtp): response = smtp.noop() @@ -424,7 +421,7 @@ So let's just do another run:: E assert 0 test_module.py:11: AssertionError - 4 failed in 5.62 seconds + 4 failed in 6.37 seconds We see that our two test functions each ran twice, against the different ``smtp`` instances. Note also, that with the ``mail.python.org`` @@ -464,13 +461,13 @@ Here we declare an ``app`` fixture which receives the previously defined $ py.test -v test_appsetup.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 2 items - test_appsetup.py@12::test_smtp_exists[merlinux.eu] PASSED - test_appsetup.py@12::test_smtp_exists[mail.python.org] PASSED + test_appsetup.py::test_smtp_exists[merlinux.eu] PASSED + test_appsetup.py::test_smtp_exists[mail.python.org] PASSED - ========================= 2 passed in 6.27 seconds ========================= + ========================= 2 passed in 6.11 seconds ========================= Due to the parametrization of ``smtp`` the test will run twice with two different ``App`` instances and respective smtp servers. 
There is no @@ -508,7 +505,7 @@ to show the setup/teardown flow:: @pytest.fixture(scope="module", params=["mod1", "mod2"]) def modarg(request): param = request.param - print "create", param + print ("create", param) def fin(): print ("fin %s" % param) return param @@ -518,36 +515,36 @@ to show the setup/teardown flow:: return request.param def test_0(otherarg): - print " test0", otherarg + print (" test0", otherarg) def test_1(modarg): - print " test1", modarg + print (" test1", modarg) def test_2(otherarg, modarg): - print " test2", otherarg, modarg + print (" test2", otherarg, modarg) Let's run the tests in verbose mode and with looking at the print-output:: $ py.test -v -s test_module.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4 collecting ... collected 8 items - test_module.py@15::test_0[1] test0 1 + test_module.py::test_0[1] test0 1 PASSED - test_module.py@15::test_0[2] test0 2 + test_module.py::test_0[2] test0 2 PASSED - test_module.py@17::test_1[mod1] create mod1 + test_module.py::test_1[mod1] create mod1 test1 mod1 PASSED - test_module.py@19::test_2[1-mod1] test2 1 mod1 + test_module.py::test_2[1-mod1] test2 1 mod1 PASSED - test_module.py@19::test_2[2-mod1] test2 2 mod1 + test_module.py::test_2[2-mod1] test2 2 mod1 PASSED - test_module.py@17::test_1[mod2] create mod2 + test_module.py::test_1[mod2] create mod2 test1 mod2 PASSED - test_module.py@19::test_2[1-mod2] test2 1 mod2 + test_module.py::test_2[1-mod2] test2 1 mod2 PASSED - test_module.py@19::test_2[2-mod2] test2 2 mod2 + test_module.py::test_2[2-mod2] test2 2 mod2 PASSED ========================= 8 passed in 0.01 seconds ========================= diff --git a/doc/en/getting-started.txt b/doc/en/getting-started.txt index 1516bb06b..55dce9881 100644 --- a/doc/en/getting-started.txt +++ b/doc/en/getting-started.txt @@ -27,7 +27,7 @@ Installation options:: To check your installation has installed the correct version:: $ py.test --version - This is pytest version 2.6.0, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc + This is pytest version 2.6.1, imported from /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/pytest.py If you get an error checkout :ref:`installation issues`. @@ -49,7 +49,7 @@ That's it. You can execute the test function now:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 1 items test_sample.py F @@ -127,7 +127,7 @@ run the module by passing its filename:: ================================= FAILURES ================================= ____________________________ TestClass.test_two ____________________________ - self = + self = def test_two(self): x = "hello" @@ -159,21 +159,18 @@ We list the name ``tmpdir`` in the test function signature and before performing the test function call. 
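As the regenerated output just below shows, the documented ``test_tmpdir.py`` still contains a Python 2 ``print`` statement, so under Python 3.4 the module no longer even collects; ported to Python 3 the example would presumably read::

    # content of test_tmpdir.py - py3-ported sketch of the documented example
    def test_needsfiles(tmpdir):
        print(tmpdir)  # print() call instead of a py2 print statement
        assert 0

with which the run would again show the intended assertion failure rather than a collection-time ``SyntaxError``.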
Let's just run it:: $ py.test -q test_tmpdir.py - F - ================================= FAILURES ================================= - _____________________________ test_needsfiles ______________________________ - tmpdir = local('/tmp/pytest-24/test_needsfiles0') - - def test_needsfiles(tmpdir): - print tmpdir - > assert 0 - E assert 0 - - test_tmpdir.py:3: AssertionError - --------------------------- Captured stdout call --------------------------- - /tmp/pytest-24/test_needsfiles0 - 1 failed in 0.01 seconds + ================================== ERRORS ================================== + _____________________ ERROR collecting test_tmpdir.py ______________________ + /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:463: in _importtestmodule + mod = self.fspath.pyimport(ensuresyspath=True) + /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py:620: in pyimport + __import__(modname) + E File "/tmp/doc-exec-187/test_tmpdir.py", line 2 + E print tmpdir + E ^ + E SyntaxError: invalid syntax + 1 error in 0.03 seconds Before the test runs, a unique-per-test-invocation temporary directory was created. More info at :ref:`tmpdir handling`. diff --git a/doc/en/parametrize.txt b/doc/en/parametrize.txt index 28253e0a7..76c5f0366 100644 --- a/doc/en/parametrize.txt +++ b/doc/en/parametrize.txt @@ -53,7 +53,7 @@ them in turn:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 3 items test_expectation.py ..F @@ -100,7 +100,7 @@ Let's run this:: $ py.test =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 3 items test_expectation.py ..x @@ -170,8 +170,8 @@ Let's also run with a stringinput that will lead to a failing test:: def test_valid_string(stringinput): > assert stringinput.isalpha() - E assert () - E + where = '!'.isalpha + E assert () + E + where = '!'.isalpha test_strings.py:3: AssertionError 1 failed in 0.01 seconds @@ -185,7 +185,7 @@ listlist:: $ py.test -q -rs test_strings.py s ========================= short test summary info ========================== - SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:1139: got empty parameter set, function test_valid_string at /tmp/doc-exec-100/test_strings.py:1 + SKIP [1] /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1139: got empty parameter set, function test_valid_string at /tmp/doc-exec-195/test_strings.py:1 1 skipped in 0.01 seconds For further examples, you might want to look at :ref:`more diff --git a/doc/en/skipping.txt b/doc/en/skipping.txt index ab6c838ba..51b8d3265 100644 --- a/doc/en/skipping.txt +++ b/doc/en/skipping.txt @@ -164,10 +164,10 @@ Running it with the report-on-xfail option gives this output:: example $ py.test -rx xfail_demo.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 - collected 6 items + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 + collected 7 items - xfail_demo.py xxxxxx + xfail_demo.py xxxxxxx ========================= short test summary info ========================== XFAIL xfail_demo.py::test_hello XFAIL xfail_demo.py::test_hello2 @@ -180,8 +180,9 @@ Running it with 
the report-on-xfail option gives this output:: condition: pytest.__version__[0] != "17" XFAIL xfail_demo.py::test_hello6 reason: reason + XFAIL xfail_demo.py::test_hello7 - ======================== 6 xfailed in 0.05 seconds ========================= + ======================== 7 xfailed in 0.05 seconds ========================= .. _`skip/xfail with parametrize`: diff --git a/doc/en/tmpdir.txt b/doc/en/tmpdir.txt index 3d749eea3..e24f5180f 100644 --- a/doc/en/tmpdir.txt +++ b/doc/en/tmpdir.txt @@ -29,7 +29,7 @@ Running this would result in a passed test except for the last $ py.test test_tmpdir.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 1 items test_tmpdir.py F @@ -37,7 +37,7 @@ Running this would result in a passed test except for the last ================================= FAILURES ================================= _____________________________ test_create_file _____________________________ - tmpdir = local('/tmp/pytest-25/test_create_file0') + tmpdir = local('/tmp/pytest-506/test_create_file0') def test_create_file(tmpdir): p = tmpdir.mkdir("sub").join("hello.txt") @@ -48,7 +48,7 @@ Running this would result in a passed test except for the last E assert 0 test_tmpdir.py:7: AssertionError - ========================= 1 failed in 0.01 seconds ========================= + ========================= 1 failed in 0.02 seconds ========================= .. _`base temporary directory`: diff --git a/doc/en/unittest.txt b/doc/en/unittest.txt index fff7dc103..0472ac338 100644 --- a/doc/en/unittest.txt +++ b/doc/en/unittest.txt @@ -88,7 +88,7 @@ the ``self.db`` values in the traceback:: $ py.test test_unittest_db.py =========================== test session starts ============================ - platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 + platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 collected 2 items test_unittest_db.py FF @@ -101,7 +101,7 @@ the ``self.db`` values in the traceback:: def test_method1(self): assert hasattr(self, "db") > assert 0, self.db # fail for demo purposes - E AssertionError: + E AssertionError: .DummyDB object at 0x2b12849f90b8> test_unittest_db.py:9: AssertionError ___________________________ MyTest.test_method2 ____________________________ @@ -110,10 +110,10 @@ the ``self.db`` values in the traceback:: def test_method2(self): > assert 0, self.db # fail for demo purposes - E AssertionError: + E AssertionError: .DummyDB object at 0x2b12849f90b8> test_unittest_db.py:12: AssertionError - ========================= 2 failed in 0.04 seconds ========================= + ========================= 2 failed in 0.05 seconds ========================= This default pytest traceback shows that the two test methods share the same ``self.db`` instance which was our intention @@ -160,7 +160,7 @@ Running this test module ...:: $ py.test -q test_unittest_cleandir.py . - 1 passed in 0.03 seconds + 1 passed in 0.05 seconds ... gives us one passed test because the ``initdir`` fixture function was executed ahead of the ``test_method``. 
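The passing run relies on an autouse fixture injected into a ``unittest.TestCase`` subclass; a sketch of the documented pattern (reconstructed from the surrounding text, details assumed)::

    # content of test_unittest_cleandir.py - sketch of the autouse pattern
    import pytest
    import unittest

    class MyTest(unittest.TestCase):

        @pytest.fixture(autouse=True)
        def initdir(self, tmpdir):
            tmpdir.chdir()  # change into the per-test temporary directory
            tmpdir.join("samplefile.ini").write("# testdata")

        def test_method(self):
            with open("samplefile.ini") as f:
                s = f.read()
            assert "testdata" in s

Because ``initdir`` is marked ``autouse=True`` it runs ahead of every test method in the class, which is why ``test_method`` finds its file without naming any fixture in its signature.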
diff --git a/setup.py b/setup.py index e3ad2ad15..2c877151f 100644 --- a/setup.py +++ b/setup.py @@ -27,7 +27,7 @@ def main(): name='pytest', description='pytest: simple powerful testing with Python', long_description=long_description, - version='2.6.1.dev1', + version='2.6.2.dev1', url='http://pytest.org', license='MIT license', platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'], diff --git a/testing/python/fixture.py b/testing/python/fixture.py index d2b18c39b..fd8131827 100644 --- a/testing/python/fixture.py +++ b/testing/python/fixture.py @@ -1692,22 +1692,22 @@ class TestFixtureMarker: """) result = testdir.runpytest("-v") result.stdout.fnmatch_lines(""" - test_mod1.py@1::test_func[s1] PASSED - test_mod2.py@1::test_func2[s1] PASSED - test_mod2.py@3::test_func3[s1-m1] PASSED - test_mod2.py@5::test_func3b[s1-m1] PASSED - test_mod2.py@3::test_func3[s1-m2] PASSED - test_mod2.py@5::test_func3b[s1-m2] PASSED - test_mod1.py@1::test_func[s2] PASSED - test_mod2.py@1::test_func2[s2] PASSED - test_mod2.py@3::test_func3[s2-m1] PASSED - test_mod2.py@5::test_func3b[s2-m1] PASSED - test_mod2.py@7::test_func4[m1] PASSED - test_mod2.py@3::test_func3[s2-m2] PASSED - test_mod2.py@5::test_func3b[s2-m2] PASSED - test_mod2.py@7::test_func4[m2] PASSED - test_mod1.py@3::test_func1[m1] PASSED - test_mod1.py@3::test_func1[m2] PASSED + test_mod1.py::test_func[s1] PASSED + test_mod2.py::test_func2[s1] PASSED + test_mod2.py::test_func3[s1-m1] PASSED + test_mod2.py::test_func3b[s1-m1] PASSED + test_mod2.py::test_func3[s1-m2] PASSED + test_mod2.py::test_func3b[s1-m2] PASSED + test_mod1.py::test_func[s2] PASSED + test_mod2.py::test_func2[s2] PASSED + test_mod2.py::test_func3[s2-m1] PASSED + test_mod2.py::test_func3b[s2-m1] PASSED + test_mod2.py::test_func4[m1] PASSED + test_mod2.py::test_func3[s2-m2] PASSED + test_mod2.py::test_func3b[s2-m2] PASSED + test_mod2.py::test_func4[m2] PASSED + test_mod1.py::test_func1[m1] PASSED + test_mod1.py::test_func1[m2] PASSED """) def test_class_ordering(self, testdir): @@ -1744,18 +1744,18 @@ class TestFixtureMarker: """) result = testdir.runpytest("-vs") result.stdout.fnmatch_lines(""" - test_class_ordering.py@4::TestClass2::test_1[1-a] PASSED - test_class_ordering.py@4::TestClass2::test_1[2-a] PASSED - test_class_ordering.py@6::TestClass2::test_2[1-a] PASSED - test_class_ordering.py@6::TestClass2::test_2[2-a] PASSED - test_class_ordering.py@4::TestClass2::test_1[1-b] PASSED - test_class_ordering.py@4::TestClass2::test_1[2-b] PASSED - test_class_ordering.py@6::TestClass2::test_2[1-b] PASSED - test_class_ordering.py@6::TestClass2::test_2[2-b] PASSED - test_class_ordering.py@9::TestClass::test_3[1-a] PASSED - test_class_ordering.py@9::TestClass::test_3[2-a] PASSED - test_class_ordering.py@9::TestClass::test_3[1-b] PASSED - test_class_ordering.py@9::TestClass::test_3[2-b] PASSED + test_class_ordering.py::TestClass2::test_1[1-a] PASSED + test_class_ordering.py::TestClass2::test_1[2-a] PASSED + test_class_ordering.py::TestClass2::test_2[1-a] PASSED + test_class_ordering.py::TestClass2::test_2[2-a] PASSED + test_class_ordering.py::TestClass2::test_1[1-b] PASSED + test_class_ordering.py::TestClass2::test_1[2-b] PASSED + test_class_ordering.py::TestClass2::test_2[1-b] PASSED + test_class_ordering.py::TestClass2::test_2[2-b] PASSED + test_class_ordering.py::TestClass::test_3[1-a] PASSED + test_class_ordering.py::TestClass::test_3[2-a] PASSED + test_class_ordering.py::TestClass::test_3[1-b] PASSED + test_class_ordering.py::TestClass::test_3[2-b] PASSED """) def 
test_parametrize_separated_order_higher_scope_first(self, testdir): diff --git a/testing/test_assertrewrite.py b/testing/test_assertrewrite.py index 21e22c15f..72e2d4e10 100644 --- a/testing/test_assertrewrite.py +++ b/testing/test_assertrewrite.py @@ -539,3 +539,25 @@ class TestAssertionRewriteHookDetails(object): result.stdout.fnmatch_lines([ '* 1 passed*', ]) + + def test_read_pyc(self, tmpdir): + """ + Ensure that the `_read_pyc` can properly deal with corrupted pyc files. + In those circumstances it should just give up instead of generating + an exception that is propagated to the caller. + """ + import py_compile + from _pytest.assertion.rewrite import _read_pyc + + source = tmpdir.join('source.py') + pyc = source + 'c' + + source.write('def test(): pass') + py_compile.compile(str(source), str(pyc)) + + contents = pyc.read(mode='rb') + strip_bytes = 20 # header is around 8 bytes, strip a little more + assert len(contents) > strip_bytes + pyc.write(contents[:strip_bytes], mode='wb') + + assert _read_pyc(source, str(pyc)) is None # no error diff --git a/testing/test_capture.py b/testing/test_capture.py index 591b6761d..28199fb91 100644 --- a/testing/test_capture.py +++ b/testing/test_capture.py @@ -1012,3 +1012,13 @@ def test_capturing_and_logging_fundamentals(testdir, method): """) assert "atexit" not in result.stderr.str() + +def test_error_attribute_issue555(testdir): + testdir.makepyfile(""" + import sys + def test_capattr(): + assert sys.stdout.errors == "strict" + assert sys.stderr.errors == "strict" + """) + reprec = testdir.inline_run() + reprec.assertoutcome(passed=1) diff --git a/testing/test_conftest.py b/testing/test_conftest.py index 8bf936dbe..3f39cfc5d 100644 --- a/testing/test_conftest.py +++ b/testing/test_conftest.py @@ -251,7 +251,7 @@ def test_conftest_found_with_double_dash(testdir): def test_hello(found): assert found == 1 """)) - result = testdir.runpytest(str(p) + "@2::test_hello", "-h") + result = testdir.runpytest(str(p) + "::test_hello", "-h") result.stdout.fnmatch_lines(""" *--hello-world* """) diff --git a/testing/test_parseopt.py b/testing/test_parseopt.py index 36defef2c..4b3a71475 100644 --- a/testing/test_parseopt.py +++ b/testing/test_parseopt.py @@ -145,21 +145,6 @@ class TestParser: assert args.R == True assert args.S == False - def test_parse_removes_line_number_from_positional_arguments(self, parser): - args = parser.parse(['path.txt@2::item', - 'path2.py::func2[param with .py@123]', - 'path.py@123', - 'hello/path.py@123', - ]) - # we only remove "@NUM" syntax for .py files which are currently - # the only ones which can produce it. 
- assert getattr(args, parseopt.FILE_OR_DIR) == [ - 'path.txt@2::item', - 'path2.py::func2[param with .py@123]', - 'path.py', - 'hello/path.py', - ] - def test_parse_defaultgetter(self): def defaultget(option): if not hasattr(option, 'type'): diff --git a/testing/test_terminal.py b/testing/test_terminal.py index 202e08e0a..b543e1c6d 100644 --- a/testing/test_terminal.py +++ b/testing/test_terminal.py @@ -51,9 +51,9 @@ class TestTerminal: result = testdir.runpytest(*option.args) if option.verbose: result.stdout.fnmatch_lines([ - "*test_pass_skip_fail.py@2::test_ok PASS*", - "*test_pass_skip_fail.py@4::test_skip SKIP*", - "*test_pass_skip_fail.py@6::test_func FAIL*", + "*test_pass_skip_fail.py::test_ok PASS*", + "*test_pass_skip_fail.py::test_skip SKIP*", + "*test_pass_skip_fail.py::test_func FAIL*", ]) else: result.stdout.fnmatch_lines([ @@ -126,7 +126,7 @@ class TestTerminal: ]) result = testdir.runpytest("-v", p2) result.stdout.fnmatch_lines([ - "*test_p2.py <- *test_p1.py@2::TestMore::test_p1*", + "*test_p2.py <- *test_p1.py::TestMore::test_p1*", ]) def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir): @@ -450,17 +450,17 @@ class TestTerminalFunctional: """) result = testdir.runpytest(p1, '-v') result.stdout.fnmatch_lines([ - "*test_verbose_reporting.py@2::test_fail *FAIL*", - "*test_verbose_reporting.py@4::test_pass *PASS*", - "*test_verbose_reporting.py@7::TestClass::test_skip *SKIP*", - "*test_verbose_reporting.py@10::test_gen*0* *FAIL*", + "*test_verbose_reporting.py::test_fail *FAIL*", + "*test_verbose_reporting.py::test_pass *PASS*", + "*test_verbose_reporting.py::TestClass::test_skip *SKIP*", + "*test_verbose_reporting.py::test_gen*0* *FAIL*", ]) assert result.ret == 1 pytestconfig.pluginmanager.skipifmissing("xdist") result = testdir.runpytest(p1, '-v', '-n 1') result.stdout.fnmatch_lines([ - "*FAIL*test_verbose_reporting.py@2::test_fail*", + "*FAIL*test_verbose_reporting.py::test_fail*", ]) assert result.ret == 1 diff --git a/tox.ini b/tox.ini index d1214f5c2..9e2c23d55 100644 --- a/tox.ini +++ b/tox.ini @@ -99,7 +99,7 @@ deps=PyYAML commands= py.test -rfsxX --junitxml={envlogdir}/junit-{envname}.xml [] [testenv:regen] -basepython=python +basepython=python3.4 changedir=doc/en deps=sphinx PyYAML
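With ``basepython`` switched to ``python3.4``, the regen environment rebuilds all documented example transcripts under Python 3, which is what produced the platform/version churn (and the new py3-specific failures) in the ``doc/en`` hunks above. Assuming the standard tox workflow, the regeneration itself is just::

    $ tox -e regen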