commit 5873ca5146

.hgtags | 1
@@ -69,3 +69,4 @@ a064ad64d167508a8e9e73766b1a4e6bd10c85db 2.5.0
 60725b17a9d1af4100abb8be3f9f4ddf6262bf34 2.6.0
 60725b17a9d1af4100abb8be3f9f4ddf6262bf34 2.6.0
 88af949b9611494e2c65d528f9e565b00fb7e8ca 2.6.0
+a4f9639702baa3eb4f3b16e162f74f7b69f3f9e1 2.6.1

CHANGELOG | 18
@@ -1,6 +1,21 @@
 NEXT
 -----------
 
+- fixed issue561: adapt autouse fixture example for python3.
+
+2.6.1
+-----------------------------------
+
+- No longer show line numbers in the --verbose output, the output is now
+  purely the nodeid.  The line number is still shown in failure reports.
+  Thanks Floris Bruynooghe.
+
+- fix issue437 where assertion rewriting could cause pytest-xdist slaves
+  to collect different tests. Thanks Bruno Oliveira.
+
+- fix issue555: add "errors" attribute to capture-streams to satisfy
+  some distutils and possibly other code accessing sys.stdout.errors.
+
 - fix issue547 capsys/capfd also work when output capturing ("-s") is disabled.
 
 - address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via
@@ -17,6 +32,9 @@ NEXT
 - fix issue544 by only removing "@NUM" at the end of "::" separated parts
   and if the part has an ".py" extension
 
+- don't use py.std import helper, rather import things directly.
+  Thanks Bruno Oliveira.
+
 2.6
 -----------------------------------
 

@@ -1,2 +1,2 @@
 #
-__version__ = '2.6.1.dev1'
+__version__ = '2.6.2.dev1'

@@ -1,3 +1,5 @@
+import traceback
+import types
 import py
 import sys, inspect
 from compiler import parse, ast, pycodegen
@@ -477,7 +479,7 @@ def check(s, frame=None):
 def interpret(source, frame, should_fail=False):
     module = Interpretable(parse(source, 'exec').node)
     #print "got module", module
-    if isinstance(frame, py.std.types.FrameType):
+    if isinstance(frame, types.FrameType):
         frame = py.code.Frame(frame)
     try:
         module.run(frame)
@@ -487,7 +489,6 @@ def interpret(source, frame, should_fail=False):
     except passthroughex:
         raise
     except:
-        import traceback
         traceback.print_exc()
     if should_fail:
         return ("(assertion failed, but when it was re-run for "

@@ -131,7 +131,7 @@ class AssertionRewritingHook(object):
        pyc = os.path.join(cache_dir, cache_name)
        # Notice that even if we're in a read-only directory, I'm going
        # to check for a cached pyc. This may not be optimal...
-       co = _read_pyc(fn_pypath, pyc)
+       co = _read_pyc(fn_pypath, pyc, state.trace)
        if co is None:
            state.trace("rewriting %r" % (fn,))
            co = _rewrite_test(state, fn_pypath)
@@ -289,7 +289,7 @@ def _make_rewritten_pyc(state, fn, pyc, co):
     if _write_pyc(state, co, fn, proc_pyc):
         os.rename(proc_pyc, pyc)
 
-def _read_pyc(source, pyc):
+def _read_pyc(source, pyc, trace=lambda x: None):
     """Possibly read a pytest pyc containing rewritten code.
 
     Return rewritten code if successful or None if not.
@@ -298,23 +298,27 @@ def _read_pyc(source, pyc):
         fp = open(pyc, "rb")
     except IOError:
         return None
-    try:
+    with fp:
         try:
             mtime = int(source.mtime())
             data = fp.read(8)
-        except EnvironmentError:
+        except EnvironmentError as e:
+            trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
             return None
         # Check for invalid or out of date pyc file.
         if (len(data) != 8 or data[:4] != imp.get_magic() or
                 struct.unpack("<l", data[4:])[0] != mtime):
+            trace('_read_pyc(%s): invalid or out of date pyc' % source)
             return None
-        co = marshal.load(fp)
+        try:
+            co = marshal.load(fp)
+        except Exception as e:
+            trace('_read_pyc(%s): marshal.load error %s' % (source, e))
+            return None
         if not isinstance(co, types.CodeType):
             # That's interesting....
+            trace('_read_pyc(%s): not a code object' % source)
             return None
         return co
-    finally:
-        fp.close()
 
 
 def rewrite_asserts(mod):

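The trace parameter threaded through _read_pyc above is what surfaces the
issue437 cache diagnostics. The same pattern in isolation, with a no-op
default so the function stays silent unless the caller injects a logger,
looks like this (standalone sketch; names are illustrative, not pytest's
API)::

    import marshal
    import types

    def read_cached_code(path, trace=lambda msg: None):
        """Return a code object loaded from `path`, or None (reporting why via trace)."""
        try:
            fp = open(path, "rb")
        except IOError as e:
            trace("read_cached_code(%s): %s" % (path, e))
            return None
        with fp:
            try:
                co = marshal.load(fp)
            except Exception as e:
                trace("read_cached_code(%s): marshal.load error %s" % (path, e))
                return None
            if not isinstance(co, types.CodeType):
                trace("read_cached_code(%s): not a code object" % path)
                return None
            return co

    # read_cached_code("x.pyc", trace=print)  # opt in to diagnostics
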
@@ -1,4 +1,5 @@
 """Utilities for assertion debugging"""
+import pprint
 
 import py
 try:
@@ -168,6 +169,7 @@ def _diff_text(left, right, verbose=False):
 
     If the input are bytes they will be safely converted to text.
     """
+    from difflib import ndiff
     explanation = []
     if isinstance(left, py.builtin.bytes):
         left = u(repr(left)[1:-1]).replace(r'\n', '\n')
@@ -195,7 +197,7 @@ def _diff_text(left, right, verbose=False):
             left = left[:-i]
             right = right[:-i]
     explanation += [line.strip('\n')
-                    for line in py.std.difflib.ndiff(left.splitlines(),
+                    for line in ndiff(left.splitlines(),
                                                      right.splitlines())]
     return explanation
 
@@ -214,8 +216,8 @@ def _compare_eq_sequence(left, right, verbose=False):
         explanation += [
             u('Right contains more items, first extra item: %s') %
             py.io.saferepr(right[len(left)],)]
-    return explanation # + _diff_text(py.std.pprint.pformat(left),
-                       #              py.std.pprint.pformat(right))
+    return explanation # + _diff_text(pprint.pformat(left),
+                       #              pprint.pformat(right))
 
 
 def _compare_eq_set(left, right, verbose=False):
@@ -242,7 +244,7 @@ def _compare_eq_dict(left, right, verbose=False):
                         len(same)]
     elif same:
         explanation += [u('Common items:')]
-        explanation += py.std.pprint.pformat(same).splitlines()
+        explanation += pprint.pformat(same).splitlines()
     diff = set(k for k in common if left[k] != right[k])
     if diff:
         explanation += [u('Differing items:')]
@@ -252,12 +254,12 @@ def _compare_eq_dict(left, right, verbose=False):
     extra_left = set(left) - set(right)
     if extra_left:
         explanation.append(u('Left contains more items:'))
-        explanation.extend(py.std.pprint.pformat(
+        explanation.extend(pprint.pformat(
             dict((k, left[k]) for k in extra_left)).splitlines())
     extra_right = set(right) - set(left)
    if extra_right:
         explanation.append(u('Right contains more items:'))
-        explanation.extend(py.std.pprint.pformat(
+        explanation.extend(pprint.pformat(
             dict((k, right[k]) for k in extra_right)).splitlines())
     return explanation
 

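The explanation lists built above come from difflib.ndiff; a quick
standalone look at what that call produces (sketch)::

    from difflib import ndiff

    left = "spam\neggs"
    right = "spam\nbacon"
    explanation = [line.strip('\n')
                   for line in ndiff(left.splitlines(), right.splitlines())]
    print(explanation)    # -> ['  spam', '- eggs', '+ bacon']
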
@@ -223,6 +223,7 @@ def safe_text_dupfile(f, mode, default_encoding="UTF8"):
 
 
 class EncodedFile(object):
+    errors = "strict" # possibly needed by py3 code (issue555)
     def __init__(self, buffer, encoding):
         self.buffer = buffer
         self.encoding = encoding

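That single errors class attribute is the whole issue555 fix: code such as
distutils may probe sys.stdout.errors on whatever stream has replaced
stdout. A standalone sketch of why the attribute needs to exist
(FakeCapture is hypothetical, not pytest's class)::

    import sys

    class FakeCapture(object):
        errors = "strict"    # the attribute issue555's callers probe for
        encoding = "UTF-8"

        def write(self, s):
            pass

        def flush(self):
            pass

    old = sys.stdout
    sys.stdout = FakeCapture()
    probed = sys.stdout.errors   # no AttributeError, thanks to the class attribute
    sys.stdout = old
    print(probed)                # -> strict
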
@@ -1,8 +1,12 @@
 """ command line options, ini-file and conftest.py processing. """
+import argparse
+import shlex
+import traceback
+import types
+import warnings
+
 import py
 # DON't import pytest here because it causes import cycle troubles
 import re
 import sys, os
 from _pytest import hookspec # the extension point definitions
 from _pytest.core import PluginManager
@@ -29,7 +33,7 @@ def main(args=None, plugins=None):
     except ConftestImportFailure:
         e = sys.exc_info()[1]
         tw = py.io.TerminalWriter(sys.stderr)
-        for line in py.std.traceback.format_exception(*e.excinfo):
+        for line in traceback.format_exception(*e.excinfo):
             tw.line(line.rstrip(), red=True)
         tw.line("ERROR: could not load %s\n" % (e.path), red=True)
         return 4
@@ -71,7 +75,7 @@ def _prepareconfig(args=None, plugins=None):
     elif not isinstance(args, (tuple, list)):
         if not isinstance(args, str):
             raise ValueError("not a string or argument list: %r" % (args,))
-        args = py.std.shlex.split(args)
+        args = shlex.split(args)
     pluginmanager = get_plugin_manager()
     try:
         if plugins:
@@ -181,8 +185,7 @@ class Parser:
                 a = option.attrs()
                 arggroup.add_argument(*n, **a)
         # bash like autocompletion for dirs (appending '/')
-        optparser.add_argument(FILE_OR_DIR, nargs='*', type=node_with_line_number,
-                               ).completer=filescompleter
+        optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter
         return optparser
 
     def parse_setoption(self, args, option):
@@ -229,7 +232,7 @@ class ArgumentError(Exception):
 
 
 class Argument:
-    """class that mimics the necessary behaviour of py.std.optparse.Option """
+    """class that mimics the necessary behaviour of optparse.Option """
     _typ_map = {
         'int': int,
         'string': str,
@@ -247,7 +250,7 @@ class Argument:
         try:
             help = attrs['help']
             if '%default' in help:
-                py.std.warnings.warn(
+                warnings.warn(
                     'pytest now uses argparse. "%default" should be'
                     ' changed to "%(default)s" ',
                     FutureWarning,
@@ -263,7 +266,7 @@ class Argument:
         if isinstance(typ, py.builtin._basestring):
             if typ == 'choice':
                 if self.TYPE_WARN:
-                    py.std.warnings.warn(
+                    warnings.warn(
                         'type argument to addoption() is a string %r.'
                         ' For parsearg this is optional and when supplied '
                         ' should be a type.'
@@ -275,7 +278,7 @@ class Argument:
                 attrs['type'] = type(attrs['choices'][0])
             else:
                 if self.TYPE_WARN:
-                    py.std.warnings.warn(
+                    warnings.warn(
                         'type argument to addoption() is a string %r.'
                         ' For parsearg this should be a type.'
                         ' (options: %s)' % (typ, names),
@@ -395,10 +398,10 @@ class OptionGroup:
         self.options.append(option)
 
 
-class MyOptionParser(py.std.argparse.ArgumentParser):
+class MyOptionParser(argparse.ArgumentParser):
     def __init__(self, parser):
         self._parser = parser
-        py.std.argparse.ArgumentParser.__init__(self, usage=parser._usage,
+        argparse.ArgumentParser.__init__(self, usage=parser._usage,
             add_help=False, formatter_class=DropShorterLongHelpFormatter)
 
     def parse_args(self, args=None, namespace=None):
@@ -407,12 +410,12 @@ class MyOptionParser(py.std.argparse.ArgumentParser):
         if argv:
             for arg in argv:
                 if arg and arg[0] == '-':
-                    msg = py.std.argparse._('unrecognized arguments: %s')
+                    msg = argparse._('unrecognized arguments: %s')
                     self.error(msg % ' '.join(argv))
             getattr(args, FILE_OR_DIR).extend(argv)
         return args
 
-class DropShorterLongHelpFormatter(py.std.argparse.HelpFormatter):
+class DropShorterLongHelpFormatter(argparse.HelpFormatter):
     """shorten help for long options that differ only in extra hyphens
 
     - collapse **long** options that are the same except for extra hyphens
@@ -422,7 +425,7 @@ class DropShorterLongHelpFormatter(py.std.argparse.HelpFormatter):
     - cache result on action object as this is called at least 2 times
     """
     def _format_action_invocation(self, action):
-        orgstr = py.std.argparse.HelpFormatter._format_action_invocation(self, action)
+        orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
         if orgstr and orgstr[0] != '-': # only optional arguments
             return orgstr
         res = getattr(action, '_formatted_action_invocation', None)
@@ -746,7 +749,7 @@ class Config(object):
         self.hook.pytest_cmdline_preparse(config=self, args=args)
         args = self._parser.parse_setoption(args, self.option)
         if not args:
-            args.append(py.std.os.getcwd())
+            args.append(os.getcwd())
         self.args = args
 
     def addinivalue_line(self, name, line):
@@ -784,11 +787,11 @@ class Config(object):
         if type == "pathlist":
             dp = py.path.local(self.inicfg.config.path).dirpath()
             l = []
-            for relpath in py.std.shlex.split(value):
+            for relpath in shlex.split(value):
                 l.append(dp.join(relpath, abs=True))
             return l
         elif type == "args":
-            return py.std.shlex.split(value)
+            return shlex.split(value)
         elif type == "linelist":
             return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
         else:
@@ -862,13 +865,6 @@ def getcfg(args, inibasenames):
     return {}
 
 
-rex_pyat = re.compile(r'(.*\.py)@\d+$')
-
-def node_with_line_number(string):
-    return "::".join(rex_pyat.sub(lambda m: m.group(1), part)
-                     for part in string.split("::"))
-
-
 def setns(obj, dic):
     import pytest
     for name, value in dic.items():
@@ -876,7 +872,7 @@ def setns(obj, dic):
             mod = getattr(obj, name, None)
             if mod is None:
                 modname = "pytest.%s" % name
-                mod = py.std.types.ModuleType(modname)
+                mod = types.ModuleType(modname)
                 sys.modules[modname] = mod
                 mod.__all__ = []
                 setattr(obj, name, mod)

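For reference, the deleted node_with_line_number helper implemented the
issue544 rule quoted in the CHANGELOG: strip a trailing "@NUM" only from
"::"-separated parts that end in ".py". Rebuilt from the removed lines as a
runnable sketch (the helper became unnecessary once --verbose stopped
printing "@NUM" suffixes)::

    import re

    rex_pyat = re.compile(r'(.*\.py)@\d+$')

    def strip_line_numbers(nodeid):
        """Drop a trailing @NUM from .py parts of a :: separated node id."""
        return "::".join(rex_pyat.sub(lambda m: m.group(1), part)
                         for part in nodeid.split("::"))

    print(strip_line_numbers("test_server.py@3::test_send_http"))
    # -> test_server.py::test_send_http
    print(strip_line_numbers("pkg@2::test_x"))
    # -> pkg@2::test_x  (unchanged: the part has no ".py" extension)
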
@@ -1,6 +1,7 @@
 """
 pytest PluginManager, basic initialization and tracing.
 """
+import os
 import sys
 import inspect
 import py
@@ -154,7 +155,7 @@ class PluginManager(object):
     # API for bootstrapping
     #
     def _envlist(self, varname):
-        val = py.std.os.environ.get(varname, None)
+        val = os.environ.get(varname, None)
         if val is not None:
             return val.split(',')
         return ()
@@ -221,7 +222,7 @@ class PluginManager(object):
                 return self.import_plugin(modname[7:])
             raise
         except:
-            e = py.std.sys.exc_info()[1]
+            e = sys.exc_info()[1]
            import pytest
            if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
                raise

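_envlist above reads a comma-separated plugin list from an environment
variable; its behaviour in isolation (standalone sketch, variable names
hypothetical)::

    import os

    def envlist(varname):
        val = os.environ.get(varname, None)
        if val is not None:
            return val.split(',')
        return ()

    os.environ['DEMO_PLUGINS'] = 'xdist,cov'
    print(envlist('DEMO_PLUGINS'))    # -> ['xdist', 'cov']
    print(envlist('UNSET_VARIABLE'))  # -> ()
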
@@ -1,5 +1,6 @@
 """ discover and run doctests in modules and test files."""
 
 from __future__ import absolute_import
+import traceback
 import pytest, py
 from _pytest.python import FixtureRequest, FuncFixtureInfo
 from py._code.code import TerminalRepr, ReprFileLocation
@@ -43,7 +44,7 @@ class DoctestItem(pytest.Item):
         self.runner.run(self.dtest)
 
     def repr_failure(self, excinfo):
-        doctest = py.std.doctest
+        import doctest
         if excinfo.errisinstance((doctest.DocTestFailure,
                                   doctest.UnexpectedException)):
             doctestfailure = excinfo.value
@@ -56,8 +57,8 @@ class DoctestItem(pytest.Item):
             lineno = test.lineno + example.lineno + 1
             message = excinfo.type.__name__
             reprlocation = ReprFileLocation(filename, lineno, message)
-            checker = py.std.doctest.OutputChecker()
-            REPORT_UDIFF = py.std.doctest.REPORT_UDIFF
+            checker = doctest.OutputChecker()
+            REPORT_UDIFF = doctest.REPORT_UDIFF
             filelines = py.path.local(filename).readlines(cr=0)
             lines = []
             if lineno is not None:
@@ -78,7 +79,7 @@ class DoctestItem(pytest.Item):
                 inner_excinfo = py.code.ExceptionInfo(excinfo.value.exc_info)
                 lines += ["UNEXPECTED EXCEPTION: %s" %
                           repr(inner_excinfo.value)]
-                lines += py.std.traceback.format_exception(*excinfo.value.exc_info)
+                lines += traceback.format_exception(*excinfo.value.exc_info)
             return ReprFailDoctest(reprlocation, lines)
         else:
             return super(DoctestItem, self).repr_failure(excinfo)
@@ -88,7 +89,7 @@ class DoctestItem(pytest.Item):
 
 class DoctestTextfile(DoctestItem, pytest.File):
     def runtest(self):
-        doctest = py.std.doctest
+        import doctest
         # satisfy `FixtureRequest` constructor...
         self.funcargs = {}
         fm = self.session._fixturemanager
@@ -106,7 +107,7 @@ class DoctestTextfile(DoctestItem, pytest.File):
 
 class DoctestModule(pytest.File):
     def collect(self):
-        doctest = py.std.doctest
+        import doctest
         if self.fspath.basename == "conftest.py":
             module = self.config._conftest.importconftest(self.fspath)
         else:

@@ -8,8 +8,9 @@ import py
 import _pytest
 
 
+
 def find_toplevel(name):
-    for syspath in py.std.sys.path:
+    for syspath in sys.path:
         base = py.path.local(syspath)
         lib = base/name
         if lib.check(dir=1):
@@ -35,9 +36,10 @@ def pkg_to_mapping(name):
     return name2src
 
 def compress_mapping(mapping):
-    data = py.std.pickle.dumps(mapping, 2)
-    data = py.std.zlib.compress(data, 9)
-    data = py.std.base64.encodestring(data)
+    import base64, pickle, zlib
+    data = pickle.dumps(mapping, 2)
+    data = zlib.compress(data, 9)
+    data = base64.encodestring(data)
     data = data.decode('ascii')
     return data
 

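compress_mapping above pickles, deflates and base64-encodes the
name-to-source mapping. A round-trip sketch with a matching decoder
(decompress_mapping is illustrative, not part of the module; note that
base64.encodestring is the deprecated alias of encodebytes on Python 3)::

    import base64
    import pickle
    import zlib

    def compress_mapping(mapping):
        data = pickle.dumps(mapping, 2)
        data = zlib.compress(data, 9)
        data = base64.encodebytes(data)   # encodestring() is the older alias
        return data.decode('ascii')

    def decompress_mapping(text):
        data = base64.decodebytes(text.encode('ascii'))
        return pickle.loads(zlib.decompress(data))

    assert decompress_mapping(compress_mapping({'py': 'source'})) == {'py': 'source'}
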
@@ -2,7 +2,6 @@
 
 Based on initial code from Ross Lawley.
 """
-
 import py
 import os
 import re
@@ -10,20 +9,13 @@ import sys
 import time
 
 # Python 2.X and 3.X compatibility
-try:
-    unichr(65)
-except NameError:
+if sys.version_info[0] < 3:
+    from codecs import open
+else:
     unichr = chr
-try:
-    unicode('A')
-except NameError:
     unicode = str
-try:
-    long(1)
-except NameError:
     long = int
 
 
 class Junit(py.xml.Namespace):
     pass
@@ -206,11 +198,7 @@ class LogXML(object):
         self.suite_start_time = time.time()
 
     def pytest_sessionfinish(self):
-        if py.std.sys.version_info[0] < 3:
-            logfile = py.std.codecs.open(self.logfile, 'w', encoding='utf-8')
-        else:
-            logfile = open(self.logfile, 'w', encoding='utf-8')
+        logfile = open(self.logfile, 'w', encoding='utf-8')
 
         suite_stop_time = time.time()
         suite_time_delta = suite_stop_time - self.suite_start_time
         numtests = self.passed + self.failed

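The removed version branch in pytest_sessionfinish works because of the
compat import earlier in the file: on Python 2 the builtin open() has no
encoding argument, so rebinding the name to codecs.open gives both Python
versions one call signature. The trick in isolation (sketch; file name is
arbitrary)::

    import sys

    if sys.version_info[0] < 3:
        from codecs import open  # shadows the builtin on Python 2 only

    # identical call on both Python 2 and 3
    with open("junit-sample.xml", "w", encoding="utf-8") as logfile:
        logfile.write(u"<testsuite/>")
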
@@ -1,4 +1,5 @@
 """ core implementation of testing process: init, session, runtest loop. """
+import re
 
 import py
 import pytest, _pytest
@@ -19,7 +20,7 @@ EXIT_INTERRUPTED = 2
 EXIT_INTERNALERROR = 3
 EXIT_USAGEERROR = 4
 
-name_re = py.std.re.compile("^[a-zA-Z_]\w*$")
+name_re = re.compile("^[a-zA-Z_]\w*$")
 
 def pytest_addoption(parser):
     parser.addini("norecursedirs", "directory patterns to avoid for recursion",
@@ -315,7 +316,7 @@ class Node(object):
             except py.builtin._sysex:
                 raise
             except:
-                failure = py.std.sys.exc_info()
+                failure = sys.exc_info()
                 setattr(self, exattrname, failure)
                 raise
             setattr(self, attrname, res)

@@ -1,8 +1,12 @@
 """ interactive debugging with PDB, the Python Debugger. """
-
-import pytest, py
+from __future__ import absolute_import
+import pdb
+import sys
+
+import pytest
+import py
 
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
     group._addoption('--pdb',
@@ -16,10 +20,10 @@ def pytest_configure(config):
     if config.getvalue("usepdb"):
         config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
 
-    old = (py.std.pdb.set_trace, pytestPDB._pluginmanager)
+    old = (pdb.set_trace, pytestPDB._pluginmanager)
     def fin():
-        py.std.pdb.set_trace, pytestPDB._pluginmanager = old
-    py.std.pdb.set_trace = pytest.set_trace
+        pdb.set_trace, pytestPDB._pluginmanager = old
+    pdb.set_trace = pytest.set_trace
     pytestPDB._pluginmanager = config.pluginmanager
     config._cleanup.append(fin)
 
@@ -38,7 +42,7 @@ class pytestPDB:
             tw = py.io.TerminalWriter()
             tw.line()
             tw.sep(">", "PDB set_trace (IO-capturing turned off)")
-        py.std.pdb.Pdb().set_trace(frame)
+        pdb.Pdb().set_trace(frame)
 
 
 class PdbInvoke:
@@ -74,7 +78,8 @@ def _enter_pdb(node, excinfo, rep):
 def _postmortem_traceback(excinfo):
     # A doctest.UnexpectedException is not useful for post_mortem.
     # Use the underlying exception instead:
-    if isinstance(excinfo.value, py.std.doctest.UnexpectedException):
+    from doctest import UnexpectedException
+    if isinstance(excinfo.value, UnexpectedException):
         return excinfo.value.exc_info[2]
     else:
         return excinfo._excinfo[2]
@@ -88,7 +93,6 @@ def _find_last_non_hidden_frame(stack):
 
 
 def post_mortem(t):
-    pdb = py.std.pdb
     class Pdb(pdb.Pdb):
         def get_stack(self, f, t):
             stack, i = pdb.Pdb.get_stack(self, f, t)

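pytest_configure above saves pdb.set_trace, patches it, and registers a
fin() cleanup to restore it. The same save/patch/restore pattern reduced to
a standalone sketch (my_set_trace is hypothetical)::

    import pdb

    def my_set_trace(*args, **kwargs):
        print("set_trace intercepted")

    old = pdb.set_trace
    pdb.set_trace = my_set_trace      # patch
    try:
        pdb.set_trace()               # -> "set_trace intercepted"
    finally:
        pdb.set_trace = old           # the fin()-style cleanup
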
@@ -1,15 +1,21 @@
 """ (disabled by default) support for testing pytest and pytest plugins. """
-
-import py, pytest
-import sys, os
+import inspect
+import sys
+import os
 import codecs
 import re
 import time
 import platform
 from fnmatch import fnmatch
-from _pytest.main import Session, EXIT_OK
+import subprocess
+
+import py
+import pytest
 from py.builtin import print_
 from _pytest.core import HookRelay
+
+from _pytest.main import Session, EXIT_OK
 
 
 def get_public_names(l):
     """Only return names from iterator l without a leading underscore."""
@@ -87,10 +93,10 @@ class HookRecorder:
 
     def _makecallparser(self, method):
         name = method.__name__
-        args, varargs, varkw, default = py.std.inspect.getargspec(method)
+        args, varargs, varkw, default = inspect.getargspec(method)
         if not args or args[0] != "self":
             args.insert(0, 'self')
-        fspec = py.std.inspect.formatargspec(args, varargs, varkw, default)
+        fspec = inspect.formatargspec(args, varargs, varkw, default)
         # we use exec because we want to have early type
         # errors on wrong input arguments, using
         # *args/**kwargs delays this and gives errors
@@ -122,7 +128,7 @@ class HookRecorder:
         __tracebackhide__ = True
         i = 0
         entries = list(entries)
-        backlocals = py.std.sys._getframe(1).f_locals
+        backlocals = sys._getframe(1).f_locals
         while entries:
             name, check = entries.pop(0)
             for ind, call in enumerate(self.calls[i:]):
@@ -210,7 +216,7 @@ class TmpTestdir:
 
     def finalize(self):
         for p in self._syspathremove:
-            py.std.sys.path.remove(p)
+            sys.path.remove(p)
         if hasattr(self, '_olddir'):
             self._olddir.chdir()
         # delete modules that have been loaded from tmpdir
@@ -283,7 +289,7 @@ class TmpTestdir:
     def syspathinsert(self, path=None):
         if path is None:
             path = self.tmpdir
-        py.std.sys.path.insert(0, str(path))
+        sys.path.insert(0, str(path))
         self._syspathremove.append(str(path))
 
     def mkdir(self, name):
@@ -426,8 +432,7 @@ class TmpTestdir:
             env['PYTHONPATH'] = os.pathsep.join(filter(None, [
                 str(os.getcwd()), env.get('PYTHONPATH', '')]))
             kw['env'] = env
-        #print "env", env
-        return py.std.subprocess.Popen(cmdargs,
+        return subprocess.Popen(cmdargs,
                                 stdout=stdout, stderr=stderr, **kw)
 
     def run(self, *cmdargs):
@@ -474,9 +479,9 @@ class TmpTestdir:
     def _getpybinargs(self, scriptname):
         if not self.request.config.getvalue("notoolsonpath"):
             # XXX we rely on script referring to the correct environment
-            # we cannot use "(py.std.sys.executable,script)"
+            # we cannot use "(sys.executable,script)"
             # because on windows the script is e.g. a py.test.exe
-            return (py.std.sys.executable, _pytest_fullpath,) # noqa
+            return (sys.executable, _pytest_fullpath,) # noqa
         else:
             pytest.skip("cannot run %r with --no-tools-on-path" % scriptname)
 
@@ -496,7 +501,7 @@ class TmpTestdir:
 
     def runpython_c(self, command):
         command = self._getsysprepend() + command
-        return self.run(py.std.sys.executable, "-c", command)
+        return self.run(sys.executable, "-c", command)
 
     def runpytest(self, *args):
         p = py.path.local.make_numbered_dir(prefix="runpytest-",
@@ -523,7 +528,7 @@ class TmpTestdir:
 
     def spawn(self, cmd, expect_timeout=10.0):
         pexpect = pytest.importorskip("pexpect", "3.0")
-        if hasattr(sys, 'pypy_version_info') and '64' in py.std.platform.machine():
+        if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
             pytest.skip("pypy-64 bit not supported")
         if sys.platform == "darwin":
             pytest.xfail("pexpect does not work reliably on darwin?!")
@@ -670,7 +675,7 @@ class LineMatcher:
 
     def fnmatch_lines(self, lines2):
         def show(arg1, arg2):
-            py.builtin.print_(arg1, arg2, file=py.std.sys.stderr)
+            py.builtin.print_(arg1, arg2, file=sys.stderr)
         lines2 = self._getlines(lines2)
         lines1 = self.lines[:]
         nextline = None

@@ -1,7 +1,8 @@
 """ recording warnings during test function execution. """
 
-import py
+import sys
+import warnings
 
 
 def pytest_funcarg__recwarn(request):
     """Return a WarningsRecorder instance that provides these methods:
@@ -13,7 +14,6 @@ def pytest_funcarg__recwarn(request):
     on warning categories.
     """
     if sys.version_info >= (2,7):
-        import warnings
         oldfilters = warnings.filters[:]
         warnings.simplefilter('default')
         def reset_filters():
@@ -30,26 +30,24 @@ def deprecated_call(func, *args, **kwargs):
     """ assert that calling ``func(*args, **kwargs)``
     triggers a DeprecationWarning.
     """
-    warningmodule = py.std.warnings
     l = []
-    oldwarn_explicit = getattr(warningmodule, 'warn_explicit')
+    oldwarn_explicit = getattr(warnings, 'warn_explicit')
     def warn_explicit(*args, **kwargs):
         l.append(args)
         oldwarn_explicit(*args, **kwargs)
-    oldwarn = getattr(warningmodule, 'warn')
+    oldwarn = getattr(warnings, 'warn')
     def warn(*args, **kwargs):
         l.append(args)
         oldwarn(*args, **kwargs)
 
-    warningmodule.warn_explicit = warn_explicit
-    warningmodule.warn = warn
+    warnings.warn_explicit = warn_explicit
+    warnings.warn = warn
     try:
         ret = func(*args, **kwargs)
     finally:
-        warningmodule.warn_explicit = warn_explicit
-        warningmodule.warn = warn
+        warnings.warn_explicit = warn_explicit
+        warnings.warn = warn
     if not l:
         #print warningmodule
         __tracebackhide__ = True
         raise AssertionError("%r did not produce DeprecationWarning" %(func,))
     return ret
@@ -65,7 +63,6 @@ class RecordedWarning:
 
 class WarningsRecorder:
     def __init__(self):
-        warningmodule = py.std.warnings
         self.list = []
         def showwarning(message, category, filename, lineno, line=0):
             self.list.append(RecordedWarning(
@@ -76,8 +73,8 @@ class WarningsRecorder:
             except TypeError:
                 # < python2.6
                 self.old_showwarning(message, category, filename, lineno)
-        self.old_showwarning = warningmodule.showwarning
-        warningmodule.showwarning = showwarning
+        self.old_showwarning = warnings.showwarning
+        warnings.showwarning = showwarning
 
     def pop(self, cls=Warning):
         """ pop the first recorded warning, raise exception if not exists."""
@@ -88,7 +85,6 @@ class WarningsRecorder:
         assert 0, "%r not found in %r" %(cls, self.list)
 
     #def resetregistry(self):
-    #    import warnings
     #    warnings.onceregistry.clear()
     #    warnings.__warningregistry__.clear()
 
@@ -96,4 +92,4 @@ class WarningsRecorder:
         self.list[:] = []
 
     def finalize(self):
-        py.std.warnings.showwarning = self.old_showwarning
+        warnings.showwarning = self.old_showwarning

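Usage of the deprecated_call helper shown above, as it appears in a test
(sketch; the helper is exposed publicly as pytest.deprecated_call, and it
returns the wrapped function's return value)::

    import warnings
    import pytest

    def old_api():
        warnings.warn("use new_api() instead", DeprecationWarning)
        return 42

    def test_old_api_warns():
        assert pytest.deprecated_call(old_api) == 42
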
@@ -1,9 +1,10 @@
 """ basic collect and runtest protocol implementations """
+import bdb
+import sys
+from time import time
+
 import py
 import pytest
-import sys
-from time import time
 from py._code.code import TerminalRepr
 
 def pytest_namespace():
@@ -118,7 +119,7 @@ def check_interactive_exception(call, report):
     return call.excinfo and not (
                 hasattr(report, "wasxfail") or
                 call.excinfo.errisinstance(skip.Exception) or
-                call.excinfo.errisinstance(py.std.bdb.BdbQuit))
+                call.excinfo.errisinstance(bdb.BdbQuit))
 
 def call_runtest_hook(item, when, **kwds):
     hookname = "pytest_runtest_" + when

@@ -1,7 +1,10 @@
 """ support for skip/xfail functions and markers. """
-
-import py, pytest
+import os
+import sys
+import traceback
+
+import py
+import pytest
 
 def pytest_addoption(parser):
     group = parser.getgroup("general")
@@ -79,7 +82,7 @@ class MarkEvaluator:
             msg = [" " * (self.exc[1].offset + 4) + "^",]
             msg.append("SyntaxError: invalid syntax")
         else:
-            msg = py.std.traceback.format_exception_only(*self.exc[:2])
+            msg = traceback.format_exception_only(*self.exc[:2])
         pytest.fail("Error evaluating %r expression\n"
                     "    %s\n"
                     "%s"
@@ -87,7 +90,7 @@ class MarkEvaluator:
                     pytrace=False)
 
     def _getglobals(self):
-        d = {'os': py.std.os, 'sys': py.std.sys, 'config': self.item.config}
+        d = {'os': os, 'sys': sys, 'config': self.item.config}
         func = self.item.obj
         try:
             d.update(func.__globals__)

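_getglobals above is what lets marker condition strings reference os, sys
and config without any imports in the test module; eval() runs with those
names pre-bound. For example (sketch)::

    import pytest

    @pytest.mark.skipif("sys.platform == 'win32'")
    def test_posix_only():
        assert True

    @pytest.mark.skipif("'TRAVIS' in os.environ")
    def test_not_on_ci():
        assert True
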
@@ -5,6 +5,8 @@ This is a good source for looking at the various reporting hooks.
 import pytest
 import py
 import sys
+import time
+
 
 def pytest_addoption(parser):
     group = parser.getgroup("terminal reporting", "reporting", after="general")
@@ -49,7 +51,7 @@ def getreportopt(config):
     optvalue = config.option.report
     if optvalue:
         py.builtin.print_("DEPRECATED: use -r instead of --report option.",
-                          file=py.std.sys.stderr)
+                          file=sys.stderr)
         if optvalue:
             for setting in optvalue.split(","):
                 setting = setting.strip()
@@ -95,7 +97,7 @@ class TerminalReporter:
         self.stats = {}
         self.startdir = self.curdir = py.path.local()
         if file is None:
-            file = py.std.sys.stdout
+            file = sys.stdout
         self._tw = self.writer = py.io.TerminalWriter(file)
         if self.config.option.color == 'yes':
             self._tw.hasmarkup = True
@@ -265,7 +267,7 @@ class TerminalReporter:
 
     @pytest.mark.trylast
     def pytest_sessionstart(self, session):
-        self._sessionstarttime = py.std.time.time()
+        self._sessionstarttime = time.time()
         if not self.showheader:
             return
         self.write_sep("=", "test session starts", bold=True)
@@ -380,9 +382,6 @@ class TerminalReporter:
             fspath = "%s <- %s" % (collect_fspath, fspath)
         if fspath:
             line = str(fspath)
-            if lineno is not None:
-                lineno += 1
-                line += "@" + str(lineno)
             if domain:
                 split = str(domain).split('[')
                 split[0] = split[0].replace('.', '::')  # don't replace '.' in params
@@ -469,7 +468,7 @@ class TerminalReporter:
             self._tw.line(content)
 
     def summary_stats(self):
-        session_duration = py.std.time.time() - self._sessionstarttime
+        session_duration = time.time() - self._sessionstarttime
 
         keys = ("failed passed skipped deselected "
                 "xfailed xpassed warnings").split()

@@ -1,7 +1,11 @@
 """ support for providing temporary directories to test functions. """
-import pytest, py
+import re
+
+import pytest
+import py
 from _pytest.monkeypatch import monkeypatch
 
 
 class TempdirHandler:
     def __init__(self, config):
         self.config = config
@@ -63,7 +67,7 @@ def tmpdir(request):
    path object.
    """
    name = request.node.name
-    name = py.std.re.sub("[\W]", "_", name)
+    name = re.sub("[\W]", "_", name)
    MAXVAL = 30
    if len(name) > MAXVAL:
        name = name[:MAXVAL]

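The effect of the re.sub call above on a parametrized test name, together
with the MAXVAL truncation that follows it (standalone sketch)::

    import re

    def sanitize(name, maxval=30):
        name = re.sub(r"[\W]", "_", name)   # every non-word char becomes _
        return name[:maxval]

    print(sanitize("test_tmp[param-1/2]"))   # -> test_tmp_param_1_2_
    print(sanitize("x" * 40))                # truncated to 30 characters
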
@@ -1,26 +1,24 @@
 """ discovery and running of std-library "unittest" style tests. """
-import pytest, py
+from __future__ import absolute_import
+import traceback
+import sys
+
+import pytest
+import py
+
 
 # for transfering markers
 from _pytest.python import transfer_markers
 
 
+def is_unittest(obj):
+    """Is obj a subclass of unittest.TestCase?"""
+    unittest = sys.modules.get('unittest')
+    if unittest is None:
+        return  # nobody can have derived unittest.TestCase
+    try:
+        return issubclass(obj, unittest.TestCase)
+    except KeyboardInterrupt:
+        raise
+    except:
+        return False
+
+
 def pytest_pycollect_makeitem(collector, name, obj):
-    # has unittest been imported and is obj a subclass of its TestCase?
-    try:
-        if not issubclass(obj, sys.modules["unittest"].TestCase):
-            return
-    except Exception:
-        return
-    # yes, so let's collect it
-    return UnitTestCase(name, parent=collector)
+    if is_unittest(obj):
+        return UnitTestCase(name, parent=collector)
 
 
@@ -41,11 +39,12 @@ class UnitTestCase(pytest.Class):
         super(UnitTestCase, self).setup()
 
     def collect(self):
+        from unittest import TestLoader
         cls = self.obj
         if not getattr(cls, "__test__", True):
             return
         self.session._fixturemanager.parsefactories(self, unittest=True)
-        loader = py.std.unittest.TestLoader()
+        loader = TestLoader()
         module = self.getparent(pytest.Module).obj
         foundsomething = False
         for name in loader.getTestCaseNames(self.obj):
@@ -90,7 +89,7 @@ class TestCaseFunction(pytest.Function):
         except TypeError:
             try:
                 try:
-                    l = py.std.traceback.format_exception(*rawexcinfo)
+                    l = traceback.format_exception(*rawexcinfo)
                     l.insert(0, "NOTE: Incompatible Exception Representation, "
                         "displaying natively:\n\n")
                     pytest.fail("".join(l), pytrace=False)

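A minimal module that exercises the new is_unittest path: pytest collects a
plain TestCase subclass with no pytest-specific code in it (illustrative
example)::

    # content of test_legacy.py
    import unittest

    class TestUpper(unittest.TestCase):
        def test_upper(self):
            self.assertEqual("foo".upper(), "FOO")
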
@@ -0,0 +1,59 @@
+pytest-2.6.1: fixes and new xfail feature
+===========================================================================
+
+pytest is a mature Python testing tool with more than a 1100 tests
+against itself, passing on many different interpreters and platforms.
+The 2.6.1 release is drop-in compatible to 2.5.2 and actually fixes some
+regressions introduced with 2.6.0.  It also brings a little feature
+to the xfail marker which now recognizes expected exceptions,
+see the CHANGELOG below.
+
+See docs at:
+
+    http://pytest.org
+
+As usual, you can upgrade from pypi via::
+
+    pip install -U pytest
+
+Thanks to all who contributed, among them:
+
+    Floris Bruynooghe
+    Bruno Oliveira
+    Nicolas Delaby
+
+have fun,
+holger krekel
+
+Changes 2.6.1
+=================
+
+- No longer show line numbers in the --verbose output, the output is now
+  purely the nodeid.  The line number is still shown in failure reports.
+  Thanks Floris Bruynooghe.
+
+- fix issue437 where assertion rewriting could cause pytest-xdist slaves
+  to collect different tests. Thanks Bruno Oliveira.
+
+- fix issue555: add "errors" attribute to capture-streams to satisfy
+  some distutils and possibly other code accessing sys.stdout.errors.
+
+- fix issue547 capsys/capfd also work when output capturing ("-s") is disabled.
+
+- address issue170: allow pytest.mark.xfail(...) to specify expected exceptions via
+  an optional "raises=EXC" argument where EXC can be a single exception
+  or a tuple of exception classes.  Thanks David Mohr for the complete
+  PR.
+
+- fix integration of pytest with unittest.mock.patch decorator when
+  it uses the "new" argument.  Thanks Nicolas Delaby for test and PR.
+
+- fix issue with detecting conftest files if the arguments contain
+  "::" node id specifications (copy pasted from "-v" output)
+
+- fix issue544 by only removing "@NUM" at the end of "::" separated parts
+  and if the part has an ".py" extension
+
+- don't use py.std import helper, rather import things directly.
+  Thanks Bruno Oliveira.

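The raises= feature announced above, as a test module sketch: the xfail
counts as an expected failure only for the listed exception types, and any
other failure is reported as a real one::

    import pytest

    @pytest.mark.xfail(raises=IndexError)
    def test_f():
        l = []
        l[0]                   # IndexError -> reported as xfail

    @pytest.mark.xfail(raises=(TypeError, ValueError))
    def test_g():
        int("not a number")    # ValueError, also accepted
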
@@ -26,7 +26,7 @@ you will see the return value of the function call::
 
     $ py.test test_assert1.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
     collected 1 items
 
     test_assert1.py F
@@ -132,7 +132,7 @@ if you run this module::
 
     $ py.test test_assert2.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
     collected 1 items
 
     test_assert2.py F

@@ -1,188 +0,0 @@
-
-**Test classes, modules or whole projects can make use of
-one or more fixtures**.  All required fixture functions will execute
-before a test from the specifying context executes.  You can use this
-to make tests operate from a pre-initialized directory or with
-certain environment variables or with pre-configured global application
-settings.
-
-For example, the Django_ project requires database
-initialization to be able to import from and use its model objects.
-For that, the `pytest-django`_ plugin provides fixtures which your
-project can then easily depend or extend on, simply by referencing the
-name of the particular fixture.
-
-Fixture functions have limited visilibity which depends on where they
-are defined.  If they are defined on a test class, only its test methods
-may use it.  A fixture defined in a module can only be used
-from that test module.  A fixture defined in a conftest.py file
-can only be used by the tests below the directory of that file.
-Lastly, plugins can define fixtures which are available across all
-projects.
-
-
-Python, Java and many other languages support a so called xUnit_ style
-for providing a fixed state, `test fixtures`_, for running tests.  It
-typically involves calling a autouse function ahead and a teardown
-function after test execute.  In 2005 pytest introduced a scope-specific
-model of automatically detecting and calling autouse and teardown
-functions on a per-module, class or function basis.  The Python unittest
-package and nose have subsequently incorporated them.  This model
-remains supported by pytest as :ref:`classic xunit`.
-
-One property of xunit fixture functions is that they work implicitely
-by preparing global state or setting attributes on TestCase objects.
-By contrast, pytest provides :ref:`funcargs` which allow to
-dependency-inject application test state into test functions or
-methods as function arguments.  If your application is sufficiently modular
-or if you are creating a new project, we recommend you now rather head over to
-:ref:`funcargs` instead because many pytest users agree that using this
-paradigm leads to better application and test organisation.
-
-However, not all programs and frameworks work and can be tested in
-a fully modular way.  They rather require preparation of global state
-like database autouse on which further fixtures like preparing application
-specific tables or wrapping tests in transactions can take place.  For those
-needs, pytest-2.3 now supports new **fixture functions** which come with
-a ton of improvements over classic xunit fixture writing.  Fixture functions:
-
-- allow to separate different autouse concerns into multiple modular functions
-
-- can receive and fully interoperate with :ref:`funcargs <resources>`,
-
-- are called multiple times if its funcargs are parametrized,
-
-- don't need to be defined directly in your test classes or modules,
-  they can also be defined in a plugin or :ref:`conftest.py <conftest.py>` files and get called
-
-- are called on a per-session, per-module, per-class or per-function basis
-  by means of a simple "scope" declaration.
-
-- can access the :ref:`request <request>` object which allows to
-  introspect and interact with the (scoped) testcontext.
-
-- can add cleanup functions which will be invoked when the last test
-  of the fixture test context has finished executing.
-
-All of these features are now demonstrated by little examples.
-
-
-test modules accessing a global resource
--------------------------------------------------------
-
-.. note::
-
-    Relying on `global state is considered bad programming practise <http://en.wikipedia.org/wiki/Global_variable>`_ but when you work with an application
-    that relies on it you often have no choice.
-
-If you want test modules to access a global resource,
-you can stick the resource to the module globals in
-a per-module autouse function.  We use a :ref:`resource factory
-<@pytest.fixture>` to create our global resource::
-
-    # content of conftest.py
-    import pytest
-
-    class GlobalResource:
-        def __init__(self):
-            pass
-
-    @pytest.fixture(scope="session")
-    def globresource():
-        return GlobalResource()
-
-    @pytest.fixture(scope="module")
-    def setresource(request, globresource):
-        request.module.globresource = globresource
-
-Now any test module can access ``globresource`` as a module global::
-
-    # content of test_glob.py
-
-    def test_1():
-        print ("test_1 %s" % globresource)
-    def test_2():
-        print ("test_2 %s" % globresource)
-
-Let's run this module without output-capturing::
-
-    $ py.test -qs test_glob.py
-    FF
-    ================================= FAILURES =================================
-    __________________________________ test_1 __________________________________
-
-        def test_1():
->           print ("test_1 %s" % globresource)
-E           NameError: global name 'globresource' is not defined
-
-    test_glob.py:3: NameError
-    __________________________________ test_2 __________________________________
-
-        def test_2():
->           print ("test_2 %s" % globresource)
-E           NameError: global name 'globresource' is not defined
-
-    test_glob.py:5: NameError
-    2 failed in 0.01 seconds
-
-The two tests see the same global ``globresource`` object.
-
-Parametrizing the global resource
-+++++++++++++++++++++++++++++++++++++++++++++++++
-
-We extend the previous example and add parametrization to the globresource
-factory and also add a finalizer::
-
-    # content of conftest.py
-
-    import pytest
-
-    class GlobalResource:
-        def __init__(self, param):
-            self.param = param
-
-    @pytest.fixture(scope="session", params=[1,2])
-    def globresource(request):
-        g = GlobalResource(request.param)
-        def fin():
-            print "finalizing", g
-        request.addfinalizer(fin)
-        return g
-
-    @pytest.fixture(scope="module")
-    def setresource(request, globresource):
-        request.module.globresource = globresource
-
-And then re-run our test module::
-
-    $ py.test -qs test_glob.py
-    FF
-    ================================= FAILURES =================================
-    __________________________________ test_1 __________________________________
-
-        def test_1():
->           print ("test_1 %s" % globresource)
-E           NameError: global name 'globresource' is not defined
-
-    test_glob.py:3: NameError
-    __________________________________ test_2 __________________________________
-
-        def test_2():
->           print ("test_2 %s" % globresource)
-E           NameError: global name 'globresource' is not defined
-
-    test_glob.py:5: NameError
-    2 failed in 0.01 seconds
-
-We are now running the two tests twice with two different global resource
-instances.  Note that the tests are ordered such that only
-one instance is active at any given time: the finalizer of
-the first globresource instance is called before the second
-instance is created and sent to the autouse functions.

@@ -64,7 +64,7 @@ of the failing function and hide the other one::
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
     collected 2 items
 
     test_module.py .F
@@ -78,7 +78,7 @@ of the failing function and hide the other one::
 
     test_module.py:9: AssertionError
     -------------------------- Captured stdout setup ---------------------------
-    setting up <function test_func2 at 0x2abe0d7241b8>
+    setting up <function test_func2 at 0x2b5d6a81c9d8>
     ==================== 1 failed, 1 passed in 0.01 seconds ====================
 
 Accessing captured output from a test function

@@ -18,7 +18,7 @@
 # The full version, including alpha/beta/rc tags.
 # The short X.Y version.
 version = "2.6"
-release = "2.6.0"
+release = "2.6.1"
 
 import sys, os
 
@@ -225,7 +225,7 @@ latex_documents = [
 
 # The name of an image file (relative to this directory) to place at the top of
 # the title page.
-#latex_logo = None
+latex_logo = 'img/pytest1.png'
 
 # For "manual" documents, if this is true, then toplevel headings are parts,
 # not chapters.

@@ -44,12 +44,12 @@ then you can just invoke ``py.test`` without command line options::
 
     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
     collected 1 items
 
     mymodule.py .
 
-    ========================= 1 passed in 0.04 seconds =========================
+    ========================= 1 passed in 0.06 seconds =========================
 
 It is possible to use fixtures using the ``getfixture`` helper::
 

@ -31,10 +31,10 @@ You can then restrict a test run to only run tests marked with ``webtest``::
|
|||
|
||||
$ py.test -v -m webtest
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_server.py@3::test_send_http PASSED
|
||||
test_server.py::test_send_http PASSED
|
||||
|
||||
=================== 3 tests deselected by "-m 'webtest'" ===================
|
||||
================== 1 passed, 3 deselected in 0.01 seconds ==================
|
||||
|
@ -43,12 +43,12 @@ Or the inverse, running all tests except the webtest ones::
|
|||
|
||||
$ py.test -v -m "not webtest"
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_server.py@6::test_something_quick PASSED
|
||||
test_server.py@8::test_another PASSED
|
||||
test_server.py@11::TestClass::test_method PASSED
|
||||
test_server.py::test_something_quick PASSED
|
||||
test_server.py::test_another PASSED
|
||||
test_server.py::TestClass::test_method PASSED
|
||||
|
||||
================= 1 tests deselected by "-m 'not webtest'" =================
|
||||
================== 3 passed, 1 deselected in 0.01 seconds ==================
|
||||
|
@ -62,10 +62,10 @@ tests based on their module, class, method, or function name::
|
|||
|
||||
$ py.test -v test_server.py::TestClass::test_method
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
|
||||
collecting ... collected 5 items
|
||||
|
||||
test_server.py@11::TestClass::test_method PASSED
|
||||
test_server.py::TestClass::test_method PASSED
|
||||
|
||||
========================= 1 passed in 0.01 seconds =========================
|
||||
|
||||
|
@ -73,10 +73,10 @@ You can also select on the class::
|
|||
|
||||
$ py.test -v test_server.py::TestClass
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_server.py@11::TestClass::test_method PASSED
|
||||
test_server.py::TestClass::test_method PASSED
|
||||
|
||||
========================= 1 passed in 0.01 seconds =========================
|
||||
|
||||
|
@ -84,11 +84,11 @@ Or select multiple nodes::
|
|||
|
||||
$ py.test -v test_server.py::TestClass test_server.py::test_send_http
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
|
||||
collecting ... collected 8 items
|
||||
|
||||
test_server.py@11::TestClass::test_method PASSED
|
||||
test_server.py@3::test_send_http PASSED
|
||||
test_server.py::TestClass::test_method PASSED
|
||||
test_server.py::test_send_http PASSED
|
||||
|
||||
========================= 2 passed in 0.01 seconds =========================
|
||||
|
||||
|
@ -120,10 +120,10 @@ select tests based on their names::
|
|||
|
||||
$ py.test -v -k http # running with the above defined example module
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_server.py@3::test_send_http PASSED
|
||||
test_server.py::test_send_http PASSED
|
||||
|
||||
====================== 3 tests deselected by '-khttp' ======================
|
||||
================== 1 passed, 3 deselected in 0.01 seconds ==================
|
||||
|
@ -132,12 +132,12 @@ And you can also run all tests except the ones that match the keyword::
|
|||
|
||||
$ py.test -k "not send_http" -v
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_server.py@6::test_something_quick PASSED
|
||||
test_server.py@8::test_another PASSED
|
||||
test_server.py@11::TestClass::test_method PASSED
|
||||
test_server.py::test_something_quick PASSED
|
||||
test_server.py::test_another PASSED
|
||||
test_server.py::TestClass::test_method PASSED
|
||||
|
||||
================= 1 tests deselected by '-knot send_http' ==================
|
||||
================== 3 passed, 1 deselected in 0.01 seconds ==================
|
||||
|
@ -146,11 +146,11 @@ Or to select "http" and "quick" tests::
|
|||
|
||||
$ py.test -k "http or quick" -v
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
|
||||
collecting ... collected 4 items
|
||||
|
||||
test_server.py@3::test_send_http PASSED
|
||||
test_server.py@6::test_something_quick PASSED
|
||||
test_server.py::test_send_http PASSED
|
||||
test_server.py::test_something_quick PASSED
|
||||
|
||||
================= 2 tests deselected by '-khttp or quick' ==================
|
||||
================== 2 passed, 2 deselected in 0.01 seconds ==================
|
||||
|
@ -187,7 +187,7 @@ You can ask which markers exist for your test suite - the list includes our just
|
|||
|
||||
@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
|
||||
|
||||
@pytest.mark.xfail(condition, reason=None, run=True): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html
|
||||
@pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html
|
||||
|
||||
@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2.see http://pytest.org/latest/parametrize.html for more info and examples.
|
||||
|
||||
|
@ -326,7 +326,7 @@ the test needs::
|
|||
|
||||
$ py.test -E stage2
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
|
||||
collected 1 items
|
||||
|
||||
test_someenv.py s
|
||||
|
@ -337,7 +337,7 @@ and here is one that specifies exactly the environment needed::
|
|||
|
||||
$ py.test -E stage1
|
||||
=========================== test session starts ============================
|
||||
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
|
||||
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
|
||||
collected 1 items
|
||||
|
||||
test_someenv.py .
|
||||
|

@@ -351,7 +351,7 @@ The ``--markers`` option always gives you a list of available markers::

@pytest.mark.skipif(condition): skip the given test function if eval(condition) results in a True value. Evaluation happens within the module global context. Example: skipif('sys.platform == "win32"') skips the test if we are on the win32 platform. see http://pytest.org/latest/skipping.html
@pytest.mark.xfail(condition, reason=None, run=True): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. See http://pytest.org/latest/skipping.html
@pytest.mark.xfail(condition, reason=None, run=True, raises=None): mark the test function as an expected failure if eval(condition) has a True value. Optionally specify a reason for better reporting and run=False if you don't even want to execute the test function. If only specific exception(s) are expected, you can list them in raises, and if the test fails in other ways, it will be reported as a true failure. See http://pytest.org/latest/skipping.html

@pytest.mark.parametrize(argnames, argvalues): call a test function multiple times passing in different arguments in turn. argvalues generally needs to be a list of values if argnames specifies only one name or a list of tuples of values if argnames specifies multiple names. Example: @parametrize('arg1', [1,2]) would lead to two calls of the decorated test function, one with arg1=1 and another with arg1=2. See http://pytest.org/latest/parametrize.html for more info and examples.

@@ -455,26 +455,26 @@ then you will see two test skipped and two executed tests as expected::

$ py.test -rs # this option reports skip reasons
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 4 items

test_plat.py s.s.
test_plat.py sss.
========================= short test summary info ==========================
SKIP [2] /tmp/doc-exec-142/conftest.py:12: cannot run on platform linux2
SKIP [3] /tmp/doc-exec-238/conftest.py:12: cannot run on platform linux

=================== 2 passed, 2 skipped in 0.01 seconds ====================
=================== 1 passed, 3 skipped in 0.01 seconds ====================

Note that if you specify a platform via the marker-command line option like this::

$ py.test -m linux2
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 4 items

test_plat.py .
test_plat.py s

=================== 3 tests deselected by "-m 'linux2'" ====================
================== 1 passed, 3 deselected in 0.01 seconds ==================
================= 1 skipped, 3 deselected in 0.01 seconds ==================

then the unmarked tests will not be run. It is thus a way to restrict the run to specific tests.
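
The behaviour shown here is driven by a conftest that auto-skips tests marked
for foreign platforms; roughly (a sketch following the docs' example)::

    # content of conftest.py
    import sys
    import pytest

    ALL = set("darwin linux2 win32".split())

    def pytest_runtest_setup(item):
        if isinstance(item, pytest.Function):
            plat = sys.platform
            if plat not in item.keywords:
                if ALL.intersection(item.keywords):
                    pytest.skip("cannot run on platform %s" % plat)

Under Python 3 ``sys.platform`` is ``linux`` rather than ``linux2``, which is
why the regenerated run above skips three tests instead of two.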

@@ -519,7 +519,7 @@ We can now use the ``-m option`` to select one set::

$ py.test -m interface --tb=short
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 4 items

test_module.py FF

@@ -540,7 +540,7 @@ or to select both "event" and "interface" tests::

$ py.test -m "interface or event" --tb=short
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 4 items

test_module.py FFF

@@ -559,4 +559,4 @@ or to select both "event" and "interface" tests::
assert 0
E assert 0
============= 1 tests deselected by "-m 'interface or event'" ==============
================== 3 failed, 1 deselected in 0.02 seconds ==================
================== 3 failed, 1 deselected in 0.01 seconds ==================

@@ -27,7 +27,7 @@ now execute the test specification::

nonpython $ py.test test_simple.yml
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items

test_simple.yml .F

@@ -56,11 +56,11 @@ consulted when reporting in ``verbose`` mode::

nonpython $ py.test -v
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
collecting ... collected 2 items

test_simple.yml@1::usecase: ok PASSED
test_simple.yml@1::usecase: hello FAILED
test_simple.yml::usecase: ok PASSED
test_simple.yml::usecase: hello FAILED

================================= FAILURES =================================
______________________________ usecase: hello ______________________________

@@ -74,7 +74,7 @@ interesting to just look at the collection tree::

nonpython $ py.test --collect-only
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items
<YamlFile 'test_simple.yml'>
<YamlItem 'ok'>

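The ``YamlFile``/``YamlItem`` nodes shown above come from a custom collector
in the example's conftest; an abridged sketch under the 2.6-era collection
API (error-reporting hooks omitted, PyYAML assumed installed)::

    # content of conftest.py
    import pytest

    def pytest_collect_file(parent, path):
        if path.ext == ".yml" and path.basename.startswith("test"):
            return YamlFile(path, parent)

    class YamlFile(pytest.File):
        def collect(self):
            import yaml  # needs the PyYAML package
            raw = yaml.safe_load(self.fspath.open())
            for name, spec in raw.items():
                yield YamlItem(name, self, spec)

    class YamlItem(pytest.Item):
        def __init__(self, name, parent, spec):
            super(YamlItem, self).__init__(name, parent)
            self.spec = spec

        def runtest(self):
            for key, value in self.spec.items():
                # the demo's trivial "check": every key must equal its value
                if key != value:
                    raise AssertionError("usecase failed: %s" % key)
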
@@ -106,7 +106,7 @@ this is a fully self-contained example which you can run with::

$ py.test test_scenarios.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 4 items

test_scenarios.py ....

@@ -118,7 +118,7 @@ If you just collect tests you'll also nicely see 'advanced' and 'basic' as varia

$ py.test --collect-only test_scenarios.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 4 items
<Module 'test_scenarios.py'>
<Class 'TestSampleWithScenarios'>
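
For context, the scenario machinery behind these runs is approximately the
docs' ``pytest_generate_tests`` hook (a sketch)::

    # content of test_scenarios.py
    def pytest_generate_tests(metafunc):
        idlist = []
        argvalues = []
        for scenario in metafunc.cls.scenarios:
            idlist.append(scenario[0])
            items = scenario[1].items()
            argnames = [x[0] for x in items]
            argvalues.append([x[1] for x in items])
        metafunc.parametrize(argnames, argvalues, ids=idlist, scope="class")

    scenario1 = ('basic', {'attribute': 'value'})
    scenario2 = ('advanced', {'attribute': 'value2'})

    class TestSampleWithScenarios:
        scenarios = [scenario1, scenario2]

        def test_demo1(self, attribute):
            assert isinstance(attribute, str)

        def test_demo2(self, attribute):
            assert isinstance(attribute, str)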

@@ -182,7 +182,7 @@ Let's first see how it looks like at collection time::

$ py.test test_backends.py --collect-only
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items
<Module 'test_backends.py'>
<Function 'test_db_initialized[d1]'>

@@ -197,7 +197,7 @@ And then when we run the test::
================================= FAILURES =================================
_________________________ test_db_initialized[d2] __________________________

db = <conftest.DB2 instance at 0x2b45c2b12050>
db = <conftest.DB2 object at 0x2b83684b5eb8>

def test_db_initialized(db):
    # a dummy test

@@ -251,9 +251,9 @@ argument sets to use for each test function. Let's run it::
$ py.test -q
F..
================================= FAILURES =================================
________________________ TestClass.test_equals[1-2] ________________________
________________________ TestClass.test_equals[2-1] ________________________

self = <test_parametrize.TestClass instance at 0x2acd519c6200>, a = 1, b = 2
self = <test_parametrize.TestClass object at 0x2ae94130e390>, a = 1, b = 2

def test_equals(self, a, b):
> assert a == b

@@ -281,10 +281,10 @@ Running it results in some skips if we don't have all the python interpreters in
. $ py.test -rs -q multipython.py
ssssssssssssssssssssssssssssssssssss......sssssssss......ssssssssssssssssss
========================= short test summary info ==========================
SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.4' not found
SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.8' not found
SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.5' not found
12 passed, 63 skipped in 0.66 seconds
SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.8' not found
SKIP [21] /home/hpk/p/pytest/doc/en/example/multipython.py:22: 'python2.4' not found
12 passed, 63 skipped in 0.65 seconds

Indirect parametrization of optional implementations/imports
--------------------------------------------------------------------

@@ -331,12 +331,12 @@ If you run this with reporting for skips enabled::

$ py.test -rs test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items

test_module.py .s
========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-144/conftest.py:10: could not import 'opt2'
SKIP [1] /tmp/doc-exec-240/conftest.py:10: could not import 'opt2'

=================== 1 passed, 1 skipped in 0.01 seconds ====================

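The skip message ``could not import 'opt2'`` is the one
``pytest.importorskip`` emits; the conftest driving this run is roughly
(a sketch after the docs' example)::

    # content of conftest.py
    import pytest

    @pytest.fixture(scope="session")
    def basemod(request):
        return pytest.importorskip("base")

    @pytest.fixture(scope="session", params=["opt1", "opt2"])
    def optmod(request):
        return pytest.importorskip(request.param)
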
@@ -43,7 +43,7 @@ then the test collection looks like this::

$ py.test --collect-only
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items
<Module 'check_myapp.py'>
<Class 'CheckMyApp'>

@@ -88,7 +88,7 @@ You can always peek at the collection tree without running tests like this::

. $ py.test --collect-only pythoncollection.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 3 items
<Module 'pythoncollection.py'>
<Function 'test_function'>

@@ -141,10 +141,8 @@ interpreters and will leave out the setup.py file::

$ py.test --collect-only
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
collected 1 items
<Module 'pkg/module_py2.py'>
<Function 'test_only_on_python2'>
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 0 items

============================= in 0.01 seconds =============================

@@ -13,7 +13,7 @@ get on the terminal - we are working on that):

assertion $ py.test failure_demo.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 39 items

failure_demo.py FFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFFF

@@ -30,7 +30,7 @@ get on the terminal - we are working on that):
failure_demo.py:15: AssertionError
_________________________ TestFailing.test_simple __________________________

self = <failure_demo.TestFailing object at 0x2afa614fb790>
self = <failure_demo.TestFailing object at 0x2aec3e52d470>

def test_simple(self):
    def f():

@@ -40,13 +40,13 @@ get on the terminal - we are working on that):

> assert f() == g()
E assert 42 == 43
E + where 42 = <function f at 0x2afa6158a5f0>()
E + and 43 = <function g at 0x2afa6158a7d0>()
E + where 42 = <function TestFailing.test_simple.<locals>.f at 0x2aec3e47b158>()
E + and 43 = <function TestFailing.test_simple.<locals>.g at 0x2aec3e47b268>()

failure_demo.py:28: AssertionError
____________________ TestFailing.test_simple_multiline _____________________

self = <failure_demo.TestFailing object at 0x2afa60d16b50>
self = <failure_demo.TestFailing object at 0x2aec3e474ac8>

def test_simple_multiline(self):
    otherfunc_multi(

@@ -66,19 +66,19 @@ get on the terminal - we are working on that):
failure_demo.py:11: AssertionError
___________________________ TestFailing.test_not ___________________________

self = <failure_demo.TestFailing object at 0x2afa61560ad0>
self = <failure_demo.TestFailing object at 0x2aec3e5156a0>

def test_not(self):
    def f():
        return 42
> assert not f()
E assert not 42
E + where 42 = <function f at 0x2afa6158a6e0>()
E + where 42 = <function TestFailing.test_not.<locals>.f at 0x2aec3e47e620>()

failure_demo.py:38: AssertionError
_________________ TestSpecialisedExplanations.test_eq_text _________________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa6154fc90>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e50cba8>

def test_eq_text(self):
> assert 'spam' == 'eggs'

@@ -89,7 +89,7 @@ get on the terminal - we are working on that):
failure_demo.py:42: AssertionError
_____________ TestSpecialisedExplanations.test_eq_similar_text _____________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa60da1d10>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4e24e0>

def test_eq_similar_text(self):
> assert 'foo 1 bar' == 'foo 2 bar'

@@ -102,7 +102,7 @@ get on the terminal - we are working on that):
failure_demo.py:45: AssertionError
____________ TestSpecialisedExplanations.test_eq_multiline_text ____________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa60d45a90>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4cc6d8>

def test_eq_multiline_text(self):
> assert 'foo\nspam\nbar' == 'foo\neggs\nbar'

@@ -115,7 +115,7 @@ get on the terminal - we are working on that):
failure_demo.py:48: AssertionError
______________ TestSpecialisedExplanations.test_eq_long_text _______________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa60d0de50>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e501908>

def test_eq_long_text(self):
    a = '1'*100 + 'a' + '2'*100

@@ -132,7 +132,7 @@ get on the terminal - we are working on that):
failure_demo.py:53: AssertionError
_________ TestSpecialisedExplanations.test_eq_long_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa6154fbd0>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e3af048>

def test_eq_long_text_multiline(self):
    a = '1\n'*100 + 'a' + '2\n'*100

@@ -156,7 +156,7 @@ get on the terminal - we are working on that):
failure_demo.py:58: AssertionError
_________________ TestSpecialisedExplanations.test_eq_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa60d16290>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e474c50>

def test_eq_list(self):
> assert [0, 1, 2] == [0, 1, 3]

@@ -166,7 +166,7 @@ get on the terminal - we are working on that):
failure_demo.py:61: AssertionError
______________ TestSpecialisedExplanations.test_eq_list_long _______________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa60da1c50>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e515dd8>

def test_eq_list_long(self):
    a = [0]*100 + [1] + [3]*100

@@ -178,7 +178,7 @@ get on the terminal - we are working on that):
failure_demo.py:66: AssertionError
_________________ TestSpecialisedExplanations.test_eq_dict _________________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa60d45d90>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4a5ef0>

def test_eq_dict(self):
> assert {'a': 0, 'b': 1, 'c': 0} == {'a': 0, 'b': 2, 'd': 0}

@@ -194,7 +194,7 @@ get on the terminal - we are working on that):
failure_demo.py:69: AssertionError
_________________ TestSpecialisedExplanations.test_eq_set __________________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa614fb3d0>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4a2e48>

def test_eq_set(self):
> assert set([0, 10, 11, 12]) == set([0, 20, 21])

@@ -210,7 +210,7 @@ get on the terminal - we are working on that):
failure_demo.py:72: AssertionError
_____________ TestSpecialisedExplanations.test_eq_longer_list ______________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa61560bd0>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4e0c50>

def test_eq_longer_list(self):
> assert [1,2] == [1,2,3]

@@ -220,7 +220,7 @@ get on the terminal - we are working on that):
failure_demo.py:75: AssertionError
_________________ TestSpecialisedExplanations.test_in_list _________________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa6154fc10>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4950f0>

def test_in_list(self):
> assert 1 in [0, 2, 3, 4, 5]

@@ -229,7 +229,7 @@ get on the terminal - we are working on that):
failure_demo.py:78: AssertionError
__________ TestSpecialisedExplanations.test_not_in_text_multiline __________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa60d0db50>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e474f98>

def test_not_in_text_multiline(self):
    text = 'some multiline\ntext\nwhich\nincludes foo\nand a\ntail'

@@ -247,7 +247,7 @@ get on the terminal - we are working on that):
failure_demo.py:82: AssertionError
___________ TestSpecialisedExplanations.test_not_in_text_single ____________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa61548810>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e5333c8>

def test_not_in_text_single(self):
    text = 'single foo line'

@@ -260,7 +260,7 @@ get on the terminal - we are working on that):
failure_demo.py:86: AssertionError
_________ TestSpecialisedExplanations.test_not_in_text_single_long _________

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa614f9fd0>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e4ccb70>

def test_not_in_text_single_long(self):
    text = 'head ' * 50 + 'foo ' + 'tail ' * 20

@@ -273,7 +273,7 @@ get on the terminal - we are working on that):
failure_demo.py:90: AssertionError
______ TestSpecialisedExplanations.test_not_in_text_single_long_term _______

self = <failure_demo.TestSpecialisedExplanations object at 0x2afa60da1d50>
self = <failure_demo.TestSpecialisedExplanations object at 0x2aec3e502080>

def test_not_in_text_single_long_term(self):
    text = 'head ' * 50 + 'f'*70 + 'tail ' * 20

@@ -292,7 +292,7 @@ get on the terminal - we are working on that):
i = Foo()
> assert i.b == 2
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2afa61548510>.b
E + where 1 = <failure_demo.test_attribute.<locals>.Foo object at 0x2aec3e519c18>.b

failure_demo.py:101: AssertionError
_________________________ test_attribute_instance __________________________

@@ -302,8 +302,8 @@ get on the terminal - we are working on that):
b = 1
> assert Foo().b == 2
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2afa60d16610>.b
E + where <failure_demo.Foo object at 0x2afa60d16610> = <class 'failure_demo.Foo'>()
E + where 1 = <failure_demo.test_attribute_instance.<locals>.Foo object at 0x2aec3e52d898>.b
E + where <failure_demo.test_attribute_instance.<locals>.Foo object at 0x2aec3e52d898> = <class 'failure_demo.test_attribute_instance.<locals>.Foo'>()

failure_demo.py:107: AssertionError
__________________________ test_attribute_failure __________________________

@@ -319,7 +319,7 @@ get on the terminal - we are working on that):
failure_demo.py:116:
_ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _

self = <failure_demo.Foo object at 0x2afa614fb1d0>
self = <failure_demo.test_attribute_failure.<locals>.Foo object at 0x2aec3e4e0b38>

def _get_b(self):
> raise Exception('Failed to get attrib')

@@ -335,15 +335,15 @@ get on the terminal - we are working on that):
b = 2
> assert Foo().b == Bar().b
E assert 1 == 2
E + where 1 = <failure_demo.Foo object at 0x2afa60da1f50>.b
E + where <failure_demo.Foo object at 0x2afa60da1f50> = <class 'failure_demo.Foo'>()
E + and 2 = <failure_demo.Bar object at 0x2afa61505c50>.b
E + where <failure_demo.Bar object at 0x2afa61505c50> = <class 'failure_demo.Bar'>()
E + where 1 = <failure_demo.test_attribute_multiple.<locals>.Foo object at 0x2aec3e4a5748>.b
E + where <failure_demo.test_attribute_multiple.<locals>.Foo object at 0x2aec3e4a5748> = <class 'failure_demo.test_attribute_multiple.<locals>.Foo'>()
E + and 2 = <failure_demo.test_attribute_multiple.<locals>.Bar object at 0x2aec3e4a51d0>.b
E + where <failure_demo.test_attribute_multiple.<locals>.Bar object at 0x2aec3e4a51d0> = <class 'failure_demo.test_attribute_multiple.<locals>.Bar'>()

failure_demo.py:124: AssertionError
__________________________ TestRaises.test_raises __________________________

self = <failure_demo.TestRaises instance at 0x2afa60d78440>
self = <failure_demo.TestRaises object at 0x2aec3e4a2d68>

def test_raises(self):
    s = 'qwe'

@@ -355,10 +355,10 @@ get on the terminal - we are working on that):
> int(s)
E ValueError: invalid literal for int() with base 10: 'qwe'

<0-codegen /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:1028>:1: ValueError
<0-codegen /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1028>:1: ValueError
______________________ TestRaises.test_raises_doesnt _______________________

self = <failure_demo.TestRaises instance at 0x2afa6153a7a0>
self = <failure_demo.TestRaises object at 0x2aec3e4e2198>

def test_raises_doesnt(self):
> raises(IOError, "int('3')")

@@ -367,7 +367,7 @@ get on the terminal - we are working on that):
failure_demo.py:136: Failed
__________________________ TestRaises.test_raise ___________________________

self = <failure_demo.TestRaises instance at 0x2afa61542128>
self = <failure_demo.TestRaises object at 0x2aec3e5017b8>

def test_raise(self):
> raise ValueError("demo error")

@@ -376,7 +376,7 @@ get on the terminal - we are working on that):
failure_demo.py:139: ValueError
________________________ TestRaises.test_tupleerror ________________________

self = <failure_demo.TestRaises instance at 0x2afa60dc9e60>
self = <failure_demo.TestRaises object at 0x2aec3e533160>

def test_tupleerror(self):
> a,b = [1]

@@ -385,7 +385,7 @@ get on the terminal - we are working on that):
failure_demo.py:142: ValueError
______ TestRaises.test_reinterpret_fails_with_print_for_the_fun_of_it ______

self = <failure_demo.TestRaises instance at 0x2afa60d69b90>
self = <failure_demo.TestRaises object at 0x2aec3e4cc438>

def test_reinterpret_fails_with_print_for_the_fun_of_it(self):
    l = [1,2,3]

@@ -398,11 +398,11 @@ get on the terminal - we are working on that):
l is [1, 2, 3]
________________________ TestRaises.test_some_error ________________________

self = <failure_demo.TestRaises instance at 0x2afa60d5c680>
self = <failure_demo.TestRaises object at 0x2aec3e5199e8>

def test_some_error(self):
> if namenotexi:
E NameError: global name 'namenotexi' is not defined
E NameError: name 'namenotexi' is not defined

failure_demo.py:150: NameError
____________________ test_dynamic_compile_shows_nicely _____________________

@@ -426,7 +426,7 @@ get on the terminal - we are working on that):
<2-codegen 'abc-123' /home/hpk/p/pytest/doc/en/example/assertion/failure_demo.py:162>:2: AssertionError
____________________ TestMoreErrors.test_complex_error _____________________

self = <failure_demo.TestMoreErrors instance at 0x2afa60d6b1b8>
self = <failure_demo.TestMoreErrors object at 0x2aec3e515cf8>

def test_complex_error(self):
    def f():

@@ -450,7 +450,7 @@ get on the terminal - we are working on that):
failure_demo.py:5: AssertionError
___________________ TestMoreErrors.test_z1_unpack_error ____________________

self = <failure_demo.TestMoreErrors instance at 0x2afa61546ef0>
self = <failure_demo.TestMoreErrors object at 0x2aec3e4f7a58>

def test_z1_unpack_error(self):
    l = []

@@ -460,7 +460,7 @@ get on the terminal - we are working on that):
failure_demo.py:179: ValueError
____________________ TestMoreErrors.test_z2_type_error _____________________

self = <failure_demo.TestMoreErrors instance at 0x2afa60d5e680>
self = <failure_demo.TestMoreErrors object at 0x2aec3e52db38>

def test_z2_type_error(self):
    l = 3

@@ -470,19 +470,19 @@ get on the terminal - we are working on that):
failure_demo.py:183: TypeError
______________________ TestMoreErrors.test_startswith ______________________

self = <failure_demo.TestMoreErrors instance at 0x2afa60d697e8>
self = <failure_demo.TestMoreErrors object at 0x2aec3e538a58>

def test_startswith(self):
    s = "123"
    g = "456"
> assert s.startswith(g)
E assert <built-in method startswith of str object at 0x2afa61549a08>('456')
E + where <built-in method startswith of str object at 0x2afa61549a08> = '123'.startswith
E assert <built-in method startswith of str object at 0x2aec3e501420>('456')
E + where <built-in method startswith of str object at 0x2aec3e501420> = '123'.startswith

failure_demo.py:188: AssertionError
__________________ TestMoreErrors.test_startswith_nested ___________________

self = <failure_demo.TestMoreErrors instance at 0x2afa60d4dfc8>
self = <failure_demo.TestMoreErrors object at 0x2aec3e4f1b00>

def test_startswith_nested(self):
    def f():

@@ -490,15 +490,15 @@ get on the terminal - we are working on that):
    def g():
        return "456"
> assert f().startswith(g())
E assert <built-in method startswith of str object at 0x2afa61549a08>('456')
E + where <built-in method startswith of str object at 0x2afa61549a08> = '123'.startswith
E + where '123' = <function f at 0x2afa60d37b90>()
E + and '456' = <function g at 0x2afa60d37e60>()
E assert <built-in method startswith of str object at 0x2aec3e501420>('456')
E + where <built-in method startswith of str object at 0x2aec3e501420> = '123'.startswith
E + where '123' = <function TestMoreErrors.test_startswith_nested.<locals>.f at 0x2aec3e5572f0>()
E + and '456' = <function TestMoreErrors.test_startswith_nested.<locals>.g at 0x2aec3e557268>()

failure_demo.py:195: AssertionError
_____________________ TestMoreErrors.test_global_func ______________________

self = <failure_demo.TestMoreErrors instance at 0x2afa60d4ecf8>
self = <failure_demo.TestMoreErrors object at 0x2aec3e495438>

def test_global_func(self):
> assert isinstance(globf(42), float)

@@ -508,18 +508,18 @@ get on the terminal - we are working on that):
failure_demo.py:198: AssertionError
_______________________ TestMoreErrors.test_instance _______________________

self = <failure_demo.TestMoreErrors instance at 0x2afa614fea28>
self = <failure_demo.TestMoreErrors object at 0x2aec3e567240>

def test_instance(self):
    self.x = 6*7
> assert self.x != 42
E assert 42 != 42
E + where 42 = <failure_demo.TestMoreErrors instance at 0x2afa614fea28>.x
E + where 42 = <failure_demo.TestMoreErrors object at 0x2aec3e567240>.x

failure_demo.py:202: AssertionError
_______________________ TestMoreErrors.test_compare ________________________

self = <failure_demo.TestMoreErrors instance at 0x2afa614fe0e0>
self = <failure_demo.TestMoreErrors object at 0x2aec3e502cc0>

def test_compare(self):
> assert globf(10) < 5

@@ -529,7 +529,7 @@ get on the terminal - we are working on that):
failure_demo.py:205: AssertionError
_____________________ TestMoreErrors.test_try_finally ______________________

self = <failure_demo.TestMoreErrors instance at 0x2afa60d6b830>
self = <failure_demo.TestMoreErrors object at 0x2aec3e5197f0>

def test_try_finally(self):
    x = 1

@@ -538,4 +538,4 @@ get on the terminal - we are working on that):
E assert 1 == 0

failure_demo.py:210: AssertionError
======================== 39 failed in 0.21 seconds =========================
======================== 39 failed in 0.22 seconds =========================

@@ -108,7 +108,7 @@ directory with the above conftest.py::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 0 items

============================= in 0.00 seconds =============================

@@ -152,12 +152,12 @@ and when running it will see a skipped "slow" test::

$ py.test -rs # "-rs" means report details on the little 's'
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items

test_module.py .s
========================= short test summary info ==========================
SKIP [1] /tmp/doc-exec-147/conftest.py:9: need --runslow option to run
SKIP [1] /tmp/doc-exec-243/conftest.py:9: need --runslow option to run

=================== 1 passed, 1 skipped in 0.01 seconds ====================
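
The skip reason shown here comes from a conftest hook along these lines
(a minimal sketch of the docs' ``--runslow`` example)::

    # content of conftest.py
    import pytest

    def pytest_addoption(parser):
        parser.addoption("--runslow", action="store_true",
            help="run slow tests")

    def pytest_runtest_setup(item):
        if 'slow' in item.keywords and not item.config.getoption("--runslow"):
            pytest.skip("need --runslow option to run")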

@@ -165,7 +165,7 @@ Or run it including the ``slow`` marked test::

$ py.test --runslow
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items

test_module.py ..

@@ -256,7 +256,7 @@ which will add the string to the test header accordingly::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
project deps: mylib-1.1
collected 0 items

@@ -279,7 +279,7 @@ which will add info only when run with "--v"::

$ py.test -v
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
info1: did you know that ...
did you?
collecting ... collected 0 items
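
Both header lines above are produced by the ``pytest_report_header`` hook;
a combined sketch of the two conftest variants under discussion::

    # content of conftest.py
    def pytest_report_header(config):
        if config.option.verbose > 0:
            return ["info1: did you know that ...", "did you?"]
        return "project deps: mylib-1.1"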

@@ -290,7 +290,7 @@ and nothing when run plainly::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 0 items

============================= in 0.00 seconds =============================

@@ -322,7 +322,7 @@ Now we can profile which test functions execute the slowest::

$ py.test --durations=3
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 3 items

test_some_are_slow.py ...
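
For reference, the module being profiled is approximately the docs'
``test_some_are_slow.py`` (a sketch)::

    # content of test_some_are_slow.py
    import time

    def test_funcfast():
        pass

    def test_funcslow1():
        time.sleep(0.1)

    def test_funcslow2():
        time.sleep(0.2)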

@@ -383,7 +383,7 @@ If we run this::

$ py.test -rx
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 4 items

test_step.py .Fx.

@@ -391,7 +391,7 @@ If we run this::
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________

self = <test_step.TestUserHandling instance at 0x2aca13f66e18>
self = <test_step.TestUserHandling object at 0x2b2ef2a4feb8>

def test_modification(self):
> assert 0

@@ -453,7 +453,7 @@ We can run this::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 7 items

test_step.py .Fx.

@@ -463,17 +463,17 @@ We can run this::

================================== ERRORS ==================================
_______________________ ERROR at setup of test_root ________________________
file /tmp/doc-exec-147/b/test_error.py, line 1
file /tmp/doc-exec-243/b/test_error.py, line 1
def test_root(db): # no db here, will error out
fixture 'db' not found
available fixtures: tmpdir, monkeypatch, pytestconfig, recwarn, capsys, capfd
available fixtures: tmpdir, monkeypatch, capsys, capfd, pytestconfig, recwarn
use 'py.test --fixtures [testpath]' for help on them.

/tmp/doc-exec-147/b/test_error.py:1
/tmp/doc-exec-243/b/test_error.py:1
================================= FAILURES =================================
____________________ TestUserHandling.test_modification ____________________

self = <test_step.TestUserHandling instance at 0x2afc14d78e18>
self = <test_step.TestUserHandling object at 0x2b63a7aec710>

def test_modification(self):
> assert 0

@@ -482,20 +482,20 @@ We can run this::
test_step.py:9: AssertionError
_________________________________ test_a1 __________________________________

db = <conftest.DB instance at 0x2afc145495a8>
db = <conftest.DB object at 0x2b63a7b04470>

def test_a1(db):
> assert 0, db # to show value
E AssertionError: <conftest.DB instance at 0x2afc145495a8>
E AssertionError: <conftest.DB object at 0x2b63a7b04470>

a/test_db.py:2: AssertionError
_________________________________ test_a2 __________________________________

db = <conftest.DB instance at 0x2afc145495a8>
db = <conftest.DB object at 0x2b63a7b04470>

def test_a2(db):
> assert 0, db # to show value
E AssertionError: <conftest.DB instance at 0x2afc145495a8>
E AssertionError: <conftest.DB object at 0x2b63a7b04470>

a/test_db2.py:2: AssertionError
========== 3 failed, 2 passed, 1 xfailed, 1 error in 0.03 seconds ==========

@@ -553,7 +553,7 @@ and run them::

$ py.test test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 2 items

test_module.py FF

@@ -561,7 +561,7 @@ and run them::
================================= FAILURES =================================
________________________________ test_fail1 ________________________________

tmpdir = local('/tmp/pytest-28/test_fail10')
tmpdir = local('/tmp/pytest-509/test_fail10')

def test_fail1(tmpdir):
> assert 0

@@ -575,12 +575,12 @@ and run them::
E assert 0

test_module.py:4: AssertionError
========================= 2 failed in 0.01 seconds =========================
========================= 2 failed in 0.02 seconds =========================

you will have a "failures" file which contains the failing test ids::

$ cat failures
test_module.py::test_fail1 (/tmp/pytest-28/test_fail10)
test_module.py::test_fail1 (/tmp/pytest-509/test_fail10)
test_module.py::test_fail2

Making test result information available in fixtures

@@ -642,41 +642,29 @@ if you then have failing tests::
and run it::

$ py.test -s test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
collected 3 items
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 513, in getconftestmodules
    return self._path2confmods[path]
KeyError: local('/tmp/doc-exec-243/test_module.py')

test_module.py Esetting up a test failed! test_module.py::test_setup_fails
Fexecuting test failed test_module.py::test_call_fails
F

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 537, in importconftest
    return self._conftestpath2mod[conftestpath]
KeyError: local('/tmp/doc-exec-243/conftest.py')

================================== ERRORS ==================================
____________________ ERROR at setup of test_setup_fails ____________________
During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 543, in importconftest
    mod = conftestpath.pyimport()
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py", line 620, in pyimport
    __import__(modname)
  File "/tmp/doc-exec-243/conftest.py", line 22
    print "setting up a test failed!", request.node.nodeid
    ^
SyntaxError: invalid syntax
ERROR: could not load /tmp/doc-exec-243/conftest.py

@pytest.fixture
def other():
> assert 0
E assert 0

test_module.py:6: AssertionError
================================= FAILURES =================================
_____________________________ test_call_fails ______________________________

something = None

def test_call_fails(something):
> assert 0
E assert 0

test_module.py:12: AssertionError
________________________________ test_fail2 ________________________________

def test_fail2():
> assert 0
E assert 0

test_module.py:15: AssertionError
==================== 2 failed, 1 error in 0.01 seconds =====================

You'll see that the fixture finalizers could use the precise reporting
information.

@@ -730,4 +718,5 @@ over to ``pytest`` instead. For example::
This makes it convenient to execute your tests from within your frozen
application, using standard ``py.test`` command-line options::

$ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/
$ ./app_main --pytest --verbose --tb=long --junit-xml=results.xml test-suite/
/bin/sh: 1: ./app_main: not found

@@ -60,13 +60,26 @@ will be called ahead of running any tests::
If you run this without output capturing::

$ py.test -q -s test_module.py
callattr_ahead_of_alltests called
callme called!
callme other called
SomeTest callme called
test_method1 called
.test_method1 called
.test other
.test_unit1 method called
.
4 passed in 0.03 seconds
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 513, in getconftestmodules
    return self._path2confmods[path]
KeyError: local('/tmp/doc-exec-244/test_module.py')

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 537, in importconftest
    return self._conftestpath2mod[conftestpath]
KeyError: local('/tmp/doc-exec-244/conftest.py')

During handling of the above exception, another exception occurred:
Traceback (most recent call last):
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/config.py", line 543, in importconftest
    mod = conftestpath.pyimport()
  File "/home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py", line 620, in pyimport
    __import__(modname)
  File "/tmp/doc-exec-244/conftest.py", line 6
    print "callattr_ahead_of_alltests called"
    ^
SyntaxError: invalid syntax
ERROR: could not load /tmp/doc-exec-244/conftest.py

@@ -25,6 +25,6 @@ def test_hello6():
    pytest.xfail("reason")

@xfail(raises=IndexError)
def test_hello7()
def test_hello7():
    x = []
    x[1] = 1

@@ -76,7 +76,7 @@ marked ``smtp`` fixture function. Running the test looks like this::

$ py.test test_smtpsimple.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1
collected 1 items

test_smtpsimple.py F

@@ -84,17 +84,16 @@ marked ``smtp`` fixture function. Running the test looks like this::
================================= FAILURES =================================
________________________________ test_ehlo _________________________________

smtp = <smtplib.SMTP instance at 0x2b8dbdd43638>
smtp = <smtplib.SMTP object at 0x2ade77b37e48>

def test_ehlo(smtp):
    response, msg = smtp.ehlo()
    assert response == 250
    assert "merlinux" in msg
> assert 0 # for demo purposes
E assert 0
> assert "merlinux" in msg
E TypeError: Type str doesn't support the buffer API

test_smtpsimple.py:12: AssertionError
========================= 1 failed in 0.15 seconds =========================
test_smtpsimple.py:11: TypeError
========================= 1 failed in 0.18 seconds =========================

In the failure traceback we see that the test function was called with a
``smtp`` argument, the ``smtplib.SMTP()`` instance created by the fixture
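
(For reference, the module under test is approximately the docs'
``test_smtpsimple.py``; a sketch, server name assumed from the output above)::

    # content of test_smtpsimple.py
    import pytest
    import smtplib

    @pytest.fixture
    def smtp():
        return smtplib.SMTP("merlinux.eu")

    def test_ehlo(smtp):
        response, msg = smtp.ehlo()
        assert response == 250
        assert "merlinux" in msg
        assert 0  # for demo purposes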

@@ -194,7 +193,7 @@ inspect what is going on and can now run the tests::

$ py.test test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1
collected 2 items

test_module.py FF

@@ -202,19 +201,18 @@ inspect what is going on and can now run the tests::
================================= FAILURES =================================
________________________________ test_ehlo _________________________________

smtp = <smtplib.SMTP instance at 0x2b0d30a59f38>
smtp = <smtplib.SMTP object at 0x2b4b07e38e48>

def test_ehlo(smtp):
    response = smtp.ehlo()
    assert response[0] == 250
    assert "merlinux" in response[1]
> assert 0 # for demo purposes
E assert 0
> assert "merlinux" in response[1]
E TypeError: Type str doesn't support the buffer API

test_module.py:6: AssertionError
test_module.py:5: TypeError
________________________________ test_noop _________________________________

smtp = <smtplib.SMTP instance at 0x2b0d30a59f38>
smtp = <smtplib.SMTP object at 0x2b4b07e38e48>

def test_noop(smtp):
    response = smtp.noop()

@@ -223,7 +221,7 @@ inspect what is going on and can now run the tests::
E assert 0

test_module.py:11: AssertionError
========================= 2 failed in 0.16 seconds =========================
========================= 2 failed in 0.18 seconds =========================

You see the two ``assert 0`` failing and more importantly you can also see
that the same (module-scoped) ``smtp`` object was passed into the two

@@ -332,7 +330,7 @@ Running it::
______________________________ test_showhelo _______________________________
test_anothersmtp.py:5: in test_showhelo
    assert 0, smtp.helo()
E AssertionError: (250, 'mail.python.org')
E AssertionError: (250, b'mail.python.org')

voila! The ``smtp`` fixture function picked up our mail server name
from the module namespace.

@@ -379,19 +377,18 @@ So let's just do another run::
================================= FAILURES =================================
__________________________ test_ehlo[merlinux.eu] __________________________

smtp = <smtplib.SMTP instance at 0x2ba3fee43950>
smtp = <smtplib.SMTP object at 0x2b824acf3e80>

def test_ehlo(smtp):
    response = smtp.ehlo()
    assert response[0] == 250
    assert "merlinux" in response[1]
> assert 0 # for demo purposes
E assert 0
> assert "merlinux" in response[1]
E TypeError: Type str doesn't support the buffer API

test_module.py:6: AssertionError
test_module.py:5: TypeError
__________________________ test_noop[merlinux.eu] __________________________

smtp = <smtplib.SMTP instance at 0x2ba3fee43950>
smtp = <smtplib.SMTP object at 0x2b824acf3e80>

def test_noop(smtp):
    response = smtp.noop()

@@ -402,20 +399,20 @@ So let's just do another run::
test_module.py:11: AssertionError
________________________ test_ehlo[mail.python.org] ________________________

smtp = <smtplib.SMTP instance at 0x2ba3fedf9ea8>
smtp = <smtplib.SMTP object at 0x2b824b19fb38>

def test_ehlo(smtp):
    response = smtp.ehlo()
    assert response[0] == 250
> assert "merlinux" in response[1]
E assert 'merlinux' in 'mail.python.org\nSIZE 25600000\nETRN\nSTARTTLS\nENHANCEDSTATUSCODES\n8BITMIME\nDSN\nSMTPUTF8'
E TypeError: Type str doesn't support the buffer API

test_module.py:5: AssertionError
test_module.py:5: TypeError
-------------------------- Captured stdout setup ---------------------------
finalizing <smtplib.SMTP instance at 0x2ba3fee43950>
finalizing <smtplib.SMTP object at 0x2b824acf3e80>
________________________ test_noop[mail.python.org] ________________________

smtp = <smtplib.SMTP instance at 0x2ba3fedf9ea8>
smtp = <smtplib.SMTP object at 0x2b824b19fb38>

def test_noop(smtp):
    response = smtp.noop()

@@ -424,7 +421,7 @@ So let's just do another run::
E assert 0

test_module.py:11: AssertionError
4 failed in 5.62 seconds
4 failed in 6.37 seconds

We see that our two test functions each ran twice, against the different
``smtp`` instances. Note also, that with the ``mail.python.org``
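
(The parametrized fixture driving these four runs is roughly the docs'
module-scoped ``smtp`` fixture; a sketch)::

    # content of conftest.py
    import pytest
    import smtplib

    @pytest.fixture(scope="module",
                    params=["merlinux.eu", "mail.python.org"])
    def smtp(request):
        smtp = smtplib.SMTP(request.param)
        def fin():
            print ("finalizing %s" % smtp)
            smtp.close()
        request.addfinalizer(fin)
        return smtp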

@@ -464,13 +461,13 @@ Here we declare an ``app`` fixture which receives the previously defined

$ py.test -v test_appsetup.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
collecting ... collected 2 items

test_appsetup.py@12::test_smtp_exists[merlinux.eu] PASSED
test_appsetup.py@12::test_smtp_exists[mail.python.org] PASSED
test_appsetup.py::test_smtp_exists[merlinux.eu] PASSED
test_appsetup.py::test_smtp_exists[mail.python.org] PASSED

========================= 2 passed in 6.27 seconds =========================
========================= 2 passed in 6.11 seconds =========================

Due to the parametrization of ``smtp`` the test will run twice with two
different ``App`` instances and respective smtp servers. There is no

@@ -508,7 +505,7 @@ to show the setup/teardown flow::
@pytest.fixture(scope="module", params=["mod1", "mod2"])
def modarg(request):
    param = request.param
    print "create", param
    print ("create", param)
    def fin():
        print ("fin %s" % param)
    return param

@@ -518,36 +515,36 @@ to show the setup/teardown flow::
    return request.param

def test_0(otherarg):
    print " test0", otherarg
    print (" test0", otherarg)
def test_1(modarg):
    print " test1", modarg
    print (" test1", modarg)
def test_2(otherarg, modarg):
    print " test2", otherarg, modarg
    print (" test2", otherarg, modarg)

Let's run the tests in verbose mode and with looking at the print-output::

$ py.test -v -s test_module.py
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0 -- /home/hpk/p/pytest/.tox/regen/bin/python
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.2.dev1 -- /home/hpk/p/pytest/.tox/regen/bin/python3.4
collecting ... collected 8 items

test_module.py@15::test_0[1] test0 1
test_module.py::test_0[1] test0 1
PASSED
test_module.py@15::test_0[2] test0 2
test_module.py::test_0[2] test0 2
PASSED
test_module.py@17::test_1[mod1] create mod1
test_module.py::test_1[mod1] create mod1
test1 mod1
PASSED
test_module.py@19::test_2[1-mod1] test2 1 mod1
test_module.py::test_2[1-mod1] test2 1 mod1
PASSED
test_module.py@19::test_2[2-mod1] test2 2 mod1
test_module.py::test_2[2-mod1] test2 2 mod1
PASSED
test_module.py@17::test_1[mod2] create mod2
test_module.py::test_1[mod2] create mod2
test1 mod2
PASSED
test_module.py@19::test_2[1-mod2] test2 1 mod2
test_module.py::test_2[1-mod2] test2 1 mod2
PASSED
test_module.py@19::test_2[2-mod2] test2 2 mod2
test_module.py::test_2[2-mod2] test2 2 mod2
PASSED

========================= 8 passed in 0.01 seconds =========================

@@ -27,7 +27,7 @@ Installation options::
To check your installation has installed the correct version::

$ py.test --version
This is pytest version 2.6.0, imported from /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/pytest.pyc
This is pytest version 2.6.1, imported from /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/pytest.py

If you get an error checkout :ref:`installation issues`.

@@ -49,7 +49,7 @@ That's it. You can execute the test function now::

$ py.test
=========================== test session starts ============================
platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
collected 1 items

test_sample.py F

@@ -127,7 +127,7 @@ run the module by passing its filename::
================================= FAILURES =================================
____________________________ TestClass.test_two ____________________________

self = <test_class.TestClass instance at 0x2b0b0ac73098>
self = <test_class.TestClass object at 0x2ad4b005b710>

def test_two(self):
    x = "hello"
@@ -159,21 +159,18 @@ We list the name ``tmpdir`` in the test function signature and
 before performing the test function call. Let's just run it::

     $ py.test -q test_tmpdir.py
-    F
-    ================================= FAILURES =================================
-    _____________________________ test_needsfiles ______________________________
-
-    tmpdir = local('/tmp/pytest-24/test_needsfiles0')
-
-        def test_needsfiles(tmpdir):
-            print tmpdir
-    >       assert 0
-    E       assert 0
-
-    test_tmpdir.py:3: AssertionError
-    --------------------------- Captured stdout call ---------------------------
-    /tmp/pytest-24/test_needsfiles0
-    1 failed in 0.01 seconds
+    ================================== ERRORS ==================================
+    _____________________ ERROR collecting test_tmpdir.py ______________________
+    /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:463: in _importtestmodule
+        mod = self.fspath.pyimport(ensuresyspath=True)
+    /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/py/_path/local.py:620: in pyimport
+        __import__(modname)
+    E     File "/tmp/doc-exec-187/test_tmpdir.py", line 2
+    E       print tmpdir
+    E             ^
+    E   SyntaxError: invalid syntax
+    1 error in 0.03 seconds

 Before the test runs, a unique-per-test-invocation temporary directory
 was created. More info at :ref:`tmpdir handling`.
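Note that the regenerated output above is now a collection error rather than the intended test failure: the doc example still uses the Python 2 ``print`` statement, which fails to even import under the Python 3.4 interpreter that regenerated the docs. A version that parses on both interpreters would use the function form (a sketch, not part of this commit)::

    def test_needsfiles(tmpdir):
        print(tmpdir)   # print() works on Python 2.7 and 3.x alike
        assert 0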
@@ -53,7 +53,7 @@ them in turn::

     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
     collected 3 items

     test_expectation.py ..F
@@ -100,7 +100,7 @@ Let's run this::

     $ py.test
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
     collected 3 items

     test_expectation.py ..x
@@ -170,8 +170,8 @@ Let's also run with a stringinput that will lead to a failing test::

     def test_valid_string(stringinput):
 >       assert stringinput.isalpha()
-E       assert <built-in method isalpha of str object at 0x2b7e70b5d210>()
-E        +  where <built-in method isalpha of str object at 0x2b7e70b5d210> = '!'.isalpha
+E       assert <built-in method isalpha of str object at 0x2ab7463a6b58>()
+E        +  where <built-in method isalpha of str object at 0x2ab7463a6b58> = '!'.isalpha

     test_strings.py:3: AssertionError
     1 failed in 0.01 seconds
@@ -185,7 +185,7 @@ listlist::

     $ py.test -q -rs test_strings.py
     s
     ========================= short test summary info ==========================
-    SKIP [1] /home/hpk/p/pytest/.tox/regen/local/lib/python2.7/site-packages/_pytest/python.py:1139: got empty parameter set, function test_valid_string at /tmp/doc-exec-100/test_strings.py:1
+    SKIP [1] /home/hpk/p/pytest/.tox/regen/lib/python3.4/site-packages/_pytest/python.py:1139: got empty parameter set, function test_valid_string at /tmp/doc-exec-195/test_strings.py:1
     1 skipped in 0.01 seconds

 For further examples, you might want to look at :ref:`more
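For context, the ``stringinput`` parameter in the runs above is generated from a command-line option; when no ``--stringinput`` is given, the parameter set is empty and the test is reported as skipped. A sketch of the usual conftest wiring (names follow the pytest docs, not this diff)::

    # conftest.py
    def pytest_addoption(parser):
        parser.addoption("--stringinput", action="append", default=[],
                         help="list of stringinputs to pass to test functions")

    def pytest_generate_tests(metafunc):
        if "stringinput" in metafunc.fixturenames:
            metafunc.parametrize("stringinput",
                                 metafunc.config.option.stringinput)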
@@ -164,10 +164,10 @@ Running it with the report-on-xfail option gives this output::

     example $ py.test -rx xfail_demo.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
-    collected 6 items
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
+    collected 7 items

-    xfail_demo.py xxxxxx
+    xfail_demo.py xxxxxxx
     ========================= short test summary info ==========================
     XFAIL xfail_demo.py::test_hello
     XFAIL xfail_demo.py::test_hello2
@@ -180,8 +180,9 @@ Running it with the report-on-xfail option gives this output::
       condition: pytest.__version__[0] != "17"
     XFAIL xfail_demo.py::test_hello6
       reason: reason
+    XFAIL xfail_demo.py::test_hello7

-    ======================== 6 xfailed in 0.05 seconds =========================
+    ======================== 7 xfailed in 0.05 seconds =========================

 .. _`skip/xfail with parametrize`:
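The run now collects seven xfailing tests instead of six because ``xfail_demo.py`` gained a ``test_hello7``. Given the 2.6 changelog entry allowing ``pytest.mark.xfail(...)`` to name expected exceptions, the new example presumably looks something like this sketch::

    import pytest

    @pytest.mark.xfail(raises=IndexError)
    def test_hello7():
        x = []
        x[1] = 1   # raises IndexError, so the test is reported as xfailed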
@@ -29,7 +29,7 @@ Running this would result in a passed test except for the last

     $ py.test test_tmpdir.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
     collected 1 items

     test_tmpdir.py F
@@ -37,7 +37,7 @@ Running this would result in a passed test except for the last
     ================================= FAILURES =================================
     _____________________________ test_create_file _____________________________

-    tmpdir = local('/tmp/pytest-25/test_create_file0')
+    tmpdir = local('/tmp/pytest-506/test_create_file0')

         def test_create_file(tmpdir):
             p = tmpdir.mkdir("sub").join("hello.txt")
@@ -48,7 +48,7 @@ Running this would result in a passed test except for the last
     E       assert 0

     test_tmpdir.py:7: AssertionError
-    ========================= 1 failed in 0.01 seconds =========================
+    ========================= 1 failed in 0.02 seconds =========================

 .. _`base temporary directory`:
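The traceback above comes from the standard tmpdir doc example; only its first two lines appear in this hunk, so here is the body completed for reference (reconstructed from the pytest docs, with a deliberate final failure so the tmpdir path shows up in the report)::

    def test_create_file(tmpdir):
        p = tmpdir.mkdir("sub").join("hello.txt")
        p.write("content")
        assert p.read() == "content"
        assert len(tmpdir.listdir()) == 1
        assert 0   # deliberate failure for demonstration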
@@ -88,7 +88,7 @@ the ``self.db`` values in the traceback::

     $ py.test test_unittest_db.py
     =========================== test session starts ============================
-    platform linux2 -- Python 2.7.6 -- py-1.4.22 -- pytest-2.6.0
+    platform linux -- Python 3.4.0 -- py-1.4.23 -- pytest-2.6.1
     collected 2 items

     test_unittest_db.py FF
@@ -101,7 +101,7 @@ the ``self.db`` values in the traceback::
         def test_method1(self):
             assert hasattr(self, "db")
 >           assert 0, self.db # fail for demo purposes
-E           AssertionError: <conftest.DummyDB instance at 0x2ba71cccb128>
+E           AssertionError: <conftest.db_class.<locals>.DummyDB object at 0x2b12849f90b8>

     test_unittest_db.py:9: AssertionError
     ___________________________ MyTest.test_method2 ____________________________
@@ -110,10 +110,10 @@ the ``self.db`` values in the traceback::

         def test_method2(self):
 >           assert 0, self.db # fail for demo purposes
-E           AssertionError: <conftest.DummyDB instance at 0x2ba71cccb128>
+E           AssertionError: <conftest.db_class.<locals>.DummyDB object at 0x2b12849f90b8>

     test_unittest_db.py:12: AssertionError
-    ========================= 2 failed in 0.04 seconds =========================
+    ========================= 2 failed in 0.05 seconds =========================

 This default pytest traceback shows that the two test methods
 share the same ``self.db`` instance which was our intention
@@ -160,7 +160,7 @@ Running this test module ...::

     $ py.test -q test_unittest_cleandir.py
     .
-    1 passed in 0.03 seconds
+    1 passed in 0.05 seconds

 ... gives us one passed test because the ``initdir`` fixture function
 was executed ahead of the ``test_method``.
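The repr change from ``<conftest.DummyDB instance at ...>`` to ``<conftest.db_class.<locals>.DummyDB object at ...>`` is a Python 3 artifact: ``DummyDB`` is defined inside a fixture function, and Python 3 qualifies nested class names. A sketch of the fixture those tracebacks imply (the test class would request it via ``@pytest.mark.usefixtures("db_class")``)::

    # conftest.py
    import pytest

    @pytest.fixture(scope="class")
    def db_class(request):
        class DummyDB:
            pass
        # attach one shared instance to the requesting test class
        request.cls.db = DummyDB()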

setup.py

@@ -27,7 +27,7 @@ def main():
         name='pytest',
         description='pytest: simple powerful testing with Python',
         long_description=long_description,
-        version='2.6.1.dev1',
+        version='2.6.2.dev1',
         url='http://pytest.org',
         license='MIT license',
         platforms=['unix', 'linux', 'osx', 'cygwin', 'win32'],
@@ -1692,22 +1692,22 @@ class TestFixtureMarker:
         """)
         result = testdir.runpytest("-v")
         result.stdout.fnmatch_lines("""
-            test_mod1.py@1::test_func[s1] PASSED
-            test_mod2.py@1::test_func2[s1] PASSED
-            test_mod2.py@3::test_func3[s1-m1] PASSED
-            test_mod2.py@5::test_func3b[s1-m1] PASSED
-            test_mod2.py@3::test_func3[s1-m2] PASSED
-            test_mod2.py@5::test_func3b[s1-m2] PASSED
-            test_mod1.py@1::test_func[s2] PASSED
-            test_mod2.py@1::test_func2[s2] PASSED
-            test_mod2.py@3::test_func3[s2-m1] PASSED
-            test_mod2.py@5::test_func3b[s2-m1] PASSED
-            test_mod2.py@7::test_func4[m1] PASSED
-            test_mod2.py@3::test_func3[s2-m2] PASSED
-            test_mod2.py@5::test_func3b[s2-m2] PASSED
-            test_mod2.py@7::test_func4[m2] PASSED
-            test_mod1.py@3::test_func1[m1] PASSED
-            test_mod1.py@3::test_func1[m2] PASSED
+            test_mod1.py::test_func[s1] PASSED
+            test_mod2.py::test_func2[s1] PASSED
+            test_mod2.py::test_func3[s1-m1] PASSED
+            test_mod2.py::test_func3b[s1-m1] PASSED
+            test_mod2.py::test_func3[s1-m2] PASSED
+            test_mod2.py::test_func3b[s1-m2] PASSED
+            test_mod1.py::test_func[s2] PASSED
+            test_mod2.py::test_func2[s2] PASSED
+            test_mod2.py::test_func3[s2-m1] PASSED
+            test_mod2.py::test_func3b[s2-m1] PASSED
+            test_mod2.py::test_func4[m1] PASSED
+            test_mod2.py::test_func3[s2-m2] PASSED
+            test_mod2.py::test_func3b[s2-m2] PASSED
+            test_mod2.py::test_func4[m2] PASSED
+            test_mod1.py::test_func1[m1] PASSED
+            test_mod1.py::test_func1[m2] PASSED
         """)

     def test_class_ordering(self, testdir):
@@ -1744,18 +1744,18 @@ class TestFixtureMarker:
         """)
         result = testdir.runpytest("-vs")
         result.stdout.fnmatch_lines("""
-            test_class_ordering.py@4::TestClass2::test_1[1-a] PASSED
-            test_class_ordering.py@4::TestClass2::test_1[2-a] PASSED
-            test_class_ordering.py@6::TestClass2::test_2[1-a] PASSED
-            test_class_ordering.py@6::TestClass2::test_2[2-a] PASSED
-            test_class_ordering.py@4::TestClass2::test_1[1-b] PASSED
-            test_class_ordering.py@4::TestClass2::test_1[2-b] PASSED
-            test_class_ordering.py@6::TestClass2::test_2[1-b] PASSED
-            test_class_ordering.py@6::TestClass2::test_2[2-b] PASSED
-            test_class_ordering.py@9::TestClass::test_3[1-a] PASSED
-            test_class_ordering.py@9::TestClass::test_3[2-a] PASSED
-            test_class_ordering.py@9::TestClass::test_3[1-b] PASSED
-            test_class_ordering.py@9::TestClass::test_3[2-b] PASSED
+            test_class_ordering.py::TestClass2::test_1[1-a] PASSED
+            test_class_ordering.py::TestClass2::test_1[2-a] PASSED
+            test_class_ordering.py::TestClass2::test_2[1-a] PASSED
+            test_class_ordering.py::TestClass2::test_2[2-a] PASSED
+            test_class_ordering.py::TestClass2::test_1[1-b] PASSED
+            test_class_ordering.py::TestClass2::test_1[2-b] PASSED
+            test_class_ordering.py::TestClass2::test_2[1-b] PASSED
+            test_class_ordering.py::TestClass2::test_2[2-b] PASSED
+            test_class_ordering.py::TestClass::test_3[1-a] PASSED
+            test_class_ordering.py::TestClass::test_3[2-a] PASSED
+            test_class_ordering.py::TestClass::test_3[1-b] PASSED
+            test_class_ordering.py::TestClass::test_3[2-b] PASSED
         """)

     def test_parametrize_separated_order_higher_scope_first(self, testdir):
@@ -539,3 +539,25 @@ class TestAssertionRewriteHookDetails(object):
         result.stdout.fnmatch_lines([
             '* 1 passed*',
         ])
+
+    def test_read_pyc(self, tmpdir):
+        """
+        Ensure that `_read_pyc` can properly deal with corrupted pyc files.
+        In those circumstances it should just give up instead of generating
+        an exception that is propagated to the caller.
+        """
+        import py_compile
+        from _pytest.assertion.rewrite import _read_pyc
+
+        source = tmpdir.join('source.py')
+        pyc = source + 'c'
+
+        source.write('def test(): pass')
+        py_compile.compile(str(source), str(pyc))
+
+        contents = pyc.read(mode='rb')
+        strip_bytes = 20  # header is around 8 bytes, strip a little more
+        assert len(contents) > strip_bytes
+        pyc.write(contents[:strip_bytes], mode='wb')
+
+        assert _read_pyc(source, str(pyc)) is None  # no error
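The new test truncates a freshly compiled pyc to roughly its header and asserts that ``_read_pyc`` returns ``None`` instead of raising. The defensive pattern it exercises looks roughly like this sketch (not pytest's actual code; the real function also honours a ``trace`` callback)::

    import marshal
    import struct

    def read_pyc_code(pyc_path, source_mtime):
        try:
            fp = open(pyc_path, "rb")
        except IOError:
            return None                  # no cached pyc: nothing to reuse
        with fp:
            data = fp.read(8)            # 4-byte magic + 4-byte mtime header
            if len(data) != 8:
                return None              # truncated header: give up quietly
            mtime, = struct.unpack("<l", data[4:])
            if mtime != int(source_mtime):
                return None              # stale cache entry
            try:
                return marshal.load(fp)  # the rewritten code object
            except Exception:
                return None              # corrupted body: give up quietly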
@@ -1012,3 +1012,13 @@ def test_capturing_and_logging_fundamentals(testdir, method):
     """)
     assert "atexit" not in result.stderr.str()


+def test_error_attribute_issue555(testdir):
+    testdir.makepyfile("""
+        import sys
+        def test_capattr():
+            assert sys.stdout.errors == "strict"
+            assert sys.stderr.errors == "strict"
+        """)
+    reprec = testdir.inline_run()
+    reprec.assertoutcome(passed=1)
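The fix behind this test gives pytest's capture streams an ``errors`` attribute, so code such as distutils that inspects ``sys.stdout.errors`` keeps working while capturing is active. A self-contained illustration of the stream shape involved (plain stdlib, not pytest's implementation)::

    import io

    buf = io.BytesIO()
    stream = io.TextIOWrapper(buf, encoding="utf-8", errors="strict")
    assert stream.errors == "strict"   # the attribute issue555 callers expect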
@@ -251,7 +251,7 @@ def test_conftest_found_with_double_dash(testdir):
         def test_hello(found):
             assert found == 1
     """))
-    result = testdir.runpytest(str(p) + "@2::test_hello", "-h")
+    result = testdir.runpytest(str(p) + "::test_hello", "-h")
     result.stdout.fnmatch_lines("""
         *--hello-world*
     """)
@@ -145,21 +145,6 @@ class TestParser:
         assert args.R == True
         assert args.S == False

-    def test_parse_removes_line_number_from_positional_arguments(self, parser):
-        args = parser.parse(['path.txt@2::item',
-                             'path2.py::func2[param with .py@123]',
-                             'path.py@123',
-                             'hello/path.py@123',
-                             ])
-        # we only remove "@NUM" syntax for .py files which are currently
-        # the only ones which can produce it.
-        assert getattr(args, parseopt.FILE_OR_DIR) == [
-            'path.txt@2::item',
-            'path2.py::func2[param with .py@123]',
-            'path.py',
-            'hello/path.py',
-        ]

     def test_parse_defaultgetter(self):
         def defaultget(option):
             if not hasattr(option, 'type'):
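The removed test documented the old command-line normalization that goes away with the ``@NUM`` nodeids: a trailing ``@NUM`` was stripped, but only from parts ending in ``.py``, since only .py nodeids ever carried line numbers. A sketch of that retired behaviour (pytest's own implementation differed)::

    import re

    def strip_line_number(arg):
        parts = arg.split("::")
        # only the file part of a .py nodeid can carry "@NUM"
        parts[0] = re.sub(r"\.py@\d+$", ".py", parts[0])
        return "::".join(parts)

    assert strip_line_number("path.py@123") == "path.py"
    assert strip_line_number("path.txt@2::item") == "path.txt@2::item"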
@@ -51,9 +51,9 @@ class TestTerminal:
         result = testdir.runpytest(*option.args)
         if option.verbose:
             result.stdout.fnmatch_lines([
-                "*test_pass_skip_fail.py@2::test_ok PASS*",
-                "*test_pass_skip_fail.py@4::test_skip SKIP*",
-                "*test_pass_skip_fail.py@6::test_func FAIL*",
+                "*test_pass_skip_fail.py::test_ok PASS*",
+                "*test_pass_skip_fail.py::test_skip SKIP*",
+                "*test_pass_skip_fail.py::test_func FAIL*",
             ])
         else:
             result.stdout.fnmatch_lines([
@@ -126,7 +126,7 @@ class TestTerminal:
         ])
         result = testdir.runpytest("-v", p2)
         result.stdout.fnmatch_lines([
-            "*test_p2.py <- *test_p1.py@2::TestMore::test_p1*",
+            "*test_p2.py <- *test_p1.py::TestMore::test_p1*",
         ])

     def test_itemreport_directclasses_not_shown_as_subclasses(self, testdir):
@@ -450,17 +450,17 @@ class TestTerminalFunctional:
         """)
         result = testdir.runpytest(p1, '-v')
         result.stdout.fnmatch_lines([
-            "*test_verbose_reporting.py@2::test_fail *FAIL*",
-            "*test_verbose_reporting.py@4::test_pass *PASS*",
-            "*test_verbose_reporting.py@7::TestClass::test_skip *SKIP*",
-            "*test_verbose_reporting.py@10::test_gen*0* *FAIL*",
+            "*test_verbose_reporting.py::test_fail *FAIL*",
+            "*test_verbose_reporting.py::test_pass *PASS*",
+            "*test_verbose_reporting.py::TestClass::test_skip *SKIP*",
+            "*test_verbose_reporting.py::test_gen*0* *FAIL*",
         ])
         assert result.ret == 1

         pytestconfig.pluginmanager.skipifmissing("xdist")
         result = testdir.runpytest(p1, '-v', '-n 1')
         result.stdout.fnmatch_lines([
-            "*FAIL*test_verbose_reporting.py@2::test_fail*",
+            "*FAIL*test_verbose_reporting.py::test_fail*",
         ])
         assert result.ret == 1