author     Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
committer  Matt A. Tobin <mattatobin@localhost.localdomain>  2018-02-02 04:16:08 -0500
commit     5f8de423f190bbb79a62f804151bc24824fa32d8 (patch)
tree       10027f336435511475e392454359edea8e25895d /python/pytest/_pytest
parent     49ee0794b5d912db1f95dce6eb52d781dc210db5 (diff)
Add m-esr52 at 52.6.0
Diffstat (limited to 'python/pytest/_pytest')
-rw-r--r--  python/pytest/_pytest/__init__.py  2
-rw-r--r--  python/pytest/_pytest/_argcomplete.py  101
-rw-r--r--  python/pytest/_pytest/_code/__init__.py  12
-rw-r--r--  python/pytest/_pytest/_code/_py2traceback.py  81
-rw-r--r--  python/pytest/_pytest/_code/code.py  805
-rw-r--r--  python/pytest/_pytest/_code/source.py  421
-rw-r--r--  python/pytest/_pytest/_pluggy.py  11
-rw-r--r--  python/pytest/_pytest/assertion/__init__.py  176
-rw-r--r--  python/pytest/_pytest/assertion/reinterpret.py  407
-rw-r--r--  python/pytest/_pytest/assertion/rewrite.py  885
-rw-r--r--  python/pytest/_pytest/assertion/util.py  332
-rwxr-xr-x  python/pytest/_pytest/cacheprovider.py  245
-rw-r--r--  python/pytest/_pytest/capture.py  472
-rw-r--r--  python/pytest/_pytest/config.py  1192
-rw-r--r--  python/pytest/_pytest/doctest.py  290
-rwxr-xr-x  python/pytest/_pytest/genscript.py  132
-rw-r--r--  python/pytest/_pytest/helpconfig.py  139
-rw-r--r--  python/pytest/_pytest/hookspec.py  295
-rw-r--r--  python/pytest/_pytest/junitxml.py  387
-rw-r--r--  python/pytest/_pytest/main.py  744
-rw-r--r--  python/pytest/_pytest/mark.py  311
-rw-r--r--  python/pytest/_pytest/monkeypatch.py  254
-rw-r--r--  python/pytest/_pytest/nose.py  71
-rw-r--r--  python/pytest/_pytest/pastebin.py  92
-rw-r--r--  python/pytest/_pytest/pdb.py  109
-rw-r--r--  python/pytest/_pytest/pytester.py  1110
-rw-r--r--  python/pytest/_pytest/python.py  2300
-rw-r--r--  python/pytest/_pytest/recwarn.py  221
-rw-r--r--  python/pytest/_pytest/resultlog.py  104
-rw-r--r--  python/pytest/_pytest/runner.py  515
-rw-r--r--  python/pytest/_pytest/skipping.py  361
-rwxr-xr-x  python/pytest/_pytest/standalonetemplate.py  89
-rw-r--r--  python/pytest/_pytest/terminal.py  593
-rw-r--r--  python/pytest/_pytest/tmpdir.py  123
-rw-r--r--  python/pytest/_pytest/unittest.py  205
-rw-r--r--  python/pytest/_pytest/vendored_packages/README.md  13
-rw-r--r--  python/pytest/_pytest/vendored_packages/__init__.py  0
-rw-r--r--  python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst  10
-rw-r--r--  python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA  39
-rw-r--r--  python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD  8
-rw-r--r--  python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL  6
-rw-r--r--  python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json  1
-rw-r--r--  python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json  1
-rw-r--r--  python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt  1
-rw-r--r--  python/pytest/_pytest/vendored_packages/pluggy.py  777
45 files changed, 14443 insertions, 0 deletions
diff --git a/python/pytest/_pytest/__init__.py b/python/pytest/_pytest/__init__.py
new file mode 100644
index 000000000..23dac6d05
--- /dev/null
+++ b/python/pytest/_pytest/__init__.py
@@ -0,0 +1,2 @@
+#
+__version__ = '2.9.2'
diff --git a/python/pytest/_pytest/_argcomplete.py b/python/pytest/_pytest/_argcomplete.py
new file mode 100644
index 000000000..955855a96
--- /dev/null
+++ b/python/pytest/_pytest/_argcomplete.py
@@ -0,0 +1,101 @@
+
+"""allow bash-completion for argparse with argcomplete if installed
+needs argcomplete>=0.5.6 for python 3.2/3.3 (older versions fail
+to find the magic string, so _ARGCOMPLETE env. var is never set, and
+this does not need special code.
+
+argcomplete does not support python 2.5 (although the changes for that
+are minor).
+
+Function try_argcomplete(parser) should be called directly before
+the call to ArgumentParser.parse_args().
+
+The filescompleter is what you normally would use on the positional
+arguments specification, in order to get "dirname/" after "dirn<TAB>"
+instead of the default "dirname ":
+
+ optparser.add_argument(Config._file_or_dir, nargs='*'
+ ).completer=filescompleter
+
+Other, application specific, completers should go in the file
+doing the add_argument calls as they need to be specified as .completer
+attributes as well. (If argcomplete is not installed, the function the
+attribute points to will not be used).
+
+SPEEDUP
+=======
+The generic argcomplete script for bash-completion
+(/etc/bash_completion.d/python-argcomplete.sh)
+uses a python program to determine the startup script generated by pip.
+You can speed up completion somewhat by changing this script to include
+ # PYTHON_ARGCOMPLETE_OK
+so that the python-argcomplete-check-easy-install-script does not
+need to be called to find the entry point of the code and see if that is
+marked with PYTHON_ARGCOMPLETE_OK
+
+INSTALL/DEBUGGING
+=================
+To include this support in another application that has setup.py generated
+scripts:
+- add the line:
+ # PYTHON_ARGCOMPLETE_OK
+ near the top of the main python entry point
+- include in the file calling parse_args():
+ from _argcomplete import try_argcomplete, filescompleter
+ , call try_argcomplete just before parse_args(), and optionally add
+ filescompleter to the positional arguments' add_argument()
+If things do not work right away:
+- switch on argcomplete debugging with (also helpful when doing custom
+ completers):
+ export _ARC_DEBUG=1
+- run:
+ python-argcomplete-check-easy-install-script $(which appname)
+ echo $?
+ will echo 0 if the magic line has been found, 1 if not
+- sometimes it helps to find early on errors using:
+ _ARGCOMPLETE=1 _ARC_DEBUG=1 appname
+ which should throw a KeyError: 'COMPLINE' (which is properly set by the
+ global argcomplete script).
+"""
+
+import sys
+import os
+from glob import glob
+
+class FastFilesCompleter:
+ 'Fast file completer class'
+ def __init__(self, directories=True):
+ self.directories = directories
+
+ def __call__(self, prefix, **kwargs):
+ """only called on non option completions"""
+ if os.path.sep in prefix[1:]: #
+ prefix_dir = len(os.path.dirname(prefix) + os.path.sep)
+ else:
+ prefix_dir = 0
+ completion = []
+ globbed = []
+ if '*' not in prefix and '?' not in prefix:
+ if prefix[-1] == os.path.sep: # we are on unix, otherwise no bash
+ globbed.extend(glob(prefix + '.*'))
+ prefix += '*'
+ globbed.extend(glob(prefix))
+ for x in sorted(globbed):
+ if os.path.isdir(x):
+ x += '/'
+ # append stripping the prefix (like bash, not like compgen)
+ completion.append(x[prefix_dir:])
+ return completion
+
+if os.environ.get('_ARGCOMPLETE'):
+ try:
+ import argcomplete.completers
+ except ImportError:
+ sys.exit(-1)
+ filescompleter = FastFilesCompleter()
+
+ def try_argcomplete(parser):
+ argcomplete.autocomplete(parser)
+else:
+ def try_argcomplete(parser): pass
+ filescompleter = None
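A minimal wiring sketch for the module above, following its own docstring (illustrative only, not part of this commit; the `myapp` program name and the `paths` argument are invented, and `_argcomplete` is assumed to be importable by the application):

    # PYTHON_ARGCOMPLETE_OK
    import argparse
    from _argcomplete import try_argcomplete, filescompleter

    def main(argv=None):
        parser = argparse.ArgumentParser(prog='myapp')
        # attach the file completer so "dirn<TAB>" completes to "dirname/"
        parser.add_argument('paths', nargs='*').completer = filescompleter
        try_argcomplete(parser)   # must run before parse_args()
        return parser.parse_args(argv)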
diff --git a/python/pytest/_pytest/_code/__init__.py b/python/pytest/_pytest/_code/__init__.py
new file mode 100644
index 000000000..c046b9716
--- /dev/null
+++ b/python/pytest/_pytest/_code/__init__.py
@@ -0,0 +1,12 @@
+""" python inspection/code generation API """
+from .code import Code # noqa
+from .code import ExceptionInfo # noqa
+from .code import Frame # noqa
+from .code import Traceback # noqa
+from .code import getrawcode # noqa
+from .code import patch_builtins # noqa
+from .code import unpatch_builtins # noqa
+from .source import Source # noqa
+from .source import compile_ as compile # noqa
+from .source import getfslineno # noqa
+
diff --git a/python/pytest/_pytest/_code/_py2traceback.py b/python/pytest/_pytest/_code/_py2traceback.py
new file mode 100644
index 000000000..a830d9899
--- /dev/null
+++ b/python/pytest/_pytest/_code/_py2traceback.py
@@ -0,0 +1,81 @@
+# copied from python-2.7.3's traceback.py
+# CHANGES:
+# - some_str is replaced, trying to create unicode strings
+#
+import types
+
+def format_exception_only(etype, value):
+ """Format the exception part of a traceback.
+
+ The arguments are the exception type and value such as given by
+ sys.last_type and sys.last_value. The return value is a list of
+ strings, each ending in a newline.
+
+ Normally, the list contains a single string; however, for
+ SyntaxError exceptions, it contains several lines that (when
+ printed) display detailed information about where the syntax
+ error occurred.
+
+ The message indicating which exception occurred is always the last
+ string in the list.
+
+ """
+
+ # An instance should not have a meaningful value parameter, but
+ # sometimes does, particularly for string exceptions, such as
+ # >>> raise string1, string2 # deprecated
+ #
+    # Clear these out first because issubclass(string1, SyntaxError)
+ # would throw another exception and mask the original problem.
+ if (isinstance(etype, BaseException) or
+ isinstance(etype, types.InstanceType) or
+ etype is None or type(etype) is str):
+ return [_format_final_exc_line(etype, value)]
+
+ stype = etype.__name__
+
+ if not issubclass(etype, SyntaxError):
+ return [_format_final_exc_line(stype, value)]
+
+ # It was a syntax error; show exactly where the problem was found.
+ lines = []
+ try:
+ msg, (filename, lineno, offset, badline) = value.args
+ except Exception:
+ pass
+ else:
+ filename = filename or "<string>"
+ lines.append(' File "%s", line %d\n' % (filename, lineno))
+ if badline is not None:
+ if isinstance(badline, bytes): # python 2 only
+ badline = badline.decode('utf-8', 'replace')
+ lines.append(u' %s\n' % badline.strip())
+ if offset is not None:
+ caretspace = badline.rstrip('\n')[:offset].lstrip()
+ # non-space whitespace (likes tabs) must be kept for alignment
+                # non-space whitespace (like tabs) must be kept for alignment
+ # only three spaces to account for offset1 == pos 0
+ lines.append(' %s^\n' % ''.join(caretspace))
+ value = msg
+
+ lines.append(_format_final_exc_line(stype, value))
+ return lines
+
+def _format_final_exc_line(etype, value):
+    """Return a string for a single line -- normal case for format_exception_only"""
+ valuestr = _some_str(value)
+ if value is None or not valuestr:
+ line = "%s\n" % etype
+ else:
+ line = "%s: %s\n" % (etype, valuestr)
+ return line
+
+def _some_str(value):
+ try:
+ return unicode(value)
+ except Exception:
+ try:
+ return str(value)
+ except Exception:
+ pass
+ return '<unprintable %s object>' % type(value).__name__
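A rough usage sketch for format_exception_only above (Python 2 only, since the module relies on `unicode` and `types.InstanceType`; illustrative, not part of this commit):

    import sys
    from _pytest._code._py2traceback import format_exception_only

    try:
        raise ValueError("bad value")
    except ValueError:
        etype, value = sys.exc_info()[:2]
        # returns a list of newline-terminated strings, e.g. ["ValueError: bad value\n"]
        for line in format_exception_only(etype, value):
            sys.stderr.write(line)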
diff --git a/python/pytest/_pytest/_code/code.py b/python/pytest/_pytest/_code/code.py
new file mode 100644
index 000000000..8995cc1f7
--- /dev/null
+++ b/python/pytest/_pytest/_code/code.py
@@ -0,0 +1,805 @@
+import sys
+from inspect import CO_VARARGS, CO_VARKEYWORDS
+
+import py
+
+builtin_repr = repr
+
+reprlib = py.builtin._tryimport('repr', 'reprlib')
+
+if sys.version_info[0] >= 3:
+ from traceback import format_exception_only
+else:
+ from ._py2traceback import format_exception_only
+
+class Code(object):
+ """ wrapper around Python code objects """
+ def __init__(self, rawcode):
+ if not hasattr(rawcode, "co_filename"):
+ rawcode = getrawcode(rawcode)
+ try:
+ self.filename = rawcode.co_filename
+ self.firstlineno = rawcode.co_firstlineno - 1
+ self.name = rawcode.co_name
+ except AttributeError:
+ raise TypeError("not a code object: %r" %(rawcode,))
+ self.raw = rawcode
+
+ def __eq__(self, other):
+ return self.raw == other.raw
+
+ def __ne__(self, other):
+ return not self == other
+
+ @property
+ def path(self):
+ """ return a path object pointing to source code (note that it
+ might not point to an actually existing file). """
+ p = py.path.local(self.raw.co_filename)
+ # maybe don't try this checking
+ if not p.check():
+ # XXX maybe try harder like the weird logic
+ # in the standard lib [linecache.updatecache] does?
+ p = self.raw.co_filename
+ return p
+
+ @property
+ def fullsource(self):
+ """ return a _pytest._code.Source object for the full source file of the code
+ """
+ from _pytest._code import source
+ full, _ = source.findsource(self.raw)
+ return full
+
+ def source(self):
+ """ return a _pytest._code.Source object for the code object's source only
+ """
+ # return source only for that part of code
+ import _pytest._code
+ return _pytest._code.Source(self.raw)
+
+ def getargs(self, var=False):
+ """ return a tuple with the argument names for the code object
+
+ if 'var' is set True also return the names of the variable and
+ keyword arguments when present
+ """
+        # handy shortcut for getting args
+ raw = self.raw
+ argcount = raw.co_argcount
+ if var:
+ argcount += raw.co_flags & CO_VARARGS
+ argcount += raw.co_flags & CO_VARKEYWORDS
+ return raw.co_varnames[:argcount]
+
+class Frame(object):
+ """Wrapper around a Python frame holding f_locals and f_globals
+ in which expressions can be evaluated."""
+
+ def __init__(self, frame):
+ self.lineno = frame.f_lineno - 1
+ self.f_globals = frame.f_globals
+ self.f_locals = frame.f_locals
+ self.raw = frame
+ self.code = Code(frame.f_code)
+
+ @property
+ def statement(self):
+ """ statement this frame is at """
+ import _pytest._code
+ if self.code.fullsource is None:
+ return _pytest._code.Source("")
+ return self.code.fullsource.getstatement(self.lineno)
+
+ def eval(self, code, **vars):
+ """ evaluate 'code' in the frame
+
+ 'vars' are optional additional local variables
+
+ returns the result of the evaluation
+ """
+ f_locals = self.f_locals.copy()
+ f_locals.update(vars)
+ return eval(code, self.f_globals, f_locals)
+
+ def exec_(self, code, **vars):
+ """ exec 'code' in the frame
+
+        'vars' are optional additional local variables
+ """
+ f_locals = self.f_locals.copy()
+ f_locals.update(vars)
+ py.builtin.exec_(code, self.f_globals, f_locals )
+
+ def repr(self, object):
+ """ return a 'safe' (non-recursive, one-line) string repr for 'object'
+ """
+ return py.io.saferepr(object)
+
+ def is_true(self, object):
+ return object
+
+ def getargs(self, var=False):
+ """ return a list of tuples (name, value) for all arguments
+
+ if 'var' is set True also include the variable and keyword
+ arguments when present
+ """
+ retval = []
+ for arg in self.code.getargs(var):
+ try:
+ retval.append((arg, self.f_locals[arg]))
+ except KeyError:
+ pass # this can occur when using Psyco
+ return retval
+
+class TracebackEntry(object):
+ """ a single entry in a traceback """
+
+ _repr_style = None
+ exprinfo = None
+
+ def __init__(self, rawentry):
+ self._rawentry = rawentry
+ self.lineno = rawentry.tb_lineno - 1
+
+ def set_repr_style(self, mode):
+ assert mode in ("short", "long")
+ self._repr_style = mode
+
+ @property
+ def frame(self):
+ import _pytest._code
+ return _pytest._code.Frame(self._rawentry.tb_frame)
+
+ @property
+ def relline(self):
+ return self.lineno - self.frame.code.firstlineno
+
+ def __repr__(self):
+ return "<TracebackEntry %s:%d>" %(self.frame.code.path, self.lineno+1)
+
+ @property
+ def statement(self):
+ """ _pytest._code.Source object for the current statement """
+ source = self.frame.code.fullsource
+ return source.getstatement(self.lineno)
+
+ @property
+ def path(self):
+ """ path to the source code """
+ return self.frame.code.path
+
+ def getlocals(self):
+ return self.frame.f_locals
+    locals = property(getlocals, None, None, "locals of underlying frame")
+
+ def reinterpret(self):
+        """Reinterpret the failing statement and return detailed information
+        about what operations are performed."""
+ from _pytest.assertion.reinterpret import reinterpret
+ if self.exprinfo is None:
+ source = py.builtin._totext(self.statement).strip()
+ x = reinterpret(source, self.frame, should_fail=True)
+ if not py.builtin._istext(x):
+ raise TypeError("interpret returned non-string %r" % (x,))
+ self.exprinfo = x
+ return self.exprinfo
+
+ def getfirstlinesource(self):
+ # on Jython this firstlineno can be -1 apparently
+ return max(self.frame.code.firstlineno, 0)
+
+ def getsource(self, astcache=None):
+ """ return failing source code. """
+ # we use the passed in astcache to not reparse asttrees
+ # within exception info printing
+ from _pytest._code.source import getstatementrange_ast
+ source = self.frame.code.fullsource
+ if source is None:
+ return None
+ key = astnode = None
+ if astcache is not None:
+ key = self.frame.code.path
+ if key is not None:
+ astnode = astcache.get(key, None)
+ start = self.getfirstlinesource()
+ try:
+ astnode, _, end = getstatementrange_ast(self.lineno, source,
+ astnode=astnode)
+ except SyntaxError:
+ end = self.lineno + 1
+ else:
+ if key is not None:
+ astcache[key] = astnode
+ return source[start:end]
+
+ source = property(getsource)
+
+ def ishidden(self):
+ """ return True if the current frame has a var __tracebackhide__
+ resolving to True
+
+ mostly for internal use
+ """
+ try:
+ return self.frame.f_locals['__tracebackhide__']
+ except KeyError:
+ try:
+ return self.frame.f_globals['__tracebackhide__']
+ except KeyError:
+ return False
+
+ def __str__(self):
+ try:
+ fn = str(self.path)
+ except py.error.Error:
+ fn = '???'
+ name = self.frame.code.name
+ try:
+ line = str(self.statement).lstrip()
+ except KeyboardInterrupt:
+ raise
+ except:
+ line = "???"
+ return " File %r:%d in %s\n %s\n" %(fn, self.lineno+1, name, line)
+
+ def name(self):
+ return self.frame.code.raw.co_name
+    name = property(name, None, None, "co_name of underlying code")
+
+class Traceback(list):
+ """ Traceback objects encapsulate and offer higher level
+ access to Traceback entries.
+ """
+ Entry = TracebackEntry
+ def __init__(self, tb):
+ """ initialize from given python traceback object. """
+ if hasattr(tb, 'tb_next'):
+ def f(cur):
+ while cur is not None:
+ yield self.Entry(cur)
+ cur = cur.tb_next
+ list.__init__(self, f(tb))
+ else:
+ list.__init__(self, tb)
+
+ def cut(self, path=None, lineno=None, firstlineno=None, excludepath=None):
+ """ return a Traceback instance wrapping part of this Traceback
+
+        by providing any combination of path, lineno and firstlineno, the
+ first frame to start the to-be-returned traceback is determined
+
+ this allows cutting the first part of a Traceback instance e.g.
+ for formatting reasons (removing some uninteresting bits that deal
+ with handling of the exception/traceback)
+ """
+ for x in self:
+ code = x.frame.code
+ codepath = code.path
+ if ((path is None or codepath == path) and
+ (excludepath is None or not hasattr(codepath, 'relto') or
+ not codepath.relto(excludepath)) and
+ (lineno is None or x.lineno == lineno) and
+ (firstlineno is None or x.frame.code.firstlineno == firstlineno)):
+ return Traceback(x._rawentry)
+ return self
+
+ def __getitem__(self, key):
+ val = super(Traceback, self).__getitem__(key)
+ if isinstance(key, type(slice(0))):
+ val = self.__class__(val)
+ return val
+
+ def filter(self, fn=lambda x: not x.ishidden()):
+ """ return a Traceback instance with certain items removed
+
+ fn is a function that gets a single argument, a TracebackEntry
+ instance, and should return True when the item should be added
+ to the Traceback, False when not
+
+ by default this removes all the TracebackEntries which are hidden
+ (see ishidden() above)
+ """
+ return Traceback(filter(fn, self))
+
+ def getcrashentry(self):
+        """ return last non-hidden traceback entry that led
+ to the exception of a traceback.
+ """
+ for i in range(-1, -len(self)-1, -1):
+ entry = self[i]
+ if not entry.ishidden():
+ return entry
+ return self[-1]
+
+ def recursionindex(self):
+ """ return the index of the frame/TracebackEntry where recursion
+ originates if appropriate, None if no recursion occurred
+ """
+ cache = {}
+ for i, entry in enumerate(self):
+ # id for the code.raw is needed to work around
+ # the strange metaprogramming in the decorator lib from pypi
+ # which generates code objects that have hash/value equality
+ #XXX needs a test
+ key = entry.frame.code.path, id(entry.frame.code.raw), entry.lineno
+ #print "checking for recursion at", key
+ l = cache.setdefault(key, [])
+ if l:
+ f = entry.frame
+ loc = f.f_locals
+ for otherloc in l:
+ if f.is_true(f.eval(co_equal,
+ __recursioncache_locals_1=loc,
+ __recursioncache_locals_2=otherloc)):
+ return i
+ l.append(entry.frame.f_locals)
+ return None
+
+co_equal = compile('__recursioncache_locals_1 == __recursioncache_locals_2',
+ '?', 'eval')
+
+class ExceptionInfo(object):
+ """ wraps sys.exc_info() objects and offers
+ help for navigating the traceback.
+ """
+ _striptext = ''
+ def __init__(self, tup=None, exprinfo=None):
+ import _pytest._code
+ if tup is None:
+ tup = sys.exc_info()
+ if exprinfo is None and isinstance(tup[1], AssertionError):
+ exprinfo = getattr(tup[1], 'msg', None)
+ if exprinfo is None:
+ exprinfo = str(tup[1])
+ if exprinfo and exprinfo.startswith('assert '):
+ self._striptext = 'AssertionError: '
+ self._excinfo = tup
+ #: the exception class
+ self.type = tup[0]
+ #: the exception instance
+ self.value = tup[1]
+ #: the exception raw traceback
+ self.tb = tup[2]
+ #: the exception type name
+ self.typename = self.type.__name__
+ #: the exception traceback (_pytest._code.Traceback instance)
+ self.traceback = _pytest._code.Traceback(self.tb)
+
+ def __repr__(self):
+ return "<ExceptionInfo %s tblen=%d>" % (self.typename, len(self.traceback))
+
+ def exconly(self, tryshort=False):
+ """ return the exception as a string
+
+ when 'tryshort' resolves to True, and the exception is a
+ _pytest._code._AssertionError, only the actual exception part of
+ the exception representation is returned (so 'AssertionError: ' is
+ removed from the beginning)
+ """
+ lines = format_exception_only(self.type, self.value)
+ text = ''.join(lines)
+ text = text.rstrip()
+ if tryshort:
+ if text.startswith(self._striptext):
+ text = text[len(self._striptext):]
+ return text
+
+ def errisinstance(self, exc):
+ """ return True if the exception is an instance of exc """
+ return isinstance(self.value, exc)
+
+ def _getreprcrash(self):
+ exconly = self.exconly(tryshort=True)
+ entry = self.traceback.getcrashentry()
+ path, lineno = entry.frame.code.raw.co_filename, entry.lineno
+ return ReprFileLocation(path, lineno+1, exconly)
+
+ def getrepr(self, showlocals=False, style="long",
+ abspath=False, tbfilter=True, funcargs=False):
+ """ return str()able representation of this exception info.
+ showlocals: show locals per traceback entry
+ style: long|short|no|native traceback style
+ tbfilter: hide entries (where __tracebackhide__ is true)
+
+ in case of style==native, tbfilter and showlocals is ignored.
+ """
+ if style == 'native':
+ return ReprExceptionInfo(ReprTracebackNative(
+ py.std.traceback.format_exception(
+ self.type,
+ self.value,
+ self.traceback[0]._rawentry,
+ )), self._getreprcrash())
+
+ fmt = FormattedExcinfo(showlocals=showlocals, style=style,
+ abspath=abspath, tbfilter=tbfilter, funcargs=funcargs)
+ return fmt.repr_excinfo(self)
+
+ def __str__(self):
+ entry = self.traceback[-1]
+ loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+ return str(loc)
+
+ def __unicode__(self):
+ entry = self.traceback[-1]
+ loc = ReprFileLocation(entry.path, entry.lineno + 1, self.exconly())
+ return unicode(loc)
+
+
+class FormattedExcinfo(object):
+ """ presenting information about failing Functions and Generators. """
+ # for traceback entries
+ flow_marker = ">"
+ fail_marker = "E"
+
+ def __init__(self, showlocals=False, style="long", abspath=True, tbfilter=True, funcargs=False):
+ self.showlocals = showlocals
+ self.style = style
+ self.tbfilter = tbfilter
+ self.funcargs = funcargs
+ self.abspath = abspath
+ self.astcache = {}
+
+ def _getindent(self, source):
+ # figure out indent for given source
+ try:
+ s = str(source.getstatement(len(source)-1))
+ except KeyboardInterrupt:
+ raise
+ except:
+ try:
+ s = str(source[-1])
+ except KeyboardInterrupt:
+ raise
+ except:
+ return 0
+ return 4 + (len(s) - len(s.lstrip()))
+
+ def _getentrysource(self, entry):
+ source = entry.getsource(self.astcache)
+ if source is not None:
+ source = source.deindent()
+ return source
+
+ def _saferepr(self, obj):
+ return py.io.saferepr(obj)
+
+ def repr_args(self, entry):
+ if self.funcargs:
+ args = []
+ for argname, argvalue in entry.frame.getargs(var=True):
+ args.append((argname, self._saferepr(argvalue)))
+ return ReprFuncArgs(args)
+
+ def get_source(self, source, line_index=-1, excinfo=None, short=False):
+ """ return formatted and marked up source lines. """
+ import _pytest._code
+ lines = []
+ if source is None or line_index >= len(source.lines):
+ source = _pytest._code.Source("???")
+ line_index = 0
+ if line_index < 0:
+ line_index += len(source)
+ space_prefix = " "
+ if short:
+ lines.append(space_prefix + source.lines[line_index].strip())
+ else:
+ for line in source.lines[:line_index]:
+ lines.append(space_prefix + line)
+ lines.append(self.flow_marker + " " + source.lines[line_index])
+ for line in source.lines[line_index+1:]:
+ lines.append(space_prefix + line)
+ if excinfo is not None:
+ indent = 4 if short else self._getindent(source)
+ lines.extend(self.get_exconly(excinfo, indent=indent, markall=True))
+ return lines
+
+ def get_exconly(self, excinfo, indent=4, markall=False):
+ lines = []
+ indent = " " * indent
+ # get the real exception information out
+ exlines = excinfo.exconly(tryshort=True).split('\n')
+ failindent = self.fail_marker + indent[1:]
+ for line in exlines:
+ lines.append(failindent + line)
+ if not markall:
+ failindent = indent
+ return lines
+
+ def repr_locals(self, locals):
+ if self.showlocals:
+ lines = []
+ keys = [loc for loc in locals if loc[0] != "@"]
+ keys.sort()
+ for name in keys:
+ value = locals[name]
+ if name == '__builtins__':
+ lines.append("__builtins__ = <builtins>")
+ else:
+ # This formatting could all be handled by the
+ # _repr() function, which is only reprlib.Repr in
+ # disguise, so is very configurable.
+ str_repr = self._saferepr(value)
+ #if len(str_repr) < 70 or not isinstance(value,
+ # (list, tuple, dict)):
+ lines.append("%-10s = %s" %(name, str_repr))
+ #else:
+ # self._line("%-10s =\\" % (name,))
+ # # XXX
+ # py.std.pprint.pprint(value, stream=self.excinfowriter)
+ return ReprLocals(lines)
+
+ def repr_traceback_entry(self, entry, excinfo=None):
+ import _pytest._code
+ source = self._getentrysource(entry)
+ if source is None:
+ source = _pytest._code.Source("???")
+ line_index = 0
+ else:
+ # entry.getfirstlinesource() can be -1, should be 0 on jython
+ line_index = entry.lineno - max(entry.getfirstlinesource(), 0)
+
+ lines = []
+ style = entry._repr_style
+ if style is None:
+ style = self.style
+ if style in ("short", "long"):
+ short = style == "short"
+ reprargs = self.repr_args(entry) if not short else None
+ s = self.get_source(source, line_index, excinfo, short=short)
+ lines.extend(s)
+ if short:
+ message = "in %s" %(entry.name)
+ else:
+ message = excinfo and excinfo.typename or ""
+ path = self._makepath(entry.path)
+ filelocrepr = ReprFileLocation(path, entry.lineno+1, message)
+ localsrepr = None
+ if not short:
+ localsrepr = self.repr_locals(entry.locals)
+ return ReprEntry(lines, reprargs, localsrepr, filelocrepr, style)
+ if excinfo:
+ lines.extend(self.get_exconly(excinfo, indent=4))
+ return ReprEntry(lines, None, None, None, style)
+
+ def _makepath(self, path):
+ if not self.abspath:
+ try:
+ np = py.path.local().bestrelpath(path)
+ except OSError:
+ return path
+ if len(np) < len(str(path)):
+ path = np
+ return path
+
+ def repr_traceback(self, excinfo):
+ traceback = excinfo.traceback
+ if self.tbfilter:
+ traceback = traceback.filter()
+ recursionindex = None
+ if is_recursion_error(excinfo):
+ recursionindex = traceback.recursionindex()
+ last = traceback[-1]
+ entries = []
+ extraline = None
+ for index, entry in enumerate(traceback):
+ einfo = (last == entry) and excinfo or None
+ reprentry = self.repr_traceback_entry(entry, einfo)
+ entries.append(reprentry)
+ if index == recursionindex:
+ extraline = "!!! Recursion detected (same locals & position)"
+ break
+ return ReprTraceback(entries, extraline, style=self.style)
+
+ def repr_excinfo(self, excinfo):
+ reprtraceback = self.repr_traceback(excinfo)
+ reprcrash = excinfo._getreprcrash()
+ return ReprExceptionInfo(reprtraceback, reprcrash)
+
+class TerminalRepr:
+ def __str__(self):
+ s = self.__unicode__()
+ if sys.version_info[0] < 3:
+ s = s.encode('utf-8')
+ return s
+
+ def __unicode__(self):
+ # FYI this is called from pytest-xdist's serialization of exception
+ # information.
+ io = py.io.TextIO()
+ tw = py.io.TerminalWriter(file=io)
+ self.toterminal(tw)
+ return io.getvalue().strip()
+
+ def __repr__(self):
+ return "<%s instance at %0x>" %(self.__class__, id(self))
+
+
+class ReprExceptionInfo(TerminalRepr):
+ def __init__(self, reprtraceback, reprcrash):
+ self.reprtraceback = reprtraceback
+ self.reprcrash = reprcrash
+ self.sections = []
+
+ def addsection(self, name, content, sep="-"):
+ self.sections.append((name, content, sep))
+
+ def toterminal(self, tw):
+ self.reprtraceback.toterminal(tw)
+ for name, content, sep in self.sections:
+ tw.sep(sep, name)
+ tw.line(content)
+
+class ReprTraceback(TerminalRepr):
+ entrysep = "_ "
+
+ def __init__(self, reprentries, extraline, style):
+ self.reprentries = reprentries
+ self.extraline = extraline
+ self.style = style
+
+ def toterminal(self, tw):
+ # the entries might have different styles
+ for i, entry in enumerate(self.reprentries):
+ if entry.style == "long":
+ tw.line("")
+ entry.toterminal(tw)
+ if i < len(self.reprentries) - 1:
+ next_entry = self.reprentries[i+1]
+ if entry.style == "long" or \
+ entry.style == "short" and next_entry.style == "long":
+ tw.sep(self.entrysep)
+
+ if self.extraline:
+ tw.line(self.extraline)
+
+class ReprTracebackNative(ReprTraceback):
+ def __init__(self, tblines):
+ self.style = "native"
+ self.reprentries = [ReprEntryNative(tblines)]
+ self.extraline = None
+
+class ReprEntryNative(TerminalRepr):
+ style = "native"
+
+ def __init__(self, tblines):
+ self.lines = tblines
+
+ def toterminal(self, tw):
+ tw.write("".join(self.lines))
+
+class ReprEntry(TerminalRepr):
+ localssep = "_ "
+
+ def __init__(self, lines, reprfuncargs, reprlocals, filelocrepr, style):
+ self.lines = lines
+ self.reprfuncargs = reprfuncargs
+ self.reprlocals = reprlocals
+ self.reprfileloc = filelocrepr
+ self.style = style
+
+ def toterminal(self, tw):
+ if self.style == "short":
+ self.reprfileloc.toterminal(tw)
+ for line in self.lines:
+ red = line.startswith("E ")
+ tw.line(line, bold=True, red=red)
+ #tw.line("")
+ return
+ if self.reprfuncargs:
+ self.reprfuncargs.toterminal(tw)
+ for line in self.lines:
+ red = line.startswith("E ")
+ tw.line(line, bold=True, red=red)
+ if self.reprlocals:
+ #tw.sep(self.localssep, "Locals")
+ tw.line("")
+ self.reprlocals.toterminal(tw)
+ if self.reprfileloc:
+ if self.lines:
+ tw.line("")
+ self.reprfileloc.toterminal(tw)
+
+ def __str__(self):
+ return "%s\n%s\n%s" % ("\n".join(self.lines),
+ self.reprlocals,
+ self.reprfileloc)
+
+class ReprFileLocation(TerminalRepr):
+ def __init__(self, path, lineno, message):
+ self.path = str(path)
+ self.lineno = lineno
+ self.message = message
+
+ def toterminal(self, tw):
+ # filename and lineno output for each entry,
+        # using an output format that most editors understand
+ msg = self.message
+ i = msg.find("\n")
+ if i != -1:
+ msg = msg[:i]
+ tw.line("%s:%s: %s" %(self.path, self.lineno, msg))
+
+class ReprLocals(TerminalRepr):
+ def __init__(self, lines):
+ self.lines = lines
+
+ def toterminal(self, tw):
+ for line in self.lines:
+ tw.line(line)
+
+class ReprFuncArgs(TerminalRepr):
+ def __init__(self, args):
+ self.args = args
+
+ def toterminal(self, tw):
+ if self.args:
+ linesofar = ""
+ for name, value in self.args:
+ ns = "%s = %s" %(name, value)
+ if len(ns) + len(linesofar) + 2 > tw.fullwidth:
+ if linesofar:
+ tw.line(linesofar)
+ linesofar = ns
+ else:
+ if linesofar:
+ linesofar += ", " + ns
+ else:
+ linesofar = ns
+ if linesofar:
+ tw.line(linesofar)
+ tw.line("")
+
+
+
+oldbuiltins = {}
+
+def patch_builtins(assertion=True, compile=True):
+ """ put compile and AssertionError builtins to Python's builtins. """
+ if assertion:
+ from _pytest.assertion import reinterpret
+ l = oldbuiltins.setdefault('AssertionError', [])
+ l.append(py.builtin.builtins.AssertionError)
+ py.builtin.builtins.AssertionError = reinterpret.AssertionError
+ if compile:
+ import _pytest._code
+ l = oldbuiltins.setdefault('compile', [])
+ l.append(py.builtin.builtins.compile)
+ py.builtin.builtins.compile = _pytest._code.compile
+
+def unpatch_builtins(assertion=True, compile=True):
+ """ remove compile and AssertionError builtins from Python builtins. """
+ if assertion:
+ py.builtin.builtins.AssertionError = oldbuiltins['AssertionError'].pop()
+ if compile:
+ py.builtin.builtins.compile = oldbuiltins['compile'].pop()
+
+def getrawcode(obj, trycall=True):
+ """ return code object for given function. """
+ try:
+ return obj.__code__
+ except AttributeError:
+ obj = getattr(obj, 'im_func', obj)
+ obj = getattr(obj, 'func_code', obj)
+ obj = getattr(obj, 'f_code', obj)
+ obj = getattr(obj, '__code__', obj)
+ if trycall and not hasattr(obj, 'co_firstlineno'):
+ if hasattr(obj, '__call__') and not py.std.inspect.isclass(obj):
+ x = getrawcode(obj.__call__, trycall=False)
+ if hasattr(x, 'co_firstlineno'):
+ return x
+ return obj
+
+if sys.version_info[:2] >= (3, 5): # RecursionError introduced in 3.5
+ def is_recursion_error(excinfo):
+ return excinfo.errisinstance(RecursionError) # noqa
+else:
+ def is_recursion_error(excinfo):
+ if not excinfo.errisinstance(RuntimeError):
+ return False
+ try:
+ return "maximum recursion depth exceeded" in str(excinfo.value)
+ except UnicodeError:
+ return False
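A brief sketch of the ExceptionInfo/Traceback API defined above (illustrative only, not part of this commit):

    import _pytest._code

    def describe_current_failure():
        try:
            assert 1 + 1 == 3
        except AssertionError:
            excinfo = _pytest._code.ExceptionInfo()    # wraps sys.exc_info()
            visible = excinfo.traceback.filter()       # drops __tracebackhide__ entries
            print(excinfo.exconly())                   # one-line exception text
            print(excinfo.getrepr(style="short"))      # str()-able formatted traceback
            return len(visible)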
diff --git a/python/pytest/_pytest/_code/source.py b/python/pytest/_pytest/_code/source.py
new file mode 100644
index 000000000..a1521f8a2
--- /dev/null
+++ b/python/pytest/_pytest/_code/source.py
@@ -0,0 +1,421 @@
+from __future__ import generators
+
+from bisect import bisect_right
+import sys
+import inspect, tokenize
+import py
+from types import ModuleType
+cpy_compile = compile
+
+try:
+ import _ast
+ from _ast import PyCF_ONLY_AST as _AST_FLAG
+except ImportError:
+ _AST_FLAG = 0
+ _ast = None
+
+
+class Source(object):
+    """ an immutable object holding a source code fragment,
+ possibly deindenting it.
+ """
+ _compilecounter = 0
+ def __init__(self, *parts, **kwargs):
+ self.lines = lines = []
+ de = kwargs.get('deindent', True)
+ rstrip = kwargs.get('rstrip', True)
+ for part in parts:
+ if not part:
+ partlines = []
+ if isinstance(part, Source):
+ partlines = part.lines
+ elif isinstance(part, (tuple, list)):
+ partlines = [x.rstrip("\n") for x in part]
+ elif isinstance(part, py.builtin._basestring):
+ partlines = part.split('\n')
+ if rstrip:
+ while partlines:
+ if partlines[-1].strip():
+ break
+ partlines.pop()
+ else:
+ partlines = getsource(part, deindent=de).lines
+ if de:
+ partlines = deindent(partlines)
+ lines.extend(partlines)
+
+ def __eq__(self, other):
+ try:
+ return self.lines == other.lines
+ except AttributeError:
+ if isinstance(other, str):
+ return str(self) == other
+ return False
+
+ def __getitem__(self, key):
+ if isinstance(key, int):
+ return self.lines[key]
+ else:
+ if key.step not in (None, 1):
+ raise IndexError("cannot slice a Source with a step")
+ return self.__getslice__(key.start, key.stop)
+
+ def __len__(self):
+ return len(self.lines)
+
+ def __getslice__(self, start, end):
+ newsource = Source()
+ newsource.lines = self.lines[start:end]
+ return newsource
+
+ def strip(self):
+ """ return new source object with trailing
+ and leading blank lines removed.
+ """
+ start, end = 0, len(self)
+ while start < end and not self.lines[start].strip():
+ start += 1
+ while end > start and not self.lines[end-1].strip():
+ end -= 1
+ source = Source()
+ source.lines[:] = self.lines[start:end]
+ return source
+
+ def putaround(self, before='', after='', indent=' ' * 4):
+ """ return a copy of the source object with
+ 'before' and 'after' wrapped around it.
+ """
+ before = Source(before)
+ after = Source(after)
+ newsource = Source()
+ lines = [ (indent + line) for line in self.lines]
+ newsource.lines = before.lines + lines + after.lines
+ return newsource
+
+ def indent(self, indent=' ' * 4):
+ """ return a copy of the source object with
+ all lines indented by the given indent-string.
+ """
+ newsource = Source()
+ newsource.lines = [(indent+line) for line in self.lines]
+ return newsource
+
+ def getstatement(self, lineno, assertion=False):
+ """ return Source statement which contains the
+ given linenumber (counted from 0).
+ """
+ start, end = self.getstatementrange(lineno, assertion)
+ return self[start:end]
+
+ def getstatementrange(self, lineno, assertion=False):
+ """ return (start, end) tuple which spans the minimal
+            statement region which contains the given lineno.
+ """
+ if not (0 <= lineno < len(self)):
+ raise IndexError("lineno out of range")
+ ast, start, end = getstatementrange_ast(lineno, self)
+ return start, end
+
+ def deindent(self, offset=None):
+ """ return a new source object deindented by offset.
+ If offset is None then guess an indentation offset from
+ the first non-blank line. Subsequent lines which have a
+ lower indentation offset will be copied verbatim as
+ they are assumed to be part of multilines.
+ """
+ # XXX maybe use the tokenizer to properly handle multiline
+ # strings etc.pp?
+ newsource = Source()
+ newsource.lines[:] = deindent(self.lines, offset)
+ return newsource
+
+ def isparseable(self, deindent=True):
+ """ return True if source is parseable, heuristically
+ deindenting it by default.
+ """
+ try:
+ import parser
+ except ImportError:
+ syntax_checker = lambda x: compile(x, 'asd', 'exec')
+ else:
+ syntax_checker = parser.suite
+
+ if deindent:
+ source = str(self.deindent())
+ else:
+ source = str(self)
+ try:
+ #compile(source+'\n', "x", "exec")
+ syntax_checker(source+'\n')
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ return False
+ else:
+ return True
+
+ def __str__(self):
+ return "\n".join(self.lines)
+
+ def compile(self, filename=None, mode='exec',
+ flag=generators.compiler_flag,
+ dont_inherit=0, _genframe=None):
+ """ return compiled code object. if filename is None
+ invent an artificial filename which displays
+ the source/line position of the caller frame.
+ """
+ if not filename or py.path.local(filename).check(file=0):
+ if _genframe is None:
+ _genframe = sys._getframe(1) # the caller
+ fn,lineno = _genframe.f_code.co_filename, _genframe.f_lineno
+ base = "<%d-codegen " % self._compilecounter
+ self.__class__._compilecounter += 1
+ if not filename:
+ filename = base + '%s:%d>' % (fn, lineno)
+ else:
+ filename = base + '%r %s:%d>' % (filename, fn, lineno)
+ source = "\n".join(self.lines) + '\n'
+ try:
+ co = cpy_compile(source, filename, mode, flag)
+ except SyntaxError:
+ ex = sys.exc_info()[1]
+ # re-represent syntax errors from parsing python strings
+ msglines = self.lines[:ex.lineno]
+ if ex.offset:
+ msglines.append(" "*ex.offset + '^')
+ msglines.append("(code was compiled probably from here: %s)" % filename)
+ newex = SyntaxError('\n'.join(msglines))
+ newex.offset = ex.offset
+ newex.lineno = ex.lineno
+ newex.text = ex.text
+ raise newex
+ else:
+ if flag & _AST_FLAG:
+ return co
+ lines = [(x + "\n") for x in self.lines]
+ if sys.version_info[0] >= 3:
+ # XXX py3's inspect.getsourcefile() checks for a module
+ # and a pep302 __loader__ ... we don't have a module
+ # at code compile-time so we need to fake it here
+ m = ModuleType("_pycodecompile_pseudo_module")
+ py.std.inspect.modulesbyfile[filename] = None
+ py.std.sys.modules[None] = m
+ m.__loader__ = 1
+ py.std.linecache.cache[filename] = (1, None, lines, filename)
+ return co
+
+#
+# public API shortcut functions
+#
+
+def compile_(source, filename=None, mode='exec', flags=
+ generators.compiler_flag, dont_inherit=0):
+ """ compile the given source to a raw code object,
+ and maintain an internal cache which allows later
+ retrieval of the source code for the code object
+ and any recursively created code objects.
+ """
+ if _ast is not None and isinstance(source, _ast.AST):
+ # XXX should Source support having AST?
+ return cpy_compile(source, filename, mode, flags, dont_inherit)
+ _genframe = sys._getframe(1) # the caller
+ s = Source(source)
+ co = s.compile(filename, mode, flags, _genframe=_genframe)
+ return co
+
+
+def getfslineno(obj):
+ """ Return source location (path, lineno) for the given object.
+ If the source cannot be determined return ("", -1)
+ """
+ import _pytest._code
+ try:
+ code = _pytest._code.Code(obj)
+ except TypeError:
+ try:
+ fn = (py.std.inspect.getsourcefile(obj) or
+ py.std.inspect.getfile(obj))
+ except TypeError:
+ return "", -1
+
+ fspath = fn and py.path.local(fn) or None
+ lineno = -1
+ if fspath:
+ try:
+ _, lineno = findsource(obj)
+ except IOError:
+ pass
+ else:
+ fspath = code.path
+ lineno = code.firstlineno
+ assert isinstance(lineno, int)
+ return fspath, lineno
+
+#
+# helper functions
+#
+
+def findsource(obj):
+ try:
+ sourcelines, lineno = py.std.inspect.findsource(obj)
+ except py.builtin._sysex:
+ raise
+ except:
+ return None, -1
+ source = Source()
+ source.lines = [line.rstrip() for line in sourcelines]
+ return source, lineno
+
+def getsource(obj, **kwargs):
+ import _pytest._code
+ obj = _pytest._code.getrawcode(obj)
+ try:
+ strsrc = inspect.getsource(obj)
+ except IndentationError:
+ strsrc = "\"Buggy python version consider upgrading, cannot get source\""
+ assert isinstance(strsrc, str)
+ return Source(strsrc, **kwargs)
+
+def deindent(lines, offset=None):
+ if offset is None:
+ for line in lines:
+ line = line.expandtabs()
+ s = line.lstrip()
+ if s:
+ offset = len(line)-len(s)
+ break
+ else:
+ offset = 0
+ if offset == 0:
+ return list(lines)
+ newlines = []
+ def readline_generator(lines):
+ for line in lines:
+ yield line + '\n'
+ while True:
+ yield ''
+
+ it = readline_generator(lines)
+
+ try:
+ for _, _, (sline, _), (eline, _), _ in tokenize.generate_tokens(lambda: next(it)):
+ if sline > len(lines):
+ break # End of input reached
+ if sline > len(newlines):
+ line = lines[sline - 1].expandtabs()
+ if line.lstrip() and line[:offset].isspace():
+ line = line[offset:] # Deindent
+ newlines.append(line)
+
+ for i in range(sline, eline):
+ # Don't deindent continuing lines of
+ # multiline tokens (i.e. multiline strings)
+ newlines.append(lines[i])
+ except (IndentationError, tokenize.TokenError):
+ pass
+ # Add any lines we didn't see. E.g. if an exception was raised.
+ newlines.extend(lines[len(newlines):])
+ return newlines
+
+
+def get_statement_startend2(lineno, node):
+ import ast
+ # flatten all statements and except handlers into one lineno-list
+ # AST's line numbers start indexing at 1
+ l = []
+ for x in ast.walk(node):
+ if isinstance(x, _ast.stmt) or isinstance(x, _ast.ExceptHandler):
+ l.append(x.lineno - 1)
+ for name in "finalbody", "orelse":
+ val = getattr(x, name, None)
+ if val:
+ # treat the finally/orelse part as its own statement
+ l.append(val[0].lineno - 1 - 1)
+ l.sort()
+ insert_index = bisect_right(l, lineno)
+ start = l[insert_index - 1]
+ if insert_index >= len(l):
+ end = None
+ else:
+ end = l[insert_index]
+ return start, end
+
+
+def getstatementrange_ast(lineno, source, assertion=False, astnode=None):
+ if astnode is None:
+ content = str(source)
+ if sys.version_info < (2,7):
+ content += "\n"
+ try:
+ astnode = compile(content, "source", "exec", 1024) # 1024 for AST
+ except ValueError:
+ start, end = getstatementrange_old(lineno, source, assertion)
+ return None, start, end
+ start, end = get_statement_startend2(lineno, astnode)
+ # we need to correct the end:
+ # - ast-parsing strips comments
+ # - there might be empty lines
+ # - we might have lesser indented code blocks at the end
+ if end is None:
+ end = len(source.lines)
+
+ if end > start + 1:
+ # make sure we don't span differently indented code blocks
+        # by using the BlockFinder helper which inspect.getsource() itself uses
+ block_finder = inspect.BlockFinder()
+ # if we start with an indented line, put blockfinder to "started" mode
+ block_finder.started = source.lines[start][0].isspace()
+ it = ((x + "\n") for x in source.lines[start:end])
+ try:
+ for tok in tokenize.generate_tokens(lambda: next(it)):
+ block_finder.tokeneater(*tok)
+ except (inspect.EndOfBlock, IndentationError):
+ end = block_finder.last + start
+ except Exception:
+ pass
+
+ # the end might still point to a comment or empty line, correct it
+ while end:
+ line = source.lines[end - 1].lstrip()
+ if line.startswith("#") or not line:
+ end -= 1
+ else:
+ break
+ return astnode, start, end
+
+
+def getstatementrange_old(lineno, source, assertion=False):
+ """ return (start, end) tuple which spans the minimal
+    statement region which contains the given lineno.
+ raise an IndexError if no such statementrange can be found.
+ """
+ # XXX this logic is only used on python2.4 and below
+ # 1. find the start of the statement
+ from codeop import compile_command
+ for start in range(lineno, -1, -1):
+ if assertion:
+ line = source.lines[start]
+ # the following lines are not fully tested, change with care
+ if 'super' in line and 'self' in line and '__init__' in line:
+ raise IndexError("likely a subclass")
+ if "assert" not in line and "raise" not in line:
+ continue
+ trylines = source.lines[start:lineno+1]
+ # quick hack to prepare parsing an indented line with
+ # compile_command() (which errors on "return" outside defs)
+ trylines.insert(0, 'def xxx():')
+ trysource = '\n '.join(trylines)
+ # ^ space here
+ try:
+ compile_command(trysource)
+ except (SyntaxError, OverflowError, ValueError):
+ continue
+
+ # 2. find the end of the statement
+ for end in range(lineno+1, len(source)+1):
+ trysource = source[start:end]
+ if trysource.isparseable():
+ return start, end
+ raise SyntaxError("no valid source range around line %d " % (lineno,))
+
+
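A short sketch of the Source helpers above (illustrative only, not part of this commit; `helper` is an invented function defined in a regular source file so inspect can find it):

    from _pytest._code import Source, getfslineno

    def helper():
        return 42

    src = Source(helper)                  # deindented source lines of the function
    print(str(src))                       # "def helper():\n    return 42"
    print(src.getstatement(1))            # statement containing line index 1
    fspath, lineno = getfslineno(helper)  # (py.path.local, 0-based first line)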
diff --git a/python/pytest/_pytest/_pluggy.py b/python/pytest/_pytest/_pluggy.py
new file mode 100644
index 000000000..87d32cf8d
--- /dev/null
+++ b/python/pytest/_pytest/_pluggy.py
@@ -0,0 +1,11 @@
+"""
+imports symbols from vendored "pluggy" if available, otherwise
+falls back to importing "pluggy" from the default namespace.
+"""
+
+try:
+ from _pytest.vendored_packages.pluggy import * # noqa
+ from _pytest.vendored_packages.pluggy import __version__ # noqa
+except ImportError:
+ from pluggy import * # noqa
+ from pluggy import __version__ # noqa
diff --git a/python/pytest/_pytest/assertion/__init__.py b/python/pytest/_pytest/assertion/__init__.py
new file mode 100644
index 000000000..6921deb2a
--- /dev/null
+++ b/python/pytest/_pytest/assertion/__init__.py
@@ -0,0 +1,176 @@
+"""
+support for presenting detailed information in failing assertions.
+"""
+import py
+import os
+import sys
+from _pytest.monkeypatch import monkeypatch
+from _pytest.assertion import util
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("debugconfig")
+ group.addoption('--assert',
+ action="store",
+ dest="assertmode",
+ choices=("rewrite", "reinterp", "plain",),
+ default="rewrite",
+ metavar="MODE",
+ help="""control assertion debugging tools. 'plain'
+ performs no assertion debugging. 'reinterp'
+ reinterprets assert statements after they failed
+ to provide assertion expression information.
+ 'rewrite' (the default) rewrites assert
+ statements in test modules on import to
+ provide assert expression information. """)
+ group.addoption('--no-assert',
+ action="store_true",
+ default=False,
+ dest="noassert",
+ help="DEPRECATED equivalent to --assert=plain")
+ group.addoption('--nomagic', '--no-magic',
+ action="store_true",
+ default=False,
+ help="DEPRECATED equivalent to --assert=plain")
+
+
+class AssertionState:
+ """State for the assertion plugin."""
+
+ def __init__(self, config, mode):
+ self.mode = mode
+ self.trace = config.trace.root.get("assertion")
+
+
+def pytest_configure(config):
+ mode = config.getvalue("assertmode")
+ if config.getvalue("noassert") or config.getvalue("nomagic"):
+ mode = "plain"
+ if mode == "rewrite":
+ try:
+ import ast # noqa
+ except ImportError:
+ mode = "reinterp"
+ else:
+ # Both Jython and CPython 2.6.0 have AST bugs that make the
+ # assertion rewriting hook malfunction.
+ if (sys.platform.startswith('java') or
+ sys.version_info[:3] == (2, 6, 0)):
+ mode = "reinterp"
+ if mode != "plain":
+ _load_modules(mode)
+ m = monkeypatch()
+ config._cleanup.append(m.undo)
+ m.setattr(py.builtin.builtins, 'AssertionError',
+ reinterpret.AssertionError) # noqa
+ hook = None
+ if mode == "rewrite":
+ hook = rewrite.AssertionRewritingHook() # noqa
+ sys.meta_path.insert(0, hook)
+ warn_about_missing_assertion(mode)
+ config._assertstate = AssertionState(config, mode)
+ config._assertstate.hook = hook
+ config._assertstate.trace("configured with mode set to %r" % (mode,))
+ def undo():
+ hook = config._assertstate.hook
+ if hook is not None and hook in sys.meta_path:
+ sys.meta_path.remove(hook)
+ config.add_cleanup(undo)
+
+
+def pytest_collection(session):
+ # this hook is only called when test modules are collected
+ # so for example not in the master process of pytest-xdist
+ # (which does not collect test modules)
+ hook = session.config._assertstate.hook
+ if hook is not None:
+ hook.set_session(session)
+
+
+def _running_on_ci():
+ """Check if we're currently running on a CI system."""
+ env_vars = ['CI', 'BUILD_NUMBER']
+ return any(var in os.environ for var in env_vars)
+
+
+def pytest_runtest_setup(item):
+    """Set up the pytest_assertrepr_compare hook
+
+    The reinterpret and rewrite modules will use util._reprcompare if
+ it exists to use custom reporting via the
+ pytest_assertrepr_compare hook. This sets up this custom
+ comparison for the test.
+ """
+ def callbinrepr(op, left, right):
+ """Call the pytest_assertrepr_compare hook and prepare the result
+
+ This uses the first result from the hook and then ensures the
+ following:
+ * Overly verbose explanations are dropped unless -vv was used or
+ running on a CI.
+ * Embedded newlines are escaped to help util.format_explanation()
+ later.
+ * If the rewrite mode is used embedded %-characters are replaced
+ to protect later % formatting.
+
+ The result can be formatted by util.format_explanation() for
+ pretty printing.
+ """
+ hook_result = item.ihook.pytest_assertrepr_compare(
+ config=item.config, op=op, left=left, right=right)
+ for new_expl in hook_result:
+ if new_expl:
+ if (sum(len(p) for p in new_expl[1:]) > 80*8 and
+ item.config.option.verbose < 2 and
+ not _running_on_ci()):
+ show_max = 10
+ truncated_lines = len(new_expl) - show_max
+ new_expl[show_max:] = [py.builtin._totext(
+ 'Detailed information truncated (%d more lines)'
+ ', use "-vv" to show' % truncated_lines)]
+ new_expl = [line.replace("\n", "\\n") for line in new_expl]
+ res = py.builtin._totext("\n~").join(new_expl)
+ if item.config.getvalue("assertmode") == "rewrite":
+ res = res.replace("%", "%%")
+ return res
+ util._reprcompare = callbinrepr
+
+
+def pytest_runtest_teardown(item):
+ util._reprcompare = None
+
+
+def pytest_sessionfinish(session):
+ hook = session.config._assertstate.hook
+ if hook is not None:
+ hook.session = None
+
+
+def _load_modules(mode):
+ """Lazily import assertion related code."""
+ global rewrite, reinterpret
+ from _pytest.assertion import reinterpret # noqa
+ if mode == "rewrite":
+ from _pytest.assertion import rewrite # noqa
+
+
+def warn_about_missing_assertion(mode):
+ try:
+ assert False
+ except AssertionError:
+ pass
+ else:
+ if mode == "rewrite":
+ specifically = ("assertions which are not in test modules "
+ "will be ignored")
+ else:
+ specifically = "failing tests may report as passing"
+
+ sys.stderr.write("WARNING: " + specifically +
+ " because assert statements are not executed "
+ "by the underlying Python interpreter "
+ "(are you using python -O?)\n")
+
+
+# Expose this plugin's implementation for the pytest_assertrepr_compare hook
+pytest_assertrepr_compare = util.assertrepr_compare
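The plugin above routes comparison reporting through the pytest_assertrepr_compare hook; a minimal conftest.py sketch of a project-level implementation (the Money class is a hypothetical example, not part of this commit):

    # conftest.py
    class Money:
        def __init__(self, cents):
            self.cents = cents
        def __eq__(self, other):
            return self.cents == other.cents

    def pytest_assertrepr_compare(config, op, left, right):
        # return a list of explanation lines, or None to use the default report
        if isinstance(left, Money) and isinstance(right, Money) and op == "==":
            return ["Comparing Money instances:",
                    "   %d cents != %d cents" % (left.cents, right.cents)]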
diff --git a/python/pytest/_pytest/assertion/reinterpret.py b/python/pytest/_pytest/assertion/reinterpret.py
new file mode 100644
index 000000000..f4262c3ac
--- /dev/null
+++ b/python/pytest/_pytest/assertion/reinterpret.py
@@ -0,0 +1,407 @@
+"""
+Find intermediate evaluation results in assert statements through builtin AST.
+"""
+import ast
+import sys
+
+import _pytest._code
+import py
+from _pytest.assertion import util
+u = py.builtin._totext
+
+
+class AssertionError(util.BuiltinAssertionError):
+ def __init__(self, *args):
+ util.BuiltinAssertionError.__init__(self, *args)
+ if args:
+ # on Python2.6 we get len(args)==2 for: assert 0, (x,y)
+ # on Python2.7 and above we always get len(args) == 1
+ # with args[0] being the (x,y) tuple.
+ if len(args) > 1:
+ toprint = args
+ else:
+ toprint = args[0]
+ try:
+ self.msg = u(toprint)
+ except Exception:
+ self.msg = u(
+ "<[broken __repr__] %s at %0xd>"
+ % (toprint.__class__, id(toprint)))
+ else:
+ f = _pytest._code.Frame(sys._getframe(1))
+ try:
+ source = f.code.fullsource
+ if source is not None:
+ try:
+ source = source.getstatement(f.lineno, assertion=True)
+ except IndexError:
+ source = None
+ else:
+ source = str(source.deindent()).strip()
+ except py.error.ENOENT:
+ source = None
+ # this can also occur during reinterpretation, when the
+ # co_filename is set to "<run>".
+ if source:
+ self.msg = reinterpret(source, f, should_fail=True)
+ else:
+ self.msg = "<could not determine information>"
+ if not self.args:
+ self.args = (self.msg,)
+
+if sys.version_info > (3, 0):
+ AssertionError.__module__ = "builtins"
+
+if sys.platform.startswith("java"):
+ # See http://bugs.jython.org/issue1497
+ _exprs = ("BoolOp", "BinOp", "UnaryOp", "Lambda", "IfExp", "Dict",
+ "ListComp", "GeneratorExp", "Yield", "Compare", "Call",
+ "Repr", "Num", "Str", "Attribute", "Subscript", "Name",
+ "List", "Tuple")
+ _stmts = ("FunctionDef", "ClassDef", "Return", "Delete", "Assign",
+ "AugAssign", "Print", "For", "While", "If", "With", "Raise",
+ "TryExcept", "TryFinally", "Assert", "Import", "ImportFrom",
+ "Exec", "Global", "Expr", "Pass", "Break", "Continue")
+ _expr_nodes = set(getattr(ast, name) for name in _exprs)
+ _stmt_nodes = set(getattr(ast, name) for name in _stmts)
+ def _is_ast_expr(node):
+ return node.__class__ in _expr_nodes
+ def _is_ast_stmt(node):
+ return node.__class__ in _stmt_nodes
+else:
+ def _is_ast_expr(node):
+ return isinstance(node, ast.expr)
+ def _is_ast_stmt(node):
+ return isinstance(node, ast.stmt)
+
+try:
+ _Starred = ast.Starred
+except AttributeError:
+ # Python 2. Define a dummy class so isinstance() will always be False.
+ class _Starred(object): pass
+
+
+class Failure(Exception):
+ """Error found while interpreting AST."""
+
+ def __init__(self, explanation=""):
+ self.cause = sys.exc_info()
+ self.explanation = explanation
+
+
+def reinterpret(source, frame, should_fail=False):
+ mod = ast.parse(source)
+ visitor = DebugInterpreter(frame)
+ try:
+ visitor.visit(mod)
+ except Failure:
+ failure = sys.exc_info()[1]
+ return getfailure(failure)
+ if should_fail:
+ return ("(assertion failed, but when it was re-run for "
+ "printing intermediate values, it did not fail. Suggestions: "
+ "compute assert expression before the assert or use --assert=plain)")
+
+def run(offending_line, frame=None):
+ if frame is None:
+ frame = _pytest._code.Frame(sys._getframe(1))
+ return reinterpret(offending_line, frame)
+
+def getfailure(e):
+ explanation = util.format_explanation(e.explanation)
+ value = e.cause[1]
+ if str(value):
+ lines = explanation.split('\n')
+ lines[0] += " << %s" % (value,)
+ explanation = '\n'.join(lines)
+ text = "%s: %s" % (e.cause[0].__name__, explanation)
+ if text.startswith('AssertionError: assert '):
+ text = text[16:]
+ return text
+
+operator_map = {
+ ast.BitOr : "|",
+ ast.BitXor : "^",
+ ast.BitAnd : "&",
+ ast.LShift : "<<",
+ ast.RShift : ">>",
+ ast.Add : "+",
+ ast.Sub : "-",
+ ast.Mult : "*",
+ ast.Div : "/",
+ ast.FloorDiv : "//",
+ ast.Mod : "%",
+ ast.Eq : "==",
+ ast.NotEq : "!=",
+ ast.Lt : "<",
+ ast.LtE : "<=",
+ ast.Gt : ">",
+ ast.GtE : ">=",
+ ast.Pow : "**",
+ ast.Is : "is",
+ ast.IsNot : "is not",
+ ast.In : "in",
+ ast.NotIn : "not in"
+}
+
+unary_map = {
+ ast.Not : "not %s",
+ ast.Invert : "~%s",
+ ast.USub : "-%s",
+ ast.UAdd : "+%s"
+}
+
+
+class DebugInterpreter(ast.NodeVisitor):
+    """Interpret AST nodes to glean useful debugging information. """
+
+ def __init__(self, frame):
+ self.frame = frame
+
+ def generic_visit(self, node):
+ # Fallback when we don't have a special implementation.
+ if _is_ast_expr(node):
+ mod = ast.Expression(node)
+ co = self._compile(mod)
+ try:
+ result = self.frame.eval(co)
+ except Exception:
+ raise Failure()
+ explanation = self.frame.repr(result)
+ return explanation, result
+ elif _is_ast_stmt(node):
+ mod = ast.Module([node])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co)
+ except Exception:
+ raise Failure()
+ return None, None
+ else:
+ raise AssertionError("can't handle %s" %(node,))
+
+ def _compile(self, source, mode="eval"):
+ return compile(source, "<assertion interpretation>", mode)
+
+ def visit_Expr(self, expr):
+ return self.visit(expr.value)
+
+ def visit_Module(self, mod):
+ for stmt in mod.body:
+ self.visit(stmt)
+
+ def visit_Name(self, name):
+ explanation, result = self.generic_visit(name)
+ # See if the name is local.
+ source = "%r in locals() is not globals()" % (name.id,)
+ co = self._compile(source)
+ try:
+ local = self.frame.eval(co)
+ except Exception:
+ # have to assume it isn't
+ local = None
+ if local is None or not self.frame.is_true(local):
+ return name.id, result
+ return explanation, result
+
+ def visit_Compare(self, comp):
+ left = comp.left
+ left_explanation, left_result = self.visit(left)
+ for op, next_op in zip(comp.ops, comp.comparators):
+ next_explanation, next_result = self.visit(next_op)
+ op_symbol = operator_map[op.__class__]
+ explanation = "%s %s %s" % (left_explanation, op_symbol,
+ next_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (op_symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=next_result)
+ except Exception:
+ raise Failure(explanation)
+ try:
+ if not self.frame.is_true(result):
+ break
+ except KeyboardInterrupt:
+ raise
+ except:
+ break
+ left_explanation, left_result = next_explanation, next_result
+
+ if util._reprcompare is not None:
+ res = util._reprcompare(op_symbol, left_result, next_result)
+ if res:
+ explanation = res
+ return explanation, result
+
+ def visit_BoolOp(self, boolop):
+ is_or = isinstance(boolop.op, ast.Or)
+ explanations = []
+ for operand in boolop.values:
+ explanation, result = self.visit(operand)
+ explanations.append(explanation)
+ if result == is_or:
+ break
+ name = is_or and " or " or " and "
+ explanation = "(" + name.join(explanations) + ")"
+ return explanation, result
+
+ def visit_UnaryOp(self, unary):
+ pattern = unary_map[unary.op.__class__]
+ operand_explanation, operand_result = self.visit(unary.operand)
+ explanation = pattern % (operand_explanation,)
+ co = self._compile(pattern % ("__exprinfo_expr",))
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=operand_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_BinOp(self, binop):
+ left_explanation, left_result = self.visit(binop.left)
+ right_explanation, right_result = self.visit(binop.right)
+ symbol = operator_map[binop.op.__class__]
+ explanation = "(%s %s %s)" % (left_explanation, symbol,
+ right_explanation)
+ source = "__exprinfo_left %s __exprinfo_right" % (symbol,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, __exprinfo_left=left_result,
+ __exprinfo_right=right_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, result
+
+ def visit_Call(self, call):
+ func_explanation, func = self.visit(call.func)
+ arg_explanations = []
+ ns = {"__exprinfo_func" : func}
+ arguments = []
+ for arg in call.args:
+ arg_explanation, arg_result = self.visit(arg)
+ if isinstance(arg, _Starred):
+ arg_name = "__exprinfo_star"
+ ns[arg_name] = arg_result
+ arguments.append("*%s" % (arg_name,))
+ arg_explanations.append("*%s" % (arg_explanation,))
+ else:
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ ns[arg_name] = arg_result
+ arguments.append(arg_name)
+ arg_explanations.append(arg_explanation)
+ for keyword in call.keywords:
+ arg_explanation, arg_result = self.visit(keyword.value)
+ if keyword.arg:
+ arg_name = "__exprinfo_%s" % (len(ns),)
+ keyword_source = "%s=%%s" % (keyword.arg)
+ arguments.append(keyword_source % (arg_name,))
+ arg_explanations.append(keyword_source % (arg_explanation,))
+ else:
+ arg_name = "__exprinfo_kwds"
+ arguments.append("**%s" % (arg_name,))
+ arg_explanations.append("**%s" % (arg_explanation,))
+
+ ns[arg_name] = arg_result
+
+ if getattr(call, 'starargs', None):
+ arg_explanation, arg_result = self.visit(call.starargs)
+ arg_name = "__exprinfo_star"
+ ns[arg_name] = arg_result
+ arguments.append("*%s" % (arg_name,))
+ arg_explanations.append("*%s" % (arg_explanation,))
+
+ if getattr(call, 'kwargs', None):
+ arg_explanation, arg_result = self.visit(call.kwargs)
+ arg_name = "__exprinfo_kwds"
+ ns[arg_name] = arg_result
+ arguments.append("**%s" % (arg_name,))
+ arg_explanations.append("**%s" % (arg_explanation,))
+ args_explained = ", ".join(arg_explanations)
+ explanation = "%s(%s)" % (func_explanation, args_explained)
+ args = ", ".join(arguments)
+ source = "__exprinfo_func(%s)" % (args,)
+ co = self._compile(source)
+ try:
+ result = self.frame.eval(co, **ns)
+ except Exception:
+ raise Failure(explanation)
+ pattern = "%s\n{%s = %s\n}"
+ rep = self.frame.repr(result)
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def _is_builtin_name(self, name):
+ pattern = "%r not in globals() and %r not in locals()"
+ source = pattern % (name.id, name.id)
+ co = self._compile(source)
+ try:
+ return self.frame.eval(co)
+ except Exception:
+ return False
+
+ def visit_Attribute(self, attr):
+ if not isinstance(attr.ctx, ast.Load):
+ return self.generic_visit(attr)
+ source_explanation, source_result = self.visit(attr.value)
+ explanation = "%s.%s" % (source_explanation, attr.attr)
+ source = "__exprinfo_expr.%s" % (attr.attr,)
+ co = self._compile(source)
+ try:
+ try:
+ result = self.frame.eval(co, __exprinfo_expr=source_result)
+ except AttributeError:
+ # Maybe the attribute name needs to be mangled?
+ if not attr.attr.startswith("__") or attr.attr.endswith("__"):
+ raise
+ source = "getattr(__exprinfo_expr.__class__, '__name__', '')"
+ co = self._compile(source)
+ class_name = self.frame.eval(co, __exprinfo_expr=source_result)
+ mangled_attr = "_" + class_name + attr.attr
+ source = "__exprinfo_expr.%s" % (mangled_attr,)
+ co = self._compile(source)
+ result = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ raise Failure(explanation)
+ explanation = "%s\n{%s = %s.%s\n}" % (self.frame.repr(result),
+ self.frame.repr(result),
+ source_explanation, attr.attr)
+ # Check if the attr is from an instance.
+ source = "%r in getattr(__exprinfo_expr, '__dict__', {})"
+ source = source % (attr.attr,)
+ co = self._compile(source)
+ try:
+ from_instance = self.frame.eval(co, __exprinfo_expr=source_result)
+ except Exception:
+ from_instance = None
+ if from_instance is None or self.frame.is_true(from_instance):
+ rep = self.frame.repr(result)
+ pattern = "%s\n{%s = %s\n}"
+ explanation = pattern % (rep, rep, explanation)
+ return explanation, result
+
+ def visit_Assert(self, assrt):
+ test_explanation, test_result = self.visit(assrt.test)
+ explanation = "assert %s" % (test_explanation,)
+ if not self.frame.is_true(test_result):
+ try:
+ raise util.BuiltinAssertionError
+ except Exception:
+ raise Failure(explanation)
+ return explanation, test_result
+
+ def visit_Assign(self, assign):
+ value_explanation, value_result = self.visit(assign.value)
+ explanation = "... = %s" % (value_explanation,)
+ name = ast.Name("__exprinfo_expr", ast.Load(),
+ lineno=assign.value.lineno,
+ col_offset=assign.value.col_offset)
+ new_assign = ast.Assign(assign.targets, name, lineno=assign.lineno,
+ col_offset=assign.col_offset)
+ mod = ast.Module([new_assign])
+ co = self._compile(mod, "exec")
+ try:
+ self.frame.exec_(co, __exprinfo_expr=value_result)
+ except Exception:
+ raise Failure(explanation)
+ return explanation, value_result
+
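The DebugInterpreter above re-parses the failing assert and evaluates each sub-expression in the original frame to build its explanation. A minimal, self-contained sketch of that idea (illustration only; the name explain_compare and its namespace argument are hypothetical and not part of pytest):

    import ast

    _ops = {ast.Eq: "==", ast.NotEq: "!=", ast.Lt: "<", ast.LtE: "<=",
            ast.Gt: ">", ast.GtE: ">="}

    def explain_compare(source, namespace):
        # Parse a simple comparison, evaluate both operands in the given
        # namespace and report their concrete values, much like visit_Compare().
        expr = ast.parse(source, mode="eval").body
        left = eval(compile(ast.Expression(expr.left), "<expl>", "eval"), namespace)
        right = eval(compile(ast.Expression(expr.comparators[0]), "<expl>", "eval"), namespace)
        symbol = _ops[expr.ops[0].__class__]
        return "assert %r %s %r" % (left, symbol, right)

    print(explain_compare("x + 1 == y", {"x": 1, "y": 3}))   # assert 2 == 3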
diff --git a/python/pytest/_pytest/assertion/rewrite.py b/python/pytest/_pytest/assertion/rewrite.py
new file mode 100644
index 000000000..14b8e49db
--- /dev/null
+++ b/python/pytest/_pytest/assertion/rewrite.py
@@ -0,0 +1,885 @@
+"""Rewrite assertion AST to produce nice error messages"""
+
+import ast
+import errno
+import itertools
+import imp
+import marshal
+import os
+import re
+import struct
+import sys
+import types
+
+import py
+from _pytest.assertion import util
+
+
+# pytest caches rewritten pycs in __pycache__.
+if hasattr(imp, "get_tag"):
+ PYTEST_TAG = imp.get_tag() + "-PYTEST"
+else:
+ if hasattr(sys, "pypy_version_info"):
+ impl = "pypy"
+ elif sys.platform == "java":
+ impl = "jython"
+ else:
+ impl = "cpython"
+ ver = sys.version_info
+ PYTEST_TAG = "%s-%s%s-PYTEST" % (impl, ver[0], ver[1])
+ del ver, impl
+
+PYC_EXT = ".py" + (__debug__ and "c" or "o")
+PYC_TAIL = "." + PYTEST_TAG + PYC_EXT
+
+REWRITE_NEWLINES = sys.version_info[:2] != (2, 7) and sys.version_info < (3, 2)
+ASCII_IS_DEFAULT_ENCODING = sys.version_info[0] < 3
+
+if sys.version_info >= (3,5):
+ ast_Call = ast.Call
+else:
+ ast_Call = lambda a,b,c: ast.Call(a, b, c, None, None)
+
+
+class AssertionRewritingHook(object):
+ """PEP302 Import hook which rewrites asserts."""
+
+ def __init__(self):
+ self.session = None
+ self.modules = {}
+ self._register_with_pkg_resources()
+
+ def set_session(self, session):
+ self.fnpats = session.config.getini("python_files")
+ self.session = session
+
+ def find_module(self, name, path=None):
+ if self.session is None:
+ return None
+ sess = self.session
+ state = sess.config._assertstate
+ state.trace("find_module called for: %s" % name)
+ names = name.rsplit(".", 1)
+ lastname = names[-1]
+ pth = None
+ if path is not None:
+ # Starting with Python 3.3, path is a _NamespacePath(), which
+ # causes problems if not converted to list.
+ path = list(path)
+ if len(path) == 1:
+ pth = path[0]
+ if pth is None:
+ try:
+ fd, fn, desc = imp.find_module(lastname, path)
+ except ImportError:
+ return None
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ if tp == imp.PY_COMPILED:
+ if hasattr(imp, "source_from_cache"):
+ fn = imp.source_from_cache(fn)
+ else:
+ fn = fn[:-1]
+ elif tp != imp.PY_SOURCE:
+ # Don't know what this is.
+ return None
+ else:
+ fn = os.path.join(pth, name.rpartition(".")[2] + ".py")
+ fn_pypath = py.path.local(fn)
+ # Is this a test file?
+ if not sess.isinitpath(fn):
+ # We have to be very careful here because imports in this code can
+ # trigger a cycle.
+ self.session = None
+ try:
+ for pat in self.fnpats:
+ if fn_pypath.fnmatch(pat):
+ state.trace("matched test file %r" % (fn,))
+ break
+ else:
+ return None
+ finally:
+ self.session = sess
+ else:
+ state.trace("matched test file (was specified on cmdline): %r" %
+ (fn,))
+ # The requested module looks like a test file, so rewrite it. This is
+ # the most magical part of the process: load the source, rewrite the
+ # asserts, and load the rewritten source. We also cache the rewritten
+ # module code in a special pyc. We must be aware of the possibility of
+ # concurrent pytest processes rewriting and loading pycs. To avoid
+ # tricky race conditions, we maintain the following invariant: The
+ # cached pyc is always a complete, valid pyc. Operations on it must be
+ # atomic. POSIX's atomic rename comes in handy.
+ write = not sys.dont_write_bytecode
+ cache_dir = os.path.join(fn_pypath.dirname, "__pycache__")
+ if write:
+ try:
+ os.mkdir(cache_dir)
+ except OSError:
+ e = sys.exc_info()[1].errno
+ if e == errno.EEXIST:
+ # Either the __pycache__ directory already exists (the
+ # common case) or it's blocked by a non-dir node. In the
+ # latter case, we'll ignore it in _write_pyc.
+ pass
+ elif e in [errno.ENOENT, errno.ENOTDIR]:
+ # One of the path components was not a directory, likely
+ # because we're in a zip file.
+ write = False
+ elif e in [errno.EACCES, errno.EROFS, errno.EPERM]:
+ state.trace("read only directory: %r" % fn_pypath.dirname)
+ write = False
+ else:
+ raise
+ cache_name = fn_pypath.basename[:-3] + PYC_TAIL
+ pyc = os.path.join(cache_dir, cache_name)
+ # Notice that even if we're in a read-only directory, I'm going
+ # to check for a cached pyc. This may not be optimal...
+ co = _read_pyc(fn_pypath, pyc, state.trace)
+ if co is None:
+ state.trace("rewriting %r" % (fn,))
+ source_stat, co = _rewrite_test(state, fn_pypath)
+ if co is None:
+ # Probably a SyntaxError in the test.
+ return None
+ if write:
+ _make_rewritten_pyc(state, source_stat, pyc, co)
+ else:
+ state.trace("found cached rewritten pyc for %r" % (fn,))
+ self.modules[name] = co, pyc
+ return self
+
+ def load_module(self, name):
+ # If there is an existing module object named 'fullname' in
+ # sys.modules, the loader must use that existing module. (Otherwise,
+ # the reload() builtin will not work correctly.)
+ if name in sys.modules:
+ return sys.modules[name]
+
+ co, pyc = self.modules.pop(name)
+ # I wish I could just call imp.load_compiled here, but __file__ has to
+ # be set properly. In Python 3.2+, this all would be handled correctly
+ # by load_compiled.
+ mod = sys.modules[name] = imp.new_module(name)
+ try:
+ mod.__file__ = co.co_filename
+ # Normally, this attribute is 3.2+.
+ mod.__cached__ = pyc
+ mod.__loader__ = self
+ py.builtin.exec_(co, mod.__dict__)
+ except:
+ del sys.modules[name]
+ raise
+ return sys.modules[name]
+
+
+
+ def is_package(self, name):
+ try:
+ fd, fn, desc = imp.find_module(name)
+ except ImportError:
+ return False
+ if fd is not None:
+ fd.close()
+ tp = desc[2]
+ return tp == imp.PKG_DIRECTORY
+
+ @classmethod
+ def _register_with_pkg_resources(cls):
+ """
+ Ensure package resources can be loaded from this loader. May be called
+ multiple times, as the operation is idempotent.
+ """
+ try:
+ import pkg_resources
+ # access an attribute in case a deferred importer is present
+ pkg_resources.__name__
+ except ImportError:
+ return
+
+ # Since pytest tests are always located in the file system, the
+ # DefaultProvider is appropriate.
+ pkg_resources.register_loader_type(cls, pkg_resources.DefaultProvider)
+
+ def get_data(self, pathname):
+ """Optional PEP302 get_data API.
+ """
+ with open(pathname, 'rb') as f:
+ return f.read()
+
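AssertionRewritingHook implements the legacy PEP 302 finder/loader protocol (find_module()/load_module()). Below is a small sketch of how such a hook participates in importing, on interpreters that still honour that protocol; the TracingFinder class is made up for the demo, and pytest installs its own hook during startup rather than through user code like this:

    import sys

    class TracingFinder(object):
        # Report which modules the import system asks about, then return
        # None to defer to the regular machinery, like the early exits above.
        def find_module(self, name, path=None):
            print("find_module called for: %s" % name)
            return None

    hook = TracingFinder()
    sys.meta_path.insert(0, hook)   # meta_path finders are consulted front to back
    import colorsys                 # consults the finder unless already in sys.modules
    sys.meta_path.remove(hook)      # uninstall the demo hook again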
+
+def _write_pyc(state, co, source_stat, pyc):
+ # Technically, we don't have to have the same pyc format as
+ # (C)Python, since these "pycs" should never be seen by builtin
+    # import. However, there's little reason to deviate, and I hope
+ # sometime to be able to use imp.load_compiled to load them. (See
+ # the comment in load_module above.)
+ try:
+ fp = open(pyc, "wb")
+ except IOError:
+ err = sys.exc_info()[1].errno
+ state.trace("error writing pyc file at %s: errno=%s" %(pyc, err))
+ # we ignore any failure to write the cache file
+        # there are many possible reasons: permission denied, __pycache__
+        # being a file, etc.
+ return False
+ try:
+ fp.write(imp.get_magic())
+ mtime = int(source_stat.mtime)
+ size = source_stat.size & 0xFFFFFFFF
+ fp.write(struct.pack("<ll", mtime, size))
+ marshal.dump(co, fp)
+ finally:
+ fp.close()
+ return True
+
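The cached pyc written above carries the classic 12-byte header that _read_pyc() validates below: 4 bytes of import magic, then the source mtime and size as two little-endian 32-bit integers. A tiny round-trip sketch (the concrete values are made up for illustration):

    import struct

    mtime = 1454000000                  # example timestamp
    size = 4096 & 0xFFFFFFFF            # size is truncated to 32 bits, as above
    header = struct.pack("<ll", mtime, size)
    assert struct.unpack("<ll", header) == (mtime, size)
    assert len(header) == 8             # plus the 4-byte magic = 12 header bytes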
+RN = "\r\n".encode("utf-8")
+N = "\n".encode("utf-8")
+
+cookie_re = re.compile(r"^[ \t\f]*#.*coding[:=][ \t]*[-\w.]+")
+BOM_UTF8 = '\xef\xbb\xbf'
+
+def _rewrite_test(state, fn):
+ """Try to read and rewrite *fn* and return the code object."""
+ try:
+ stat = fn.stat()
+ source = fn.read("rb")
+ except EnvironmentError:
+ return None, None
+ if ASCII_IS_DEFAULT_ENCODING:
+ # ASCII is the default encoding in Python 2. Without a coding
+ # declaration, Python 2 will complain about any bytes in the file
+ # outside the ASCII range. Sadly, this behavior does not extend to
+ # compile() or ast.parse(), which prefer to interpret the bytes as
+ # latin-1. (At least they properly handle explicit coding cookies.) To
+ # preserve this error behavior, we could force ast.parse() to use ASCII
+ # as the encoding by inserting a coding cookie. Unfortunately, that
+ # messes up line numbers. Thus, we have to check ourselves if anything
+ # is outside the ASCII range in the case no encoding is explicitly
+ # declared. For more context, see issue #269. Yay for Python 3 which
+ # gets this right.
+ end1 = source.find("\n")
+ end2 = source.find("\n", end1 + 1)
+ if (not source.startswith(BOM_UTF8) and
+ cookie_re.match(source[0:end1]) is None and
+ cookie_re.match(source[end1 + 1:end2]) is None):
+ if hasattr(state, "_indecode"):
+ # encodings imported us again, so don't rewrite.
+ return None, None
+ state._indecode = True
+ try:
+ try:
+ source.decode("ascii")
+ except UnicodeDecodeError:
+ # Let it fail in real import.
+ return None, None
+ finally:
+ del state._indecode
+ # On Python versions which are not 2.7 and less than or equal to 3.1, the
+ # parser expects *nix newlines.
+ if REWRITE_NEWLINES:
+ source = source.replace(RN, N) + N
+ try:
+ tree = ast.parse(source)
+ except SyntaxError:
+ # Let this pop up again in the real import.
+ state.trace("failed to parse: %r" % (fn,))
+ return None, None
+ rewrite_asserts(tree)
+ try:
+ co = compile(tree, fn.strpath, "exec")
+ except SyntaxError:
+ # It's possible that this error is from some bug in the
+ # assertion rewriting, but I don't know of a fast way to tell.
+ state.trace("failed to compile: %r" % (fn,))
+ return None, None
+ return stat, co
+
+def _make_rewritten_pyc(state, source_stat, pyc, co):
+ """Try to dump rewritten code to *pyc*."""
+ if sys.platform.startswith("win"):
+ # Windows grants exclusive access to open files and doesn't have atomic
+ # rename, so just write into the final file.
+ _write_pyc(state, co, source_stat, pyc)
+ else:
+ # When not on windows, assume rename is atomic. Dump the code object
+ # into a file specific to this process and atomically replace it.
+ proc_pyc = pyc + "." + str(os.getpid())
+ if _write_pyc(state, co, source_stat, proc_pyc):
+ os.rename(proc_pyc, pyc)
+
+def _read_pyc(source, pyc, trace=lambda x: None):
+ """Possibly read a pytest pyc containing rewritten code.
+
+ Return rewritten code if successful or None if not.
+ """
+ try:
+ fp = open(pyc, "rb")
+ except IOError:
+ return None
+ with fp:
+ try:
+ mtime = int(source.mtime())
+ size = source.size()
+ data = fp.read(12)
+ except EnvironmentError as e:
+ trace('_read_pyc(%s): EnvironmentError %s' % (source, e))
+ return None
+ # Check for invalid or out of date pyc file.
+ if (len(data) != 12 or data[:4] != imp.get_magic() or
+ struct.unpack("<ll", data[4:]) != (mtime, size)):
+ trace('_read_pyc(%s): invalid or out of date pyc' % source)
+ return None
+ try:
+ co = marshal.load(fp)
+ except Exception as e:
+ trace('_read_pyc(%s): marshal.load error %s' % (source, e))
+ return None
+ if not isinstance(co, types.CodeType):
+ trace('_read_pyc(%s): not a code object' % source)
+ return None
+ return co
+
+
+def rewrite_asserts(mod):
+ """Rewrite the assert statements in mod."""
+ AssertionRewriter().run(mod)
+
+
+def _saferepr(obj):
+ """Get a safe repr of an object for assertion error messages.
+
+ The assertion formatting (util.format_explanation()) requires
+    newlines to be escaped since they are special characters for it.
+    Normally assertion.util.format_explanation() does this, but a
+    custom repr may itself contain one of the special escape
+    sequences; in particular '\n{' and '\n}' are likely to be present in
+    JSON reprs.
+
+ """
+ repr = py.io.saferepr(obj)
+ if py.builtin._istext(repr):
+ t = py.builtin.text
+ else:
+ t = py.builtin.bytes
+ return repr.replace(t("\n"), t("\\n"))
+
+
+from _pytest.assertion.util import format_explanation as _format_explanation # noqa
+
+def _format_assertmsg(obj):
+ """Format the custom assertion message given.
+
+ For strings this simply replaces newlines with '\n~' so that
+ util.format_explanation() will preserve them instead of escaping
+ newlines. For other objects py.io.saferepr() is used first.
+
+ """
+    # reprlib appears to have a bug which means that if a string
+    # contains a newline it gets escaped; however, if an object has a
+    # .__repr__() which contains newlines it does not get escaped.
+    # In either case we want to preserve the newline.
+ if py.builtin._istext(obj) or py.builtin._isbytes(obj):
+ s = obj
+ is_repr = False
+ else:
+ s = py.io.saferepr(obj)
+ is_repr = True
+ if py.builtin._istext(s):
+ t = py.builtin.text
+ else:
+ t = py.builtin.bytes
+ s = s.replace(t("\n"), t("\n~")).replace(t("%"), t("%%"))
+ if is_repr:
+ s = s.replace(t("\\n"), t("\n~"))
+ return s
+
+def _should_repr_global_name(obj):
+ return not hasattr(obj, "__name__") and not py.builtin.callable(obj)
+
+def _format_boolop(explanations, is_or):
+ explanation = "(" + (is_or and " or " or " and ").join(explanations) + ")"
+ if py.builtin._istext(explanation):
+ t = py.builtin.text
+ else:
+ t = py.builtin.bytes
+ return explanation.replace(t('%'), t('%%'))
+
+def _call_reprcompare(ops, results, expls, each_obj):
+ for i, res, expl in zip(range(len(ops)), results, expls):
+ try:
+ done = not res
+ except Exception:
+ done = True
+ if done:
+ break
+ if util._reprcompare is not None:
+ custom = util._reprcompare(ops[i], each_obj[i], each_obj[i + 1])
+ if custom is not None:
+ return custom
+ return expl
+
+
+unary_map = {
+ ast.Not: "not %s",
+ ast.Invert: "~%s",
+ ast.USub: "-%s",
+ ast.UAdd: "+%s"
+}
+
+binop_map = {
+ ast.BitOr: "|",
+ ast.BitXor: "^",
+ ast.BitAnd: "&",
+ ast.LShift: "<<",
+ ast.RShift: ">>",
+ ast.Add: "+",
+ ast.Sub: "-",
+ ast.Mult: "*",
+ ast.Div: "/",
+ ast.FloorDiv: "//",
+ ast.Mod: "%%", # escaped for string formatting
+ ast.Eq: "==",
+ ast.NotEq: "!=",
+ ast.Lt: "<",
+ ast.LtE: "<=",
+ ast.Gt: ">",
+ ast.GtE: ">=",
+ ast.Pow: "**",
+ ast.Is: "is",
+ ast.IsNot: "is not",
+ ast.In: "in",
+ ast.NotIn: "not in"
+}
+# Python 3.5+ compatibility
+try:
+ binop_map[ast.MatMult] = "@"
+except AttributeError:
+ pass
+
+# Python 3.4+ compatibility
+if hasattr(ast, "NameConstant"):
+ _NameConstant = ast.NameConstant
+else:
+ def _NameConstant(c):
+ return ast.Name(str(c), ast.Load())
+
+
+def set_location(node, lineno, col_offset):
+ """Set node location information recursively."""
+ def _fix(node, lineno, col_offset):
+ if "lineno" in node._attributes:
+ node.lineno = lineno
+ if "col_offset" in node._attributes:
+ node.col_offset = col_offset
+ for child in ast.iter_child_nodes(node):
+ _fix(child, lineno, col_offset)
+ _fix(node, lineno, col_offset)
+ return node
+
+
+class AssertionRewriter(ast.NodeVisitor):
+ """Assertion rewriting implementation.
+
+    The main entrypoint is to call .run() with an ast.Module instance;
+ this will then find all the assert statements and re-write them to
+ provide intermediate values and a detailed assertion error. See
+ http://pybites.blogspot.be/2011/07/behind-scenes-of-pytests-new-assertion.html
+ for an overview of how this works.
+
+ The entry point here is .run() which will iterate over all the
+ statements in an ast.Module and for each ast.Assert statement it
+ finds call .visit() with it. Then .visit_Assert() takes over and
+ is responsible for creating new ast statements to replace the
+ original assert statement: it re-writes the test of an assertion
+ to provide intermediate values and replace it with an if statement
+ which raises an assertion error with a detailed explanation in
+ case the expression is false.
+
+ For this .visit_Assert() uses the visitor pattern to visit all the
+ AST nodes of the ast.Assert.test field, each visit call returning
+ an AST node and the corresponding explanation string. During this
+ state is kept in several instance attributes:
+
+ :statements: All the AST statements which will replace the assert
+ statement.
+
+ :variables: This is populated by .variable() with each variable
+ used by the statements so that they can all be set to None at
+ the end of the statements.
+
+ :variable_counter: Counter to create new unique variables needed
+ by statements. Variables are created using .variable() and
+ have the form of "@py_assert0".
+
+ :on_failure: The AST statements which will be executed if the
+ assertion test fails. This is the code which will construct
+ the failure message and raises the AssertionError.
+
+ :explanation_specifiers: A dict filled by .explanation_param()
+ with %-formatting placeholders and their corresponding
+ expressions to use in the building of an assertion message.
+ This is used by .pop_format_context() to build a message.
+
+ :stack: A stack of the explanation_specifiers dicts maintained by
+ .push_format_context() and .pop_format_context() which allows
+      building another %-formatted string while already building one.
+
+ This state is reset on every new assert statement visited and used
+ by the other visitors.
+
+ """
+
+ def run(self, mod):
+ """Find all assert statements in *mod* and rewrite them."""
+ if not mod.body:
+ # Nothing to do.
+ return
+ # Insert some special imports at the top of the module but after any
+ # docstrings and __future__ imports.
+ aliases = [ast.alias(py.builtin.builtins.__name__, "@py_builtins"),
+ ast.alias("_pytest.assertion.rewrite", "@pytest_ar")]
+ expect_docstring = True
+ pos = 0
+ lineno = 0
+ for item in mod.body:
+ if (expect_docstring and isinstance(item, ast.Expr) and
+ isinstance(item.value, ast.Str)):
+ doc = item.value.s
+ if "PYTEST_DONT_REWRITE" in doc:
+ # The module has disabled assertion rewriting.
+ return
+ lineno += len(doc) - 1
+ expect_docstring = False
+ elif (not isinstance(item, ast.ImportFrom) or item.level > 0 or
+ item.module != "__future__"):
+ lineno = item.lineno
+ break
+ pos += 1
+ imports = [ast.Import([alias], lineno=lineno, col_offset=0)
+ for alias in aliases]
+ mod.body[pos:pos] = imports
+ # Collect asserts.
+ nodes = [mod]
+ while nodes:
+ node = nodes.pop()
+ for name, field in ast.iter_fields(node):
+ if isinstance(field, list):
+ new = []
+ for i, child in enumerate(field):
+ if isinstance(child, ast.Assert):
+ # Transform assert.
+ new.extend(self.visit(child))
+ else:
+ new.append(child)
+ if isinstance(child, ast.AST):
+ nodes.append(child)
+ setattr(node, name, new)
+ elif (isinstance(field, ast.AST) and
+ # Don't recurse into expressions as they can't contain
+ # asserts.
+ not isinstance(field, ast.expr)):
+ nodes.append(field)
+
+ def variable(self):
+ """Get a new variable."""
+ # Use a character invalid in python identifiers to avoid clashing.
+ name = "@py_assert" + str(next(self.variable_counter))
+ self.variables.append(name)
+ return name
+
+ def assign(self, expr):
+ """Give *expr* a name."""
+ name = self.variable()
+ self.statements.append(ast.Assign([ast.Name(name, ast.Store())], expr))
+ return ast.Name(name, ast.Load())
+
+ def display(self, expr):
+ """Call py.io.saferepr on the expression."""
+ return self.helper("saferepr", expr)
+
+ def helper(self, name, *args):
+ """Call a helper in this module."""
+ py_name = ast.Name("@pytest_ar", ast.Load())
+ attr = ast.Attribute(py_name, "_" + name, ast.Load())
+ return ast_Call(attr, list(args), [])
+
+ def builtin(self, name):
+ """Return the builtin called *name*."""
+ builtin_name = ast.Name("@py_builtins", ast.Load())
+ return ast.Attribute(builtin_name, name, ast.Load())
+
+ def explanation_param(self, expr):
+ """Return a new named %-formatting placeholder for expr.
+
+ This creates a %-formatting placeholder for expr in the
+ current formatting context, e.g. ``%(py0)s``. The placeholder
+ and expr are placed in the current format context so that it
+ can be used on the next call to .pop_format_context().
+
+ """
+ specifier = "py" + str(next(self.variable_counter))
+ self.explanation_specifiers[specifier] = expr
+ return "%(" + specifier + ")s"
+
+ def push_format_context(self):
+ """Create a new formatting context.
+
+ The format context is used for when an explanation wants to
+ have a variable value formatted in the assertion message. In
+ this case the value required can be added using
+ .explanation_param(). Finally .pop_format_context() is used
+ to format a string of %-formatted values as added by
+ .explanation_param().
+
+ """
+ self.explanation_specifiers = {}
+ self.stack.append(self.explanation_specifiers)
+
+ def pop_format_context(self, expl_expr):
+ """Format the %-formatted string with current format context.
+
+ The expl_expr should be an ast.Str instance constructed from
+ the %-placeholders created by .explanation_param(). This will
+ add the required code to format said string to .on_failure and
+ return the ast.Name instance of the formatted string.
+
+ """
+ current = self.stack.pop()
+ if self.stack:
+ self.explanation_specifiers = self.stack[-1]
+ keys = [ast.Str(key) for key in current.keys()]
+ format_dict = ast.Dict(keys, list(current.values()))
+ form = ast.BinOp(expl_expr, ast.Mod(), format_dict)
+ name = "@py_format" + str(next(self.variable_counter))
+ self.on_failure.append(ast.Assign([ast.Name(name, ast.Store())], form))
+ return ast.Name(name, ast.Load())
+
+ def generic_visit(self, node):
+ """Handle expressions we don't have custom code for."""
+ assert isinstance(node, ast.expr)
+ res = self.assign(node)
+ return res, self.explanation_param(self.display(res))
+
+ def visit_Assert(self, assert_):
+ """Return the AST statements to replace the ast.Assert instance.
+
+ This re-writes the test of an assertion to provide
+ intermediate values and replace it with an if statement which
+ raises an assertion error with a detailed explanation in case
+ the expression is false.
+
+ """
+ self.statements = []
+ self.variables = []
+ self.variable_counter = itertools.count()
+ self.stack = []
+ self.on_failure = []
+ self.push_format_context()
+ # Rewrite assert into a bunch of statements.
+ top_condition, explanation = self.visit(assert_.test)
+ # Create failure message.
+ body = self.on_failure
+ negation = ast.UnaryOp(ast.Not(), top_condition)
+ self.statements.append(ast.If(negation, body, []))
+ if assert_.msg:
+ assertmsg = self.helper('format_assertmsg', assert_.msg)
+ explanation = "\n>assert " + explanation
+ else:
+ assertmsg = ast.Str("")
+ explanation = "assert " + explanation
+ template = ast.BinOp(assertmsg, ast.Add(), ast.Str(explanation))
+ msg = self.pop_format_context(template)
+ fmt = self.helper("format_explanation", msg)
+ err_name = ast.Name("AssertionError", ast.Load())
+ exc = ast_Call(err_name, [fmt], [])
+ if sys.version_info[0] >= 3:
+ raise_ = ast.Raise(exc, None)
+ else:
+ raise_ = ast.Raise(exc, None, None)
+ body.append(raise_)
+ # Clear temporary variables by setting them to None.
+ if self.variables:
+ variables = [ast.Name(name, ast.Store())
+ for name in self.variables]
+ clear = ast.Assign(variables, _NameConstant(None))
+ self.statements.append(clear)
+ # Fix line numbers.
+ for stmt in self.statements:
+ set_location(stmt, assert_.lineno, assert_.col_offset)
+ return self.statements
+
+ def visit_Name(self, name):
+ # Display the repr of the name if it's a local variable or
+ # _should_repr_global_name() thinks it's acceptable.
+ locs = ast_Call(self.builtin("locals"), [], [])
+ inlocs = ast.Compare(ast.Str(name.id), [ast.In()], [locs])
+ dorepr = self.helper("should_repr_global_name", name)
+ test = ast.BoolOp(ast.Or(), [inlocs, dorepr])
+ expr = ast.IfExp(test, self.display(name), ast.Str(name.id))
+ return name, self.explanation_param(expr)
+
+ def visit_BoolOp(self, boolop):
+ res_var = self.variable()
+ expl_list = self.assign(ast.List([], ast.Load()))
+ app = ast.Attribute(expl_list, "append", ast.Load())
+ is_or = int(isinstance(boolop.op, ast.Or))
+ body = save = self.statements
+ fail_save = self.on_failure
+ levels = len(boolop.values) - 1
+ self.push_format_context()
+        # Process each operand, short-circuiting if needed.
+ for i, v in enumerate(boolop.values):
+ if i:
+ fail_inner = []
+ # cond is set in a prior loop iteration below
+ self.on_failure.append(ast.If(cond, fail_inner, [])) # noqa
+ self.on_failure = fail_inner
+ self.push_format_context()
+ res, expl = self.visit(v)
+ body.append(ast.Assign([ast.Name(res_var, ast.Store())], res))
+ expl_format = self.pop_format_context(ast.Str(expl))
+ call = ast_Call(app, [expl_format], [])
+ self.on_failure.append(ast.Expr(call))
+ if i < levels:
+ cond = res
+ if is_or:
+ cond = ast.UnaryOp(ast.Not(), cond)
+ inner = []
+ self.statements.append(ast.If(cond, inner, []))
+ self.statements = body = inner
+ self.statements = save
+ self.on_failure = fail_save
+ expl_template = self.helper("format_boolop", expl_list, ast.Num(is_or))
+ expl = self.pop_format_context(expl_template)
+ return ast.Name(res_var, ast.Load()), self.explanation_param(expl)
+
+ def visit_UnaryOp(self, unary):
+ pattern = unary_map[unary.op.__class__]
+ operand_res, operand_expl = self.visit(unary.operand)
+ res = self.assign(ast.UnaryOp(unary.op, operand_res))
+ return res, pattern % (operand_expl,)
+
+ def visit_BinOp(self, binop):
+ symbol = binop_map[binop.op.__class__]
+ left_expr, left_expl = self.visit(binop.left)
+ right_expr, right_expl = self.visit(binop.right)
+ explanation = "(%s %s %s)" % (left_expl, symbol, right_expl)
+ res = self.assign(ast.BinOp(left_expr, binop.op, right_expr))
+ return res, explanation
+
+ def visit_Call_35(self, call):
+ """
+        visit `ast.Call` nodes on Python 3.5 and after
+ """
+ new_func, func_expl = self.visit(call.func)
+ arg_expls = []
+ new_args = []
+ new_kwargs = []
+ for arg in call.args:
+ res, expl = self.visit(arg)
+ arg_expls.append(expl)
+ new_args.append(res)
+ for keyword in call.keywords:
+ res, expl = self.visit(keyword.value)
+ new_kwargs.append(ast.keyword(keyword.arg, res))
+ if keyword.arg:
+ arg_expls.append(keyword.arg + "=" + expl)
+            else:  # **kwargs appear as keyword nodes whose .arg is None
+ arg_expls.append("**" + expl)
+
+ expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
+ new_call = ast.Call(new_func, new_args, new_kwargs)
+ res = self.assign(new_call)
+ res_expl = self.explanation_param(self.display(res))
+ outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
+ return res, outer_expl
+
+ def visit_Starred(self, starred):
+ # From Python 3.5, a Starred node can appear in a function call
+ res, expl = self.visit(starred.value)
+ return starred, '*' + expl
+
+ def visit_Call_legacy(self, call):
+ """
+        visit `ast.Call` nodes on Python 3.4 and below
+ """
+ new_func, func_expl = self.visit(call.func)
+ arg_expls = []
+ new_args = []
+ new_kwargs = []
+ new_star = new_kwarg = None
+ for arg in call.args:
+ res, expl = self.visit(arg)
+ new_args.append(res)
+ arg_expls.append(expl)
+ for keyword in call.keywords:
+ res, expl = self.visit(keyword.value)
+ new_kwargs.append(ast.keyword(keyword.arg, res))
+ arg_expls.append(keyword.arg + "=" + expl)
+ if call.starargs:
+ new_star, expl = self.visit(call.starargs)
+ arg_expls.append("*" + expl)
+ if call.kwargs:
+ new_kwarg, expl = self.visit(call.kwargs)
+ arg_expls.append("**" + expl)
+ expl = "%s(%s)" % (func_expl, ', '.join(arg_expls))
+ new_call = ast.Call(new_func, new_args, new_kwargs,
+ new_star, new_kwarg)
+ res = self.assign(new_call)
+ res_expl = self.explanation_param(self.display(res))
+ outer_expl = "%s\n{%s = %s\n}" % (res_expl, res_expl, expl)
+ return res, outer_expl
+
+    # The ast.Call signature changed in 3.5, so
+    # conditionally choose which method is installed
+    # as visit_Call depending on the Python version.
+ if sys.version_info >= (3, 5):
+ visit_Call = visit_Call_35
+ else:
+ visit_Call = visit_Call_legacy
+
+
+ def visit_Attribute(self, attr):
+ if not isinstance(attr.ctx, ast.Load):
+ return self.generic_visit(attr)
+ value, value_expl = self.visit(attr.value)
+ res = self.assign(ast.Attribute(value, attr.attr, ast.Load()))
+ res_expl = self.explanation_param(self.display(res))
+ pat = "%s\n{%s = %s.%s\n}"
+ expl = pat % (res_expl, res_expl, value_expl, attr.attr)
+ return res, expl
+
+ def visit_Compare(self, comp):
+ self.push_format_context()
+ left_res, left_expl = self.visit(comp.left)
+ res_variables = [self.variable() for i in range(len(comp.ops))]
+ load_names = [ast.Name(v, ast.Load()) for v in res_variables]
+ store_names = [ast.Name(v, ast.Store()) for v in res_variables]
+ it = zip(range(len(comp.ops)), comp.ops, comp.comparators)
+ expls = []
+ syms = []
+ results = [left_res]
+ for i, op, next_operand in it:
+ next_res, next_expl = self.visit(next_operand)
+ results.append(next_res)
+ sym = binop_map[op.__class__]
+ syms.append(ast.Str(sym))
+ expl = "%s %s %s" % (left_expl, sym, next_expl)
+ expls.append(ast.Str(expl))
+ res_expr = ast.Compare(left_res, [op], [next_res])
+ self.statements.append(ast.Assign([store_names[i]], res_expr))
+ left_res, left_expl = next_res, next_expl
+ # Use pytest.assertion.util._reprcompare if that's available.
+ expl_call = self.helper("call_reprcompare",
+ ast.Tuple(syms, ast.Load()),
+ ast.Tuple(load_names, ast.Load()),
+ ast.Tuple(expls, ast.Load()),
+ ast.Tuple(results, ast.Load()))
+ if len(comp.ops) > 1:
+ res = ast.BoolOp(ast.And(), load_names)
+ else:
+ res = load_names[0]
+ return res, self.explanation_param(self.pop_format_context(expl_call))
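For orientation, here is a rough, hand-written approximation of what the rewriter produces for a plain ``assert x == y``; the real output uses @py_assert*/@py_format* temporaries (only valid at the AST level) and the _format_* helpers defined above, so the names below are simplified stand-ins:

    def check(x, y):
        # original statement:  assert x == y
        py_assert1 = x == y
        if not py_assert1:
            py_format2 = "assert %r == %r" % (x, y)
            raise AssertionError(py_format2)
        py_assert1 = None    # temporaries are reset to None, as in visit_Assert()

    check(2, 2)              # passes silently; check(2, 3) raises "assert 2 == 3"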
diff --git a/python/pytest/_pytest/assertion/util.py b/python/pytest/_pytest/assertion/util.py
new file mode 100644
index 000000000..f2f23efea
--- /dev/null
+++ b/python/pytest/_pytest/assertion/util.py
@@ -0,0 +1,332 @@
+"""Utilities for assertion debugging"""
+import pprint
+
+import _pytest._code
+import py
+try:
+ from collections import Sequence
+except ImportError:
+ Sequence = list
+
+BuiltinAssertionError = py.builtin.builtins.AssertionError
+u = py.builtin._totext
+
+# The _reprcompare attribute on the util module is used by the new assertion
+# interpretation code and assertion rewriter to detect that this plugin was
+# loaded and in turn call the hooks defined here as part of the
+# DebugInterpreter.
+_reprcompare = None
+
+
+# the re-encoding is needed for python2 repr
+# with non-ascii characters (see issue 877 and 1379)
+def ecu(s):
+ try:
+ return u(s, 'utf-8', 'replace')
+ except TypeError:
+ return s
+
+
+def format_explanation(explanation):
+ """This formats an explanation
+
+    Normally all embedded newlines are escaped; however, there are
+    three exceptions: \n{, \n} and \n~. The first two are intended to
+    cover nested explanations; see function and attribute explanations
+ for examples (.visit_Call(), visit_Attribute()). The last one is
+ for when one explanation needs to span multiple lines, e.g. when
+ displaying diffs.
+ """
+ explanation = ecu(explanation)
+ explanation = _collapse_false(explanation)
+ lines = _split_explanation(explanation)
+ result = _format_lines(lines)
+ return u('\n').join(result)
+
+
+def _collapse_false(explanation):
+ """Collapse expansions of False
+
+ So this strips out any "assert False\n{where False = ...\n}"
+ blocks.
+ """
+ where = 0
+ while True:
+ start = where = explanation.find("False\n{False = ", where)
+ if where == -1:
+ break
+ level = 0
+ prev_c = explanation[start]
+ for i, c in enumerate(explanation[start:]):
+ if prev_c + c == "\n{":
+ level += 1
+ elif prev_c + c == "\n}":
+ level -= 1
+ if not level:
+ break
+ prev_c = c
+ else:
+ raise AssertionError("unbalanced braces: %r" % (explanation,))
+ end = start + i
+ where = end
+ if explanation[end - 1] == '\n':
+ explanation = (explanation[:start] + explanation[start+15:end-1] +
+ explanation[end+1:])
+ where -= 17
+ return explanation
+
+
+def _split_explanation(explanation):
+ """Return a list of individual lines in the explanation
+
+ This will return a list of lines split on '\n{', '\n}' and '\n~'.
+ Any other newlines will be escaped and appear in the line as the
+ literal '\n' characters.
+ """
+ raw_lines = (explanation or u('')).split('\n')
+ lines = [raw_lines[0]]
+ for l in raw_lines[1:]:
+ if l and l[0] in ['{', '}', '~', '>']:
+ lines.append(l)
+ else:
+ lines[-1] += '\\n' + l
+ return lines
+
+
+def _format_lines(lines):
+ """Format the individual lines
+
+ This will replace the '{', '}' and '~' characters of our mini
+ formatting language with the proper 'where ...', 'and ...' and ' +
+ ...' text, taking care of indentation along the way.
+
+ Return a list of formatted lines.
+ """
+ result = lines[:1]
+ stack = [0]
+ stackcnt = [0]
+ for line in lines[1:]:
+ if line.startswith('{'):
+ if stackcnt[-1]:
+ s = u('and ')
+ else:
+ s = u('where ')
+ stack.append(len(result))
+ stackcnt[-1] += 1
+ stackcnt.append(0)
+ result.append(u(' +') + u(' ')*(len(stack)-1) + s + line[1:])
+ elif line.startswith('}'):
+ stack.pop()
+ stackcnt.pop()
+ result[stack[-1]] += line[1:]
+ else:
+ assert line[0] in ['~', '>']
+ stack[-1] += 1
+ indent = len(stack) if line.startswith('~') else len(stack) - 1
+ result.append(u(' ')*indent + line[1:])
+ assert len(stack) == 1
+ return result
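A short example of the mini-language handled above, assuming this vendored pytest (2.9.x) is importable; the ``\n{ ... \n}`` block becomes a "where" clause (output shown roughly):

    from _pytest.assertion.util import format_explanation

    print(format_explanation("assert 0\n{0 = len([])\n}"))
    # assert 0
    #  + where 0 = len([])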
+
+
+# Provide basestring in python3
+try:
+ basestring = basestring
+except NameError:
+ basestring = str
+
+
+def assertrepr_compare(config, op, left, right):
+ """Return specialised explanations for some operators/operands"""
+ width = 80 - 15 - len(op) - 2 # 15 chars indentation, 1 space around op
+ left_repr = py.io.saferepr(left, maxsize=int(width/2))
+ right_repr = py.io.saferepr(right, maxsize=width-len(left_repr))
+
+ summary = u('%s %s %s') % (ecu(left_repr), op, ecu(right_repr))
+
+ issequence = lambda x: (isinstance(x, (list, tuple, Sequence)) and
+ not isinstance(x, basestring))
+ istext = lambda x: isinstance(x, basestring)
+ isdict = lambda x: isinstance(x, dict)
+ isset = lambda x: isinstance(x, (set, frozenset))
+
+ def isiterable(obj):
+ try:
+ iter(obj)
+ return not istext(obj)
+ except TypeError:
+ return False
+
+ verbose = config.getoption('verbose')
+ explanation = None
+ try:
+ if op == '==':
+ if istext(left) and istext(right):
+ explanation = _diff_text(left, right, verbose)
+ else:
+ if issequence(left) and issequence(right):
+ explanation = _compare_eq_sequence(left, right, verbose)
+ elif isset(left) and isset(right):
+ explanation = _compare_eq_set(left, right, verbose)
+ elif isdict(left) and isdict(right):
+ explanation = _compare_eq_dict(left, right, verbose)
+ if isiterable(left) and isiterable(right):
+ expl = _compare_eq_iterable(left, right, verbose)
+ if explanation is not None:
+ explanation.extend(expl)
+ else:
+ explanation = expl
+ elif op == 'not in':
+ if istext(left) and istext(right):
+ explanation = _notin_text(left, right, verbose)
+ except Exception:
+ explanation = [
+ u('(pytest_assertion plugin: representation of details failed. '
+ 'Probably an object has a faulty __repr__.)'),
+ u(_pytest._code.ExceptionInfo())]
+
+ if not explanation:
+ return None
+
+ return [summary] + explanation
+
+
+def _diff_text(left, right, verbose=False):
+ """Return the explanation for the diff between text or bytes
+
+ Unless --verbose is used this will skip leading and trailing
+ characters which are identical to keep the diff minimal.
+
+    If the inputs are bytes they will be safely converted to text.
+ """
+ from difflib import ndiff
+ explanation = []
+ if isinstance(left, py.builtin.bytes):
+ left = u(repr(left)[1:-1]).replace(r'\n', '\n')
+ if isinstance(right, py.builtin.bytes):
+ right = u(repr(right)[1:-1]).replace(r'\n', '\n')
+ if not verbose:
+ i = 0 # just in case left or right has zero length
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation = [u('Skipping %s identical leading '
+ 'characters in diff, use -v to show') % i]
+ left = left[i:]
+ right = right[i:]
+ if len(left) == len(right):
+ for i in range(len(left)):
+ if left[-i] != right[-i]:
+ break
+ if i > 42:
+ i -= 10 # Provide some context
+ explanation += [u('Skipping %s identical trailing '
+ 'characters in diff, use -v to show') % i]
+ left = left[:-i]
+ right = right[:-i]
+ explanation += [line.strip('\n')
+ for line in ndiff(left.splitlines(),
+ right.splitlines())]
+ return explanation
+
+
+def _compare_eq_iterable(left, right, verbose=False):
+ if not verbose:
+ return [u('Use -v to get the full diff')]
+ # dynamic import to speedup pytest
+ import difflib
+
+ try:
+ left_formatting = pprint.pformat(left).splitlines()
+ right_formatting = pprint.pformat(right).splitlines()
+ explanation = [u('Full diff:')]
+ except Exception:
+        # hack: PrettyPrinter.pformat() in python 2 fails when formatting items that can't be sorted(), i.e., calling
+ # sorted() on a list would raise. See issue #718.
+ # As a workaround, the full diff is generated by using the repr() string of each item of each container.
+ left_formatting = sorted(repr(x) for x in left)
+ right_formatting = sorted(repr(x) for x in right)
+ explanation = [u('Full diff (fallback to calling repr on each item):')]
+ explanation.extend(line.strip() for line in difflib.ndiff(left_formatting, right_formatting))
+ return explanation
+
+
+def _compare_eq_sequence(left, right, verbose=False):
+ explanation = []
+ for i in range(min(len(left), len(right))):
+ if left[i] != right[i]:
+ explanation += [u('At index %s diff: %r != %r')
+ % (i, left[i], right[i])]
+ break
+ if len(left) > len(right):
+ explanation += [u('Left contains more items, first extra item: %s')
+ % py.io.saferepr(left[len(right)],)]
+ elif len(left) < len(right):
+ explanation += [
+ u('Right contains more items, first extra item: %s') %
+ py.io.saferepr(right[len(left)],)]
+ return explanation
+
+
+def _compare_eq_set(left, right, verbose=False):
+ explanation = []
+ diff_left = left - right
+ diff_right = right - left
+ if diff_left:
+ explanation.append(u('Extra items in the left set:'))
+ for item in diff_left:
+ explanation.append(py.io.saferepr(item))
+ if diff_right:
+ explanation.append(u('Extra items in the right set:'))
+ for item in diff_right:
+ explanation.append(py.io.saferepr(item))
+ return explanation
+
+
+def _compare_eq_dict(left, right, verbose=False):
+ explanation = []
+ common = set(left).intersection(set(right))
+ same = dict((k, left[k]) for k in common if left[k] == right[k])
+ if same and not verbose:
+ explanation += [u('Omitting %s identical items, use -v to show') %
+ len(same)]
+ elif same:
+ explanation += [u('Common items:')]
+ explanation += pprint.pformat(same).splitlines()
+ diff = set(k for k in common if left[k] != right[k])
+ if diff:
+ explanation += [u('Differing items:')]
+ for k in diff:
+ explanation += [py.io.saferepr({k: left[k]}) + ' != ' +
+ py.io.saferepr({k: right[k]})]
+ extra_left = set(left) - set(right)
+ if extra_left:
+ explanation.append(u('Left contains more items:'))
+ explanation.extend(pprint.pformat(
+ dict((k, left[k]) for k in extra_left)).splitlines())
+ extra_right = set(right) - set(left)
+ if extra_right:
+ explanation.append(u('Right contains more items:'))
+ explanation.extend(pprint.pformat(
+ dict((k, right[k]) for k in extra_right)).splitlines())
+ return explanation
+
+
+def _notin_text(term, text, verbose=False):
+ index = text.find(term)
+ head = text[:index]
+ tail = text[index+len(term):]
+ correct_text = head + tail
+ diff = _diff_text(correct_text, text, verbose)
+ newdiff = [u('%s is contained here:') % py.io.saferepr(term, maxsize=42)]
+ for line in diff:
+ if line.startswith(u('Skipping')):
+ continue
+ if line.startswith(u('- ')):
+ continue
+ if line.startswith(u('+ ')):
+ newdiff.append(u(' ') + line[2:])
+ else:
+ newdiff.append(line)
+ return newdiff
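A quick illustration of one of the comparison helpers above, again assuming this vendored pytest (2.9.x) is importable as _pytest.assertion.util (output shown roughly, Python 3 reprs):

    from _pytest.assertion.util import _compare_eq_set

    print(_compare_eq_set({1, 2, 3}, {2, 3, 4}))
    # ['Extra items in the left set:', '1', 'Extra items in the right set:', '4']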
diff --git a/python/pytest/_pytest/cacheprovider.py b/python/pytest/_pytest/cacheprovider.py
new file mode 100755
index 000000000..0657001f2
--- /dev/null
+++ b/python/pytest/_pytest/cacheprovider.py
@@ -0,0 +1,245 @@
+"""
+merged implementation of the cache provider
+
+the name cache was not chosen to ensure pluggy automatically
+ignores the external pytest-cache
+"""
+
+import py
+import pytest
+import json
+from os.path import sep as _sep, altsep as _altsep
+
+
+class Cache(object):
+ def __init__(self, config):
+ self.config = config
+ self._cachedir = config.rootdir.join(".cache")
+ self.trace = config.trace.root.get("cache")
+ if config.getvalue("cacheclear"):
+ self.trace("clearing cachedir")
+ if self._cachedir.check():
+ self._cachedir.remove()
+ self._cachedir.mkdir()
+
+ def makedir(self, name):
+ """ return a directory path object with the given name. If the
+ directory does not yet exist, it will be created. You can use it
+        to manage files, e.g. to store/retrieve database
+ dumps across test sessions.
+
+ :param name: must be a string not containing a ``/`` separator.
+ Make sure the name contains your plugin or application
+ identifiers to prevent clashes with other cache users.
+ """
+ if _sep in name or _altsep is not None and _altsep in name:
+ raise ValueError("name is not allowed to contain path separators")
+ return self._cachedir.ensure_dir("d", name)
+
+ def _getvaluepath(self, key):
+ return self._cachedir.join('v', *key.split('/'))
+
+ def get(self, key, default):
+ """ return cached value for the given key. If no value
+ was yet cached or the value cannot be read, the specified
+ default is returned.
+
+ :param key: must be a ``/`` separated value. Usually the first
+ name is the name of your plugin or your application.
+ :param default: must be provided in case of a cache-miss or
+ invalid cache values.
+
+ """
+ path = self._getvaluepath(key)
+ if path.check():
+ try:
+ with path.open("r") as f:
+ return json.load(f)
+ except ValueError:
+ self.trace("cache-invalid at %s" % (path,))
+ return default
+
+ def set(self, key, value):
+ """ save value for the given key.
+
+ :param key: must be a ``/`` separated value. Usually the first
+ name is the name of your plugin or your application.
+        :param value: must be any combination of basic
+ python types, including nested types
+ like e. g. lists of dictionaries.
+ """
+ path = self._getvaluepath(key)
+ try:
+ path.dirpath().ensure_dir()
+ except (py.error.EEXIST, py.error.EACCES):
+ self.config.warn(
+ code='I9', message='could not create cache path %s' % (path,)
+ )
+ return
+ try:
+ f = path.open('w')
+ except py.error.ENOTDIR:
+ self.config.warn(
+ code='I9', message='cache could not write path %s' % (path,))
+ else:
+ with f:
+ self.trace("cache-write %s: %r" % (key, value,))
+ json.dump(value, f, indent=2, sort_keys=True)
+
+
+class LFPlugin:
+ """ Plugin which implements the --lf (run last-failing) option """
+ def __init__(self, config):
+ self.config = config
+ active_keys = 'lf', 'failedfirst'
+ self.active = any(config.getvalue(key) for key in active_keys)
+ if self.active:
+ self.lastfailed = config.cache.get("cache/lastfailed", {})
+ else:
+ self.lastfailed = {}
+
+ def pytest_report_header(self):
+ if self.active:
+ if not self.lastfailed:
+ mode = "run all (no recorded failures)"
+ else:
+ mode = "rerun last %d failures%s" % (
+ len(self.lastfailed),
+ " first" if self.config.getvalue("failedfirst") else "")
+ return "run-last-failure: %s" % mode
+
+ def pytest_runtest_logreport(self, report):
+ if report.failed and "xfail" not in report.keywords:
+ self.lastfailed[report.nodeid] = True
+ elif not report.failed:
+ if report.when == "call":
+ self.lastfailed.pop(report.nodeid, None)
+
+ def pytest_collectreport(self, report):
+ passed = report.outcome in ('passed', 'skipped')
+ if passed:
+ if report.nodeid in self.lastfailed:
+ self.lastfailed.pop(report.nodeid)
+ self.lastfailed.update(
+ (item.nodeid, True)
+ for item in report.result)
+ else:
+ self.lastfailed[report.nodeid] = True
+
+ def pytest_collection_modifyitems(self, session, config, items):
+ if self.active and self.lastfailed:
+ previously_failed = []
+ previously_passed = []
+ for item in items:
+ if item.nodeid in self.lastfailed:
+ previously_failed.append(item)
+ else:
+ previously_passed.append(item)
+ if not previously_failed and previously_passed:
+ # running a subset of all tests with recorded failures outside
+ # of the set of tests currently executing
+ pass
+ elif self.config.getvalue("failedfirst"):
+ items[:] = previously_failed + previously_passed
+ else:
+ items[:] = previously_failed
+ config.hook.pytest_deselected(items=previously_passed)
+
+ def pytest_sessionfinish(self, session):
+ config = self.config
+ if config.getvalue("cacheshow") or hasattr(config, "slaveinput"):
+ return
+ prev_failed = config.cache.get("cache/lastfailed", None) is not None
+ if (session.testscollected and prev_failed) or self.lastfailed:
+ config.cache.set("cache/lastfailed", self.lastfailed)
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption(
+ '--lf', '--last-failed', action='store_true', dest="lf",
+ help="rerun only the tests that failed "
+ "at the last run (or all if none failed)")
+ group.addoption(
+ '--ff', '--failed-first', action='store_true', dest="failedfirst",
+ help="run all tests but run the last failures first. "
+ "This may re-order tests and thus lead to "
+ "repeated fixture setup/teardown")
+ group.addoption(
+ '--cache-show', action='store_true', dest="cacheshow",
+ help="show cache contents, don't perform collection or tests")
+ group.addoption(
+ '--cache-clear', action='store_true', dest="cacheclear",
+ help="remove all cache contents at start of test run.")
+
+
+def pytest_cmdline_main(config):
+ if config.option.cacheshow:
+ from _pytest.main import wrap_session
+ return wrap_session(config, cacheshow)
+
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_configure(config):
+ config.cache = Cache(config)
+ config.pluginmanager.register(LFPlugin(config), "lfplugin")
+
+
+@pytest.fixture
+def cache(request):
+ """
+ Return a cache object that can persist state between testing sessions.
+
+ cache.get(key, default)
+ cache.set(key, value)
+
+ Keys must be a ``/`` separated value, where the first part is usually the
+ name of your plugin or application to avoid clashes with other cache users.
+
+ Values can be any object handled by the json stdlib module.
+ """
+ return request.config.cache
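A minimal usage sketch for this fixture (hypothetical test; the key name is an example only, following the plugin/application prefix convention described above):

    def test_expensive_answer(cache):
        value = cache.get("example/answer", None)
        if value is None:
            value = 42                      # stand-in for an expensive computation
            cache.set("example/answer", value)
        assert value == 42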
+
+
+def pytest_report_header(config):
+ if config.option.verbose:
+ relpath = py.path.local().bestrelpath(config.cache._cachedir)
+ return "cachedir: %s" % relpath
+
+
+def cacheshow(config, session):
+ from pprint import pprint
+ tw = py.io.TerminalWriter()
+ tw.line("cachedir: " + str(config.cache._cachedir))
+ if not config.cache._cachedir.check():
+ tw.line("cache is empty")
+ return 0
+ dummy = object()
+ basedir = config.cache._cachedir
+ vdir = basedir.join("v")
+ tw.sep("-", "cache values")
+ for valpath in vdir.visit(lambda x: x.isfile()):
+ key = valpath.relto(vdir).replace(valpath.sep, "/")
+ val = config.cache.get(key, dummy)
+ if val is dummy:
+ tw.line("%s contains unreadable content, "
+ "will be ignored" % key)
+ else:
+ tw.line("%s contains:" % key)
+ stream = py.io.TextIO()
+ pprint(val, stream=stream)
+ for line in stream.getvalue().splitlines():
+ tw.line(" " + line)
+
+ ddir = basedir.join("d")
+ if ddir.isdir() and ddir.listdir():
+ tw.sep("-", "cache directories")
+ for p in basedir.join("d").visit():
+ #if p.check(dir=1):
+ # print("%s/" % p.relto(basedir))
+ if p.isfile():
+ key = p.relto(basedir)
+ tw.line("%s is a file of length %d" % (
+ key, p.size()))
+ return 0
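For reference, the values listed by cacheshow() live on disk under the layout implied by _getvaluepath() above; the key below is the one LFPlugin writes, and the path is relative to the session rootdir:

    import os

    key = "cache/lastfailed"
    value_path = os.path.join(".cache", "v", *key.split("/"))
    print(value_path)                       # .cache/v/cache/lastfailed on POSIX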
diff --git a/python/pytest/_pytest/capture.py b/python/pytest/_pytest/capture.py
new file mode 100644
index 000000000..3895a714a
--- /dev/null
+++ b/python/pytest/_pytest/capture.py
@@ -0,0 +1,472 @@
+"""
+per-test stdout/stderr capturing mechanism.
+
+"""
+from __future__ import with_statement
+
+import sys
+import os
+from tempfile import TemporaryFile
+
+import py
+import pytest
+
+from py.io import TextIO
+unicode = py.builtin.text
+
+patchsysdict = {0: 'stdin', 1: 'stdout', 2: 'stderr'}
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption(
+ '--capture', action="store",
+ default="fd" if hasattr(os, "dup") else "sys",
+ metavar="method", choices=['fd', 'sys', 'no'],
+ help="per-test capturing method: one of fd|sys|no.")
+ group._addoption(
+ '-s', action="store_const", const="no", dest="capture",
+ help="shortcut for --capture=no.")
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_load_initial_conftests(early_config, parser, args):
+ _readline_workaround()
+ ns = early_config.known_args_namespace
+ pluginmanager = early_config.pluginmanager
+ capman = CaptureManager(ns.capture)
+ pluginmanager.register(capman, "capturemanager")
+
+ # make sure that capturemanager is properly reset at final shutdown
+ early_config.add_cleanup(capman.reset_capturings)
+
+ # make sure logging does not raise exceptions at the end
+ def silence_logging_at_shutdown():
+ if "logging" in sys.modules:
+ sys.modules["logging"].raiseExceptions = False
+ early_config.add_cleanup(silence_logging_at_shutdown)
+
+ # finally trigger conftest loading but while capturing (issue93)
+ capman.init_capturings()
+ outcome = yield
+ out, err = capman.suspendcapture()
+ if outcome.excinfo is not None:
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+
+
+class CaptureManager:
+ def __init__(self, method):
+ self._method = method
+
+ def _getcapture(self, method):
+ if method == "fd":
+ return MultiCapture(out=True, err=True, Capture=FDCapture)
+ elif method == "sys":
+ return MultiCapture(out=True, err=True, Capture=SysCapture)
+ elif method == "no":
+ return MultiCapture(out=False, err=False, in_=False)
+ else:
+ raise ValueError("unknown capturing method: %r" % method)
+
+ def init_capturings(self):
+ assert not hasattr(self, "_capturing")
+ self._capturing = self._getcapture(self._method)
+ self._capturing.start_capturing()
+
+ def reset_capturings(self):
+ cap = self.__dict__.pop("_capturing", None)
+ if cap is not None:
+ cap.pop_outerr_to_orig()
+ cap.stop_capturing()
+
+ def resumecapture(self):
+ self._capturing.resume_capturing()
+
+ def suspendcapture(self, in_=False):
+ self.deactivate_funcargs()
+ cap = getattr(self, "_capturing", None)
+ if cap is not None:
+ try:
+ outerr = cap.readouterr()
+ finally:
+ cap.suspend_capturing(in_=in_)
+ return outerr
+
+ def activate_funcargs(self, pyfuncitem):
+ capfuncarg = pyfuncitem.__dict__.pop("_capfuncarg", None)
+ if capfuncarg is not None:
+ capfuncarg._start()
+ self._capfuncarg = capfuncarg
+
+ def deactivate_funcargs(self):
+ capfuncarg = self.__dict__.pop("_capfuncarg", None)
+ if capfuncarg is not None:
+ capfuncarg.close()
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_make_collect_report(self, collector):
+ if isinstance(collector, pytest.File):
+ self.resumecapture()
+ outcome = yield
+ out, err = self.suspendcapture()
+ rep = outcome.get_result()
+ if out:
+ rep.sections.append(("Captured stdout", out))
+ if err:
+ rep.sections.append(("Captured stderr", err))
+ else:
+ yield
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_runtest_setup(self, item):
+ self.resumecapture()
+ yield
+ self.suspendcapture_item(item, "setup")
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_runtest_call(self, item):
+ self.resumecapture()
+ self.activate_funcargs(item)
+ yield
+ #self.deactivate_funcargs() called from suspendcapture()
+ self.suspendcapture_item(item, "call")
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_runtest_teardown(self, item):
+ self.resumecapture()
+ yield
+ self.suspendcapture_item(item, "teardown")
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_keyboard_interrupt(self, excinfo):
+ self.reset_capturings()
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_internalerror(self, excinfo):
+ self.reset_capturings()
+
+ def suspendcapture_item(self, item, when):
+ out, err = self.suspendcapture()
+ item.add_report_section(when, "stdout", out)
+ item.add_report_section(when, "stderr", err)
+
+error_capsysfderror = "cannot use capsys and capfd at the same time"
+
+
+@pytest.fixture
+def capsys(request):
+ """enables capturing of writes to sys.stdout/sys.stderr and makes
+ captured output available via ``capsys.readouterr()`` method calls
+ which return a ``(out, err)`` tuple.
+ """
+ if "capfd" in request._funcargs:
+ raise request.raiseerror(error_capsysfderror)
+ request.node._capfuncarg = c = CaptureFixture(SysCapture)
+ return c
+
+@pytest.fixture
+def capfd(request):
+ """enables capturing of writes to file descriptors 1 and 2 and makes
+ captured output available via ``capfd.readouterr()`` method calls
+ which return a ``(out, err)`` tuple.
+ """
+ if "capsys" in request._funcargs:
+ request.raiseerror(error_capsysfderror)
+ if not hasattr(os, 'dup'):
+ pytest.skip("capfd funcarg needs os.dup")
+ request.node._capfuncarg = c = CaptureFixture(FDCapture)
+ return c
+
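+# Illustrative sketch (hypothetical test): typical use of the fixtures above.
+# ``capsys`` captures sys.stdout/sys.stderr, ``capfd`` captures at the
+# file-descriptor level; both expose the same readouterr() interface.
+#
+#     def test_greeting(capsys):
+#         print("hello")
+#         out, err = capsys.readouterr()
+#         assert out == "hello\n"
+#         assert err == ""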
+
+class CaptureFixture:
+ def __init__(self, captureclass):
+ self.captureclass = captureclass
+
+ def _start(self):
+ self._capture = MultiCapture(out=True, err=True, in_=False,
+ Capture=self.captureclass)
+ self._capture.start_capturing()
+
+ def close(self):
+ cap = self.__dict__.pop("_capture", None)
+ if cap is not None:
+ self._outerr = cap.pop_outerr_to_orig()
+ cap.stop_capturing()
+
+ def readouterr(self):
+ try:
+ return self._capture.readouterr()
+ except AttributeError:
+ return self._outerr
+
+
+def safe_text_dupfile(f, mode, default_encoding="UTF8"):
+ """ return a open text file object that's a duplicate of f on the
+ FD-level if possible.
+ """
+ encoding = getattr(f, "encoding", None)
+ try:
+ fd = f.fileno()
+ except Exception:
+ if "b" not in getattr(f, "mode", "") and hasattr(f, "encoding"):
+ # we seem to have a text stream, let's just use it
+ return f
+ else:
+ newfd = os.dup(fd)
+ if "b" not in mode:
+ mode += "b"
+ f = os.fdopen(newfd, mode, 0) # no buffering
+ return EncodedFile(f, encoding or default_encoding)
+
+
+class EncodedFile(object):
+ errors = "strict" # possibly needed by py3 code (issue555)
+ def __init__(self, buffer, encoding):
+ self.buffer = buffer
+ self.encoding = encoding
+
+ def write(self, obj):
+ if isinstance(obj, unicode):
+ obj = obj.encode(self.encoding, "replace")
+ self.buffer.write(obj)
+
+ def writelines(self, linelist):
+ data = ''.join(linelist)
+ self.write(data)
+
+ def __getattr__(self, name):
+ return getattr(object.__getattribute__(self, "buffer"), name)
+
+
+class MultiCapture(object):
+ out = err = in_ = None
+
+ def __init__(self, out=True, err=True, in_=True, Capture=None):
+ if in_:
+ self.in_ = Capture(0)
+ if out:
+ self.out = Capture(1)
+ if err:
+ self.err = Capture(2)
+
+ def start_capturing(self):
+ if self.in_:
+ self.in_.start()
+ if self.out:
+ self.out.start()
+ if self.err:
+ self.err.start()
+
+ def pop_outerr_to_orig(self):
+ """ pop current snapshot out/err capture and flush to orig streams. """
+ out, err = self.readouterr()
+ if out:
+ self.out.writeorg(out)
+ if err:
+ self.err.writeorg(err)
+ return out, err
+
+ def suspend_capturing(self, in_=False):
+ if self.out:
+ self.out.suspend()
+ if self.err:
+ self.err.suspend()
+ if in_ and self.in_:
+ self.in_.suspend()
+ self._in_suspended = True
+
+ def resume_capturing(self):
+ if self.out:
+ self.out.resume()
+ if self.err:
+ self.err.resume()
+ if hasattr(self, "_in_suspended"):
+ self.in_.resume()
+ del self._in_suspended
+
+ def stop_capturing(self):
+ """ stop capturing and reset capturing streams """
+ if hasattr(self, '_reset'):
+ raise ValueError("was already stopped")
+ self._reset = True
+ if self.out:
+ self.out.done()
+ if self.err:
+ self.err.done()
+ if self.in_:
+ self.in_.done()
+
+ def readouterr(self):
+ """ return snapshot unicode value of stdout/stderr capturings. """
+ return (self.out.snap() if self.out is not None else "",
+ self.err.snap() if self.err is not None else "")
+
+class NoCapture:
+ __init__ = start = done = suspend = resume = lambda *args: None
+
+class FDCapture:
+ """ Capture IO to/from a given os-level filedescriptor. """
+
+ def __init__(self, targetfd, tmpfile=None):
+ self.targetfd = targetfd
+ try:
+ self.targetfd_save = os.dup(self.targetfd)
+ except OSError:
+ self.start = lambda: None
+ self.done = lambda: None
+ else:
+ if targetfd == 0:
+ assert not tmpfile, "cannot set tmpfile with stdin"
+ tmpfile = open(os.devnull, "r")
+ self.syscapture = SysCapture(targetfd)
+ else:
+ if tmpfile is None:
+ f = TemporaryFile()
+ with f:
+ tmpfile = safe_text_dupfile(f, mode="wb+")
+ if targetfd in patchsysdict:
+ self.syscapture = SysCapture(targetfd, tmpfile)
+ else:
+ self.syscapture = NoCapture()
+ self.tmpfile = tmpfile
+ self.tmpfile_fd = tmpfile.fileno()
+
+ def __repr__(self):
+ return "<FDCapture %s oldfd=%s>" % (self.targetfd, self.targetfd_save)
+
+ def start(self):
+ """ Start capturing on targetfd using memorized tmpfile. """
+ try:
+ os.fstat(self.targetfd_save)
+ except (AttributeError, OSError):
+ raise ValueError("saved filedescriptor not valid anymore")
+ os.dup2(self.tmpfile_fd, self.targetfd)
+ self.syscapture.start()
+
+ def snap(self):
+ f = self.tmpfile
+ f.seek(0)
+ res = f.read()
+ if res:
+ enc = getattr(f, "encoding", None)
+ if enc and isinstance(res, bytes):
+ res = py.builtin._totext(res, enc, "replace")
+ f.truncate(0)
+ f.seek(0)
+ return res
+ return ''
+
+ def done(self):
+ """ stop capturing, restore streams, return original capture file,
+ seeked to position zero. """
+ targetfd_save = self.__dict__.pop("targetfd_save")
+ os.dup2(targetfd_save, self.targetfd)
+ os.close(targetfd_save)
+ self.syscapture.done()
+ self.tmpfile.close()
+
+ def suspend(self):
+ self.syscapture.suspend()
+ os.dup2(self.targetfd_save, self.targetfd)
+
+ def resume(self):
+ self.syscapture.resume()
+ os.dup2(self.tmpfile_fd, self.targetfd)
+
+ def writeorg(self, data):
+ """ write to original file descriptor. """
+ if py.builtin._istext(data):
+ data = data.encode("utf8") # XXX use encoding of original stream
+ os.write(self.targetfd_save, data)
+
+
+class SysCapture:
+ def __init__(self, fd, tmpfile=None):
+ name = patchsysdict[fd]
+ self._old = getattr(sys, name)
+ self.name = name
+ if tmpfile is None:
+ if name == "stdin":
+ tmpfile = DontReadFromInput()
+ else:
+ tmpfile = TextIO()
+ self.tmpfile = tmpfile
+
+ def start(self):
+ setattr(sys, self.name, self.tmpfile)
+
+ def snap(self):
+ f = self.tmpfile
+ res = f.getvalue()
+ f.truncate(0)
+ f.seek(0)
+ return res
+
+ def done(self):
+ setattr(sys, self.name, self._old)
+ del self._old
+ self.tmpfile.close()
+
+ def suspend(self):
+ setattr(sys, self.name, self._old)
+
+ def resume(self):
+ setattr(sys, self.name, self.tmpfile)
+
+ def writeorg(self, data):
+ self._old.write(data)
+ self._old.flush()
+
+
+class DontReadFromInput:
+ """Temporary stub class. Ideally when stdin is accessed, the
+ capturing should be turned off, with possibly all data captured
+ so far sent to the screen. This should be configurable, though,
+ because in automated test runs it is better to crash than
+ hang indefinitely.
+ """
+
+ encoding = None
+
+ def read(self, *args):
+ raise IOError("reading from stdin while output is captured")
+ readline = read
+ readlines = read
+ __iter__ = read
+
+ def fileno(self):
+ raise ValueError("redirected Stdin is pseudofile, has no fileno()")
+
+ def isatty(self):
+ return False
+
+ def close(self):
+ pass
+
+
+def _readline_workaround():
+ """
+ Ensure readline is imported so that it attaches to the correct stdio
+ handles on Windows.
+
+ Pdb uses readline support where available--when not running from the Python
+ prompt, the readline module is not imported until running the pdb REPL. If
+ running py.test with the --pdb option this means the readline module is not
+ imported until after I/O capture has been started.
+
+ This is a problem for pyreadline, which is often used to implement readline
+ support on Windows, as it does not attach to the correct handles for stdout
+ and/or stdin if they have been redirected by the FDCapture mechanism. This
+ workaround ensures that readline is imported before I/O capture is set up so
+ that it can attach to the actual stdin/out for the console.
+
+ See https://github.com/pytest-dev/pytest/pull/1281
+ """
+
+ if not sys.platform.startswith('win32'):
+ return
+ try:
+ import readline # noqa
+ except ImportError:
+ pass
diff --git a/python/pytest/_pytest/config.py b/python/pytest/_pytest/config.py
new file mode 100644
index 000000000..9a308df2b
--- /dev/null
+++ b/python/pytest/_pytest/config.py
@@ -0,0 +1,1192 @@
+""" command line options, ini-file and conftest.py processing. """
+import argparse
+import shlex
+import traceback
+import types
+import warnings
+
+import py
+# DON'T import pytest here because it causes import cycle troubles
+import sys, os
+import _pytest._code
+import _pytest.hookspec # the extension point definitions
+from _pytest._pluggy import PluginManager, HookimplMarker, HookspecMarker
+
+hookimpl = HookimplMarker("pytest")
+hookspec = HookspecMarker("pytest")
+
+# pytest startup
+#
+
+
+class ConftestImportFailure(Exception):
+ def __init__(self, path, excinfo):
+ Exception.__init__(self, path, excinfo)
+ self.path = path
+ self.excinfo = excinfo
+
+
+def main(args=None, plugins=None):
+ """ return exit code, after performing an in-process test run.
+
+ :arg args: list of command line arguments.
+
+ :arg plugins: list of plugin objects to be auto-registered during
+ initialization.
+ """
+ try:
+ try:
+ config = _prepareconfig(args, plugins)
+ except ConftestImportFailure as e:
+ tw = py.io.TerminalWriter(sys.stderr)
+ for line in traceback.format_exception(*e.excinfo):
+ tw.line(line.rstrip(), red=True)
+ tw.line("ERROR: could not load %s\n" % (e.path), red=True)
+ return 4
+ else:
+ try:
+ config.pluginmanager.check_pending()
+ return config.hook.pytest_cmdline_main(config=config)
+ finally:
+ config._ensure_unconfigure()
+ except UsageError as e:
+ for msg in e.args:
+ sys.stderr.write("ERROR: %s\n" %(msg,))
+ return 4
+
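+# Illustrative sketch: main() can also be invoked programmatically; the test
+# path and the plugin object below are hypothetical. The return value is the
+# same exit code a command line run would produce.
+#
+#     import pytest
+#     exit_code = pytest.main(["-q", "tests/"], plugins=[MyPlugin()])
+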
+class cmdline: # compatibility namespace
+ main = staticmethod(main)
+
+class UsageError(Exception):
+ """ error in pytest usage or invocation"""
+
+_preinit = []
+
+default_plugins = (
+ "mark main terminal runner python pdb unittest capture skipping "
+ "tmpdir monkeypatch recwarn pastebin helpconfig nose assertion genscript "
+ "junitxml resultlog doctest cacheprovider").split()
+
+builtin_plugins = set(default_plugins)
+builtin_plugins.add("pytester")
+
+
+def _preloadplugins():
+ assert not _preinit
+ _preinit.append(get_config())
+
+def get_config():
+ if _preinit:
+ return _preinit.pop(0)
+ # subsequent calls to main will create a fresh instance
+ pluginmanager = PytestPluginManager()
+ config = Config(pluginmanager)
+ for spec in default_plugins:
+ pluginmanager.import_plugin(spec)
+ return config
+
+def get_plugin_manager():
+ """
+ Obtain a new instance of the
+ :py:class:`_pytest.config.PytestPluginManager`, with default plugins
+ already loaded.
+
+ This function can be used by integrations with other tools, like IDEs
+ that hook into pytest to run tests.
+ """
+ return get_config().pluginmanager
+
+def _prepareconfig(args=None, plugins=None):
+ if args is None:
+ args = sys.argv[1:]
+ elif isinstance(args, py.path.local):
+ args = [str(args)]
+ elif not isinstance(args, (tuple, list)):
+ if not isinstance(args, str):
+ raise ValueError("not a string or argument list: %r" % (args,))
+ args = shlex.split(args, posix=sys.platform != "win32")
+ config = get_config()
+ pluginmanager = config.pluginmanager
+ try:
+ if plugins:
+ for plugin in plugins:
+ if isinstance(plugin, py.builtin._basestring):
+ pluginmanager.consider_pluginarg(plugin)
+ else:
+ pluginmanager.register(plugin)
+ return pluginmanager.hook.pytest_cmdline_parse(
+ pluginmanager=pluginmanager, args=args)
+ except BaseException:
+ config._ensure_unconfigure()
+ raise
+
+
+class PytestPluginManager(PluginManager):
+ """
+ Overwrites :py:class:`pluggy.PluginManager` to add pytest-specific
+ functionality:
+
+ * loading plugins from the command line, ``PYTEST_PLUGINS`` env variable and
+ ``pytest_plugins`` global variables found in plugins being loaded;
+ * ``conftest.py`` loading during start-up;
+ """
+ def __init__(self):
+ super(PytestPluginManager, self).__init__("pytest", implprefix="pytest_")
+ self._conftest_plugins = set()
+
+ # state related to local conftest plugins
+ self._path2confmods = {}
+ self._conftestpath2mod = {}
+ self._confcutdir = None
+ self._noconftest = False
+
+ self.add_hookspecs(_pytest.hookspec)
+ self.register(self)
+ if os.environ.get('PYTEST_DEBUG'):
+ err = sys.stderr
+ encoding = getattr(err, 'encoding', 'utf8')
+ try:
+ err = py.io.dupfile(err, encoding=encoding)
+ except Exception:
+ pass
+ self.trace.root.setwriter(err.write)
+ self.enable_tracing()
+
+ def addhooks(self, module_or_class):
+ """
+ .. deprecated:: 2.8
+
+ Use :py:meth:`pluggy.PluginManager.add_hookspecs` instead.
+ """
+ warning = dict(code="I2",
+ fslocation=_pytest._code.getfslineno(sys._getframe(1)),
+ nodeid=None,
+ message="use pluginmanager.add_hookspecs instead of "
+ "deprecated addhooks() method.")
+ self._warn(warning)
+ return self.add_hookspecs(module_or_class)
+
+ def parse_hookimpl_opts(self, plugin, name):
+ # pytest hooks are always prefixed with pytest_
+ # so we avoid accessing possibly non-readable attributes
+ # (see issue #1073)
+ if not name.startswith("pytest_"):
+ return
+ # ignore some historic special names which cannot be hooks anyway
+ if name == "pytest_plugins" or name.startswith("pytest_funcarg__"):
+ return
+
+ method = getattr(plugin, name)
+ opts = super(PytestPluginManager, self).parse_hookimpl_opts(plugin, name)
+ if opts is not None:
+ for name in ("tryfirst", "trylast", "optionalhook", "hookwrapper"):
+ opts.setdefault(name, hasattr(method, name))
+ return opts
+
+ def parse_hookspec_opts(self, module_or_class, name):
+ opts = super(PytestPluginManager, self).parse_hookspec_opts(
+ module_or_class, name)
+ if opts is None:
+ method = getattr(module_or_class, name)
+ if name.startswith("pytest_"):
+ opts = {"firstresult": hasattr(method, "firstresult"),
+ "historic": hasattr(method, "historic")}
+ return opts
+
+ def _verify_hook(self, hook, hookmethod):
+ super(PytestPluginManager, self)._verify_hook(hook, hookmethod)
+ if "__multicall__" in hookmethod.argnames:
+ fslineno = _pytest._code.getfslineno(hookmethod.function)
+ warning = dict(code="I1",
+ fslocation=fslineno,
+ nodeid=None,
+ message="%r hook uses deprecated __multicall__ "
+ "argument" % (hook.name))
+ self._warn(warning)
+
+ def register(self, plugin, name=None):
+ ret = super(PytestPluginManager, self).register(plugin, name)
+ if ret:
+ self.hook.pytest_plugin_registered.call_historic(
+ kwargs=dict(plugin=plugin, manager=self))
+ return ret
+
+ def getplugin(self, name):
+ # support deprecated naming because plugins (e.g. xdist) use it
+ return self.get_plugin(name)
+
+ def hasplugin(self, name):
+ """Return True if the plugin with the given name is registered."""
+ return bool(self.get_plugin(name))
+
+ def pytest_configure(self, config):
+ # XXX now that the pluginmanager exposes hookimpl(tryfirst...)
+ # we should remove tryfirst/trylast as markers
+ config.addinivalue_line("markers",
+ "tryfirst: mark a hook implementation function such that the "
+ "plugin machinery will try to call it first/as early as possible.")
+ config.addinivalue_line("markers",
+ "trylast: mark a hook implementation function such that the "
+ "plugin machinery will try to call it last/as late as possible.")
+
+ def _warn(self, message):
+ kwargs = message if isinstance(message, dict) else {
+ 'code': 'I1',
+ 'message': message,
+ 'fslocation': None,
+ 'nodeid': None,
+ }
+ self.hook.pytest_logwarning.call_historic(kwargs=kwargs)
+
+ #
+ # internal API for local conftest plugin handling
+ #
+ def _set_initial_conftests(self, namespace):
+ """ load initial conftest files given a preparsed "namespace".
+ As conftest files may add their own command line options
+ which have arguments ('--my-opt somepath') we might get some
+ false positives. All builtin and 3rd party plugins will have
+ been loaded, however, so common options will not confuse our logic
+ here.
+ """
+ current = py.path.local()
+ self._confcutdir = current.join(namespace.confcutdir, abs=True) \
+ if namespace.confcutdir else None
+ self._noconftest = namespace.noconftest
+ testpaths = namespace.file_or_dir
+ foundanchor = False
+ for path in testpaths:
+ path = str(path)
+ # remove node-id syntax
+ i = path.find("::")
+ if i != -1:
+ path = path[:i]
+ anchor = current.join(path, abs=1)
+ if exists(anchor): # we found some file object
+ self._try_load_conftest(anchor)
+ foundanchor = True
+ if not foundanchor:
+ self._try_load_conftest(current)
+
+ def _try_load_conftest(self, anchor):
+ self._getconftestmodules(anchor)
+ # let's also consider test* subdirs
+ if anchor.check(dir=1):
+ for x in anchor.listdir("test*"):
+ if x.check(dir=1):
+ self._getconftestmodules(x)
+
+ def _getconftestmodules(self, path):
+ if self._noconftest:
+ return []
+ try:
+ return self._path2confmods[path]
+ except KeyError:
+ if path.isfile():
+ clist = self._getconftestmodules(path.dirpath())
+ else:
+ # XXX these days we may rather want to use config.rootdir
+ # and allow users to opt into looking into the rootdir parent
+ # directories instead of requiring to specify confcutdir
+ clist = []
+ for parent in path.parts():
+ if self._confcutdir and self._confcutdir.relto(parent):
+ continue
+ conftestpath = parent.join("conftest.py")
+ if conftestpath.isfile():
+ mod = self._importconftest(conftestpath)
+ clist.append(mod)
+
+ self._path2confmods[path] = clist
+ return clist
+
+ def _rget_with_confmod(self, name, path):
+ modules = self._getconftestmodules(path)
+ for mod in reversed(modules):
+ try:
+ return mod, getattr(mod, name)
+ except AttributeError:
+ continue
+ raise KeyError(name)
+
+ def _importconftest(self, conftestpath):
+ try:
+ return self._conftestpath2mod[conftestpath]
+ except KeyError:
+ pkgpath = conftestpath.pypkgpath()
+ if pkgpath is None:
+ _ensure_removed_sysmodule(conftestpath.purebasename)
+ try:
+ mod = conftestpath.pyimport()
+ except Exception:
+ raise ConftestImportFailure(conftestpath, sys.exc_info())
+
+ self._conftest_plugins.add(mod)
+ self._conftestpath2mod[conftestpath] = mod
+ dirpath = conftestpath.dirpath()
+ if dirpath in self._path2confmods:
+ for path, mods in self._path2confmods.items():
+ if path and path.relto(dirpath) or path == dirpath:
+ assert mod not in mods
+ mods.append(mod)
+ self.trace("loaded conftestmodule %r" %(mod))
+ self.consider_conftest(mod)
+ return mod
+
+ #
+ # API for bootstrapping plugin loading
+ #
+ #
+
+ def consider_preparse(self, args):
+ for opt1,opt2 in zip(args, args[1:]):
+ if opt1 == "-p":
+ self.consider_pluginarg(opt2)
+
+ def consider_pluginarg(self, arg):
+ if arg.startswith("no:"):
+ name = arg[3:]
+ self.set_blocked(name)
+ if not name.startswith("pytest_"):
+ self.set_blocked("pytest_" + name)
+ else:
+ self.import_plugin(arg)
+
+ def consider_conftest(self, conftestmodule):
+ if self.register(conftestmodule, name=conftestmodule.__file__):
+ self.consider_module(conftestmodule)
+
+ def consider_env(self):
+ self._import_plugin_specs(os.environ.get("PYTEST_PLUGINS"))
+
+ def consider_module(self, mod):
+ self._import_plugin_specs(getattr(mod, "pytest_plugins", None))
+
+ def _import_plugin_specs(self, spec):
+ if spec:
+ if isinstance(spec, str):
+ spec = spec.split(",")
+ for import_spec in spec:
+ self.import_plugin(import_spec)
+
+ def import_plugin(self, modname):
+ # most often modname refers to builtin modules, e.g. "pytester",
+ # "terminal" or "capture". Those plugins are registered under their
+ # basename for historic purposes but must be imported with the
+ # _pytest prefix.
+ assert isinstance(modname, str)
+ if self.get_plugin(modname) is not None:
+ return
+ if modname in builtin_plugins:
+ importspec = "_pytest." + modname
+ else:
+ importspec = modname
+ try:
+ __import__(importspec)
+ except ImportError as e:
+ new_exc = ImportError('Error importing plugin "%s": %s' % (modname, e))
+ # copy over name and path attributes
+ for attr in ('name', 'path'):
+ if hasattr(e, attr):
+ setattr(new_exc, attr, getattr(e, attr))
+ raise new_exc
+ except Exception as e:
+ import pytest
+ if not hasattr(pytest, 'skip') or not isinstance(e, pytest.skip.Exception):
+ raise
+ self._warn("skipped plugin %r: %s" %((modname, e.msg)))
+ else:
+ mod = sys.modules[importspec]
+ self.register(mod, modname)
+ self.consider_module(mod)
+
+
+class Parser:
+ """ Parser for command line arguments and ini-file values.
+
+ :ivar extra_info: dict of generic param -> value to display in case
+ there's an error processing the command line arguments.
+ """
+
+ def __init__(self, usage=None, processopt=None):
+ self._anonymous = OptionGroup("custom options", parser=self)
+ self._groups = []
+ self._processopt = processopt
+ self._usage = usage
+ self._inidict = {}
+ self._ininames = []
+ self.extra_info = {}
+
+ def processoption(self, option):
+ if self._processopt:
+ if option.dest:
+ self._processopt(option)
+
+ def getgroup(self, name, description="", after=None):
+ """ get (or create) a named option Group.
+
+ :name: name of the option group.
+ :description: long description for --help output.
+ :after: name of other group, used for ordering --help output.
+
+ The returned group object has an ``addoption`` method with the same
+ signature as :py:func:`parser.addoption
+ <_pytest.config.Parser.addoption>` but will be shown in the
+ respective group in the output of ``pytest --help``.
+ """
+ for group in self._groups:
+ if group.name == name:
+ return group
+ group = OptionGroup(name, description, parser=self)
+ i = 0
+ for i, grp in enumerate(self._groups):
+ if grp.name == after:
+ break
+ self._groups.insert(i+1, group)
+ return group
+
+ def addoption(self, *opts, **attrs):
+ """ register a command line option.
+
+ :opts: option names, can be short or long options.
+ :attrs: same attributes which the ``add_argument()`` function of the
+ `argparse library
+ <http://docs.python.org/2/library/argparse.html>`_
+ accepts.
+
+ After command line parsing options are available on the pytest config
+ object via ``config.option.NAME`` where ``NAME`` is usually set
+ by passing a ``dest`` attribute, for example
+ ``addoption("--long", dest="NAME", ...)``.
+ """
+ self._anonymous.addoption(*opts, **attrs)
+
+ def parse(self, args, namespace=None):
+ from _pytest._argcomplete import try_argcomplete
+ self.optparser = self._getparser()
+ try_argcomplete(self.optparser)
+ return self.optparser.parse_args([str(x) for x in args], namespace=namespace)
+
+ def _getparser(self):
+ from _pytest._argcomplete import filescompleter
+ optparser = MyOptionParser(self, self.extra_info)
+ groups = self._groups + [self._anonymous]
+ for group in groups:
+ if group.options:
+ desc = group.description or group.name
+ arggroup = optparser.add_argument_group(desc)
+ for option in group.options:
+ n = option.names()
+ a = option.attrs()
+ arggroup.add_argument(*n, **a)
+ # bash like autocompletion for dirs (appending '/')
+ optparser.add_argument(FILE_OR_DIR, nargs='*').completer=filescompleter
+ return optparser
+
+ def parse_setoption(self, args, option, namespace=None):
+ parsedoption = self.parse(args, namespace=namespace)
+ for name, value in parsedoption.__dict__.items():
+ setattr(option, name, value)
+ return getattr(parsedoption, FILE_OR_DIR)
+
+ def parse_known_args(self, args, namespace=None):
+ """parses and returns a namespace object with known arguments at this
+ point.
+ """
+ return self.parse_known_and_unknown_args(args, namespace=namespace)[0]
+
+ def parse_known_and_unknown_args(self, args, namespace=None):
+ """parses and returns a namespace object with known arguments, and
+ the remaining arguments unknown at this point.
+ """
+ optparser = self._getparser()
+ args = [str(x) for x in args]
+ return optparser.parse_known_args(args, namespace=namespace)
+
+ def addini(self, name, help, type=None, default=None):
+ """ register an ini-file option.
+
+ :name: name of the ini-variable
+ :type: type of the variable, can be ``pathlist``, ``args``, ``linelist``
+ or ``bool``.
+ :default: default value if no ini-file option exists but is queried.
+
+ The value of ini-variables can be retrieved via a call to
+ :py:func:`config.getini(name) <_pytest.config.Config.getini>`.
+ """
+ assert type in (None, "pathlist", "args", "linelist", "bool")
+ self._inidict[name] = (help, type, default)
+ self._ininames.append(name)
+
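+# Illustrative sketch: how a conftest.py or plugin typically feeds this Parser
+# via the pytest_addoption hook (hypothetical option and ini names):
+#
+#     def pytest_addoption(parser):
+#         parser.addoption("--run-slow", action="store_true", default=False,
+#                          help="also run tests marked as slow")
+#         parser.addini("slow_timeout", "timeout for slow tests", default="60")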
+
+class ArgumentError(Exception):
+ """
+ Raised if an Argument instance is created with invalid or
+ inconsistent arguments.
+ """
+
+ def __init__(self, msg, option):
+ self.msg = msg
+ self.option_id = str(option)
+
+ def __str__(self):
+ if self.option_id:
+ return "option %s: %s" % (self.option_id, self.msg)
+ else:
+ return self.msg
+
+
+class Argument:
+ """class that mimics the necessary behaviour of optparse.Option """
+ _typ_map = {
+ 'int': int,
+ 'string': str,
+ }
+ # enable after some grace period for plugin writers
+ TYPE_WARN = False
+
+ def __init__(self, *names, **attrs):
+ """store parms in private vars for use in add_argument"""
+ self._attrs = attrs
+ self._short_opts = []
+ self._long_opts = []
+ self.dest = attrs.get('dest')
+ if self.TYPE_WARN:
+ try:
+ help = attrs['help']
+ if '%default' in help:
+ warnings.warn(
+ 'pytest now uses argparse. "%default" should be'
+ ' changed to "%(default)s" ',
+ FutureWarning,
+ stacklevel=3)
+ except KeyError:
+ pass
+ try:
+ typ = attrs['type']
+ except KeyError:
+ pass
+ else:
+ # this might raise a keyerror as well, don't want to catch that
+ if isinstance(typ, py.builtin._basestring):
+ if typ == 'choice':
+ if self.TYPE_WARN:
+ warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this is optional and when supplied '
+ ' should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ # argparse expects a type here; take it from
+ # the type of the first element
+ attrs['type'] = type(attrs['choices'][0])
+ else:
+ if self.TYPE_WARN:
+ warnings.warn(
+ 'type argument to addoption() is a string %r.'
+ ' For parsearg this should be a type.'
+ ' (options: %s)' % (typ, names),
+ FutureWarning,
+ stacklevel=3)
+ attrs['type'] = Argument._typ_map[typ]
+ # used in test_parseopt -> test_parse_defaultgetter
+ self.type = attrs['type']
+ else:
+ self.type = typ
+ try:
+ # attribute existence is tested in Config._processopt
+ self.default = attrs['default']
+ except KeyError:
+ pass
+ self._set_opt_strings(names)
+ if not self.dest:
+ if self._long_opts:
+ self.dest = self._long_opts[0][2:].replace('-', '_')
+ else:
+ try:
+ self.dest = self._short_opts[0][1:]
+ except IndexError:
+ raise ArgumentError(
+ 'need a long or short option', self)
+
+ def names(self):
+ return self._short_opts + self._long_opts
+
+ def attrs(self):
+ # update any attributes set by processopt
+ attrs = 'default dest help'.split()
+ if self.dest:
+ attrs.append(self.dest)
+ for attr in attrs:
+ try:
+ self._attrs[attr] = getattr(self, attr)
+ except AttributeError:
+ pass
+ if self._attrs.get('help'):
+ a = self._attrs['help']
+ a = a.replace('%default', '%(default)s')
+ #a = a.replace('%prog', '%(prog)s')
+ self._attrs['help'] = a
+ return self._attrs
+
+ def _set_opt_strings(self, opts):
+ """directly from optparse
+
+ might not be necessary as this is passed to argparse later on"""
+ for opt in opts:
+ if len(opt) < 2:
+ raise ArgumentError(
+ "invalid option string %r: "
+ "must be at least two characters long" % opt, self)
+ elif len(opt) == 2:
+ if not (opt[0] == "-" and opt[1] != "-"):
+ raise ArgumentError(
+ "invalid short option string %r: "
+ "must be of the form -x, (x any non-dash char)" % opt,
+ self)
+ self._short_opts.append(opt)
+ else:
+ if not (opt[0:2] == "--" and opt[2] != "-"):
+ raise ArgumentError(
+ "invalid long option string %r: "
+ "must start with --, followed by non-dash" % opt,
+ self)
+ self._long_opts.append(opt)
+
+ def __repr__(self):
+ retval = 'Argument('
+ if self._short_opts:
+ retval += '_short_opts: ' + repr(self._short_opts) + ', '
+ if self._long_opts:
+ retval += '_long_opts: ' + repr(self._long_opts) + ', '
+ retval += 'dest: ' + repr(self.dest) + ', '
+ if hasattr(self, 'type'):
+ retval += 'type: ' + repr(self.type) + ', '
+ if hasattr(self, 'default'):
+ retval += 'default: ' + repr(self.default) + ', '
+ if retval[-2:] == ', ': # always long enough to test ("Argument(" )
+ retval = retval[:-2]
+ retval += ')'
+ return retval
+
+
+class OptionGroup:
+ def __init__(self, name, description="", parser=None):
+ self.name = name
+ self.description = description
+ self.options = []
+ self.parser = parser
+
+ def addoption(self, *optnames, **attrs):
+ """ add an option to this group.
+
+ If a shortened version of a long option is specified, it will
+ be suppressed in the help. addoption('--twowords', '--two-words')
+ results in help showing '--two-words' only, but --twowords gets
+ accepted **and** the automatic destination is in args.twowords
+ """
+ option = Argument(*optnames, **attrs)
+ self._addoption_instance(option, shortupper=False)
+
+ def _addoption(self, *optnames, **attrs):
+ option = Argument(*optnames, **attrs)
+ self._addoption_instance(option, shortupper=True)
+
+ def _addoption_instance(self, option, shortupper=False):
+ if not shortupper:
+ for opt in option._short_opts:
+ if opt[0] == '-' and opt[1].islower():
+ raise ValueError("lowercase shortoptions reserved")
+ if self.parser:
+ self.parser.processoption(option)
+ self.options.append(option)
+
+
+class MyOptionParser(argparse.ArgumentParser):
+ def __init__(self, parser, extra_info=None):
+ if not extra_info:
+ extra_info = {}
+ self._parser = parser
+ argparse.ArgumentParser.__init__(self, usage=parser._usage,
+ add_help=False, formatter_class=DropShorterLongHelpFormatter)
+ # extra_info is a dict of (param -> value) to display if there's
+ # a usage error, to provide more contextual information to the user
+ self.extra_info = extra_info
+
+ def parse_args(self, args=None, namespace=None):
+ """allow splitting of positional arguments"""
+ args, argv = self.parse_known_args(args, namespace)
+ if argv:
+ for arg in argv:
+ if arg and arg[0] == '-':
+ lines = ['unrecognized arguments: %s' % (' '.join(argv))]
+ for k, v in sorted(self.extra_info.items()):
+ lines.append(' %s: %s' % (k, v))
+ self.error('\n'.join(lines))
+ getattr(args, FILE_OR_DIR).extend(argv)
+ return args
+
+
+class DropShorterLongHelpFormatter(argparse.HelpFormatter):
+ """shorten help for long options that differ only in extra hyphens
+
+ - collapse **long** options that are the same except for extra hyphens
+ - special action attribute map_long_option allows suppressing additional
+ long options
+ - shortcut if there are only two options and one of them is a short one
+ - cache result on action object as this is called at least 2 times
+ """
+ def _format_action_invocation(self, action):
+ orgstr = argparse.HelpFormatter._format_action_invocation(self, action)
+ if orgstr and orgstr[0] != '-': # only optional arguments
+ return orgstr
+ res = getattr(action, '_formatted_action_invocation', None)
+ if res:
+ return res
+ options = orgstr.split(', ')
+ if len(options) == 2 and (len(options[0]) == 2 or len(options[1]) == 2):
+ # a shortcut for '-h, --help' or '--abc', '-a'
+ action._formatted_action_invocation = orgstr
+ return orgstr
+ return_list = []
+ option_map = getattr(action, 'map_long_option', {})
+ if option_map is None:
+ option_map = {}
+ short_long = {}
+ for option in options:
+ if len(option) == 2 or option[2] == ' ':
+ continue
+ if not option.startswith('--'):
+ raise ArgumentError('long optional argument without "--": [%s]'
+ % (option), self)
+ xxoption = option[2:]
+ if xxoption.split()[0] not in option_map:
+ shortened = xxoption.replace('-', '')
+ if shortened not in short_long or \
+ len(short_long[shortened]) < len(xxoption):
+ short_long[shortened] = xxoption
+ # now short_long has been filled out to the longest with dashes
+ # **and** we keep the right option ordering from add_argument
+ for option in options: #
+ if len(option) == 2 or option[2] == ' ':
+ return_list.append(option)
+ if option[2:] == short_long.get(option.replace('-', '')):
+ return_list.append(option.replace(' ', '='))
+ action._formatted_action_invocation = ', '.join(return_list)
+ return action._formatted_action_invocation
+
+
+
+def _ensure_removed_sysmodule(modname):
+ try:
+ del sys.modules[modname]
+ except KeyError:
+ pass
+
+class CmdOptions(object):
+ """ holds cmdline options as attributes."""
+ def __init__(self, values=()):
+ self.__dict__.update(values)
+ def __repr__(self):
+ return "<CmdOptions %r>" %(self.__dict__,)
+ def copy(self):
+ return CmdOptions(self.__dict__)
+
+class Notset:
+ def __repr__(self):
+ return "<NOTSET>"
+
+notset = Notset()
+FILE_OR_DIR = 'file_or_dir'
+
+class Config(object):
+ """ access to configuration values, pluginmanager and plugin hooks. """
+
+ def __init__(self, pluginmanager):
+ #: access to command line option as attributes.
+ #: (deprecated), use :py:func:`getoption() <_pytest.config.Config.getoption>` instead
+ self.option = CmdOptions()
+ _a = FILE_OR_DIR
+ self._parser = Parser(
+ usage="%%(prog)s [options] [%s] [%s] [...]" % (_a, _a),
+ processopt=self._processopt,
+ )
+ #: a pluginmanager instance
+ self.pluginmanager = pluginmanager
+ self.trace = self.pluginmanager.trace.root.get("config")
+ self.hook = self.pluginmanager.hook
+ self._inicache = {}
+ self._opt2dest = {}
+ self._cleanup = []
+ self._warn = self.pluginmanager._warn
+ self.pluginmanager.register(self, "pytestconfig")
+ self._configured = False
+ def do_setns(dic):
+ import pytest
+ setns(pytest, dic)
+ self.hook.pytest_namespace.call_historic(do_setns, {})
+ self.hook.pytest_addoption.call_historic(kwargs=dict(parser=self._parser))
+
+ def add_cleanup(self, func):
+ """ Add a function to be called when the config object gets out of
+ use (usually coinciding with pytest_unconfigure)."""
+ self._cleanup.append(func)
+
+ def _do_configure(self):
+ assert not self._configured
+ self._configured = True
+ self.hook.pytest_configure.call_historic(kwargs=dict(config=self))
+
+ def _ensure_unconfigure(self):
+ if self._configured:
+ self._configured = False
+ self.hook.pytest_unconfigure(config=self)
+ self.hook.pytest_configure._call_history = []
+ while self._cleanup:
+ fin = self._cleanup.pop()
+ fin()
+
+ def warn(self, code, message, fslocation=None):
+ """ generate a warning for this test session. """
+ self.hook.pytest_logwarning.call_historic(kwargs=dict(
+ code=code, message=message,
+ fslocation=fslocation, nodeid=None))
+
+ def get_terminal_writer(self):
+ return self.pluginmanager.get_plugin("terminalreporter")._tw
+
+ def pytest_cmdline_parse(self, pluginmanager, args):
+ # REF1 assert self == pluginmanager.config, (self, pluginmanager.config)
+ self.parse(args)
+ return self
+
+ def notify_exception(self, excinfo, option=None):
+ if option and option.fulltrace:
+ style = "long"
+ else:
+ style = "native"
+ excrepr = excinfo.getrepr(funcargs=True,
+ showlocals=getattr(option, 'showlocals', False),
+ style=style,
+ )
+ res = self.hook.pytest_internalerror(excrepr=excrepr,
+ excinfo=excinfo)
+ if not py.builtin.any(res):
+ for line in str(excrepr).split("\n"):
+ sys.stderr.write("INTERNALERROR> %s\n" %line)
+ sys.stderr.flush()
+
+ def cwd_relative_nodeid(self, nodeid):
+ # nodeid's are relative to the rootpath, compute relative to cwd
+ if self.invocation_dir != self.rootdir:
+ fullpath = self.rootdir.join(nodeid)
+ nodeid = self.invocation_dir.bestrelpath(fullpath)
+ return nodeid
+
+ @classmethod
+ def fromdictargs(cls, option_dict, args):
+ """ constructor useable for subprocesses. """
+ config = get_config()
+ config.option.__dict__.update(option_dict)
+ config.parse(args, addopts=False)
+ for x in config.option.plugins:
+ config.pluginmanager.consider_pluginarg(x)
+ return config
+
+ def _processopt(self, opt):
+ for name in opt._short_opts + opt._long_opts:
+ self._opt2dest[name] = opt.dest
+
+ if hasattr(opt, 'default') and opt.dest:
+ if not hasattr(self.option, opt.dest):
+ setattr(self.option, opt.dest, opt.default)
+
+ @hookimpl(trylast=True)
+ def pytest_load_initial_conftests(self, early_config):
+ self.pluginmanager._set_initial_conftests(early_config.known_args_namespace)
+
+ def _initini(self, args):
+ ns, unknown_args = self._parser.parse_known_and_unknown_args(args, namespace=self.option.copy())
+ r = determine_setup(ns.inifilename, ns.file_or_dir + unknown_args)
+ self.rootdir, self.inifile, self.inicfg = r
+ self._parser.extra_info['rootdir'] = self.rootdir
+ self._parser.extra_info['inifile'] = self.inifile
+ self.invocation_dir = py.path.local()
+ self._parser.addini('addopts', 'extra command line options', 'args')
+ self._parser.addini('minversion', 'minimally required pytest version')
+
+ def _preparse(self, args, addopts=True):
+ self._initini(args)
+ if addopts:
+ args[:] = shlex.split(os.environ.get('PYTEST_ADDOPTS', '')) + args
+ args[:] = self.getini("addopts") + args
+ self._checkversion()
+ self.pluginmanager.consider_preparse(args)
+ try:
+ self.pluginmanager.load_setuptools_entrypoints("pytest11")
+ except ImportError as e:
+ self.warn("I2", "could not load setuptools entry import: %s" % (e,))
+ self.pluginmanager.consider_env()
+ self.known_args_namespace = ns = self._parser.parse_known_args(args, namespace=self.option.copy())
+ if self.known_args_namespace.confcutdir is None and self.inifile:
+ confcutdir = py.path.local(self.inifile).dirname
+ self.known_args_namespace.confcutdir = confcutdir
+ try:
+ self.hook.pytest_load_initial_conftests(early_config=self,
+ args=args, parser=self._parser)
+ except ConftestImportFailure:
+ e = sys.exc_info()[1]
+ if ns.help or ns.version:
+ # we don't want to prevent --help/--version from working
+ # so just let it pass and print a warning at the end
+ self._warn("could not load initial conftests (%s)\n" % e.path)
+ else:
+ raise
+
+ def _checkversion(self):
+ import pytest
+ minver = self.inicfg.get('minversion', None)
+ if minver:
+ ver = minver.split(".")
+ myver = pytest.__version__.split(".")
+ if myver < ver:
+ raise pytest.UsageError(
+ "%s:%d: requires pytest-%s, actual pytest-%s'" %(
+ self.inicfg.config.path, self.inicfg.lineof('minversion'),
+ minver, pytest.__version__))
+
+ def parse(self, args, addopts=True):
+ # parse given cmdline arguments into this config object.
+ assert not hasattr(self, 'args'), (
+ "can only parse cmdline args at most once per Config object")
+ self._origargs = args
+ self.hook.pytest_addhooks.call_historic(
+ kwargs=dict(pluginmanager=self.pluginmanager))
+ self._preparse(args, addopts=addopts)
+ # XXX deprecated hook:
+ self.hook.pytest_cmdline_preparse(config=self, args=args)
+ args = self._parser.parse_setoption(args, self.option, namespace=self.option)
+ if not args:
+ cwd = os.getcwd()
+ if cwd == self.rootdir:
+ args = self.getini('testpaths')
+ if not args:
+ args = [cwd]
+ self.args = args
+
+ def addinivalue_line(self, name, line):
+ """ add a line to an ini-file option. The option must have been
+ declared but might not yet be set, in which case the line becomes
+ the first line in its value. """
+ x = self.getini(name)
+ assert isinstance(x, list)
+ x.append(line) # modifies the cached list inline
+
+ def getini(self, name):
+ """ return configuration value from an :ref:`ini file <inifiles>`. If the
+ specified name hasn't been registered through a prior
+ :py:func:`parser.addini <pytest.config.Parser.addini>`
+ call (usually from a plugin), a ValueError is raised. """
+ try:
+ return self._inicache[name]
+ except KeyError:
+ self._inicache[name] = val = self._getini(name)
+ return val
+
+ def _getini(self, name):
+ try:
+ description, type, default = self._parser._inidict[name]
+ except KeyError:
+ raise ValueError("unknown configuration value: %r" %(name,))
+ try:
+ value = self.inicfg[name]
+ except KeyError:
+ if default is not None:
+ return default
+ if type is None:
+ return ''
+ return []
+ if type == "pathlist":
+ dp = py.path.local(self.inicfg.config.path).dirpath()
+ l = []
+ for relpath in shlex.split(value):
+ l.append(dp.join(relpath, abs=True))
+ return l
+ elif type == "args":
+ return shlex.split(value)
+ elif type == "linelist":
+ return [t for t in map(lambda x: x.strip(), value.split("\n")) if t]
+ elif type == "bool":
+ return bool(_strtobool(value.strip()))
+ else:
+ assert type is None
+ return value
+
+ def _getconftest_pathlist(self, name, path):
+ try:
+ mod, relroots = self.pluginmanager._rget_with_confmod(name, path)
+ except KeyError:
+ return None
+ modpath = py.path.local(mod.__file__).dirpath()
+ l = []
+ for relroot in relroots:
+ if not isinstance(relroot, py.path.local):
+ relroot = relroot.replace("/", py.path.local.sep)
+ relroot = modpath.join(relroot, abs=True)
+ l.append(relroot)
+ return l
+
+ def getoption(self, name, default=notset, skip=False):
+ """ return command line option value.
+
+ :arg name: name of the option. You may also specify
+ the literal ``--OPT`` option instead of the "dest" option name.
+ :arg default: default value if no option of that name exists.
+ :arg skip: if True, raise pytest.skip if the option does not exist
+ or has a None value.
+ """
+ name = self._opt2dest.get(name, name)
+ try:
+ val = getattr(self.option, name)
+ if val is None and skip:
+ raise AttributeError(name)
+ return val
+ except AttributeError:
+ if default is not notset:
+ return default
+ if skip:
+ import pytest
+ pytest.skip("no %r option found" %(name,))
+ raise ValueError("no option named %r" % (name,))
+
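+ # Illustrative sketch: reading back values registered as in the hypothetical
+ # conftest.py sketch shown with Parser above:
+ #
+ #     run_slow = config.getoption("--run-slow")     # literal option name
+ #     timeout = int(config.getini("slow_timeout"))  # ini values are strings
+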
+ def getvalue(self, name, path=None):
+ """ (deprecated, use getoption()) """
+ return self.getoption(name)
+
+ def getvalueorskip(self, name, path=None):
+ """ (deprecated, use getoption(skip=True)) """
+ return self.getoption(name, skip=True)
+
+def exists(path, ignore=EnvironmentError):
+ try:
+ return path.check()
+ except ignore:
+ return False
+
+def getcfg(args, inibasenames):
+ args = [x for x in args if not str(x).startswith("-")]
+ if not args:
+ args = [py.path.local()]
+ for arg in args:
+ arg = py.path.local(arg)
+ for base in arg.parts(reverse=True):
+ for inibasename in inibasenames:
+ p = base.join(inibasename)
+ if exists(p):
+ iniconfig = py.iniconfig.IniConfig(p)
+ if 'pytest' in iniconfig.sections:
+ return base, p, iniconfig['pytest']
+ elif inibasename == "pytest.ini":
+ # allowed to be empty
+ return base, p, {}
+ return None, None, None
+
+
+def get_common_ancestor(args):
+ # args are what we get after early command line parsing (usually
+ # strings, but can be py.path.local objects as well)
+ common_ancestor = None
+ for arg in args:
+ if str(arg)[0] == "-":
+ continue
+ p = py.path.local(arg)
+ if common_ancestor is None:
+ common_ancestor = p
+ else:
+ if p.relto(common_ancestor) or p == common_ancestor:
+ continue
+ elif common_ancestor.relto(p):
+ common_ancestor = p
+ else:
+ shared = p.common(common_ancestor)
+ if shared is not None:
+ common_ancestor = shared
+ if common_ancestor is None:
+ common_ancestor = py.path.local()
+ elif not common_ancestor.isdir():
+ common_ancestor = common_ancestor.dirpath()
+ return common_ancestor
+
+
+def determine_setup(inifile, args):
+ if inifile:
+ iniconfig = py.iniconfig.IniConfig(inifile)
+ try:
+ inicfg = iniconfig["pytest"]
+ except KeyError:
+ inicfg = None
+ rootdir = get_common_ancestor(args)
+ else:
+ ancestor = get_common_ancestor(args)
+ rootdir, inifile, inicfg = getcfg(
+ [ancestor], ["pytest.ini", "tox.ini", "setup.cfg"])
+ if rootdir is None:
+ for rootdir in ancestor.parts(reverse=True):
+ if rootdir.join("setup.py").exists():
+ break
+ else:
+ rootdir = ancestor
+ return rootdir, inifile, inicfg or {}
+
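+# Illustrative sketch of the lookup above (hypothetical layout): for arguments
+# ["tests/unit", "tests/functional"], get_common_ancestor() yields "tests";
+# getcfg() then walks upwards from there looking for a pytest.ini, tox.ini or
+# setup.cfg that has a [pytest] section (an empty pytest.ini also qualifies),
+# and the directory containing that file becomes rootdir. Failing that, the
+# nearest ancestor containing setup.py is used.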
+
+def setns(obj, dic):
+ import pytest
+ for name, value in dic.items():
+ if isinstance(value, dict):
+ mod = getattr(obj, name, None)
+ if mod is None:
+ modname = "pytest.%s" % name
+ mod = types.ModuleType(modname)
+ sys.modules[modname] = mod
+ mod.__all__ = []
+ setattr(obj, name, mod)
+ obj.__all__.append(name)
+ setns(mod, value)
+ else:
+ setattr(obj, name, value)
+ obj.__all__.append(name)
+ #if obj != pytest:
+ # pytest.__all__.append(name)
+ setattr(pytest, name, value)
+
+
+def create_terminal_writer(config, *args, **kwargs):
+ """Create a TerminalWriter instance configured according to the options
+ in the config object. All code that requires a TerminalWriter object
+ and has access to a config object should use this function.
+ """
+ tw = py.io.TerminalWriter(*args, **kwargs)
+ if config.option.color == 'yes':
+ tw.hasmarkup = True
+ if config.option.color == 'no':
+ tw.hasmarkup = False
+ return tw
+
+
+def _strtobool(val):
+ """Convert a string representation of truth to true (1) or false (0).
+
+ True values are 'y', 'yes', 't', 'true', 'on', and '1'; false values
+ are 'n', 'no', 'f', 'false', 'off', and '0'. Raises ValueError if
+ 'val' is anything else.
+
+ .. note:: copied from distutils.util
+ """
+ val = val.lower()
+ if val in ('y', 'yes', 't', 'true', 'on', '1'):
+ return 1
+ elif val in ('n', 'no', 'f', 'false', 'off', '0'):
+ return 0
+ else:
+ raise ValueError("invalid truth value %r" % (val,))
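+
+
+# Illustrative examples of the mapping above: _strtobool("Yes") returns 1,
+# _strtobool("off") returns 0, and _strtobool("maybe") raises ValueError.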
diff --git a/python/pytest/_pytest/doctest.py b/python/pytest/_pytest/doctest.py
new file mode 100644
index 000000000..a57f7a494
--- /dev/null
+++ b/python/pytest/_pytest/doctest.py
@@ -0,0 +1,290 @@
+""" discover and run doctests in modules and test files."""
+from __future__ import absolute_import
+
+import traceback
+
+import pytest
+from _pytest._code.code import TerminalRepr, ReprFileLocation, ExceptionInfo
+from _pytest.python import FixtureRequest
+
+
+
+def pytest_addoption(parser):
+ parser.addini('doctest_optionflags', 'option flags for doctests',
+ type="args", default=["ELLIPSIS"])
+ group = parser.getgroup("collect")
+ group.addoption("--doctest-modules",
+ action="store_true", default=False,
+ help="run doctests in all .py modules",
+ dest="doctestmodules")
+ group.addoption("--doctest-glob",
+ action="append", default=[], metavar="pat",
+ help="doctests file matching pattern, default: test*.txt",
+ dest="doctestglob")
+ group.addoption("--doctest-ignore-import-errors",
+ action="store_true", default=False,
+ help="ignore doctest ImportErrors",
+ dest="doctest_ignore_import_errors")
+
+
+def pytest_collect_file(path, parent):
+ config = parent.config
+ if path.ext == ".py":
+ if config.option.doctestmodules:
+ return DoctestModule(path, parent)
+ elif _is_doctest(config, path, parent):
+ return DoctestTextfile(path, parent)
+
+
+def _is_doctest(config, path, parent):
+ if path.ext in ('.txt', '.rst') and parent.session.isinitpath(path):
+ return True
+ globs = config.getoption("doctestglob") or ['test*.txt']
+ for glob in globs:
+ if path.check(fnmatch=glob):
+ return True
+ return False
+
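+# Illustrative sketch: enabling this collection from a project, either on the
+# command line or through an ini file (flag names as registered above):
+#
+#     py.test --doctest-modules --doctest-glob='*.rst'
+#
+#     [pytest]
+#     doctest_optionflags = ELLIPSIS NORMALIZE_WHITESPACE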
+
+class ReprFailDoctest(TerminalRepr):
+
+ def __init__(self, reprlocation, lines):
+ self.reprlocation = reprlocation
+ self.lines = lines
+
+ def toterminal(self, tw):
+ for line in self.lines:
+ tw.line(line)
+ self.reprlocation.toterminal(tw)
+
+
+class DoctestItem(pytest.Item):
+
+ def __init__(self, name, parent, runner=None, dtest=None):
+ super(DoctestItem, self).__init__(name, parent)
+ self.runner = runner
+ self.dtest = dtest
+ self.obj = None
+ self.fixture_request = None
+
+ def setup(self):
+ if self.dtest is not None:
+ self.fixture_request = _setup_fixtures(self)
+ globs = dict(getfixture=self.fixture_request.getfuncargvalue)
+ self.dtest.globs.update(globs)
+
+ def runtest(self):
+ _check_all_skipped(self.dtest)
+ self.runner.run(self.dtest)
+
+ def repr_failure(self, excinfo):
+ import doctest
+ if excinfo.errisinstance((doctest.DocTestFailure,
+ doctest.UnexpectedException)):
+ doctestfailure = excinfo.value
+ example = doctestfailure.example
+ test = doctestfailure.test
+ filename = test.filename
+ if test.lineno is None:
+ lineno = None
+ else:
+ lineno = test.lineno + example.lineno + 1
+ message = excinfo.type.__name__
+ reprlocation = ReprFileLocation(filename, lineno, message)
+ checker = _get_checker()
+ REPORT_UDIFF = doctest.REPORT_UDIFF
+ if lineno is not None:
+ lines = doctestfailure.test.docstring.splitlines(False)
+ # add line numbers to the left of the error message
+ lines = ["%03d %s" % (i + test.lineno + 1, x)
+ for (i, x) in enumerate(lines)]
+ # trim docstring error lines to 10
+ lines = lines[example.lineno - 9:example.lineno + 1]
+ else:
+ lines = ['EXAMPLE LOCATION UNKNOWN, not showing all tests of that example']
+ indent = '>>>'
+ for line in example.source.splitlines():
+ lines.append('??? %s %s' % (indent, line))
+ indent = '...'
+ if excinfo.errisinstance(doctest.DocTestFailure):
+ lines += checker.output_difference(example,
+ doctestfailure.got, REPORT_UDIFF).split("\n")
+ else:
+ inner_excinfo = ExceptionInfo(excinfo.value.exc_info)
+ lines += ["UNEXPECTED EXCEPTION: %s" %
+ repr(inner_excinfo.value)]
+ lines += traceback.format_exception(*excinfo.value.exc_info)
+ return ReprFailDoctest(reprlocation, lines)
+ else:
+ return super(DoctestItem, self).repr_failure(excinfo)
+
+ def reportinfo(self):
+ return self.fspath, None, "[doctest] %s" % self.name
+
+
+def _get_flag_lookup():
+ import doctest
+ return dict(DONT_ACCEPT_TRUE_FOR_1=doctest.DONT_ACCEPT_TRUE_FOR_1,
+ DONT_ACCEPT_BLANKLINE=doctest.DONT_ACCEPT_BLANKLINE,
+ NORMALIZE_WHITESPACE=doctest.NORMALIZE_WHITESPACE,
+ ELLIPSIS=doctest.ELLIPSIS,
+ IGNORE_EXCEPTION_DETAIL=doctest.IGNORE_EXCEPTION_DETAIL,
+ COMPARISON_FLAGS=doctest.COMPARISON_FLAGS,
+ ALLOW_UNICODE=_get_allow_unicode_flag(),
+ ALLOW_BYTES=_get_allow_bytes_flag(),
+ )
+
+
+def get_optionflags(parent):
+ optionflags_str = parent.config.getini("doctest_optionflags")
+ flag_lookup_table = _get_flag_lookup()
+ flag_acc = 0
+ for flag in optionflags_str:
+ flag_acc |= flag_lookup_table[flag]
+ return flag_acc
+
+
+class DoctestTextfile(DoctestItem, pytest.Module):
+
+ def runtest(self):
+ import doctest
+ fixture_request = _setup_fixtures(self)
+
+ # inspired by doctest.testfile; ideally we would use it directly,
+ # but it doesn't support passing a custom checker
+ text = self.fspath.read()
+ filename = str(self.fspath)
+ name = self.fspath.basename
+ globs = dict(getfixture=fixture_request.getfuncargvalue)
+ if '__name__' not in globs:
+ globs['__name__'] = '__main__'
+
+ optionflags = get_optionflags(self)
+ runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
+ checker=_get_checker())
+
+ parser = doctest.DocTestParser()
+ test = parser.get_doctest(text, globs, name, filename, 0)
+ _check_all_skipped(test)
+ runner.run(test)
+
+
+def _check_all_skipped(test):
+ """raises pytest.skip() if all examples in the given DocTest have the SKIP
+ option set.
+ """
+ import doctest
+ all_skipped = all(x.options.get(doctest.SKIP, False) for x in test.examples)
+ if all_skipped:
+ pytest.skip('all tests skipped by +SKIP option')
+
+
+class DoctestModule(pytest.Module):
+ def collect(self):
+ import doctest
+ if self.fspath.basename == "conftest.py":
+ module = self.config.pluginmanager._importconftest(self.fspath)
+ else:
+ try:
+ module = self.fspath.pyimport()
+ except ImportError:
+ if self.config.getvalue('doctest_ignore_import_errors'):
+ pytest.skip('unable to import module %r' % self.fspath)
+ else:
+ raise
+ # uses internal doctest module parsing mechanism
+ finder = doctest.DocTestFinder()
+ optionflags = get_optionflags(self)
+ runner = doctest.DebugRunner(verbose=0, optionflags=optionflags,
+ checker=_get_checker())
+ for test in finder.find(module, module.__name__):
+ if test.examples: # skip empty doctests
+ yield DoctestItem(test.name, self, runner, test)
+
+
+def _setup_fixtures(doctest_item):
+ """
+ Used by DoctestTextfile and DoctestItem to set up fixture information.
+ """
+ def func():
+ pass
+
+ doctest_item.funcargs = {}
+ fm = doctest_item.session._fixturemanager
+ doctest_item._fixtureinfo = fm.getfixtureinfo(node=doctest_item, func=func,
+ cls=None, funcargs=False)
+ fixture_request = FixtureRequest(doctest_item)
+ fixture_request._fillfixtures()
+ return fixture_request
+
+
+def _get_checker():
+ """
+ Returns a doctest.OutputChecker subclass that takes into account the
+ ALLOW_UNICODE option to ignore u'' prefixes in strings and ALLOW_BYTES
+ to strip b'' prefixes.
+ Useful when the same doctest should run in Python 2 and Python 3.
+
+ An inner class is used to avoid importing "doctest" at the module
+ level.
+ """
+ if hasattr(_get_checker, 'LiteralsOutputChecker'):
+ return _get_checker.LiteralsOutputChecker()
+
+ import doctest
+ import re
+
+ class LiteralsOutputChecker(doctest.OutputChecker):
+ """
+ Copied from doctest_nose_plugin.py from the nltk project:
+ https://github.com/nltk/nltk
+
+ Further extended to also support byte literals.
+ """
+
+ _unicode_literal_re = re.compile(r"(\W|^)[uU]([rR]?[\'\"])", re.UNICODE)
+ _bytes_literal_re = re.compile(r"(\W|^)[bB]([rR]?[\'\"])", re.UNICODE)
+
+ def check_output(self, want, got, optionflags):
+ res = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ if res:
+ return True
+
+ allow_unicode = optionflags & _get_allow_unicode_flag()
+ allow_bytes = optionflags & _get_allow_bytes_flag()
+ if not allow_unicode and not allow_bytes:
+ return False
+
+ else: # pragma: no cover
+ def remove_prefixes(regex, txt):
+ return re.sub(regex, r'\1\2', txt)
+
+ if allow_unicode:
+ want = remove_prefixes(self._unicode_literal_re, want)
+ got = remove_prefixes(self._unicode_literal_re, got)
+ if allow_bytes:
+ want = remove_prefixes(self._bytes_literal_re, want)
+ got = remove_prefixes(self._bytes_literal_re, got)
+ res = doctest.OutputChecker.check_output(self, want, got,
+ optionflags)
+ return res
+
+ _get_checker.LiteralsOutputChecker = LiteralsOutputChecker
+ return _get_checker.LiteralsOutputChecker()
+
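+# Illustrative sketch: a doctest that passes on both Python 2 and Python 3
+# thanks to the checker above (``get_name`` is a hypothetical function that
+# returns a unicode string):
+#
+#     >>> get_name()  # doctest: +ALLOW_UNICODE
+#     u'name'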
+
+def _get_allow_unicode_flag():
+ """
+ Registers and returns the ALLOW_UNICODE flag.
+ """
+ import doctest
+ return doctest.register_optionflag('ALLOW_UNICODE')
+
+
+def _get_allow_bytes_flag():
+ """
+ Registers and returns the ALLOW_BYTES flag.
+ """
+ import doctest
+ return doctest.register_optionflag('ALLOW_BYTES')
diff --git a/python/pytest/_pytest/genscript.py b/python/pytest/_pytest/genscript.py
new file mode 100755
index 000000000..d2962d8fc
--- /dev/null
+++ b/python/pytest/_pytest/genscript.py
@@ -0,0 +1,132 @@
+""" (deprecated) generate a single-file self-contained version of pytest """
+import os
+import sys
+import pkgutil
+
+import py
+import _pytest
+
+
+
+def find_toplevel(name):
+ for syspath in sys.path:
+ base = py.path.local(syspath)
+ lib = base/name
+ if lib.check(dir=1):
+ return lib
+ mod = base.join("%s.py" % name)
+ if mod.check(file=1):
+ return mod
+ raise LookupError(name)
+
+def pkgname(toplevel, rootpath, path):
+ parts = path.parts()[len(rootpath.parts()):]
+ return '.'.join([toplevel] + [x.purebasename for x in parts])
+
+def pkg_to_mapping(name):
+ toplevel = find_toplevel(name)
+ name2src = {}
+ if toplevel.check(file=1): # module
+ name2src[toplevel.purebasename] = toplevel.read()
+ else: # package
+ for pyfile in toplevel.visit('*.py'):
+ pkg = pkgname(name, toplevel, pyfile)
+ name2src[pkg] = pyfile.read()
+ # with wheels the py source code might not be installed
+ # and the resulting genscript is useless, just bail out.
+ assert name2src, "no source code found for %r at %r" %(name, toplevel)
+ return name2src
+
+def compress_mapping(mapping):
+ import base64, pickle, zlib
+ data = pickle.dumps(mapping, 2)
+ data = zlib.compress(data, 9)
+ data = base64.encodestring(data)
+ data = data.decode('ascii')
+ return data
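+
+# For reference (illustrative sketch, not part of this module): the standalone
+# template is expected to reverse this encoding roughly as
+#
+#     import base64, pickle, zlib
+#     mapping = pickle.loads(zlib.decompress(base64.decodestring(data.encode('ascii'))))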
+
+
+def compress_packages(names):
+ mapping = {}
+ for name in names:
+ mapping.update(pkg_to_mapping(name))
+ return compress_mapping(mapping)
+
+def generate_script(entry, packages):
+ data = compress_packages(packages)
+ tmpl = py.path.local(__file__).dirpath().join('standalonetemplate.py')
+ exe = tmpl.read()
+ exe = exe.replace('@SOURCES@', data)
+ exe = exe.replace('@ENTRY@', entry)
+ return exe
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("debugconfig")
+ group.addoption("--genscript", action="store", default=None,
+ dest="genscript", metavar="path",
+ help="create standalone pytest script at given target path.")
+
+def pytest_cmdline_main(config):
+ import _pytest.config
+ genscript = config.getvalue("genscript")
+ if genscript:
+ tw = _pytest.config.create_terminal_writer(config)
+ tw.line("WARNING: usage of genscript is deprecated.",
+ red=True)
+ deps = ['py', '_pytest', 'pytest'] # pluggy is vendored
+ if sys.version_info < (2,7):
+ deps.append("argparse")
+ tw.line("generated script will run on python2.6-python3.3++")
+ else:
+ tw.line("WARNING: generated script will not run on python2.6 "
+ "due to 'argparse' dependency. Use python2.6 "
+ "to generate a python2.6 compatible script", red=True)
+ script = generate_script(
+ 'import pytest; raise SystemExit(pytest.cmdline.main())',
+ deps,
+ )
+ genscript = py.path.local(genscript)
+ genscript.write(script)
+ tw.line("generated pytest standalone script: %s" % genscript,
+ bold=True)
+ return 0
+
+
+def pytest_namespace():
+ return {'freeze_includes': freeze_includes}
+
+
+def freeze_includes():
+ """
+ Returns a list of module names used by py.test that should be
+ included by cx_freeze.
+ """
+ result = list(_iter_all_modules(py))
+ result += list(_iter_all_modules(_pytest))
+ return result
+
+
+def _iter_all_modules(package, prefix=''):
+ """
+ Iterates over the names of all modules that can be found in the given
+ package, recursively.
+
+ Example:
+ _iter_all_modules(_pytest) ->
+ ['_pytest.assertion.newinterpret',
+ '_pytest.capture',
+ '_pytest.core',
+ ...
+ ]
+ """
+ if type(package) is not str:
+ path, prefix = package.__path__[0], package.__name__ + '.'
+ else:
+ path = package
+ for _, name, is_package in pkgutil.iter_modules([path]):
+ if is_package:
+ for m in _iter_all_modules(os.path.join(path, name), prefix=name + '.'):
+ yield prefix + m
+ else:
+ yield prefix + name
diff --git a/python/pytest/_pytest/helpconfig.py b/python/pytest/_pytest/helpconfig.py
new file mode 100644
index 000000000..1df0c56ac
--- /dev/null
+++ b/python/pytest/_pytest/helpconfig.py
@@ -0,0 +1,139 @@
+""" version info, help messages, tracing configuration. """
+import py
+import pytest
+import os, sys
+
+def pytest_addoption(parser):
+ group = parser.getgroup('debugconfig')
+ group.addoption('--version', action="store_true",
+ help="display pytest lib version and import information.")
+ group._addoption("-h", "--help", action="store_true", dest="help",
+ help="show help message and configuration info")
+ group._addoption('-p', action="append", dest="plugins", default = [],
+ metavar="name",
+ help="early-load given plugin (multi-allowed). "
+ "To avoid loading of plugins, use the `no:` prefix, e.g. "
+ "`no:doctest`.")
+ group.addoption('--traceconfig', '--trace-config',
+ action="store_true", default=False,
+ help="trace considerations of conftest.py files."),
+ group.addoption('--debug',
+ action="store_true", dest="debug", default=False,
+ help="store internal tracing debug information in 'pytestdebug.log'.")
+
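+# Example invocations for the options registered above (illustrative):
+#
+#     py.test -p no:doctest      # skip loading the named plugin
+#     py.test --traceconfig      # trace which conftest.py files are considered
+#     py.test --debug            # write internal tracing to pytestdebug.log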
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_cmdline_parse():
+ outcome = yield
+ config = outcome.get_result()
+ if config.option.debug:
+ path = os.path.abspath("pytestdebug.log")
+ debugfile = open(path, 'w')
+ debugfile.write("versions pytest-%s, py-%s, "
+ "python-%s\ncwd=%s\nargs=%s\n\n" %(
+ pytest.__version__, py.__version__,
+ ".".join(map(str, sys.version_info)),
+ os.getcwd(), config._origargs))
+ config.trace.root.setwriter(debugfile.write)
+ undo_tracing = config.pluginmanager.enable_tracing()
+ sys.stderr.write("writing pytestdebug information to %s\n" % path)
+ def unset_tracing():
+ debugfile.close()
+ sys.stderr.write("wrote pytestdebug information to %s\n" %
+ debugfile.name)
+ config.trace.root.setwriter(None)
+ undo_tracing()
+ config.add_cleanup(unset_tracing)
+
+def pytest_cmdline_main(config):
+ if config.option.version:
+ p = py.path.local(pytest.__file__)
+ sys.stderr.write("This is pytest version %s, imported from %s\n" %
+ (pytest.__version__, p))
+ plugininfo = getpluginversioninfo(config)
+ if plugininfo:
+ for line in plugininfo:
+ sys.stderr.write(line + "\n")
+ return 0
+ elif config.option.help:
+ config._do_configure()
+ showhelp(config)
+ config._ensure_unconfigure()
+ return 0
+
+def showhelp(config):
+ reporter = config.pluginmanager.get_plugin('terminalreporter')
+ tw = reporter._tw
+ tw.write(config._parser.optparser.format_help())
+ tw.line()
+ tw.line()
+ #tw.sep( "=", "config file settings")
+ tw.line("[pytest] ini-options in the next "
+ "pytest.ini|tox.ini|setup.cfg file:")
+ tw.line()
+
+ for name in config._parser._ininames:
+ help, type, default = config._parser._inidict[name]
+ if type is None:
+ type = "string"
+ spec = "%s (%s)" % (name, type)
+ line = " %-24s %s" %(spec, help)
+ tw.line(line[:tw.fullwidth])
+
+ tw.line()
+ tw.line("environment variables:")
+ vars = [
+ ("PYTEST_ADDOPTS", "extra command line options"),
+ ("PYTEST_PLUGINS", "comma-separated plugins to load during startup"),
+ ("PYTEST_DEBUG", "set to enable debug tracing of pytest's internals")
+ ]
+ for name, help in vars:
+ tw.line(" %-24s %s" % (name, help))
+ tw.line()
+ tw.line()
+
+ tw.line("to see available markers type: py.test --markers")
+ tw.line("to see available fixtures type: py.test --fixtures")
+ tw.line("(shown according to specified file_or_dir or current dir "
+ "if not specified)")
+
+ for warningreport in reporter.stats.get('warnings', []):
+ tw.line("warning : " + warningreport.message, red=True)
+ return
+
+
+conftest_options = [
+ ('pytest_plugins', 'list of plugin names to load'),
+]
+
+def getpluginversioninfo(config):
+ lines = []
+ plugininfo = config.pluginmanager.list_plugin_distinfo()
+ if plugininfo:
+ lines.append("setuptools registered plugins:")
+ for plugin, dist in plugininfo:
+ loc = getattr(plugin, '__file__', repr(plugin))
+ content = "%s-%s at %s" % (dist.project_name, dist.version, loc)
+ lines.append(" " + content)
+ return lines
+
+def pytest_report_header(config):
+ lines = []
+ if config.option.debug or config.option.traceconfig:
+ lines.append("using: pytest-%s pylib-%s" %
+ (pytest.__version__,py.__version__))
+
+ verinfo = getpluginversioninfo(config)
+ if verinfo:
+ lines.extend(verinfo)
+
+ if config.option.traceconfig:
+ lines.append("active plugins:")
+ items = config.pluginmanager.list_name_plugin()
+ for name, plugin in items:
+ if hasattr(plugin, '__file__'):
+ r = plugin.__file__
+ else:
+ r = repr(plugin)
+ lines.append(" %-20s: %s" %(name, r))
+ return lines
diff --git a/python/pytest/_pytest/hookspec.py b/python/pytest/_pytest/hookspec.py
new file mode 100644
index 000000000..60e9b47d2
--- /dev/null
+++ b/python/pytest/_pytest/hookspec.py
@@ -0,0 +1,295 @@
+""" hook specifications for pytest plugins, invoked from main.py and builtin plugins. """
+
+from _pytest._pluggy import HookspecMarker
+
+hookspec = HookspecMarker("pytest")
+
+# -------------------------------------------------------------------------
+# Initialization hooks called for every plugin
+# -------------------------------------------------------------------------
+
+@hookspec(historic=True)
+def pytest_addhooks(pluginmanager):
+ """called at plugin registration time to allow adding new hooks via a call to
+ pluginmanager.add_hookspecs(module_or_class, prefix)."""
+
+
+@hookspec(historic=True)
+def pytest_namespace():
+ """return dict of name->object to be made globally available in
+ the pytest namespace. This hook is called at plugin registration
+ time.
+ """
+
+@hookspec(historic=True)
+def pytest_plugin_registered(plugin, manager):
+ """ a new pytest plugin got registered. """
+
+
+@hookspec(historic=True)
+def pytest_addoption(parser):
+ """register argparse-style options and ini-style config values,
+ called once at the beginning of a test run.
+
+ .. note::
+
+ This function should be implemented only in plugins or ``conftest.py``
+ files situated at the tests root directory due to how py.test
+ :ref:`discovers plugins during startup <pluginorder>`.
+
+ :arg parser: To add command line options, call
+ :py:func:`parser.addoption(...) <_pytest.config.Parser.addoption>`.
+ To add ini-file values call :py:func:`parser.addini(...)
+ <_pytest.config.Parser.addini>`.
+
+ Options can later be accessed through the
+ :py:class:`config <_pytest.config.Config>` object, respectively:
+
+ - :py:func:`config.getoption(name) <_pytest.config.Config.getoption>` to
+ retrieve the value of a command line option.
+
+ - :py:func:`config.getini(name) <_pytest.config.Config.getini>` to retrieve
+ a value read from an ini-style file.
+
+ The config object is passed around on many internal objects via the ``.config``
+ attribute or can be retrieved as the ``pytestconfig`` fixture or accessed
+ via (deprecated) ``pytest.config``.
+ """
+
+@hookspec(historic=True)
+def pytest_configure(config):
+ """ called after command line options have been parsed
+ and all plugins and initial conftest files been loaded.
+ This hook is called for every plugin.
+ """
+
+# -------------------------------------------------------------------------
+# Bootstrapping hooks called for plugins registered early enough:
+# internal and 3rd party plugins as well as directly
+# discoverable conftest.py local plugins.
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_cmdline_parse(pluginmanager, args):
+ """return initialized config object, parsing the specified args. """
+
+def pytest_cmdline_preparse(config, args):
+ """(deprecated) modify command line arguments before option parsing. """
+
+@hookspec(firstresult=True)
+def pytest_cmdline_main(config):
+ """ called for performing the main command line action. The default
+ implementation will invoke the configure hooks and runtest_mainloop. """
+
+def pytest_load_initial_conftests(early_config, parser, args):
+ """ implements the loading of initial conftest files ahead
+ of command line option parsing. """
+
+
+# -------------------------------------------------------------------------
+# collection hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_collection(session):
+ """ perform the collection protocol for the given session. """
+
+def pytest_collection_modifyitems(session, config, items):
+ """ called after collection has been performed, may filter or re-order
+ the items in-place."""
+
+def pytest_collection_finish(session):
+ """ called after collection has been performed and modified. """
+
+@hookspec(firstresult=True)
+def pytest_ignore_collect(path, config):
+ """ return True to prevent considering this path for collection.
+ This hook is consulted for all files and directories prior to calling
+ more specific hooks.
+ """
+
+@hookspec(firstresult=True)
+def pytest_collect_directory(path, parent):
+ """ called before traversing a directory for collection files. """
+
+def pytest_collect_file(path, parent):
+ """ return collection Node or None for the given path. Any new node
+ needs to have the specified ``parent`` as a parent."""
+
+# logging hooks for collection
+def pytest_collectstart(collector):
+ """ collector starts collecting. """
+
+def pytest_itemcollected(item):
+ """ we just collected a test item. """
+
+def pytest_collectreport(report):
+ """ collector finished collecting. """
+
+def pytest_deselected(items):
+ """ called for test items deselected by keyword. """
+
+@hookspec(firstresult=True)
+def pytest_make_collect_report(collector):
+ """ perform ``collector.collect()`` and return a CollectReport. """
+
+# -------------------------------------------------------------------------
+# Python test function related hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_pycollect_makemodule(path, parent):
+ """ return a Module collector or None for the given path.
+ This hook will be called for each matching test module path.
+ The pytest_collect_file hook needs to be used if you want to
+ create test modules for files that do not match as a test module.
+ """
+
+@hookspec(firstresult=True)
+def pytest_pycollect_makeitem(collector, name, obj):
+ """ return custom item/collector for a python object in a module, or None. """
+
+@hookspec(firstresult=True)
+def pytest_pyfunc_call(pyfuncitem):
+ """ call underlying test function. """
+
+def pytest_generate_tests(metafunc):
+ """ generate (multiple) parametrized calls to a test function."""
+
+# -------------------------------------------------------------------------
+# generic runtest related hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_runtestloop(session):
+ """ called for performing the main runtest loop
+ (after collection finished). """
+
+def pytest_itemstart(item, node):
+ """ (deprecated, use pytest_runtest_logstart). """
+
+@hookspec(firstresult=True)
+def pytest_runtest_protocol(item, nextitem):
+ """ implements the runtest_setup/call/teardown protocol for
+ the given test item, including capturing exceptions and calling
+ reporting hooks.
+
+ :arg item: test item for which the runtest protocol is performed.
+
+ :arg nextitem: the scheduled-to-be-next test item (or None if this
+ is the last scheduled item). This argument is passed on to
+ :py:func:`pytest_runtest_teardown`.
+
+ :return boolean: True if no further hook implementations should be invoked.
+ """
+
+def pytest_runtest_logstart(nodeid, location):
+ """ signal the start of running a single test item. """
+
+def pytest_runtest_setup(item):
+ """ called before ``pytest_runtest_call(item)``. """
+
+def pytest_runtest_call(item):
+ """ called to execute the test ``item``. """
+
+def pytest_runtest_teardown(item, nextitem):
+ """ called after ``pytest_runtest_call``.
+
+ :arg nextitem: the scheduled-to-be-next test item (None if no further
+ test item is scheduled). This argument can be used to
+ perform exact teardowns, i.e. calling just enough finalizers
+ so that nextitem only needs to call setup-functions.
+ """
+
+@hookspec(firstresult=True)
+def pytest_runtest_makereport(item, call):
+ """ return a :py:class:`_pytest.runner.TestReport` object
+ for the given :py:class:`pytest.Item` and
+ :py:class:`_pytest.runner.CallInfo`.
+ """
+
+def pytest_runtest_logreport(report):
+ """ process a test setup/call/teardown report relating to
+ the respective phase of executing a test. """
+
+# -------------------------------------------------------------------------
+# test session related hooks
+# -------------------------------------------------------------------------
+
+def pytest_sessionstart(session):
+ """ before session.main() is called. """
+
+def pytest_sessionfinish(session, exitstatus):
+ """ whole test run finishes. """
+
+def pytest_unconfigure(config):
+ """ called before test process is exited. """
+
+
+# -------------------------------------------------------------------------
+# hooks for customising the assert methods
+# -------------------------------------------------------------------------
+
+def pytest_assertrepr_compare(config, op, left, right):
+ """return explanation for comparisons in failing assert expressions.
+
+ Return None for no custom explanation, otherwise return a list
+ of strings. The strings will be joined by newlines but any newlines
+ *in* a string will be escaped. Note that all but the first line will
+ be indented slightly; the intention is for the first line to be a summary.
+ """
+
+# -------------------------------------------------------------------------
+# hooks for influencing reporting (invoked from _pytest_terminal)
+# -------------------------------------------------------------------------
+
+def pytest_report_header(config, startdir):
+ """ return a string to be displayed as header info for terminal reporting."""
+
+@hookspec(firstresult=True)
+def pytest_report_teststatus(report):
+ """ return result-category, shortletter and verbose word for reporting."""
+
+def pytest_terminal_summary(terminalreporter):
+ """ add additional section in terminal summary reporting. """
+
+
+@hookspec(historic=True)
+def pytest_logwarning(message, code, nodeid, fslocation):
+ """ process a warning specified by a message, a code string,
+ a nodeid and fslocation (both of which may be None
+ if the warning is not tied to a particular node/location)."""
+
+# -------------------------------------------------------------------------
+# doctest hooks
+# -------------------------------------------------------------------------
+
+@hookspec(firstresult=True)
+def pytest_doctest_prepare_content(content):
+ """ return processed content for a given doctest"""
+
+# -------------------------------------------------------------------------
+# error handling and internal debugging hooks
+# -------------------------------------------------------------------------
+
+def pytest_internalerror(excrepr, excinfo):
+ """ called for internal errors. """
+
+def pytest_keyboard_interrupt(excinfo):
+ """ called for keyboard interrupt. """
+
+def pytest_exception_interact(node, call, report):
+ """called when an exception was raised which can potentially be
+ interactively handled.
+
+ This hook is only called if an exception was raised
+ that is not an internal exception like ``skip.Exception``.
+ """
+
+def pytest_enter_pdb(config):
+ """ called upon pdb.set_trace(), can be used by plugins to take special
+ action just before the python debugger enters in interactive mode.
+
+ :arg config: pytest config object
+ :type config: _pytest.config.Config
+ """
diff --git a/python/pytest/_pytest/junitxml.py b/python/pytest/_pytest/junitxml.py
new file mode 100644
index 000000000..f4de1343e
--- /dev/null
+++ b/python/pytest/_pytest/junitxml.py
@@ -0,0 +1,387 @@
+"""
+ report test results in JUnit-XML format,
+ for use with Jenkins and build integration servers.
+
+
+Based on initial code from Ross Lawley.
+"""
+# Output conforms to https://github.com/jenkinsci/xunit-plugin/blob/master/
+# src/main/resources/org/jenkinsci/plugins/xunit/types/model/xsd/junit-10.xsd
+
+import py
+import os
+import re
+import sys
+import time
+import pytest
+
+# Python 2.X and 3.X compatibility
+if sys.version_info[0] < 3:
+ from codecs import open
+else:
+ unichr = chr
+ unicode = str
+ long = int
+
+
+class Junit(py.xml.Namespace):
+ pass
+
+# We need to get the subset of the invalid unicode ranges according to
+# XML 1.0 which are valid in this python build. Hence we calculate
+# this dynamically instead of hardcoding it. The spec range of valid
+# chars is: Char ::= #x9 | #xA | #xD | [#x20-#xD7FF] | [#xE000-#xFFFD]
+# | [#x10000-#x10FFFF]
+_legal_chars = (0x09, 0x0A, 0x0d)
+_legal_ranges = (
+ (0x20, 0x7E), (0x80, 0xD7FF), (0xE000, 0xFFFD), (0x10000, 0x10FFFF),
+)
+_legal_xml_re = [
+ unicode("%s-%s") % (unichr(low), unichr(high))
+ for (low, high) in _legal_ranges if low < sys.maxunicode
+]
+_legal_xml_re = [unichr(x) for x in _legal_chars] + _legal_xml_re
+illegal_xml_re = re.compile(unicode('[^%s]') % unicode('').join(_legal_xml_re))
+del _legal_chars
+del _legal_ranges
+del _legal_xml_re
+
+_py_ext_re = re.compile(r"\.py$")
+
+
+def bin_xml_escape(arg):
+ def repl(matchobj):
+ i = ord(matchobj.group())
+ if i <= 0xFF:
+ return unicode('#x%02X') % i
+ else:
+ return unicode('#x%04X') % i
+
+ return py.xml.raw(illegal_xml_re.sub(repl, py.xml.escape(arg)))
+
+
+class _NodeReporter(object):
+ def __init__(self, nodeid, xml):
+
+ self.id = nodeid
+ self.xml = xml
+ self.add_stats = self.xml.add_stats
+ self.duration = 0
+ self.properties = []
+ self.nodes = []
+ self.testcase = None
+ self.attrs = {}
+
+ def append(self, node):
+ self.xml.add_stats(type(node).__name__)
+ self.nodes.append(node)
+
+ def add_property(self, name, value):
+ self.properties.append((str(name), bin_xml_escape(value)))
+
+ def make_properties_node(self):
+ """Return a Junit node containing custom properties, if any.
+ """
+ if self.properties:
+ return Junit.properties([
+ Junit.property(name=name, value=value)
+ for name, value in self.properties
+ ])
+ return ''
+
+ def record_testreport(self, testreport):
+ assert not self.testcase
+ names = mangle_test_address(testreport.nodeid)
+ classnames = names[:-1]
+ if self.xml.prefix:
+ classnames.insert(0, self.xml.prefix)
+ attrs = {
+ "classname": ".".join(classnames),
+ "name": bin_xml_escape(names[-1]),
+ "file": testreport.location[0],
+ }
+ if testreport.location[1] is not None:
+ attrs["line"] = testreport.location[1]
+ self.attrs = attrs
+
+ def to_xml(self):
+ testcase = Junit.testcase(time=self.duration, **self.attrs)
+ testcase.append(self.make_properties_node())
+ for node in self.nodes:
+ testcase.append(node)
+ return testcase
+
+ def _add_simple(self, kind, message, data=None):
+ data = bin_xml_escape(data)
+ node = kind(data, message=message)
+ self.append(node)
+
+ def _write_captured_output(self, report):
+ for capname in ('out', 'err'):
+ allcontent = ""
+ for name, content in report.get_sections("Captured std%s" %
+ capname):
+ allcontent += content
+ if allcontent:
+ tag = getattr(Junit, 'system-' + capname)
+ self.append(tag(bin_xml_escape(allcontent)))
+
+ def append_pass(self, report):
+ self.add_stats('passed')
+ self._write_captured_output(report)
+
+ def append_failure(self, report):
+ # msg = str(report.longrepr.reprtraceback.extraline)
+ if hasattr(report, "wasxfail"):
+ self._add_simple(
+ Junit.skipped,
+ "xfail-marked test passes unexpectedly")
+ else:
+ if hasattr(report.longrepr, "reprcrash"):
+ message = report.longrepr.reprcrash.message
+ elif isinstance(report.longrepr, (unicode, str)):
+ message = report.longrepr
+ else:
+ message = str(report.longrepr)
+ message = bin_xml_escape(message)
+ fail = Junit.failure(message=message)
+ fail.append(bin_xml_escape(report.longrepr))
+ self.append(fail)
+ self._write_captured_output(report)
+
+ def append_collect_error(self, report):
+ # msg = str(report.longrepr.reprtraceback.extraline)
+ self.append(Junit.error(bin_xml_escape(report.longrepr),
+ message="collection failure"))
+
+ def append_collect_skipped(self, report):
+ self._add_simple(
+ Junit.skipped, "collection skipped", report.longrepr)
+
+ def append_error(self, report):
+ self._add_simple(
+ Junit.error, "test setup failure", report.longrepr)
+ self._write_captured_output(report)
+
+ def append_skipped(self, report):
+ if hasattr(report, "wasxfail"):
+ self._add_simple(
+ Junit.skipped, "expected test failure", report.wasxfail
+ )
+ else:
+ filename, lineno, skipreason = report.longrepr
+ if skipreason.startswith("Skipped: "):
+ skipreason = bin_xml_escape(skipreason[9:])
+ self.append(
+ Junit.skipped("%s:%s: %s" % (filename, lineno, skipreason),
+ type="pytest.skip",
+ message=skipreason))
+ self._write_captured_output(report)
+
+ def finalize(self):
+ data = self.to_xml().unicode(indent=0)
+ self.__dict__.clear()
+ self.to_xml = lambda: py.xml.raw(data)
+
+
+@pytest.fixture
+def record_xml_property(request):
+ """Fixture that adds extra xml properties to the tag for the calling test.
+ The fixture is callable with (name, value), with value being automatically
+ xml-encoded.
+ """
+ request.node.warn(
+ code='C3',
+ message='record_xml_property is an experimental feature',
+ )
+ xml = getattr(request.config, "_xml", None)
+ if xml is not None:
+ node_reporter = xml.node_reporter(request.node.nodeid)
+ return node_reporter.add_property
+ else:
+ def add_property_noop(name, value):
+ pass
+
+ return add_property_noop
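+
+# Illustrative usage in a test (property name and value are arbitrary):
+#
+#     def test_widget(record_xml_property):
+#         record_xml_property("example_key", 1)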
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting")
+ group.addoption(
+ '--junitxml', '--junit-xml',
+ action="store",
+ dest="xmlpath",
+ metavar="path",
+ default=None,
+ help="create junit-xml style report file at given path.")
+ group.addoption(
+ '--junitprefix', '--junit-prefix',
+ action="store",
+ metavar="str",
+ default=None,
+ help="prepend prefix to classnames in junit-xml output")
+
+
+def pytest_configure(config):
+ xmlpath = config.option.xmlpath
+ # prevent opening xmllog on slave nodes (xdist)
+ if xmlpath and not hasattr(config, 'slaveinput'):
+ config._xml = LogXML(xmlpath, config.option.junitprefix)
+ config.pluginmanager.register(config._xml)
+
+
+def pytest_unconfigure(config):
+ xml = getattr(config, '_xml', None)
+ if xml:
+ del config._xml
+ config.pluginmanager.unregister(xml)
+
+
+def mangle_test_address(address):
+ path, possible_open_bracket, params = address.partition('[')
+ names = path.split("::")
+ try:
+ names.remove('()')
+ except ValueError:
+ pass
+ # convert file path to dotted path
+ names[0] = names[0].replace("/", '.')
+ names[0] = _py_ext_re.sub("", names[0])
+ # put any params back
+ names[-1] += possible_open_bracket + params
+ return names
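+
+# Worked example of the mangling above (illustrative nodeid):
+#
+#     "tests/test_x.py::TestCls::()::test_a[1]"
+#         -> ["tests.test_x", "TestCls", "test_a[1]"]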
+
+
+class LogXML(object):
+ def __init__(self, logfile, prefix):
+ logfile = os.path.expanduser(os.path.expandvars(logfile))
+ self.logfile = os.path.normpath(os.path.abspath(logfile))
+ self.prefix = prefix
+ self.stats = dict.fromkeys([
+ 'error',
+ 'passed',
+ 'failure',
+ 'skipped',
+ ], 0)
+ self.node_reporters = {} # nodeid -> _NodeReporter
+ self.node_reporters_ordered = []
+
+ def finalize(self, report):
+ nodeid = getattr(report, 'nodeid', report)
+ # local hack to handle xdist report order
+ slavenode = getattr(report, 'node', None)
+ reporter = self.node_reporters.pop((nodeid, slavenode))
+ if reporter is not None:
+ reporter.finalize()
+
+ def node_reporter(self, report):
+ nodeid = getattr(report, 'nodeid', report)
+ # local hack to handle xdist report order
+ slavenode = getattr(report, 'node', None)
+
+ key = nodeid, slavenode
+
+ if key in self.node_reporters:
+ # TODO: breaks for --dist=each
+ return self.node_reporters[key]
+ reporter = _NodeReporter(nodeid, self)
+ self.node_reporters[key] = reporter
+ self.node_reporters_ordered.append(reporter)
+ return reporter
+
+ def add_stats(self, key):
+ if key in self.stats:
+ self.stats[key] += 1
+
+ def _opentestcase(self, report):
+ reporter = self.node_reporter(report)
+ reporter.record_testreport(report)
+ return reporter
+
+ def pytest_runtest_logreport(self, report):
+ """handle a setup/call/teardown report, generating the appropriate
+ xml tags as necessary.
+
+ note: due to plugins like xdist, this hook may be called in interlaced
+ order with reports from other nodes. for example:
+
+ usual call order:
+ -> setup node1
+ -> call node1
+ -> teardown node1
+ -> setup node2
+ -> call node2
+ -> teardown node2
+
+ possible call order in xdist:
+ -> setup node1
+ -> call node1
+ -> setup node2
+ -> call node2
+ -> teardown node2
+ -> teardown node1
+ """
+ if report.passed:
+ if report.when == "call": # ignore setup/teardown
+ reporter = self._opentestcase(report)
+ reporter.append_pass(report)
+ elif report.failed:
+ reporter = self._opentestcase(report)
+ if report.when == "call":
+ reporter.append_failure(report)
+ else:
+ reporter.append_error(report)
+ elif report.skipped:
+ reporter = self._opentestcase(report)
+ reporter.append_skipped(report)
+ self.update_testcase_duration(report)
+ if report.when == "teardown":
+ self.finalize(report)
+
+ def update_testcase_duration(self, report):
+ """accumulates total duration for nodeid from given report and updates
+ the Junit.testcase with the new total if already created.
+ """
+ reporter = self.node_reporter(report)
+ reporter.duration += getattr(report, 'duration', 0.0)
+
+ def pytest_collectreport(self, report):
+ if not report.passed:
+ reporter = self._opentestcase(report)
+ if report.failed:
+ reporter.append_collect_error(report)
+ else:
+ reporter.append_collect_skipped(report)
+
+ def pytest_internalerror(self, excrepr):
+ reporter = self.node_reporter('internal')
+ reporter.attrs.update(classname="pytest", name='internal')
+ reporter._add_simple(Junit.error, 'internal error', excrepr)
+
+ def pytest_sessionstart(self):
+ self.suite_start_time = time.time()
+
+ def pytest_sessionfinish(self):
+ dirname = os.path.dirname(os.path.abspath(self.logfile))
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+ logfile = open(self.logfile, 'w', encoding='utf-8')
+ suite_stop_time = time.time()
+ suite_time_delta = suite_stop_time - self.suite_start_time
+
+ numtests = self.stats['passed'] + self.stats['failure'] + self.stats['skipped']
+
+ logfile.write('<?xml version="1.0" encoding="utf-8"?>')
+ logfile.write(Junit.testsuite(
+ [x.to_xml() for x in self.node_reporters_ordered],
+ name="pytest",
+ errors=self.stats['error'],
+ failures=self.stats['failure'],
+ skips=self.stats['skipped'],
+ tests=numtests,
+ time="%.3f" % suite_time_delta, ).unicode(indent=0))
+ logfile.close()
+
+ def pytest_terminal_summary(self, terminalreporter):
+ terminalreporter.write_sep("-",
+ "generated xml file: %s" % (self.logfile))
diff --git a/python/pytest/_pytest/main.py b/python/pytest/_pytest/main.py
new file mode 100644
index 000000000..8654d7af6
--- /dev/null
+++ b/python/pytest/_pytest/main.py
@@ -0,0 +1,744 @@
+""" core implementation of testing process: init, session, runtest loop. """
+import imp
+import os
+import re
+import sys
+
+import _pytest
+import _pytest._code
+import py
+import pytest
+try:
+ from collections import MutableMapping as MappingMixin
+except ImportError:
+ from UserDict import DictMixin as MappingMixin
+
+from _pytest.runner import collect_one_node
+
+tracebackcutdir = py.path.local(_pytest.__file__).dirpath()
+
+# exitcodes for the command line
+EXIT_OK = 0
+EXIT_TESTSFAILED = 1
+EXIT_INTERRUPTED = 2
+EXIT_INTERNALERROR = 3
+EXIT_USAGEERROR = 4
+EXIT_NOTESTSCOLLECTED = 5
+
+name_re = re.compile("^[a-zA-Z_]\w*$")
+
+def pytest_addoption(parser):
+ parser.addini("norecursedirs", "directory patterns to avoid for recursion",
+ type="args", default=['.*', 'CVS', '_darcs', '{arch}', '*.egg'])
+ parser.addini("testpaths", "directories to search for tests when no files or directories are given in the command line.",
+ type="args", default=[])
+ #parser.addini("dirpatterns",
+ # "patterns specifying possible locations of test files",
+ # type="linelist", default=["**/test_*.txt",
+ # "**/test_*.py", "**/*_test.py"]
+ #)
+ group = parser.getgroup("general", "running and selection options")
+ group._addoption('-x', '--exitfirst', action="store_true", default=False,
+ dest="exitfirst",
+ help="exit instantly on first error or failed test."),
+ group._addoption('--maxfail', metavar="num",
+ action="store", type=int, dest="maxfail", default=0,
+ help="exit after first num failures or errors.")
+ group._addoption('--strict', action="store_true",
+ help="run pytest in strict mode, warnings become errors.")
+ group._addoption("-c", metavar="file", type=str, dest="inifilename",
+ help="load configuration from `file` instead of trying to locate one of the implicit configuration files.")
+
+ group = parser.getgroup("collect", "collection")
+ group.addoption('--collectonly', '--collect-only', action="store_true",
+ help="only collect tests, don't execute them."),
+ group.addoption('--pyargs', action="store_true",
+ help="try to interpret all arguments as python packages.")
+ group.addoption("--ignore", action="append", metavar="path",
+ help="ignore path during collection (multi-allowed).")
+ # when changing this to --conf-cut-dir, config.py Conftest.setinitial
+ # needs upgrading as well
+ group.addoption('--confcutdir', dest="confcutdir", default=None,
+ metavar="dir",
+ help="only load conftest.py's relative to specified dir.")
+ group.addoption('--noconftest', action="store_true",
+ dest="noconftest", default=False,
+ help="Don't load any conftest.py files.")
+
+ group = parser.getgroup("debugconfig",
+ "test session debugging and configuration")
+ group.addoption('--basetemp', dest="basetemp", default=None, metavar="dir",
+ help="base temporary directory for this test run.")
+
+
+def pytest_namespace():
+ collect = dict(Item=Item, Collector=Collector, File=File, Session=Session)
+ return dict(collect=collect)
+
+def pytest_configure(config):
+ pytest.config = config # compatibility
+ if config.option.exitfirst:
+ config.option.maxfail = 1
+
+def wrap_session(config, doit):
+ """Skeleton command line program"""
+ session = Session(config)
+ session.exitstatus = EXIT_OK
+ initstate = 0
+ try:
+ try:
+ config._do_configure()
+ initstate = 1
+ config.hook.pytest_sessionstart(session=session)
+ initstate = 2
+ session.exitstatus = doit(config, session) or 0
+ except pytest.UsageError:
+ raise
+ except KeyboardInterrupt:
+ excinfo = _pytest._code.ExceptionInfo()
+ config.hook.pytest_keyboard_interrupt(excinfo=excinfo)
+ session.exitstatus = EXIT_INTERRUPTED
+ except:
+ excinfo = _pytest._code.ExceptionInfo()
+ config.notify_exception(excinfo, config.option)
+ session.exitstatus = EXIT_INTERNALERROR
+ if excinfo.errisinstance(SystemExit):
+ sys.stderr.write("mainloop: caught Spurious SystemExit!\n")
+
+ finally:
+ excinfo = None # Explicitly break reference cycle.
+ session.startdir.chdir()
+ if initstate >= 2:
+ config.hook.pytest_sessionfinish(
+ session=session,
+ exitstatus=session.exitstatus)
+ config._ensure_unconfigure()
+ return session.exitstatus
+
+def pytest_cmdline_main(config):
+ return wrap_session(config, _main)
+
+def _main(config, session):
+ """ default command line protocol for initialization, session,
+ running tests and reporting. """
+ config.hook.pytest_collection(session=session)
+ config.hook.pytest_runtestloop(session=session)
+
+ if session.testsfailed:
+ return EXIT_TESTSFAILED
+ elif session.testscollected == 0:
+ return EXIT_NOTESTSCOLLECTED
+
+def pytest_collection(session):
+ return session.perform_collect()
+
+def pytest_runtestloop(session):
+ if session.config.option.collectonly:
+ return True
+
+ def getnextitem(i):
+ # this is a function to avoid python2
+ # keeping sys.exc_info set when calling into a test
+ # python2 keeps sys.exc_info till the frame is left
+ try:
+ return session.items[i+1]
+ except IndexError:
+ return None
+
+ for i, item in enumerate(session.items):
+ nextitem = getnextitem(i)
+ item.config.hook.pytest_runtest_protocol(item=item, nextitem=nextitem)
+ if session.shouldstop:
+ raise session.Interrupted(session.shouldstop)
+ return True
+
+def pytest_ignore_collect(path, config):
+ p = path.dirpath()
+ ignore_paths = config._getconftest_pathlist("collect_ignore", path=p)
+ ignore_paths = ignore_paths or []
+ excludeopt = config.getoption("ignore")
+ if excludeopt:
+ ignore_paths.extend([py.path.local(x) for x in excludeopt])
+ return path in ignore_paths
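+
+# Illustrative conftest.py snippet feeding the "collect_ignore" list consulted
+# above (the ignored path is arbitrary):
+#
+#     collect_ignore = ["setup.py"]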
+
+class FSHookProxy:
+ def __init__(self, fspath, pm, remove_mods):
+ self.fspath = fspath
+ self.pm = pm
+ self.remove_mods = remove_mods
+
+ def __getattr__(self, name):
+ x = self.pm.subset_hook_caller(name, remove_plugins=self.remove_mods)
+ self.__dict__[name] = x
+ return x
+
+def compatproperty(name):
+ def fget(self):
+ # deprecated - use pytest.name
+ return getattr(pytest, name)
+
+ return property(fget)
+
+class NodeKeywords(MappingMixin):
+ def __init__(self, node):
+ self.node = node
+ self.parent = node.parent
+ self._markers = {node.name: True}
+
+ def __getitem__(self, key):
+ try:
+ return self._markers[key]
+ except KeyError:
+ if self.parent is None:
+ raise
+ return self.parent.keywords[key]
+
+ def __setitem__(self, key, value):
+ self._markers[key] = value
+
+ def __delitem__(self, key):
+ raise ValueError("cannot delete key in keywords dict")
+
+ def __iter__(self):
+ seen = set(self._markers)
+ if self.parent is not None:
+ seen.update(self.parent.keywords)
+ return iter(seen)
+
+ def __len__(self):
+ return len(self.__iter__())
+
+ def keys(self):
+ return list(self)
+
+ def __repr__(self):
+ return "<NodeKeywords for node %s>" % (self.node, )
+
+
+class Node(object):
+ """ base class for Collector and Item the test collection tree.
+ Collector subclasses have children, Items are terminal nodes."""
+
+ def __init__(self, name, parent=None, config=None, session=None):
+ #: a unique name within the scope of the parent node
+ self.name = name
+
+ #: the parent collector node.
+ self.parent = parent
+
+ #: the pytest config object
+ self.config = config or parent.config
+
+ #: the session this node is part of
+ self.session = session or parent.session
+
+ #: filesystem path where this node was collected from (can be None)
+ self.fspath = getattr(parent, 'fspath', None)
+
+ #: keywords/markers collected from all scopes
+ self.keywords = NodeKeywords(self)
+
+ #: allow adding of extra keywords to use for matching
+ self.extra_keyword_matches = set()
+
+ # used for storing artificial fixturedefs for direct parametrization
+ self._name2pseudofixturedef = {}
+
+ @property
+ def ihook(self):
+ """ fspath sensitive hook proxy used to call pytest hooks"""
+ return self.session.gethookproxy(self.fspath)
+
+ Module = compatproperty("Module")
+ Class = compatproperty("Class")
+ Instance = compatproperty("Instance")
+ Function = compatproperty("Function")
+ File = compatproperty("File")
+ Item = compatproperty("Item")
+
+ def _getcustomclass(self, name):
+ cls = getattr(self, name)
+ if cls != getattr(pytest, name):
+ py.log._apiwarn("2.0", "use of node.%s is deprecated, "
+ "use pytest_pycollect_makeitem(...) to create custom "
+ "collection nodes" % name)
+ return cls
+
+ def __repr__(self):
+ return "<%s %r>" %(self.__class__.__name__,
+ getattr(self, 'name', None))
+
+ def warn(self, code, message):
+ """ generate a warning with the given code and message for this
+ item. """
+ assert isinstance(code, str)
+ fslocation = getattr(self, "location", None)
+ if fslocation is None:
+ fslocation = getattr(self, "fspath", None)
+ else:
+ fslocation = "%s:%s" % fslocation[:2]
+
+ self.ihook.pytest_logwarning.call_historic(kwargs=dict(
+ code=code, message=message,
+ nodeid=self.nodeid, fslocation=fslocation))
+
+ # methods for ordering nodes
+ @property
+ def nodeid(self):
+ """ a ::-separated string denoting its collection tree address. """
+ try:
+ return self._nodeid
+ except AttributeError:
+ self._nodeid = x = self._makeid()
+ return x
+
+ def _makeid(self):
+ return self.parent.nodeid + "::" + self.name
+
+ def __hash__(self):
+ return hash(self.nodeid)
+
+ def setup(self):
+ pass
+
+ def teardown(self):
+ pass
+
+ def _memoizedcall(self, attrname, function):
+ exattrname = "_ex_" + attrname
+ failure = getattr(self, exattrname, None)
+ if failure is not None:
+ py.builtin._reraise(failure[0], failure[1], failure[2])
+ if hasattr(self, attrname):
+ return getattr(self, attrname)
+ try:
+ res = function()
+ except py.builtin._sysex:
+ raise
+ except:
+ failure = sys.exc_info()
+ setattr(self, exattrname, failure)
+ raise
+ setattr(self, attrname, res)
+ return res
+
+ def listchain(self):
+ """ return list of all parent collectors up to self,
+ starting from root of collection tree. """
+ chain = []
+ item = self
+ while item is not None:
+ chain.append(item)
+ item = item.parent
+ chain.reverse()
+ return chain
+
+ def add_marker(self, marker):
+ """ dynamically add a marker object to the node.
+
+ ``marker`` can be a string or pytest.mark.* instance.
+ """
+ from _pytest.mark import MarkDecorator
+ if isinstance(marker, py.builtin._basestring):
+ marker = MarkDecorator(marker)
+ elif not isinstance(marker, MarkDecorator):
+ raise ValueError("is not a string or pytest.mark.* Marker")
+ self.keywords[marker.name] = marker
+
+ def get_marker(self, name):
+ """ get a marker object from this node or None if
+ the node doesn't have a marker with that name. """
+ val = self.keywords.get(name, None)
+ if val is not None:
+ from _pytest.mark import MarkInfo, MarkDecorator
+ if isinstance(val, (MarkDecorator, MarkInfo)):
+ return val
+
+ def listextrakeywords(self):
+ """ Return a set of all extra keywords in self and any parents."""
+ extra_keywords = set()
+ item = self
+ for item in self.listchain():
+ extra_keywords.update(item.extra_keyword_matches)
+ return extra_keywords
+
+ def listnames(self):
+ return [x.name for x in self.listchain()]
+
+ def addfinalizer(self, fin):
+ """ register a function to be called when this node is finalized.
+
+ This method can only be called when this node is active
+ in a setup chain, for example during self.setup().
+ """
+ self.session._setupstate.addfinalizer(fin, self)
+
+ def getparent(self, cls):
+ """ get the next parent node (including ourself)
+ which is an instance of the given class"""
+ current = self
+ while current and not isinstance(current, cls):
+ current = current.parent
+ return current
+
+ def _prunetraceback(self, excinfo):
+ pass
+
+ def _repr_failure_py(self, excinfo, style=None):
+ fm = self.session._fixturemanager
+ if excinfo.errisinstance(fm.FixtureLookupError):
+ return excinfo.value.formatrepr()
+ tbfilter = True
+ if self.config.option.fulltrace:
+ style="long"
+ else:
+ self._prunetraceback(excinfo)
+ tbfilter = False # prunetraceback already does it
+ if style == "auto":
+ style = "long"
+ # XXX should excinfo.getrepr record all data and toterminal() process it?
+ if style is None:
+ if self.config.option.tbstyle == "short":
+ style = "short"
+ else:
+ style = "long"
+
+ return excinfo.getrepr(funcargs=True,
+ showlocals=self.config.option.showlocals,
+ style=style, tbfilter=tbfilter)
+
+ repr_failure = _repr_failure_py
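+
+# Illustrative use of the marker API above from a hook (sketch; "slow" is a
+# hypothetical marker name):
+#
+#     def pytest_collection_modifyitems(items):
+#         for item in items:
+#             if "slow" in item.nodeid:
+#                 item.add_marker(pytest.mark.slow)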
+
+class Collector(Node):
+ """ Collector instances create children through collect()
+ and thus iteratively build a tree.
+ """
+
+ class CollectError(Exception):
+ """ an error during collection, contains a custom message. """
+
+ def collect(self):
+ """ returns a list of children (items and collectors)
+ for this collection node.
+ """
+ raise NotImplementedError("abstract")
+
+ def repr_failure(self, excinfo):
+ """ represent a collection failure. """
+ if excinfo.errisinstance(self.CollectError):
+ exc = excinfo.value
+ return str(exc.args[0])
+ return self._repr_failure_py(excinfo, style="short")
+
+ def _memocollect(self):
+ """ internal helper method to cache results of calling collect(). """
+ return self._memoizedcall('_collected', lambda: list(self.collect()))
+
+ def _prunetraceback(self, excinfo):
+ if hasattr(self, 'fspath'):
+ traceback = excinfo.traceback
+ ntraceback = traceback.cut(path=self.fspath)
+ if ntraceback == traceback:
+ ntraceback = ntraceback.cut(excludepath=tracebackcutdir)
+ excinfo.traceback = ntraceback.filter()
+
+class FSCollector(Collector):
+ def __init__(self, fspath, parent=None, config=None, session=None):
+ fspath = py.path.local(fspath) # xxx only for test_resultlog.py?
+ name = fspath.basename
+ if parent is not None:
+ rel = fspath.relto(parent.fspath)
+ if rel:
+ name = rel
+ name = name.replace(os.sep, "/")
+ super(FSCollector, self).__init__(name, parent, config, session)
+ self.fspath = fspath
+
+ def _makeid(self):
+ relpath = self.fspath.relto(self.config.rootdir)
+ if os.sep != "/":
+ relpath = relpath.replace(os.sep, "/")
+ return relpath
+
+class File(FSCollector):
+ """ base class for collecting tests from a file. """
+
+class Item(Node):
+ """ a basic test invocation item. Note that for a single function
+ there might be multiple test invocation items.
+ """
+ nextitem = None
+
+ def __init__(self, name, parent=None, config=None, session=None):
+ super(Item, self).__init__(name, parent, config, session)
+ self._report_sections = []
+
+ def add_report_section(self, when, key, content):
+ if content:
+ self._report_sections.append((when, key, content))
+
+ def reportinfo(self):
+ return self.fspath, None, ""
+
+ @property
+ def location(self):
+ try:
+ return self._location
+ except AttributeError:
+ location = self.reportinfo()
+ # bestrelpath is a quite slow function
+ cache = self.config.__dict__.setdefault("_bestrelpathcache", {})
+ try:
+ fspath = cache[location[0]]
+ except KeyError:
+ fspath = self.session.fspath.bestrelpath(location[0])
+ cache[location[0]] = fspath
+ location = (fspath, location[1], str(location[2]))
+ self._location = location
+ return location
+
+class NoMatch(Exception):
+ """ raised if matching cannot locate a matching names. """
+
+class Interrupted(KeyboardInterrupt):
+ """ signals an interrupted test run. """
+ __module__ = 'builtins' # for py3
+
+class Session(FSCollector):
+ Interrupted = Interrupted
+
+ def __init__(self, config):
+ FSCollector.__init__(self, config.rootdir, parent=None,
+ config=config, session=self)
+ self._fs2hookproxy = {}
+ self.testsfailed = 0
+ self.testscollected = 0
+ self.shouldstop = False
+ self.trace = config.trace.root.get("collection")
+ self._norecursepatterns = config.getini("norecursedirs")
+ self.startdir = py.path.local()
+ self.config.pluginmanager.register(self, name="session")
+
+ def _makeid(self):
+ return ""
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_collectstart(self):
+ if self.shouldstop:
+ raise self.Interrupted(self.shouldstop)
+
+ @pytest.hookimpl(tryfirst=True)
+ def pytest_runtest_logreport(self, report):
+ if report.failed and not hasattr(report, 'wasxfail'):
+ self.testsfailed += 1
+ maxfail = self.config.getvalue("maxfail")
+ if maxfail and self.testsfailed >= maxfail:
+ self.shouldstop = "stopping after %d failures" % (
+ self.testsfailed)
+ pytest_collectreport = pytest_runtest_logreport
+
+ def isinitpath(self, path):
+ return path in self._initialpaths
+
+ def gethookproxy(self, fspath):
+ try:
+ return self._fs2hookproxy[fspath]
+ except KeyError:
+ # check if we have the common case of running
+ # hooks with all conftest.py files
+ pm = self.config.pluginmanager
+ my_conftestmodules = pm._getconftestmodules(fspath)
+ remove_mods = pm._conftest_plugins.difference(my_conftestmodules)
+ if remove_mods:
+ # one or more conftests are not in use at this fspath
+ proxy = FSHookProxy(fspath, pm, remove_mods)
+ else:
+ # all plugins are active for this fspath
+ proxy = self.config.hook
+
+ self._fs2hookproxy[fspath] = proxy
+ return proxy
+
+ def perform_collect(self, args=None, genitems=True):
+ hook = self.config.hook
+ try:
+ items = self._perform_collect(args, genitems)
+ hook.pytest_collection_modifyitems(session=self,
+ config=self.config, items=items)
+ finally:
+ hook.pytest_collection_finish(session=self)
+ self.testscollected = len(items)
+ return items
+
+ def _perform_collect(self, args, genitems):
+ if args is None:
+ args = self.config.args
+ self.trace("perform_collect", self, args)
+ self.trace.root.indent += 1
+ self._notfound = []
+ self._initialpaths = set()
+ self._initialparts = []
+ self.items = items = []
+ for arg in args:
+ parts = self._parsearg(arg)
+ self._initialparts.append(parts)
+ self._initialpaths.add(parts[0])
+ rep = collect_one_node(self)
+ self.ihook.pytest_collectreport(report=rep)
+ self.trace.root.indent -= 1
+ if self._notfound:
+ errors = []
+ for arg, exc in self._notfound:
+ line = "(no name %r in any of %r)" % (arg, exc.args[0])
+ errors.append("not found: %s\n%s" % (arg, line))
+ #XXX: test this
+ raise pytest.UsageError(*errors)
+ if not genitems:
+ return rep.result
+ else:
+ if rep.passed:
+ for node in rep.result:
+ self.items.extend(self.genitems(node))
+ return items
+
+ def collect(self):
+ for parts in self._initialparts:
+ arg = "::".join(map(str, parts))
+ self.trace("processing argument", arg)
+ self.trace.root.indent += 1
+ try:
+ for x in self._collect(arg):
+ yield x
+ except NoMatch:
+ # we are inside a make_report hook so
+ # we cannot directly pass through the exception
+ self._notfound.append((arg, sys.exc_info()[1]))
+
+ self.trace.root.indent -= 1
+
+ def _collect(self, arg):
+ names = self._parsearg(arg)
+ path = names.pop(0)
+ if path.check(dir=1):
+ assert not names, "invalid arg %r" %(arg,)
+ for path in path.visit(fil=lambda x: x.check(file=1),
+ rec=self._recurse, bf=True, sort=True):
+ for x in self._collectfile(path):
+ yield x
+ else:
+ assert path.check(file=1)
+ for x in self.matchnodes(self._collectfile(path), names):
+ yield x
+
+ def _collectfile(self, path):
+ ihook = self.gethookproxy(path)
+ if not self.isinitpath(path):
+ if ihook.pytest_ignore_collect(path=path, config=self.config):
+ return ()
+ return ihook.pytest_collect_file(path=path, parent=self)
+
+ def _recurse(self, path):
+ ihook = self.gethookproxy(path.dirpath())
+ if ihook.pytest_ignore_collect(path=path, config=self.config):
+ return
+ for pat in self._norecursepatterns:
+ if path.check(fnmatch=pat):
+ return False
+ ihook = self.gethookproxy(path)
+ ihook.pytest_collect_directory(path=path, parent=self)
+ return True
+
+ def _tryconvertpyarg(self, x):
+ mod = None
+ path = [os.path.abspath('.')] + sys.path
+ for name in x.split('.'):
+ # ignore anything that's not a proper name here
+ # else something like --pyargs will mess up '.'
+ # since imp.find_module will actually sometimes work for it
+ # but it's supposed to be considered a filesystem path
+ # not a package
+ if name_re.match(name) is None:
+ return x
+ try:
+ fd, mod, type_ = imp.find_module(name, path)
+ except ImportError:
+ return x
+ else:
+ if fd is not None:
+ fd.close()
+
+ if type_[2] != imp.PKG_DIRECTORY:
+ path = [os.path.dirname(mod)]
+ else:
+ path = [mod]
+ return mod
+
+ def _parsearg(self, arg):
+ """ return (fspath, names) tuple after checking the file exists. """
+ arg = str(arg)
+ if self.config.option.pyargs:
+ arg = self._tryconvertpyarg(arg)
+ parts = str(arg).split("::")
+ relpath = parts[0].replace("/", os.sep)
+ path = self.config.invocation_dir.join(relpath, abs=True)
+ if not path.check():
+ if self.config.option.pyargs:
+ msg = "file or package not found: "
+ else:
+ msg = "file not found: "
+ raise pytest.UsageError(msg + arg)
+ parts[0] = path
+ return parts
+
+ def matchnodes(self, matching, names):
+ self.trace("matchnodes", matching, names)
+ self.trace.root.indent += 1
+ nodes = self._matchnodes(matching, names)
+ num = len(nodes)
+ self.trace("matchnodes finished -> ", num, "nodes")
+ self.trace.root.indent -= 1
+ if num == 0:
+ raise NoMatch(matching, names[:1])
+ return nodes
+
+ def _matchnodes(self, matching, names):
+ if not matching or not names:
+ return matching
+ name = names[0]
+ assert name
+ nextnames = names[1:]
+ resultnodes = []
+ for node in matching:
+ if isinstance(node, pytest.Item):
+ if not names:
+ resultnodes.append(node)
+ continue
+ assert isinstance(node, pytest.Collector)
+ rep = collect_one_node(node)
+ if rep.passed:
+ has_matched = False
+ for x in rep.result:
+ # TODO: remove parametrized workaround once collection structure contains parametrization
+ if x.name == name or x.name.split("[")[0] == name:
+ resultnodes.extend(self.matchnodes([x], nextnames))
+ has_matched = True
+ # XXX accept IDs that don't have "()" for class instances
+ if not has_matched and len(rep.result) == 1 and x.name == "()":
+ nextnames.insert(0, name)
+ resultnodes.extend(self.matchnodes([x], nextnames))
+ node.ihook.pytest_collectreport(report=rep)
+ return resultnodes
+
+ def genitems(self, node):
+ self.trace("genitems", node)
+ if isinstance(node, pytest.Item):
+ node.ihook.pytest_itemcollected(item=node)
+ yield node
+ else:
+ assert isinstance(node, pytest.Collector)
+ rep = collect_one_node(node)
+ if rep.passed:
+ for subnode in rep.result:
+ for x in self.genitems(subnode):
+ yield x
+ node.ihook.pytest_collectreport(report=rep)
diff --git a/python/pytest/_pytest/mark.py b/python/pytest/_pytest/mark.py
new file mode 100644
index 000000000..d8b60def3
--- /dev/null
+++ b/python/pytest/_pytest/mark.py
@@ -0,0 +1,311 @@
+""" generic mechanism for marking and selecting python functions. """
+import inspect
+
+
+class MarkerError(Exception):
+
+ """Error in use of a pytest marker/attribute."""
+
+
+def pytest_namespace():
+ return {'mark': MarkGenerator()}
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption(
+ '-k',
+ action="store", dest="keyword", default='', metavar="EXPRESSION",
+ help="only run tests which match the given substring expression. "
+ "An expression is a python evaluatable expression "
+ "where all names are substring-matched against test names "
+ "and their parent classes. Example: -k 'test_method or test "
+ "other' matches all test functions and classes whose name "
+ "contains 'test_method' or 'test_other'. "
+ "Additionally keywords are matched to classes and functions "
+ "containing extra names in their 'extra_keyword_matches' set, "
+ "as well as functions which have names assigned directly to them."
+ )
+
+ group._addoption(
+ "-m",
+ action="store", dest="markexpr", default="", metavar="MARKEXPR",
+ help="only run tests matching given mark expression. "
+ "example: -m 'mark1 and not mark2'."
+ )
+
+ group.addoption(
+ "--markers", action="store_true",
+ help="show markers (builtin, plugin and per-project ones)."
+ )
+
+ parser.addini("markers", "markers for test functions", 'linelist')
+
+
+def pytest_cmdline_main(config):
+ import _pytest.config
+ if config.option.markers:
+ config._do_configure()
+ tw = _pytest.config.create_terminal_writer(config)
+ for line in config.getini("markers"):
+ name, rest = line.split(":", 1)
+ tw.write("@pytest.mark.%s:" % name, bold=True)
+ tw.line(rest)
+ tw.line()
+ config._ensure_unconfigure()
+ return 0
+pytest_cmdline_main.tryfirst = True
+
+
+def pytest_collection_modifyitems(items, config):
+ keywordexpr = config.option.keyword.lstrip()
+ matchexpr = config.option.markexpr
+ if not keywordexpr and not matchexpr:
+ return
+ # pytest used to allow "-" for negating
+ # but today we just allow "-" at the beginning, use "not" instead
+ # we probably remove "-" alltogether soon
+ if keywordexpr.startswith("-"):
+ keywordexpr = "not " + keywordexpr[1:]
+ selectuntil = False
+ if keywordexpr[-1:] == ":":
+ selectuntil = True
+ keywordexpr = keywordexpr[:-1]
+
+ remaining = []
+ deselected = []
+ for colitem in items:
+ if keywordexpr and not matchkeyword(colitem, keywordexpr):
+ deselected.append(colitem)
+ else:
+ if selectuntil:
+ keywordexpr = None
+ if matchexpr:
+ if not matchmark(colitem, matchexpr):
+ deselected.append(colitem)
+ continue
+ remaining.append(colitem)
+
+ if deselected:
+ config.hook.pytest_deselected(items=deselected)
+ items[:] = remaining
+
+
+class MarkMapping:
+ """Provides a local mapping for markers where item access
+ resolves to True if the marker is present. """
+ def __init__(self, keywords):
+ mymarks = set()
+ for key, value in keywords.items():
+ if isinstance(value, MarkInfo) or isinstance(value, MarkDecorator):
+ mymarks.add(key)
+ self._mymarks = mymarks
+
+ def __getitem__(self, name):
+ return name in self._mymarks
+
+
+class KeywordMapping:
+ """Provides a local mapping for keywords.
+ Given a list of names, map any substring of one of these names to True.
+ """
+ def __init__(self, names):
+ self._names = names
+
+ def __getitem__(self, subname):
+ for name in self._names:
+ if subname in name:
+ return True
+ return False
+
+
+def matchmark(colitem, markexpr):
+ """Tries to match on any marker names, attached to the given colitem."""
+ return eval(markexpr, {}, MarkMapping(colitem.keywords))
+
+
+def matchkeyword(colitem, keywordexpr):
+ """Tries to match given keyword expression to given collector item.
+
+ Will match on the name of colitem, including the names of its parents.
+ Only matches names of items which are either a :class:`Class` or a
+ :class:`Function`.
+ Additionally, matches on names in the 'extra_keyword_matches' set of
+ any item, as well as names directly assigned to test functions.
+ """
+ mapped_names = set()
+
+ # Add the names of the current item and any parent items
+ import pytest
+ for item in colitem.listchain():
+ if not isinstance(item, pytest.Instance):
+ mapped_names.add(item.name)
+
+ # Add the names added as extra keywords to current or parent items
+ for name in colitem.listextrakeywords():
+ mapped_names.add(name)
+
+ # Add the names attached to the current function through direct assignment
+ if hasattr(colitem, 'function'):
+ for name in colitem.function.__dict__:
+ mapped_names.add(name)
+
+ mapping = KeywordMapping(mapped_names)
+ if " " not in keywordexpr:
+ # special case to allow for simple "-k pass" and "-k 1.3"
+ return mapping[keywordexpr]
+ elif keywordexpr.startswith("not ") and " " not in keywordexpr[4:]:
+ return not mapping[keywordexpr[4:]]
+ return eval(keywordexpr, {}, mapping)
+
+
+def pytest_configure(config):
+ import pytest
+ if config.option.strict:
+ pytest.mark._config = config
+
+
+class MarkGenerator:
+ """ Factory for :class:`MarkDecorator` objects - exposed as
+ a ``pytest.mark`` singleton instance. Example::
+
+ import pytest
+ @pytest.mark.slowtest
+ def test_function():
+ pass
+
+ will set a 'slowtest' :class:`MarkInfo` object
+ on the ``test_function`` object. """
+
+ def __getattr__(self, name):
+ if name[0] == "_":
+ raise AttributeError("Marker name must NOT start with underscore")
+ if hasattr(self, '_config'):
+ self._check(name)
+ return MarkDecorator(name)
+
+ def _check(self, name):
+ try:
+ if name in self._markers:
+ return
+ except AttributeError:
+ pass
+ self._markers = l = set()
+ for line in self._config.getini("markers"):
+ beginning = line.split(":", 1)
+ x = beginning[0].split("(", 1)[0]
+ l.add(x)
+ if name not in self._markers:
+ raise AttributeError("%r not a registered marker" % (name,))
+
+def istestfunc(func):
+ return hasattr(func, "__call__") and \
+ getattr(func, "__name__", "<lambda>") != "<lambda>"
+
+class MarkDecorator:
+ """ A decorator for test functions and test classes. When applied
+ it will create :class:`MarkInfo` objects which may be
+ :ref:`retrieved by hooks as item keywords <excontrolskip>`.
+ MarkDecorator instances are often created like this::
+
+ mark1 = pytest.mark.NAME # simple MarkDecorator
+ mark2 = pytest.mark.NAME(name1=value) # parametrized MarkDecorator
+
+ and can then be applied as decorators to test functions::
+
+ @mark2
+ def test_function():
+ pass
+
+ When a MarkDecorator instance is called it does the following:
+ 1. If called with a single class as its only positional argument and no
+ additional keyword arguments, it attaches itself to the class so it
+ gets applied automatically to all test cases found in that class.
+ 2. If called with a single function as its only positional argument and
+ no additional keyword arguments, it attaches a MarkInfo object to the
+ function, containing all the arguments already stored internally in
+ the MarkDecorator.
+ 3. When called in any other case, it performs a 'fake construction' call,
+ i.e. it returns a new MarkDecorator instance with the original
+ MarkDecorator's content updated with the arguments passed to this
+ call.
+
+ Note: The rules above prevent MarkDecorator objects from storing only a
+ single function or class reference as their positional argument with no
+ additional keyword or positional arguments.
+
+ """
+ def __init__(self, name, args=None, kwargs=None):
+ self.name = name
+ self.args = args or ()
+ self.kwargs = kwargs or {}
+
+ @property
+ def markname(self):
+ return self.name # for backward-compat (2.4.1 had this attr)
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ name = d.pop('name')
+ return "<MarkDecorator %r %r>" % (name, d)
+
+ def __call__(self, *args, **kwargs):
+ """ if passed a single callable argument: decorate it with mark info.
+ otherwise add *args/**kwargs in-place to mark information. """
+ if args and not kwargs:
+ func = args[0]
+ is_class = inspect.isclass(func)
+ if len(args) == 1 and (istestfunc(func) or is_class):
+ if is_class:
+ if hasattr(func, 'pytestmark'):
+ mark_list = func.pytestmark
+ if not isinstance(mark_list, list):
+ mark_list = [mark_list]
+ # always work on a copy to avoid updating pytestmark
+ # from a superclass by accident
+ mark_list = mark_list + [self]
+ func.pytestmark = mark_list
+ else:
+ func.pytestmark = [self]
+ else:
+ holder = getattr(func, self.name, None)
+ if holder is None:
+ holder = MarkInfo(
+ self.name, self.args, self.kwargs
+ )
+ setattr(func, self.name, holder)
+ else:
+ holder.add(self.args, self.kwargs)
+ return func
+ kw = self.kwargs.copy()
+ kw.update(kwargs)
+ args = self.args + args
+ return self.__class__(self.name, args=args, kwargs=kw)
+
+
+class MarkInfo:
+ """ Marking object created by :class:`MarkDecorator` instances. """
+ def __init__(self, name, args, kwargs):
+ #: name of attribute
+ self.name = name
+ #: positional argument list, empty if none specified
+ self.args = args
+ #: keyword argument dictionary, empty if nothing specified
+ self.kwargs = kwargs.copy()
+ self._arglist = [(args, kwargs.copy())]
+
+ def __repr__(self):
+ return "<MarkInfo %r args=%r kwargs=%r>" % (
+ self.name, self.args, self.kwargs
+ )
+
+ def add(self, args, kwargs):
+ """ add a MarkInfo with the given args and kwargs. """
+ self._arglist.append((args, kwargs))
+ self.args += args
+ self.kwargs.update(kwargs)
+
+ def __iter__(self):
+ """ yield MarkInfo objects each relating to a marking-call. """
+ for args, kwargs in self._arglist:
+ yield MarkInfo(self.name, args, kwargs)
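
For context, a minimal sketch of how a test module exercises the machinery above: ``pytest.mark.<name>`` produces MarkDecorator objects, and the ``-m``/``-k`` options are evaluated by pytest_collection_modifyitems() against MarkMapping/KeywordMapping. The file and marker names below are illustrative only, not part of the patch.

    # test_marks_example.py (hypothetical)
    import pytest

    @pytest.mark.slow                 # simple MarkDecorator
    @pytest.mark.timeout(10)          # parametrized MarkDecorator; args end up in MarkInfo
    def test_heavy_computation():
        assert sum(range(10)) == 45

    def test_quick():
        assert 2 * 2 == 4

    # `pytest -m slow test_marks_example.py` keeps only test_heavy_computation:
    # the expression is eval()ed against a MarkMapping built from each item's
    # keywords and test_quick is deselected.  `pytest -k quick` selects by
    # substring instead, via KeywordMapping.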
diff --git a/python/pytest/_pytest/monkeypatch.py b/python/pytest/_pytest/monkeypatch.py
new file mode 100644
index 000000000..d4c169d37
--- /dev/null
+++ b/python/pytest/_pytest/monkeypatch.py
@@ -0,0 +1,254 @@
+""" monkeypatching and mocking functionality. """
+
+import os, sys
+import re
+
+from py.builtin import _basestring
+
+RE_IMPORT_ERROR_NAME = re.compile("^No module named (.*)$")
+
+
+def pytest_funcarg__monkeypatch(request):
+ """The returned ``monkeypatch`` funcarg provides these
+ helper methods to modify objects, dictionaries or os.environ::
+
+ monkeypatch.setattr(obj, name, value, raising=True)
+ monkeypatch.delattr(obj, name, raising=True)
+ monkeypatch.setitem(mapping, name, value)
+ monkeypatch.delitem(obj, name, raising=True)
+ monkeypatch.setenv(name, value, prepend=False)
+ monkeypatch.delenv(name, raising=True)
+ monkeypatch.syspath_prepend(path)
+ monkeypatch.chdir(path)
+
+ All modifications will be undone after the requesting
+ test function has finished. The ``raising``
+ parameter determines if a KeyError or AttributeError
+ will be raised if the set/deletion operation has no target.
+ """
+ mpatch = monkeypatch()
+ request.addfinalizer(mpatch.undo)
+ return mpatch
+
+
+def resolve(name):
+ # simplified from zope.dottedname
+ parts = name.split('.')
+
+ used = parts.pop(0)
+ found = __import__(used)
+ for part in parts:
+ used += '.' + part
+ try:
+ found = getattr(found, part)
+ except AttributeError:
+ pass
+ else:
+ continue
+ # we use explicit un-nesting of the handling block in order
+ # to avoid nested exceptions on python 3
+ try:
+ __import__(used)
+ except ImportError as ex:
+ # str is used for py2 vs py3
+ expected = str(ex).split()[-1]
+ if expected == used:
+ raise
+ else:
+ raise ImportError(
+ 'import error in %s: %s' % (used, ex)
+ )
+ found = annotated_getattr(found, part, used)
+ return found
+
+
+def annotated_getattr(obj, name, ann):
+ try:
+ obj = getattr(obj, name)
+ except AttributeError:
+ raise AttributeError(
+ '%r object at %s has no attribute %r' % (
+ type(obj).__name__, ann, name
+ )
+ )
+ return obj
+
+
+def derive_importpath(import_path, raising):
+ if not isinstance(import_path, _basestring) or "." not in import_path:
+ raise TypeError("must be absolute import path string, not %r" %
+ (import_path,))
+ module, attr = import_path.rsplit('.', 1)
+ target = resolve(module)
+ if raising:
+ annotated_getattr(target, attr, ann=module)
+ return attr, target
+
+
+class Notset:
+ def __repr__(self):
+ return "<notset>"
+
+
+notset = Notset()
+
+
+class monkeypatch:
+ """ Object keeping a record of setattr/item/env/syspath changes. """
+
+ def __init__(self):
+ self._setattr = []
+ self._setitem = []
+ self._cwd = None
+ self._savesyspath = None
+
+ def setattr(self, target, name, value=notset, raising=True):
+ """ Set attribute value on target, memorizing the old value.
+ By default raise AttributeError if the attribute did not exist.
+
+ For convenience you can specify a string as ``target`` which
+ will be interpreted as a dotted import path, with the last part
+ being the attribute name. Example:
+ ``monkeypatch.setattr("os.getcwd", lambda: "/")``
+ would set the ``getcwd`` function of the ``os`` module.
+
+ The ``raising`` value determines if the setattr should fail
+ if the attribute is not already present (defaults to True
+ which means it will raise).
+ """
+ __tracebackhide__ = True
+ import inspect
+
+ if value is notset:
+ if not isinstance(target, _basestring):
+ raise TypeError("use setattr(target, name, value) or "
+ "setattr(target, value) with target being a dotted "
+ "import string")
+ value = name
+ name, target = derive_importpath(target, raising)
+
+ oldval = getattr(target, name, notset)
+ if raising and oldval is notset:
+ raise AttributeError("%r has no attribute %r" % (target, name))
+
+ # avoid class descriptors like staticmethod/classmethod
+ if inspect.isclass(target):
+ oldval = target.__dict__.get(name, notset)
+ self._setattr.append((target, name, oldval))
+ setattr(target, name, value)
+
+ def delattr(self, target, name=notset, raising=True):
+ """ Delete attribute ``name`` from ``target``, by default raise
+ AttributeError if the attribute did not previously exist.
+
+ If no ``name`` is specified and ``target`` is a string
+ it will be interpreted as a dotted import path with the
+ last part being the attribute name.
+
+ If ``raising`` is set to False, no exception will be raised if the
+ attribute is missing.
+ """
+ __tracebackhide__ = True
+ if name is notset:
+ if not isinstance(target, _basestring):
+ raise TypeError("use delattr(target, name) or "
+ "delattr(target) with target being a dotted "
+ "import string")
+ name, target = derive_importpath(target, raising)
+
+ if not hasattr(target, name):
+ if raising:
+ raise AttributeError(name)
+ else:
+ self._setattr.append((target, name, getattr(target, name, notset)))
+ delattr(target, name)
+
+ def setitem(self, dic, name, value):
+ """ Set dictionary entry ``name`` to value. """
+ self._setitem.append((dic, name, dic.get(name, notset)))
+ dic[name] = value
+
+ def delitem(self, dic, name, raising=True):
+ """ Delete ``name`` from dict. Raise KeyError if it doesn't exist.
+
+ If ``raising`` is set to False, no exception will be raised if the
+ key is missing.
+ """
+ if name not in dic:
+ if raising:
+ raise KeyError(name)
+ else:
+ self._setitem.append((dic, name, dic.get(name, notset)))
+ del dic[name]
+
+ def setenv(self, name, value, prepend=None):
+ """ Set environment variable ``name`` to ``value``. If ``prepend``
+ is a character, read the current environment variable value
+ and prepend the ``value`` adjoined with the ``prepend`` character."""
+ value = str(value)
+ if prepend and name in os.environ:
+ value = value + prepend + os.environ[name]
+ self.setitem(os.environ, name, value)
+
+ def delenv(self, name, raising=True):
+ """ Delete ``name`` from the environment. Raise KeyError it does not
+ exist.
+
+ If ``raising`` is set to False, no exception will be raised if the
+ environment variable is missing.
+ """
+ self.delitem(os.environ, name, raising=raising)
+
+ def syspath_prepend(self, path):
+ """ Prepend ``path`` to ``sys.path`` list of import locations. """
+ if self._savesyspath is None:
+ self._savesyspath = sys.path[:]
+ sys.path.insert(0, str(path))
+
+ def chdir(self, path):
+ """ Change the current working directory to the specified path.
+ Path can be a string or a py.path.local object.
+ """
+ if self._cwd is None:
+ self._cwd = os.getcwd()
+ if hasattr(path, "chdir"):
+ path.chdir()
+ else:
+ os.chdir(path)
+
+ def undo(self):
+ """ Undo previous changes. This call consumes the
+ undo stack. Calling it a second time has no effect unless
+ you do more monkeypatching after the undo call.
+
+ There is generally no need to call `undo()`, since it is
+ called automatically during tear-down.
+
+ Note that the same `monkeypatch` fixture is used across a
+ single test function invocation. If `monkeypatch` is used both by
+ the test function itself and one of the test fixtures,
+ calling `undo()` will undo all of the changes made in
+ both functions.
+ """
+ for obj, name, value in reversed(self._setattr):
+ if value is not notset:
+ setattr(obj, name, value)
+ else:
+ delattr(obj, name)
+ self._setattr[:] = []
+ for dictionary, name, value in reversed(self._setitem):
+ if value is notset:
+ try:
+ del dictionary[name]
+ except KeyError:
+ pass # was already deleted, so we have the desired state
+ else:
+ dictionary[name] = value
+ self._setitem[:] = []
+ if self._savesyspath is not None:
+ sys.path[:] = self._savesyspath
+ self._savesyspath = None
+
+ if self._cwd is not None:
+ os.chdir(self._cwd)
+ self._cwd = None
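
A short usage sketch for the monkeypatch funcarg defined above; every modification is rolled back by undo() during teardown, as documented. The environment variable and dictionary names are illustrative only.

    import os

    def test_fake_home(monkeypatch):
        # dotted-import-path form of setattr(), reverted after the test
        monkeypatch.setattr("os.path.expanduser", lambda path: "/tmp/fake-home")
        assert os.path.expanduser("~") == "/tmp/fake-home"

    def test_env_and_mapping(monkeypatch):
        monkeypatch.setenv("APP_MODE", "testing")                    # os.environ entry
        monkeypatch.setenv("PATH", "/opt/tools", prepend=os.pathsep) # prepend with separator
        cache = {"answer": 41}
        monkeypatch.setitem(cache, "answer", 42)                     # plain dict entry
        assert os.environ["APP_MODE"] == "testing"
        assert cache["answer"] == 42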
diff --git a/python/pytest/_pytest/nose.py b/python/pytest/_pytest/nose.py
new file mode 100644
index 000000000..038746868
--- /dev/null
+++ b/python/pytest/_pytest/nose.py
@@ -0,0 +1,71 @@
+""" run test suites written for nose. """
+
+import sys
+
+import py
+import pytest
+from _pytest import unittest
+
+
+def get_skip_exceptions():
+ skip_classes = set()
+ for module_name in ('unittest', 'unittest2', 'nose'):
+ mod = sys.modules.get(module_name)
+ if hasattr(mod, 'SkipTest'):
+ skip_classes.add(mod.SkipTest)
+ return tuple(skip_classes)
+
+
+def pytest_runtest_makereport(item, call):
+ if call.excinfo and call.excinfo.errisinstance(get_skip_exceptions()):
+ # let's substitute the excinfo with a pytest.skip one
+ call2 = call.__class__(lambda:
+ pytest.skip(str(call.excinfo.value)), call.when)
+ call.excinfo = call2.excinfo
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_runtest_setup(item):
+ if is_potential_nosetest(item):
+ if isinstance(item.parent, pytest.Generator):
+ gen = item.parent
+ if not hasattr(gen, '_nosegensetup'):
+ call_optional(gen.obj, 'setup')
+ if isinstance(gen.parent, pytest.Instance):
+ call_optional(gen.parent.obj, 'setup')
+ gen._nosegensetup = True
+ if not call_optional(item.obj, 'setup'):
+ # call module level setup if there is no object level one
+ call_optional(item.parent.obj, 'setup')
+ #XXX this implies we only call teardown when setup worked
+ item.session._setupstate.addfinalizer((lambda: teardown_nose(item)), item)
+
+def teardown_nose(item):
+ if is_potential_nosetest(item):
+ if not call_optional(item.obj, 'teardown'):
+ call_optional(item.parent.obj, 'teardown')
+ #if hasattr(item.parent, '_nosegensetup'):
+ # #call_optional(item._nosegensetup, 'teardown')
+ # del item.parent._nosegensetup
+
+
+def pytest_make_collect_report(collector):
+ if isinstance(collector, pytest.Generator):
+ call_optional(collector.obj, 'setup')
+
+
+def is_potential_nosetest(item):
+ # extra check needed since we do not do nose style setup/teardown
+ # on direct unittest style classes
+ return isinstance(item, pytest.Function) and \
+ not isinstance(item, unittest.TestCaseFunction)
+
+
+def call_optional(obj, name):
+ method = getattr(obj, name, None)
+ isfixture = hasattr(method, "_pytestfixturefunction")
+ if method is not None and not isfixture and py.builtin.callable(method):
+ # If there's any problems allow the exception to raise rather than
+ # silently ignoring them
+ method()
+ return True
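
The nose compatibility layer above invokes plain ``setup``/``teardown`` callables through call_optional() around each collected item. A self-contained sketch of the kind of nose-style module it handles (the module content is invented for illustration):

    # test_nose_style.py (hypothetical): module-level setup/teardown, no classes
    state = []

    def setup():          # picked up by call_optional(module, 'setup')
        state.append("ready")

    def teardown():       # registered as a finalizer by teardown_nose()
        state.clear()

    def test_sees_module_setup():
        assert state == ["ready"]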
diff --git a/python/pytest/_pytest/pastebin.py b/python/pytest/_pytest/pastebin.py
new file mode 100644
index 000000000..4ec62d022
--- /dev/null
+++ b/python/pytest/_pytest/pastebin.py
@@ -0,0 +1,92 @@
+""" submit failure or test session information to a pastebin service. """
+import pytest
+import sys
+import tempfile
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting")
+ group._addoption('--pastebin', metavar="mode",
+ action='store', dest="pastebin", default=None,
+ choices=['failed', 'all'],
+ help="send failed|all info to bpaste.net pastebin service.")
+
+@pytest.hookimpl(trylast=True)
+def pytest_configure(config):
+ import py
+ if config.option.pastebin == "all":
+ tr = config.pluginmanager.getplugin('terminalreporter')
+ # if no terminal reporter plugin is present, nothing we can do here;
+ # this can happen when this function executes in a slave node
+ # when using pytest-xdist, for example
+ if tr is not None:
+ # pastebin file will be utf-8 encoded binary file
+ config._pastebinfile = tempfile.TemporaryFile('w+b')
+ oldwrite = tr._tw.write
+ def tee_write(s, **kwargs):
+ oldwrite(s, **kwargs)
+ if py.builtin._istext(s):
+ s = s.encode('utf-8')
+ config._pastebinfile.write(s)
+ tr._tw.write = tee_write
+
+def pytest_unconfigure(config):
+ if hasattr(config, '_pastebinfile'):
+ # get terminal contents and delete file
+ config._pastebinfile.seek(0)
+ sessionlog = config._pastebinfile.read()
+ config._pastebinfile.close()
+ del config._pastebinfile
+ # undo our patching in the terminal reporter
+ tr = config.pluginmanager.getplugin('terminalreporter')
+ del tr._tw.__dict__['write']
+ # write summary
+ tr.write_sep("=", "Sending information to Paste Service")
+ pastebinurl = create_new_paste(sessionlog)
+ tr.write_line("pastebin session-log: %s\n" % pastebinurl)
+
+def create_new_paste(contents):
+ """
+ Creates a new paste using bpaste.net service.
+
+ :contents: paste contents as utf-8 encoded bytes
+ :returns: url to the pasted contents
+ """
+ import re
+ if sys.version_info < (3, 0):
+ from urllib import urlopen, urlencode
+ else:
+ from urllib.request import urlopen
+ from urllib.parse import urlencode
+
+ params = {
+ 'code': contents,
+ 'lexer': 'python3' if sys.version_info[0] == 3 else 'python',
+ 'expiry': '1week',
+ }
+ url = 'https://bpaste.net'
+ response = urlopen(url, data=urlencode(params).encode('ascii')).read()
+ m = re.search(r'href="/raw/(\w+)"', response.decode('utf-8'))
+ if m:
+ return '%s/show/%s' % (url, m.group(1))
+ else:
+ return 'bad response: ' + response
+
+def pytest_terminal_summary(terminalreporter):
+ import _pytest.config
+ if terminalreporter.config.option.pastebin != "failed":
+ return
+ tr = terminalreporter
+ if 'failed' in tr.stats:
+ terminalreporter.write_sep("=", "Sending information to Paste Service")
+ for rep in terminalreporter.stats.get('failed'):
+ try:
+ msg = rep.longrepr.reprtraceback.reprentries[-1].reprfileloc
+ except AttributeError:
+ msg = tr._getfailureheadline(rep)
+ tw = _pytest.config.create_terminal_writer(terminalreporter.config, stringio=True)
+ rep.toterminal(tw)
+ s = tw.stringio.getvalue()
+ assert len(s)
+ pastebinurl = create_new_paste(s)
+ tr.write_line("%s --> %s" %(msg, pastebinurl))
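
With ``--pastebin=all`` the plugin tees everything the terminal writer prints into a temporary file before uploading it. The pattern itself is independent of pytest; a standalone sketch under invented names (Writer, the sample text) follows.

    import tempfile

    class Writer:
        def write(self, s, **kwargs):
            print(s, end="")

    def tee(writer):
        """Copy every chunk passed to writer.write() into a temp file,
        mirroring the tee_write() patching done in pytest_configure above."""
        logfile = tempfile.TemporaryFile('w+b')
        oldwrite = writer.write
        def tee_write(s, **kwargs):
            oldwrite(s, **kwargs)
            if isinstance(s, str):
                s = s.encode('utf-8')
            logfile.write(s)
        writer.write = tee_write
        return logfile

    w = Writer()
    log = tee(w)
    w.write("collected 3 items\n")
    log.seek(0)
    assert log.read() == b"collected 3 items\n"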
diff --git a/python/pytest/_pytest/pdb.py b/python/pytest/_pytest/pdb.py
new file mode 100644
index 000000000..84c920d17
--- /dev/null
+++ b/python/pytest/_pytest/pdb.py
@@ -0,0 +1,109 @@
+""" interactive debugging with PDB, the Python Debugger. """
+from __future__ import absolute_import
+import pdb
+import sys
+
+import pytest
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group._addoption('--pdb',
+ action="store_true", dest="usepdb", default=False,
+ help="start the interactive Python debugger on errors.")
+
+def pytest_namespace():
+ return {'set_trace': pytestPDB().set_trace}
+
+def pytest_configure(config):
+ if config.getvalue("usepdb"):
+ config.pluginmanager.register(PdbInvoke(), 'pdbinvoke')
+
+ old = (pdb.set_trace, pytestPDB._pluginmanager)
+ def fin():
+ pdb.set_trace, pytestPDB._pluginmanager = old
+ pytestPDB._config = None
+ pdb.set_trace = pytest.set_trace
+ pytestPDB._pluginmanager = config.pluginmanager
+ pytestPDB._config = config
+ config._cleanup.append(fin)
+
+class pytestPDB:
+ """ Pseudo PDB that defers to the real pdb. """
+ _pluginmanager = None
+ _config = None
+
+ def set_trace(self):
+ """ invoke PDB set_trace debugging, dropping any IO capturing. """
+ import _pytest.config
+ frame = sys._getframe().f_back
+ if self._pluginmanager is not None:
+ capman = self._pluginmanager.getplugin("capturemanager")
+ if capman:
+ capman.suspendcapture(in_=True)
+ tw = _pytest.config.create_terminal_writer(self._config)
+ tw.line()
+ tw.sep(">", "PDB set_trace (IO-capturing turned off)")
+ self._pluginmanager.hook.pytest_enter_pdb(config=self._config)
+ pdb.Pdb().set_trace(frame)
+
+
+class PdbInvoke:
+ def pytest_exception_interact(self, node, call, report):
+ capman = node.config.pluginmanager.getplugin("capturemanager")
+ if capman:
+ out, err = capman.suspendcapture(in_=True)
+ sys.stdout.write(out)
+ sys.stdout.write(err)
+ _enter_pdb(node, call.excinfo, report)
+
+ def pytest_internalerror(self, excrepr, excinfo):
+ for line in str(excrepr).split("\n"):
+ sys.stderr.write("INTERNALERROR> %s\n" %line)
+ sys.stderr.flush()
+ tb = _postmortem_traceback(excinfo)
+ post_mortem(tb)
+
+
+def _enter_pdb(node, excinfo, rep):
+ # XXX we re-use the TerminalReporter's terminalwriter
+ # because this seems to avoid some encoding related troubles
+ # for not completely clear reasons.
+ tw = node.config.pluginmanager.getplugin("terminalreporter")._tw
+ tw.line()
+ tw.sep(">", "traceback")
+ rep.toterminal(tw)
+ tw.sep(">", "entering PDB")
+ tb = _postmortem_traceback(excinfo)
+ post_mortem(tb)
+ rep._pdbshown = True
+ return rep
+
+
+def _postmortem_traceback(excinfo):
+ # A doctest.UnexpectedException is not useful for post_mortem.
+ # Use the underlying exception instead:
+ from doctest import UnexpectedException
+ if isinstance(excinfo.value, UnexpectedException):
+ return excinfo.value.exc_info[2]
+ else:
+ return excinfo._excinfo[2]
+
+
+def _find_last_non_hidden_frame(stack):
+ i = max(0, len(stack) - 1)
+ while i and stack[i][0].f_locals.get("__tracebackhide__", False):
+ i -= 1
+ return i
+
+
+def post_mortem(t):
+ class Pdb(pdb.Pdb):
+ def get_stack(self, f, t):
+ stack, i = pdb.Pdb.get_stack(self, f, t)
+ if f is None:
+ i = _find_last_non_hidden_frame(stack)
+ return stack, i
+ p = Pdb()
+ p.reset()
+ p.interaction(None, t)
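
post_mortem() above starts the debugger on the deepest frame not marked with ``__tracebackhide__``. The frame-selection helper is small enough to exercise on its own; the sketch below builds a two-entry (frame, lineno) stack by hand (the ``hidden`` function is invented for the demonstration).

    import sys

    def _find_last_non_hidden_frame(stack):
        # same walk-back logic as in pdb.py above
        i = max(0, len(stack) - 1)
        while i and stack[i][0].f_locals.get("__tracebackhide__", False):
            i -= 1
        return i

    def hidden():
        __tracebackhide__ = True
        frame = sys._getframe()
        # fake stack: caller's frame first, this hidden frame last
        stack = [(frame.f_back, frame.f_back.f_lineno), (frame, frame.f_lineno)]
        return _find_last_non_hidden_frame(stack)

    assert hidden() == 0   # the hidden frame at the end is skipped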
diff --git a/python/pytest/_pytest/pytester.py b/python/pytest/_pytest/pytester.py
new file mode 100644
index 000000000..faed7f581
--- /dev/null
+++ b/python/pytest/_pytest/pytester.py
@@ -0,0 +1,1110 @@
+""" (disabled by default) support for testing pytest and pytest plugins. """
+import codecs
+import gc
+import os
+import platform
+import re
+import subprocess
+import sys
+import time
+import traceback
+from fnmatch import fnmatch
+
+from py.builtin import print_
+
+from _pytest._code import Source
+import py
+import pytest
+from _pytest.main import Session, EXIT_OK
+
+
+def pytest_addoption(parser):
+ # group = parser.getgroup("pytester", "pytester (self-tests) options")
+ parser.addoption('--lsof',
+ action="store_true", dest="lsof", default=False,
+ help=("run FD checks if lsof is available"))
+
+ parser.addoption('--runpytest', default="inprocess", dest="runpytest",
+ choices=("inprocess", "subprocess", ),
+ help=("run pytest sub runs in tests using an 'inprocess' "
+ "or 'subprocess' (python -m main) method"))
+
+
+def pytest_configure(config):
+ # This might be called multiple times. Only take the first.
+ global _pytest_fullpath
+ try:
+ _pytest_fullpath
+ except NameError:
+ _pytest_fullpath = os.path.abspath(pytest.__file__.rstrip("oc"))
+ _pytest_fullpath = _pytest_fullpath.replace("$py.class", ".py")
+
+ if config.getvalue("lsof"):
+ checker = LsofFdLeakChecker()
+ if checker.matching_platform():
+ config.pluginmanager.register(checker)
+
+
+class LsofFdLeakChecker(object):
+ def get_open_files(self):
+ out = self._exec_lsof()
+ open_files = self._parse_lsof_output(out)
+ return open_files
+
+ def _exec_lsof(self):
+ pid = os.getpid()
+ return py.process.cmdexec("lsof -Ffn0 -p %d" % pid)
+
+ def _parse_lsof_output(self, out):
+ def isopen(line):
+ return line.startswith('f') and ("deleted" not in line and
+ 'mem' not in line and "txt" not in line and 'cwd' not in line)
+
+ open_files = []
+
+ for line in out.split("\n"):
+ if isopen(line):
+ fields = line.split('\0')
+ fd = fields[0][1:]
+ filename = fields[1][1:]
+ if filename.startswith('/'):
+ open_files.append((fd, filename))
+
+ return open_files
+
+ def matching_platform(self):
+ try:
+ py.process.cmdexec("lsof -v")
+ except (py.process.cmdexec.Error, UnicodeDecodeError):
+ # cmdexec may raise UnicodeDecodeError on Windows systems
+ # with locale other than english:
+ # https://bitbucket.org/pytest-dev/py/issues/66
+ return False
+ else:
+ return True
+
+ @pytest.hookimpl(hookwrapper=True, tryfirst=True)
+ def pytest_runtest_item(self, item):
+ lines1 = self.get_open_files()
+ yield
+ if hasattr(sys, "pypy_version_info"):
+ gc.collect()
+ lines2 = self.get_open_files()
+
+ new_fds = set([t[0] for t in lines2]) - set([t[0] for t in lines1])
+ leaked_files = [t for t in lines2 if t[0] in new_fds]
+ if leaked_files:
+ error = []
+ error.append("***** %s FD leakage detected" % len(leaked_files))
+ error.extend([str(f) for f in leaked_files])
+ error.append("*** Before:")
+ error.extend([str(f) for f in lines1])
+ error.append("*** After:")
+ error.extend([str(f) for f in lines2])
+ error.append(error[0])
+ error.append("*** function %s:%s: %s " % item.location)
+ pytest.fail("\n".join(error), pytrace=False)
+
+
+# XXX copied from execnet's conftest.py - needs to be merged
+winpymap = {
+ 'python2.7': r'C:\Python27\python.exe',
+ 'python2.6': r'C:\Python26\python.exe',
+ 'python3.1': r'C:\Python31\python.exe',
+ 'python3.2': r'C:\Python32\python.exe',
+ 'python3.3': r'C:\Python33\python.exe',
+ 'python3.4': r'C:\Python34\python.exe',
+ 'python3.5': r'C:\Python35\python.exe',
+}
+
+def getexecutable(name, cache={}):
+ try:
+ return cache[name]
+ except KeyError:
+ executable = py.path.local.sysfind(name)
+ if executable:
+ if name == "jython":
+ import subprocess
+ popen = subprocess.Popen([str(executable), "--version"],
+ universal_newlines=True, stderr=subprocess.PIPE)
+ out, err = popen.communicate()
+ if not err or "2.5" not in err:
+ executable = None
+ if "2.5.2" in err:
+ executable = None # http://bugs.jython.org/issue1790
+ cache[name] = executable
+ return executable
+
+@pytest.fixture(params=['python2.6', 'python2.7', 'python3.3', "python3.4",
+ 'pypy', 'pypy3'])
+def anypython(request):
+ name = request.param
+ executable = getexecutable(name)
+ if executable is None:
+ if sys.platform == "win32":
+ executable = winpymap.get(name, None)
+ if executable:
+ executable = py.path.local(executable)
+ if executable.check():
+ return executable
+ pytest.skip("no suitable %s found" % (name,))
+ return executable
+
+# used at least by pytest-xdist plugin
+@pytest.fixture
+def _pytest(request):
+ """ Return a helper which offers a gethookrecorder(hook)
+ method which returns a HookRecorder instance which helps
+ to make assertions about called hooks.
+ """
+ return PytestArg(request)
+
+class PytestArg:
+ def __init__(self, request):
+ self.request = request
+
+ def gethookrecorder(self, hook):
+ hookrecorder = HookRecorder(hook._pm)
+ self.request.addfinalizer(hookrecorder.finish_recording)
+ return hookrecorder
+
+
+def get_public_names(l):
+ """Only return names from iterator l without a leading underscore."""
+ return [x for x in l if x[0] != "_"]
+
+
+class ParsedCall:
+ def __init__(self, name, kwargs):
+ self.__dict__.update(kwargs)
+ self._name = name
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ del d['_name']
+ return "<ParsedCall %r(**%r)>" %(self._name, d)
+
+
+class HookRecorder:
+ """Record all hooks called in a plugin manager.
+
+ This wraps all the hook calls in the plugin manager, recording
+ each call before propagating the normal calls.
+
+ """
+
+ def __init__(self, pluginmanager):
+ self._pluginmanager = pluginmanager
+ self.calls = []
+
+ def before(hook_name, hook_impls, kwargs):
+ self.calls.append(ParsedCall(hook_name, kwargs))
+
+ def after(outcome, hook_name, hook_impls, kwargs):
+ pass
+
+ self._undo_wrapping = pluginmanager.add_hookcall_monitoring(before, after)
+
+ def finish_recording(self):
+ self._undo_wrapping()
+
+ def getcalls(self, names):
+ if isinstance(names, str):
+ names = names.split()
+ return [call for call in self.calls if call._name in names]
+
+ def assert_contains(self, entries):
+ __tracebackhide__ = True
+ i = 0
+ entries = list(entries)
+ backlocals = sys._getframe(1).f_locals
+ while entries:
+ name, check = entries.pop(0)
+ for ind, call in enumerate(self.calls[i:]):
+ if call._name == name:
+ print_("NAMEMATCH", name, call)
+ if eval(check, backlocals, call.__dict__):
+ print_("CHECKERMATCH", repr(check), "->", call)
+ else:
+ print_("NOCHECKERMATCH", repr(check), "-", call)
+ continue
+ i += ind + 1
+ break
+ print_("NONAMEMATCH", name, "with", call)
+ else:
+ pytest.fail("could not find %r check %r" % (name, check))
+
+ def popcall(self, name):
+ __tracebackhide__ = True
+ for i, call in enumerate(self.calls):
+ if call._name == name:
+ del self.calls[i]
+ return call
+ lines = ["could not find call %r, in:" % (name,)]
+ lines.extend([" %s" % str(x) for x in self.calls])
+ pytest.fail("\n".join(lines))
+
+ def getcall(self, name):
+ l = self.getcalls(name)
+ assert len(l) == 1, (name, l)
+ return l[0]
+
+ # functionality for test reports
+
+ def getreports(self,
+ names="pytest_runtest_logreport pytest_collectreport"):
+ return [x.report for x in self.getcalls(names)]
+
+ def matchreport(self, inamepart="",
+ names="pytest_runtest_logreport pytest_collectreport", when=None):
+ """ return a testreport whose dotted import path matches """
+ l = []
+ for rep in self.getreports(names=names):
+ try:
+ if not when and rep.when != "call" and rep.passed:
+ # setup/teardown passing reports - let's ignore those
+ continue
+ except AttributeError:
+ pass
+ if when and getattr(rep, 'when', None) != when:
+ continue
+ if not inamepart or inamepart in rep.nodeid.split("::"):
+ l.append(rep)
+ if not l:
+ raise ValueError("could not find test report matching %r: "
+ "no test reports at all!" % (inamepart,))
+ if len(l) > 1:
+ raise ValueError(
+ "found 2 or more testreports matching %r: %s" %(inamepart, l))
+ return l[0]
+
+ def getfailures(self,
+ names='pytest_runtest_logreport pytest_collectreport'):
+ return [rep for rep in self.getreports(names) if rep.failed]
+
+ def getfailedcollections(self):
+ return self.getfailures('pytest_collectreport')
+
+ def listoutcomes(self):
+ passed = []
+ skipped = []
+ failed = []
+ for rep in self.getreports(
+ "pytest_collectreport pytest_runtest_logreport"):
+ if rep.passed:
+ if getattr(rep, "when", None) == "call":
+ passed.append(rep)
+ elif rep.skipped:
+ skipped.append(rep)
+ elif rep.failed:
+ failed.append(rep)
+ return passed, skipped, failed
+
+ def countoutcomes(self):
+ return [len(x) for x in self.listoutcomes()]
+
+ def assertoutcome(self, passed=0, skipped=0, failed=0):
+ realpassed, realskipped, realfailed = self.listoutcomes()
+ assert passed == len(realpassed)
+ assert skipped == len(realskipped)
+ assert failed == len(realfailed)
+
+ def clear(self):
+ self.calls[:] = []
+
+
+@pytest.fixture
+def linecomp(request):
+ return LineComp()
+
+
+def pytest_funcarg__LineMatcher(request):
+ return LineMatcher
+
+
+@pytest.fixture
+def testdir(request, tmpdir_factory):
+ return Testdir(request, tmpdir_factory)
+
+
+rex_outcome = re.compile("(\d+) ([\w-]+)")
+class RunResult:
+ """The result of running a command.
+
+ Attributes:
+
+ :ret: The return value.
+ :outlines: List of lines captured from stdout.
+ :errlines: List of lines captured from stderr.
+ :stdout: :py:class:`LineMatcher` of stdout, use ``stdout.str()`` to
+ reconstruct stdout or the commonly used
+ ``stdout.fnmatch_lines()`` method.
+ :stderr: :py:class:`LineMatcher` of stderr.
+ :duration: Duration in seconds.
+
+ """
+ def __init__(self, ret, outlines, errlines, duration):
+ self.ret = ret
+ self.outlines = outlines
+ self.errlines = errlines
+ self.stdout = LineMatcher(outlines)
+ self.stderr = LineMatcher(errlines)
+ self.duration = duration
+
+ def parseoutcomes(self):
+ """ Return a dictionary of outcomestring->num from parsing
+ the terminal output that the test process produced."""
+ for line in reversed(self.outlines):
+ if 'seconds' in line:
+ outcomes = rex_outcome.findall(line)
+ if outcomes:
+ d = {}
+ for num, cat in outcomes:
+ d[cat] = int(num)
+ return d
+
+ def assert_outcomes(self, passed=0, skipped=0, failed=0):
+ """ assert that the specified outcomes appear with the respective
+ numbers (0 means it didn't occur) in the text output from a test run."""
+ d = self.parseoutcomes()
+ assert passed == d.get("passed", 0)
+ assert skipped == d.get("skipped", 0)
+ assert failed == d.get("failed", 0)
+
+
+
+class Testdir:
+ """Temporary test directory with tools to test/run py.test itself.
+
+ This is based on the ``tmpdir`` fixture but provides a number of
+ methods which aid with testing py.test itself. Unless
+ :py:meth:`chdir` is used all methods will use :py:attr:`tmpdir` as
+ current working directory.
+
+ Attributes:
+
+ :tmpdir: The :py:class:`py.path.local` instance of the temporary
+ directory.
+
+ :plugins: A list of plugins to use with :py:meth:`parseconfig` and
+ :py:meth:`runpytest`. Initially this is an empty list but
+ plugins can be added to the list. The type of items to add to
+ the list depend on the method which uses them so refer to them
+ for details.
+
+ """
+
+ def __init__(self, request, tmpdir_factory):
+ self.request = request
+ # XXX remove duplication with tmpdir plugin
+ basetmp = tmpdir_factory.ensuretemp("testdir")
+ name = request.function.__name__
+ for i in range(100):
+ try:
+ tmpdir = basetmp.mkdir(name + str(i))
+ except py.error.EEXIST:
+ continue
+ break
+ self.tmpdir = tmpdir
+ self.plugins = []
+ self._savesyspath = (list(sys.path), list(sys.meta_path))
+ self._savemodulekeys = set(sys.modules)
+ self.chdir() # always chdir
+ self.request.addfinalizer(self.finalize)
+ method = self.request.config.getoption("--runpytest")
+ if method == "inprocess":
+ self._runpytest_method = self.runpytest_inprocess
+ elif method == "subprocess":
+ self._runpytest_method = self.runpytest_subprocess
+
+ def __repr__(self):
+ return "<Testdir %r>" % (self.tmpdir,)
+
+ def finalize(self):
+ """Clean up global state artifacts.
+
+ Some methods modify the global interpreter state and this
+ tries to clean this up. It does not remove the temporary
+ directory however so it can be looked at after the test run
+ has finished.
+
+ """
+ sys.path[:], sys.meta_path[:] = self._savesyspath
+ if hasattr(self, '_olddir'):
+ self._olddir.chdir()
+ self.delete_loaded_modules()
+
+ def delete_loaded_modules(self):
+ """Delete modules that have been loaded during a test.
+
+ This allows the interpreter to catch module changes in case
+ the module is re-imported.
+ """
+ for name in set(sys.modules).difference(self._savemodulekeys):
+ # it seems zope.interfaces is keeping some state
+ # (used by twisted related tests)
+ if name != "zope.interface":
+ del sys.modules[name]
+
+ def make_hook_recorder(self, pluginmanager):
+ """Create a new :py:class:`HookRecorder` for a PluginManager."""
+ assert not hasattr(pluginmanager, "reprec")
+ pluginmanager.reprec = reprec = HookRecorder(pluginmanager)
+ self.request.addfinalizer(reprec.finish_recording)
+ return reprec
+
+ def chdir(self):
+ """Cd into the temporary directory.
+
+ This is done automatically upon instantiation.
+
+ """
+ old = self.tmpdir.chdir()
+ if not hasattr(self, '_olddir'):
+ self._olddir = old
+
+ def _makefile(self, ext, args, kwargs):
+ items = list(kwargs.items())
+ if args:
+ source = py.builtin._totext("\n").join(
+ map(py.builtin._totext, args)) + py.builtin._totext("\n")
+ basename = self.request.function.__name__
+ items.insert(0, (basename, source))
+ ret = None
+ for name, value in items:
+ p = self.tmpdir.join(name).new(ext=ext)
+ source = Source(value)
+ def my_totext(s, encoding="utf-8"):
+ if py.builtin._isbytes(s):
+ s = py.builtin._totext(s, encoding=encoding)
+ return s
+ source_unicode = "\n".join([my_totext(line) for line in source.lines])
+ source = py.builtin._totext(source_unicode)
+ content = source.strip().encode("utf-8") # + "\n"
+ #content = content.rstrip() + "\n"
+ p.write(content, "wb")
+ if ret is None:
+ ret = p
+ return ret
+
+ def makefile(self, ext, *args, **kwargs):
+ """Create a new file in the testdir.
+
+ ext: The extension the file should use, including the dot.
+ E.g. ".py".
+
+ args: All args will be treated as strings and joined using
+ newlines. The result will be written as contents to the
+ file. The name of the file will be based on the test
+ function requesting this fixture.
+ E.g. "testdir.makefile('.txt', 'line1', 'line2')"
+
+ kwargs: Each keyword is the name of a file, while the value of
+ it will be written as contents of the file.
+ E.g. "testdir.makefile('.ini', pytest='[pytest]\naddopts=-rs\n')"
+
+ """
+ return self._makefile(ext, args, kwargs)
+
+ def makeconftest(self, source):
+ """Write a contest.py file with 'source' as contents."""
+ return self.makepyfile(conftest=source)
+
+ def makeini(self, source):
+ """Write a tox.ini file with 'source' as contents."""
+ return self.makefile('.ini', tox=source)
+
+ def getinicfg(self, source):
+ """Return the pytest section from the tox.ini config file."""
+ p = self.makeini(source)
+ return py.iniconfig.IniConfig(p)['pytest']
+
+ def makepyfile(self, *args, **kwargs):
+ """Shortcut for .makefile() with a .py extension."""
+ return self._makefile('.py', args, kwargs)
+
+ def maketxtfile(self, *args, **kwargs):
+ """Shortcut for .makefile() with a .txt extension."""
+ return self._makefile('.txt', args, kwargs)
+
+ def syspathinsert(self, path=None):
+ """Prepend a directory to sys.path, defaults to :py:attr:`tmpdir`.
+
+ This is undone automatically after the test.
+ """
+ if path is None:
+ path = self.tmpdir
+ sys.path.insert(0, str(path))
+ # a call to syspathinsert() usually means that the caller
+ # wants to import some dynamically created files.
+ # with python3 we thus invalidate import caches.
+ self._possibly_invalidate_import_caches()
+
+ def _possibly_invalidate_import_caches(self):
+ # invalidate caches if we can (py33 and above)
+ try:
+ import importlib
+ except ImportError:
+ pass
+ else:
+ if hasattr(importlib, "invalidate_caches"):
+ importlib.invalidate_caches()
+
+ def mkdir(self, name):
+ """Create a new (sub)directory."""
+ return self.tmpdir.mkdir(name)
+
+ def mkpydir(self, name):
+ """Create a new python package.
+
+ This creates a (sub)directory with an empty ``__init__.py``
+ file so that it is recognised as a python package.
+
+ """
+ p = self.mkdir(name)
+ p.ensure("__init__.py")
+ return p
+
+ Session = Session
+ def getnode(self, config, arg):
+ """Return the collection node of a file.
+
+ :param config: :py:class:`_pytest.config.Config` instance, see
+ :py:meth:`parseconfig` and :py:meth:`parseconfigure` to
+ create the configuration.
+
+ :param arg: A :py:class:`py.path.local` instance of the file.
+
+ """
+ session = Session(config)
+ assert '::' not in str(arg)
+ p = py.path.local(arg)
+ config.hook.pytest_sessionstart(session=session)
+ res = session.perform_collect([str(p)], genitems=False)[0]
+ config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+ return res
+
+ def getpathnode(self, path):
+ """Return the collection node of a file.
+
+ This is like :py:meth:`getnode` but uses
+ :py:meth:`parseconfigure` to create the (configured) py.test
+ Config instance.
+
+ :param path: A :py:class:`py.path.local` instance of the file.
+
+ """
+ config = self.parseconfigure(path)
+ session = Session(config)
+ x = session.fspath.bestrelpath(path)
+ config.hook.pytest_sessionstart(session=session)
+ res = session.perform_collect([x], genitems=False)[0]
+ config.hook.pytest_sessionfinish(session=session, exitstatus=EXIT_OK)
+ return res
+
+ def genitems(self, colitems):
+ """Generate all test items from a collection node.
+
+ This recurses into the collection node and returns a list of
+ all the test items contained within.
+
+ """
+ session = colitems[0].session
+ result = []
+ for colitem in colitems:
+ result.extend(session.genitems(colitem))
+ return result
+
+ def runitem(self, source):
+ """Run the "test_func" Item.
+
+ The calling test instance (the class which contains the test
+ method) must provide a ``.getrunner()`` method which should
+ return a runner which can run the test protocol for a single
+ item, like e.g. :py:func:`_pytest.runner.runtestprotocol`.
+
+ """
+ # used from runner functional tests
+ item = self.getitem(source)
+ # the test class where we are called from wants to provide the runner
+ testclassinstance = self.request.instance
+ runner = testclassinstance.getrunner()
+ return runner(item)
+
+ def inline_runsource(self, source, *cmdlineargs):
+ """Run a test module in process using ``pytest.main()``.
+
+ This run writes "source" into a temporary file and runs
+ ``pytest.main()`` on it, returning a :py:class:`HookRecorder`
+ instance for the result.
+
+ :param source: The source code of the test module.
+
+ :param cmdlineargs: Any extra command line arguments to use.
+
+ :return: :py:class:`HookRecorder` instance of the result.
+
+ """
+ p = self.makepyfile(source)
+ l = list(cmdlineargs) + [p]
+ return self.inline_run(*l)
+
+ def inline_genitems(self, *args):
+ """Run ``pytest.main(['--collectonly'])`` in-process.
+
+ Returns a tuple of the collected items and a
+ :py:class:`HookRecorder` instance.
+
+ This runs the :py:func:`pytest.main` function to run all of
+ py.test inside the test process itself like
+ :py:meth:`inline_run`. However the return value is a tuple of
+ the collection items and a :py:class:`HookRecorder` instance.
+
+ """
+ rec = self.inline_run("--collect-only", *args)
+ items = [x.item for x in rec.getcalls("pytest_itemcollected")]
+ return items, rec
+
+ def inline_run(self, *args, **kwargs):
+ """Run ``pytest.main()`` in-process, returning a HookRecorder.
+
+ This runs the :py:func:`pytest.main` function to run all of
+ py.test inside the test process itself. This means it can
+ return a :py:class:`HookRecorder` instance which gives more
+ detailed results from the run than can be obtained by matching
+ stdout/stderr from :py:meth:`runpytest`.
+
+ :param args: Any command line arguments to pass to
+ :py:func:`pytest.main`.
+
+ :param plugin: (keyword-only) Extra plugin instances the
+ ``pytest.main()`` instance should use.
+
+ :return: A :py:class:`HookRecorder` instance.
+
+ """
+ rec = []
+ class Collect:
+ def pytest_configure(x, config):
+ rec.append(self.make_hook_recorder(config.pluginmanager))
+
+ plugins = kwargs.get("plugins") or []
+ plugins.append(Collect())
+ ret = pytest.main(list(args), plugins=plugins)
+ self.delete_loaded_modules()
+ if len(rec) == 1:
+ reprec = rec.pop()
+ else:
+ class reprec:
+ pass
+ reprec.ret = ret
+
+ # typically we reraise keyboard interrupts from the child run
+ # because it's our user requesting interruption of the testing
+ if ret == 2 and not kwargs.get("no_reraise_ctrlc"):
+ calls = reprec.getcalls("pytest_keyboard_interrupt")
+ if calls and calls[-1].excinfo.type == KeyboardInterrupt:
+ raise KeyboardInterrupt()
+ return reprec
+
+ def runpytest_inprocess(self, *args, **kwargs):
+ """ Return result of running pytest in-process, providing a similar
+ interface to what self.runpytest() provides. """
+ if kwargs.get("syspathinsert"):
+ self.syspathinsert()
+ now = time.time()
+ capture = py.io.StdCapture()
+ try:
+ try:
+ reprec = self.inline_run(*args, **kwargs)
+ except SystemExit as e:
+ class reprec:
+ ret = e.args[0]
+ except Exception:
+ traceback.print_exc()
+ class reprec:
+ ret = 3
+ finally:
+ out, err = capture.reset()
+ sys.stdout.write(out)
+ sys.stderr.write(err)
+
+ res = RunResult(reprec.ret,
+ out.split("\n"), err.split("\n"),
+ time.time()-now)
+ res.reprec = reprec
+ return res
+
+ def runpytest(self, *args, **kwargs):
+ """ Run pytest inline or in a subprocess, depending on the command line
+ option "--runpytest" and return a :py:class:`RunResult`.
+
+ """
+ args = self._ensure_basetemp(args)
+ return self._runpytest_method(*args, **kwargs)
+
+ def _ensure_basetemp(self, args):
+ args = [str(x) for x in args]
+ for x in args:
+ if str(x).startswith('--basetemp'):
+ #print ("basedtemp exists: %s" %(args,))
+ break
+ else:
+ args.append("--basetemp=%s" % self.tmpdir.dirpath('basetemp'))
+ #print ("added basetemp: %s" %(args,))
+ return args
+
+ def parseconfig(self, *args):
+ """Return a new py.test Config instance from given commandline args.
+
+ This invokes the py.test bootstrapping code in _pytest.config
+ to create a new :py:class:`_pytest.core.PluginManager` and
+ call the pytest_cmdline_parse hook to create new
+ :py:class:`_pytest.config.Config` instance.
+
+ If :py:attr:`plugins` has been populated they should be plugin
+ modules which will be registered with the PluginManager.
+
+ """
+ args = self._ensure_basetemp(args)
+
+ import _pytest.config
+ config = _pytest.config._prepareconfig(args, self.plugins)
+ # we don't know what the test will do with this half-setup config
+ # object and thus we make sure it gets unconfigured properly in any
+ # case (otherwise capturing could still be active, for example)
+ self.request.addfinalizer(config._ensure_unconfigure)
+ return config
+
+ def parseconfigure(self, *args):
+ """Return a new py.test configured Config instance.
+
+ This returns a new :py:class:`_pytest.config.Config` instance
+ like :py:meth:`parseconfig`, but also calls the
+ pytest_configure hook.
+
+ """
+ config = self.parseconfig(*args)
+ config._do_configure()
+ self.request.addfinalizer(config._ensure_unconfigure)
+ return config
+
+ def getitem(self, source, funcname="test_func"):
+ """Return the test item for a test function.
+
+ This writes the source to a python file and runs py.test's
+ collection on the resulting module, returning the test item
+ for the requested function name.
+
+ :param source: The module source.
+
+ :param funcname: The name of the test function for which the
+ Item must be returned.
+
+ """
+ items = self.getitems(source)
+ for item in items:
+ if item.name == funcname:
+ return item
+ assert 0, "%r item not found in module:\n%s\nitems: %s" %(
+ funcname, source, items)
+
+ def getitems(self, source):
+ """Return all test items collected from the module.
+
+ This writes the source to a python file and runs py.test's
+ collection on the resulting module, returning all test items
+ contained within.
+
+ """
+ modcol = self.getmodulecol(source)
+ return self.genitems([modcol])
+
+ def getmodulecol(self, source, configargs=(), withinit=False):
+ """Return the module collection node for ``source``.
+
+ This writes ``source`` to a file using :py:meth:`makepyfile`
+ and then runs the py.test collection on it, returning the
+ collection node for the test module.
+
+ :param source: The source code of the module to collect.
+
+ :param configargs: Any extra arguments to pass to
+ :py:meth:`parseconfigure`.
+
+ :param withinit: Whether to also write a ``__init__.py`` file
+ to the temporary directory to ensure it is a package.
+
+ """
+ kw = {self.request.function.__name__: Source(source).strip()}
+ path = self.makepyfile(**kw)
+ if withinit:
+ self.makepyfile(__init__ = "#")
+ self.config = config = self.parseconfigure(path, *configargs)
+ node = self.getnode(config, path)
+ return node
+
+ def collect_by_name(self, modcol, name):
+ """Return the collection node for name from the module collection.
+
+ This will search a module collection node for a collection
+ node matching the given name.
+
+ :param modcol: A module collection node, see
+ :py:meth:`getmodulecol`.
+
+ :param name: The name of the node to return.
+
+ """
+ for colitem in modcol._memocollect():
+ if colitem.name == name:
+ return colitem
+
+ def popen(self, cmdargs, stdout, stderr, **kw):
+ """Invoke subprocess.Popen.
+
+ This calls subprocess.Popen making sure the current working
+ directory is in the PYTHONPATH.
+
+ You probably want to use :py:meth:`run` instead.
+
+ """
+ env = os.environ.copy()
+ env['PYTHONPATH'] = os.pathsep.join(filter(None, [
+ str(os.getcwd()), env.get('PYTHONPATH', '')]))
+ kw['env'] = env
+ return subprocess.Popen(cmdargs,
+ stdout=stdout, stderr=stderr, **kw)
+
+ def run(self, *cmdargs):
+ """Run a command with arguments.
+
+ Run a process using subprocess.Popen saving the stdout and
+ stderr.
+
+ Returns a :py:class:`RunResult`.
+
+ """
+ return self._run(*cmdargs)
+
+ def _run(self, *cmdargs):
+ cmdargs = [str(x) for x in cmdargs]
+ p1 = self.tmpdir.join("stdout")
+ p2 = self.tmpdir.join("stderr")
+ print_("running:", ' '.join(cmdargs))
+ print_(" in:", str(py.path.local()))
+ f1 = codecs.open(str(p1), "w", encoding="utf8")
+ f2 = codecs.open(str(p2), "w", encoding="utf8")
+ try:
+ now = time.time()
+ popen = self.popen(cmdargs, stdout=f1, stderr=f2,
+ close_fds=(sys.platform != "win32"))
+ ret = popen.wait()
+ finally:
+ f1.close()
+ f2.close()
+ f1 = codecs.open(str(p1), "r", encoding="utf8")
+ f2 = codecs.open(str(p2), "r", encoding="utf8")
+ try:
+ out = f1.read().splitlines()
+ err = f2.read().splitlines()
+ finally:
+ f1.close()
+ f2.close()
+ self._dump_lines(out, sys.stdout)
+ self._dump_lines(err, sys.stderr)
+ return RunResult(ret, out, err, time.time()-now)
+
+ def _dump_lines(self, lines, fp):
+ try:
+ for line in lines:
+ py.builtin.print_(line, file=fp)
+ except UnicodeEncodeError:
+ print("couldn't print to %s because of encoding" % (fp,))
+
+ def _getpytestargs(self):
+ # we cannot use "(sys.executable,script)"
+ # because on windows the script is e.g. a py.test.exe
+ return (sys.executable, _pytest_fullpath,) # noqa
+
+ def runpython(self, script):
+ """Run a python script using sys.executable as interpreter.
+
+ Returns a :py:class:`RunResult`.
+ """
+ return self.run(sys.executable, script)
+
+ def runpython_c(self, command):
+ """Run python -c "command", return a :py:class:`RunResult`."""
+ return self.run(sys.executable, "-c", command)
+
+ def runpytest_subprocess(self, *args, **kwargs):
+ """Run py.test as a subprocess with given arguments.
+
+ Any plugins added to the :py:attr:`plugins` list will be added
+ using the ``-p`` command line option. Additionally
+ ``--basetemp`` is used to put any temporary files and directories
+ in a numbered directory prefixed with "runpytest-" so they do
+ not conflict with the normal numbered pytest location for
+ temporary files and directories.
+
+ Returns a :py:class:`RunResult`.
+
+ """
+ p = py.path.local.make_numbered_dir(prefix="runpytest-",
+ keep=None, rootdir=self.tmpdir)
+ args = ('--basetemp=%s' % p, ) + args
+ #for x in args:
+ # if '--confcutdir' in str(x):
+ # break
+ #else:
+ # pass
+ # args = ('--confcutdir=.',) + args
+ plugins = [x for x in self.plugins if isinstance(x, str)]
+ if plugins:
+ args = ('-p', plugins[0]) + args
+ args = self._getpytestargs() + args
+ return self.run(*args)
+
+ def spawn_pytest(self, string, expect_timeout=10.0):
+ """Run py.test using pexpect.
+
+ This makes sure to use the right py.test and sets up the
+ temporary directory locations.
+
+ The pexpect child is returned.
+
+ """
+ basetemp = self.tmpdir.mkdir("pexpect")
+ invoke = " ".join(map(str, self._getpytestargs()))
+ cmd = "%s --basetemp=%s %s" % (invoke, basetemp, string)
+ return self.spawn(cmd, expect_timeout=expect_timeout)
+
+ def spawn(self, cmd, expect_timeout=10.0):
+ """Run a command using pexpect.
+
+ The pexpect child is returned.
+ """
+ pexpect = pytest.importorskip("pexpect", "3.0")
+ if hasattr(sys, 'pypy_version_info') and '64' in platform.machine():
+ pytest.skip("pypy-64 bit not supported")
+ if sys.platform == "darwin":
+ pytest.xfail("pexpect does not work reliably on darwin?!")
+ if sys.platform.startswith("freebsd"):
+ pytest.xfail("pexpect does not work reliably on freebsd")
+ logfile = self.tmpdir.join("spawn.out").open("wb")
+ child = pexpect.spawn(cmd, logfile=logfile)
+ self.request.addfinalizer(logfile.close)
+ child.timeout = expect_timeout
+ return child
+
+def getdecoded(out):
+ try:
+ return out.decode("utf-8")
+ except UnicodeDecodeError:
+ return "INTERNAL not-utf8-decodeable, truncated string:\n%s" % (
+ py.io.saferepr(out),)
+
+
+class LineComp:
+ def __init__(self):
+ self.stringio = py.io.TextIO()
+
+ def assert_contains_lines(self, lines2):
+ """ assert that lines2 are contained (linearly) in lines1.
+ return a list of extralines found.
+ """
+ __tracebackhide__ = True
+ val = self.stringio.getvalue()
+ self.stringio.truncate(0)
+ self.stringio.seek(0)
+ lines1 = val.split("\n")
+ return LineMatcher(lines1).fnmatch_lines(lines2)
+
+
+class LineMatcher:
+ """Flexible matching of text.
+
+ This is a convenience class to test large texts like the output of
+ commands.
+
+ The constructor takes a list of lines without their trailing
+ newlines, i.e. ``text.splitlines()``.
+
+ """
+
+ def __init__(self, lines):
+ self.lines = lines
+
+ def str(self):
+ """Return the entire original text."""
+ return "\n".join(self.lines)
+
+ def _getlines(self, lines2):
+ if isinstance(lines2, str):
+ lines2 = Source(lines2)
+ if isinstance(lines2, Source):
+ lines2 = lines2.strip().lines
+ return lines2
+
+ def fnmatch_lines_random(self, lines2):
+ """Check lines exist in the output.
+
+ The argument is a list of lines which have to occur in the
+ output, in any order. Each line can contain glob wildcards.
+
+ """
+ lines2 = self._getlines(lines2)
+ for line in lines2:
+ for x in self.lines:
+ if line == x or fnmatch(x, line):
+ print_("matched: ", repr(line))
+ break
+ else:
+ raise ValueError("line %r not found in output" % line)
+
+ def get_lines_after(self, fnline):
+ """Return all lines following the given line in the text.
+
+ The given line can contain glob wildcards.
+ """
+ for i, line in enumerate(self.lines):
+ if fnline == line or fnmatch(line, fnline):
+ return self.lines[i+1:]
+ raise ValueError("line %r not found in output" % fnline)
+
+ def fnmatch_lines(self, lines2):
+ """Search the text for matching lines.
+
+ The argument is a list of lines which have to match and can
+ use glob wildcards. If they do not match, pytest.fail() is
+ called. The matches and non-matches are also printed on
+ stdout.
+
+ """
+ def show(arg1, arg2):
+ py.builtin.print_(arg1, arg2, file=sys.stderr)
+ lines2 = self._getlines(lines2)
+ lines1 = self.lines[:]
+ nextline = None
+ extralines = []
+ __tracebackhide__ = True
+ for line in lines2:
+ nomatchprinted = False
+ while lines1:
+ nextline = lines1.pop(0)
+ if line == nextline:
+ show("exact match:", repr(line))
+ break
+ elif fnmatch(nextline, line):
+ show("fnmatch:", repr(line))
+ show(" with:", repr(nextline))
+ break
+ else:
+ if not nomatchprinted:
+ show("nomatch:", repr(line))
+ nomatchprinted = True
+ show(" and:", repr(nextline))
+ extralines.append(nextline)
+ else:
+ pytest.fail("remains unmatched: %r, see stderr" % (line,))
diff --git a/python/pytest/_pytest/python.py b/python/pytest/_pytest/python.py
new file mode 100644
index 000000000..21d78aea3
--- /dev/null
+++ b/python/pytest/_pytest/python.py
@@ -0,0 +1,2300 @@
+""" Python test discovery, setup and run of test functions. """
+import fnmatch
+import functools
+import inspect
+import re
+import types
+import sys
+
+import py
+import pytest
+from _pytest._code.code import TerminalRepr
+from _pytest.mark import MarkDecorator, MarkerError
+
+try:
+ import enum
+except ImportError: # pragma: no cover
+ # Only available in Python 3.4+ or as a backport
+ enum = None
+
+import _pytest
+import _pytest._pluggy as pluggy
+
+cutdir2 = py.path.local(_pytest.__file__).dirpath()
+cutdir1 = py.path.local(pluggy.__file__.rstrip("oc"))
+
+
+NoneType = type(None)
+NOTSET = object()
+isfunction = inspect.isfunction
+isclass = inspect.isclass
+callable = py.builtin.callable
+# used to work around a python2 exception info leak
+exc_clear = getattr(sys, 'exc_clear', lambda: None)
+# The type of re.compile objects is not exposed in Python.
+REGEX_TYPE = type(re.compile(''))
+
+_PY3 = sys.version_info > (3, 0)
+_PY2 = not _PY3
+
+
+if hasattr(inspect, 'signature'):
+ def _format_args(func):
+ return str(inspect.signature(func))
+else:
+ def _format_args(func):
+ return inspect.formatargspec(*inspect.getargspec(func))
+
+if sys.version_info[:2] == (2, 6):
+ def isclass(object):
+ """ Return true if the object is a class. Overrides inspect.isclass for
+ python 2.6 because it will return True for objects which always return
+ something on __getattr__ calls (see #1035).
+ Backport of https://hg.python.org/cpython/rev/35bf8f7a8edc
+ """
+ return isinstance(object, (type, types.ClassType))
+
+def _has_positional_arg(func):
+ return func.__code__.co_argcount
+
+
+def filter_traceback(entry):
+ # entry.path might sometimes return a str object when the entry
+ # points to dynamically generated code
+ # see https://bitbucket.org/pytest-dev/py/issues/71
+ raw_filename = entry.frame.code.raw.co_filename
+ is_generated = '<' in raw_filename and '>' in raw_filename
+ if is_generated:
+ return False
+ # entry.path might point to a non-existent file, in which case it will
+ # also return a str object. see #1133
+ p = py.path.local(entry.path)
+ return p != cutdir1 and not p.relto(cutdir2)
+
+
+def get_real_func(obj):
+ """ gets the real function object of the (possibly) wrapped object by
+ functools.wraps or functools.partial.
+ """
+ while hasattr(obj, "__wrapped__"):
+ obj = obj.__wrapped__
+ if isinstance(obj, functools.partial):
+ obj = obj.func
+ return obj
+
+def getfslineno(obj):
+ # xxx let decorators etc specify a sane ordering
+ obj = get_real_func(obj)
+ if hasattr(obj, 'place_as'):
+ obj = obj.place_as
+ fslineno = _pytest._code.getfslineno(obj)
+ assert isinstance(fslineno[1], int), obj
+ return fslineno
+
+def getimfunc(func):
+ try:
+ return func.__func__
+ except AttributeError:
+ try:
+ return func.im_func
+ except AttributeError:
+ return func
+
+def safe_getattr(object, name, default):
+ """ Like getattr but return default upon any Exception.
+
+ Attribute access can potentially fail for 'evil' Python objects.
+ See issue214
+ """
+ try:
+ return getattr(object, name, default)
+ except Exception:
+ return default
+
+
+class FixtureFunctionMarker:
+ def __init__(self, scope, params,
+ autouse=False, yieldctx=False, ids=None):
+ self.scope = scope
+ self.params = params
+ self.autouse = autouse
+ self.yieldctx = yieldctx
+ self.ids = ids
+
+ def __call__(self, function):
+ if isclass(function):
+ raise ValueError(
+ "class fixtures not supported (may be in the future)")
+ function._pytestfixturefunction = self
+ return function
+
+
+def fixture(scope="function", params=None, autouse=False, ids=None):
+ """ (return a) decorator to mark a fixture factory function.
+
+ This decorator can be used (with or without parameters) to define
+ a fixture function. The name of the fixture function can later be
+ referenced to cause its invocation ahead of running tests: test
+ modules or classes can use the pytest.mark.usefixtures(fixturename)
+ marker. Test functions can directly use fixture names as input
+ arguments in which case the fixture instance returned from the fixture
+ function will be injected.
+
+ :arg scope: the scope for which this fixture is shared, one of
+ "function" (default), "class", "module", "session".
+
+ :arg params: an optional list of parameters which will cause multiple
+ invocations of the fixture function and all of the tests
+ using it.
+
+ :arg autouse: if True, the fixture func is activated for all tests that
+ can see it. If False (the default) then an explicit
+ reference is needed to activate the fixture.
+
+ :arg ids: list of string ids each corresponding to the params
+ so that they are part of the test id. If no ids are provided
+ they will be generated automatically from the params.
+
+ """
+ if callable(scope) and params is None and autouse == False:
+ # direct decoration
+ return FixtureFunctionMarker(
+ "function", params, autouse)(scope)
+ if params is not None and not isinstance(params, (list, tuple)):
+ params = list(params)
+ return FixtureFunctionMarker(scope, params, autouse, ids=ids)
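+
+# Illustrative sketch (hypothetical test module, not part of this file): a
+# minimal use of the fixture() decorator defined above.
+#
+#     import pytest
+#
+#     @pytest.fixture(scope="module", params=[1, 2])
+#     def number(request):
+#         return request.param
+#
+#     def test_number(number):
+#         assert number in (1, 2)
+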
+
+def yield_fixture(scope="function", params=None, autouse=False, ids=None):
+ """ (return a) decorator to mark a yield-fixture factory function
+ (EXPERIMENTAL).
+
+ This takes the same arguments as :py:func:`pytest.fixture` but
+ expects a fixture function to use a ``yield`` instead of a ``return``
+ statement to provide a fixture. See
+ http://pytest.org/en/latest/yieldfixture.html for more info.
+ """
+ if callable(scope) and params is None and autouse == False:
+ # direct decoration
+ return FixtureFunctionMarker(
+ "function", params, autouse, yieldctx=True)(scope)
+ else:
+ return FixtureFunctionMarker(scope, params, autouse,
+ yieldctx=True, ids=ids)
+
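+# Illustrative sketch (hypothetical test module): a yield_fixture runs the
+# code before ``yield`` as setup and the code after it as teardown.
+#
+#     @pytest.yield_fixture
+#     def resource():
+#         handle = open("data.txt")   # hypothetical setup
+#         yield handle
+#         handle.close()              # teardown, runs after the test
+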
+defaultfuncargprefixmarker = fixture()
+
+def pyobj_property(name):
+ def get(self):
+ node = self.getparent(getattr(pytest, name))
+ if node is not None:
+ return node.obj
+ doc = "python %s object this node was collected from (can be None)." % (
+ name.lower(),)
+ return property(get, None, None, doc)
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption('--fixtures', '--funcargs',
+ action="store_true", dest="showfixtures", default=False,
+ help="show available fixtures, sorted by plugin appearance")
+ parser.addini("usefixtures", type="args", default=[],
+ help="list of default fixtures to be used with this project")
+ parser.addini("python_files", type="args",
+ default=['test_*.py', '*_test.py'],
+ help="glob-style file patterns for Python test module discovery")
+ parser.addini("python_classes", type="args", default=["Test",],
+ help="prefixes or glob names for Python test class discovery")
+ parser.addini("python_functions", type="args", default=["test",],
+ help="prefixes or glob names for Python test function and "
+ "method discovery")
+
+ group.addoption("--import-mode", default="prepend",
+ choices=["prepend", "append"], dest="importmode",
+ help="prepend/append to sys.path when importing test modules, "
+ "default is to prepend.")
+
+
+def pytest_cmdline_main(config):
+ if config.option.showfixtures:
+ showfixtures(config)
+ return 0
+
+
+def pytest_generate_tests(metafunc):
+ # those alternative spellings are common - raise a specific error to alert
+ # the user
+ alt_spellings = ['parameterize', 'parametrise', 'parameterise']
+ for attr in alt_spellings:
+ if hasattr(metafunc.function, attr):
+ msg = "{0} has '{1}', spelling should be 'parametrize'"
+ raise MarkerError(msg.format(metafunc.function.__name__, attr))
+ try:
+ markers = metafunc.function.parametrize
+ except AttributeError:
+ return
+ for marker in markers:
+ metafunc.parametrize(*marker.args, **marker.kwargs)
+
+def pytest_configure(config):
+ config.addinivalue_line("markers",
+ "parametrize(argnames, argvalues): call a test function multiple "
+ "times passing in different arguments in turn. argvalues generally "
+ "needs to be a list of values if argnames specifies only one name "
+ "or a list of tuples of values if argnames specifies multiple names. "
+ "Example: @parametrize('arg1', [1,2]) would lead to two calls of the "
+ "decorated test function, one with arg1=1 and another with arg1=2."
+ "see http://pytest.org/latest/parametrize.html for more info and "
+ "examples."
+ )
+ config.addinivalue_line("markers",
+ "usefixtures(fixturename1, fixturename2, ...): mark tests as needing "
+ "all of the specified fixtures. see http://pytest.org/latest/fixture.html#usefixtures "
+ )
+
+def pytest_sessionstart(session):
+ session._fixturemanager = FixtureManager(session)
+
+@pytest.hookimpl(trylast=True)
+def pytest_namespace():
+ raises.Exception = pytest.fail.Exception
+ return {
+ 'fixture': fixture,
+ 'yield_fixture': yield_fixture,
+ 'raises' : raises,
+ 'collect': {
+ 'Module': Module, 'Class': Class, 'Instance': Instance,
+ 'Function': Function, 'Generator': Generator,
+ '_fillfuncargs': fillfixtures}
+ }
+
+@fixture(scope="session")
+def pytestconfig(request):
+ """ the pytest config object with access to command line opts."""
+ return request.config
+
+
+@pytest.hookimpl(trylast=True)
+def pytest_pyfunc_call(pyfuncitem):
+ testfunction = pyfuncitem.obj
+ if pyfuncitem._isyieldedfunction():
+ testfunction(*pyfuncitem._args)
+ else:
+ funcargs = pyfuncitem.funcargs
+ testargs = {}
+ for arg in pyfuncitem._fixtureinfo.argnames:
+ testargs[arg] = funcargs[arg]
+ testfunction(**testargs)
+ return True
+
+def pytest_collect_file(path, parent):
+ ext = path.ext
+ if ext == ".py":
+ if not parent.session.isinitpath(path):
+ for pat in parent.config.getini('python_files'):
+ if path.fnmatch(pat):
+ break
+ else:
+ return
+ ihook = parent.session.gethookproxy(path)
+ return ihook.pytest_pycollect_makemodule(path=path, parent=parent)
+
+def pytest_pycollect_makemodule(path, parent):
+ return Module(path, parent)
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_pycollect_makeitem(collector, name, obj):
+ outcome = yield
+ res = outcome.get_result()
+ if res is not None:
+ raise StopIteration
+ # nothing was collected elsewhere, let's do it here
+ if isclass(obj):
+ if collector.istestclass(obj, name):
+ Class = collector._getcustomclass("Class")
+ outcome.force_result(Class(name, parent=collector))
+ elif collector.istestfunction(obj, name):
+ # mock seems to store unbound methods (issue473), normalize it
+ obj = getattr(obj, "__func__", obj)
+ # We need to try and unwrap the function if it's a functools.partial
+ # or a functools.wraps-decorated function.
+ # We mustn't if it's been wrapped with mock.patch (python 2 only)
+ if not (isfunction(obj) or isfunction(get_real_func(obj))):
+ collector.warn(code="C2", message=
+ "cannot collect %r because it is not a function."
+ % name, )
+ elif getattr(obj, "__test__", True):
+ if is_generator(obj):
+ res = Generator(name, parent=collector)
+ else:
+ res = list(collector._genfunctions(name, obj))
+ outcome.force_result(res)
+
+def is_generator(func):
+ try:
+ return _pytest._code.getrawcode(func).co_flags & 32 # generator function
+ except AttributeError: # builtin functions have no bytecode
+ # assume them to not be generators
+ return False
+
+class PyobjContext(object):
+ module = pyobj_property("Module")
+ cls = pyobj_property("Class")
+ instance = pyobj_property("Instance")
+
+class PyobjMixin(PyobjContext):
+ def obj():
+ def fget(self):
+ try:
+ return self._obj
+ except AttributeError:
+ self._obj = obj = self._getobj()
+ return obj
+ def fset(self, value):
+ self._obj = value
+ return property(fget, fset, None, "underlying python object")
+ obj = obj()
+
+ def _getobj(self):
+ return getattr(self.parent.obj, self.name)
+
+ def getmodpath(self, stopatmodule=True, includemodule=False):
+ """ return python path relative to the containing module. """
+ chain = self.listchain()
+ chain.reverse()
+ parts = []
+ for node in chain:
+ if isinstance(node, Instance):
+ continue
+ name = node.name
+ if isinstance(node, Module):
+ assert name.endswith(".py")
+ name = name[:-3]
+ if stopatmodule:
+ if includemodule:
+ parts.append(name)
+ break
+ parts.append(name)
+ parts.reverse()
+ s = ".".join(parts)
+ return s.replace(".[", "[")
+
+ def _getfslineno(self):
+ return getfslineno(self.obj)
+
+ def reportinfo(self):
+ # XXX caching?
+ obj = self.obj
+ compat_co_firstlineno = getattr(obj, 'compat_co_firstlineno', None)
+ if isinstance(compat_co_firstlineno, int):
+ # nose compatibility
+ fspath = sys.modules[obj.__module__].__file__
+ if fspath.endswith(".pyc"):
+ fspath = fspath[:-1]
+ lineno = compat_co_firstlineno
+ else:
+ fspath, lineno = getfslineno(obj)
+ modpath = self.getmodpath()
+ assert isinstance(lineno, int)
+ return fspath, lineno, modpath
+
+class PyCollector(PyobjMixin, pytest.Collector):
+
+ def funcnamefilter(self, name):
+ return self._matches_prefix_or_glob_option('python_functions', name)
+
+ def isnosetest(self, obj):
+ """ Look for the __test__ attribute, which is applied by the
+ @nose.tools.istest decorator
+ """
+ # We explicitly check for "is True" here to not mistakenly treat
+ # classes with a custom __getattr__ returning something truthy (like a
+ # function) as test classes.
+ return safe_getattr(obj, '__test__', False) is True
+
+ def classnamefilter(self, name):
+ return self._matches_prefix_or_glob_option('python_classes', name)
+
+ def istestfunction(self, obj, name):
+ return (
+ (self.funcnamefilter(name) or self.isnosetest(obj)) and
+ safe_getattr(obj, "__call__", False) and getfixturemarker(obj) is None
+ )
+
+ def istestclass(self, obj, name):
+ return self.classnamefilter(name) or self.isnosetest(obj)
+
+ def _matches_prefix_or_glob_option(self, option_name, name):
+ """
+ checks if the given name matches the prefix or glob-pattern defined
+ in ini configuration.
+ """
+ for option in self.config.getini(option_name):
+ if name.startswith(option):
+ return True
+ # check that name looks like a glob-string before calling fnmatch
+ # because this is called for every name in each collected module,
+ # and fnmatch is somewhat expensive to call
+ elif ('*' in option or '?' in option or '[' in option) and \
+ fnmatch.fnmatch(name, option):
+ return True
+ return False
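+
+ # Illustrative sketch (hypothetical ini configuration) of the matching
+ # performed above: with
+ #
+ #     [pytest]
+ #     python_classes = Test *Suite
+ #     python_functions = test check_*
+ #
+ # a class ``LoginSuite`` matches via the ``*Suite`` glob and a function
+ # ``check_login`` matches via the ``check_*`` glob; plain prefixes such
+ # as ``Test``/``test`` keep working as before.
+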
+
+ def collect(self):
+ if not getattr(self.obj, "__test__", True):
+ return []
+
+ # NB. we avoid random getattrs and peek in the __dict__ instead
+ # (XXX originally introduced from a PyPy need, still true?)
+ dicts = [getattr(self.obj, '__dict__', {})]
+ for basecls in inspect.getmro(self.obj.__class__):
+ dicts.append(basecls.__dict__)
+ seen = {}
+ l = []
+ for dic in dicts:
+ for name, obj in list(dic.items()):
+ if name in seen:
+ continue
+ seen[name] = True
+ res = self.makeitem(name, obj)
+ if res is None:
+ continue
+ if not isinstance(res, list):
+ res = [res]
+ l.extend(res)
+ l.sort(key=lambda item: item.reportinfo()[:2])
+ return l
+
+ def makeitem(self, name, obj):
+ #assert self.ihook.fspath == self.fspath, self
+ return self.ihook.pytest_pycollect_makeitem(
+ collector=self, name=name, obj=obj)
+
+ def _genfunctions(self, name, funcobj):
+ module = self.getparent(Module).obj
+ clscol = self.getparent(Class)
+ cls = clscol and clscol.obj or None
+ transfer_markers(funcobj, cls, module)
+ fm = self.session._fixturemanager
+ fixtureinfo = fm.getfixtureinfo(self, funcobj, cls)
+ metafunc = Metafunc(funcobj, fixtureinfo, self.config,
+ cls=cls, module=module)
+ methods = []
+ if hasattr(module, "pytest_generate_tests"):
+ methods.append(module.pytest_generate_tests)
+ if hasattr(cls, "pytest_generate_tests"):
+ methods.append(cls().pytest_generate_tests)
+ if methods:
+ self.ihook.pytest_generate_tests.call_extra(methods,
+ dict(metafunc=metafunc))
+ else:
+ self.ihook.pytest_generate_tests(metafunc=metafunc)
+
+ Function = self._getcustomclass("Function")
+ if not metafunc._calls:
+ yield Function(name, parent=self, fixtureinfo=fixtureinfo)
+ else:
+ # add funcargs() as fixturedefs to fixtureinfo.arg2fixturedefs
+ add_funcarg_pseudo_fixture_def(self, metafunc, fm)
+
+ for callspec in metafunc._calls:
+ subname = "%s[%s]" %(name, callspec.id)
+ yield Function(name=subname, parent=self,
+ callspec=callspec, callobj=funcobj,
+ fixtureinfo=fixtureinfo,
+ keywords={callspec.id:True})
+
+def add_funcarg_pseudo_fixture_def(collector, metafunc, fixturemanager):
+ # this function will transform all collected calls to a function
+ # if they use direct funcargs (i.e. direct parametrization)
+ # because we want later test execution to be able to rely on
+ # an existing FixtureDef structure for all arguments.
+ # XXX we can probably avoid this algorithm if we modify CallSpec2
+ # to directly care for creating the fixturedefs within its methods.
+ if not metafunc._calls[0].funcargs:
+ return # this function call does not have direct parametrization
+ # collect funcargs of all callspecs into a list of values
+ arg2params = {}
+ arg2scope = {}
+ for callspec in metafunc._calls:
+ for argname, argvalue in callspec.funcargs.items():
+ assert argname not in callspec.params
+ callspec.params[argname] = argvalue
+ arg2params_list = arg2params.setdefault(argname, [])
+ callspec.indices[argname] = len(arg2params_list)
+ arg2params_list.append(argvalue)
+ if argname not in arg2scope:
+ scopenum = callspec._arg2scopenum.get(argname,
+ scopenum_function)
+ arg2scope[argname] = scopes[scopenum]
+ callspec.funcargs.clear()
+
+ # register artificial FixtureDef's so that later at test execution
+ # time we can rely on a proper FixtureDef to exist for fixture setup.
+ arg2fixturedefs = metafunc._arg2fixturedefs
+ for argname, valuelist in arg2params.items():
+ # if we have a scope that is higher than function we need
+ # to make sure we only ever create an according fixturedef on
+ # a per-scope basis. We thus store and cache the fixturedef on the
+ # node related to the scope.
+ scope = arg2scope[argname]
+ node = None
+ if scope != "function":
+ node = get_scope_node(collector, scope)
+ if node is None:
+ assert scope == "class" and isinstance(collector, Module)
+ # use module-level collector for class-scope (for now)
+ node = collector
+ if node and argname in node._name2pseudofixturedef:
+ arg2fixturedefs[argname] = [node._name2pseudofixturedef[argname]]
+ else:
+ fixturedef = FixtureDef(fixturemanager, '', argname,
+ get_direct_param_fixture_func,
+ arg2scope[argname],
+ valuelist, False, False)
+ arg2fixturedefs[argname] = [fixturedef]
+ if node is not None:
+ node._name2pseudofixturedef[argname] = fixturedef
+
+
+def get_direct_param_fixture_func(request):
+ return request.param
+
+class FuncFixtureInfo:
+ def __init__(self, argnames, names_closure, name2fixturedefs):
+ self.argnames = argnames
+ self.names_closure = names_closure
+ self.name2fixturedefs = name2fixturedefs
+
+
+def _marked(func, mark):
+ """ Returns True if :func: is already marked with :mark:, False otherwise.
+ This can happen if the marker is applied to a class and the test file is
+ invoked more than once.
+ """
+ try:
+ func_mark = getattr(func, mark.name)
+ except AttributeError:
+ return False
+ return mark.args == func_mark.args and mark.kwargs == func_mark.kwargs
+
+
+def transfer_markers(funcobj, cls, mod):
+ # XXX this should rather be code in the mark plugin or the mark
+ # plugin should merge with the python plugin.
+ for holder in (cls, mod):
+ try:
+ pytestmark = holder.pytestmark
+ except AttributeError:
+ continue
+ if isinstance(pytestmark, list):
+ for mark in pytestmark:
+ if not _marked(funcobj, mark):
+ mark(funcobj)
+ else:
+ if not _marked(funcobj, pytestmark):
+ pytestmark(funcobj)
+
+class Module(pytest.File, PyCollector):
+ """ Collector for test classes and functions. """
+ def _getobj(self):
+ return self._memoizedcall('_obj', self._importtestmodule)
+
+ def collect(self):
+ self.session._fixturemanager.parsefactories(self)
+ return super(Module, self).collect()
+
+ def _importtestmodule(self):
+ # we assume we are only called once per module
+ importmode = self.config.getoption("--import-mode")
+ try:
+ mod = self.fspath.pyimport(ensuresyspath=importmode)
+ except SyntaxError:
+ raise self.CollectError(
+ _pytest._code.ExceptionInfo().getrepr(style="short"))
+ except self.fspath.ImportMismatchError:
+ e = sys.exc_info()[1]
+ raise self.CollectError(
+ "import file mismatch:\n"
+ "imported module %r has this __file__ attribute:\n"
+ " %s\n"
+ "which is not the same as the test file we want to collect:\n"
+ " %s\n"
+ "HINT: remove __pycache__ / .pyc files and/or use a "
+ "unique basename for your test file modules"
+ % e.args
+ )
+ #print "imported test module", mod
+ self.config.pluginmanager.consider_module(mod)
+ return mod
+
+ def setup(self):
+ setup_module = xunitsetup(self.obj, "setUpModule")
+ if setup_module is None:
+ setup_module = xunitsetup(self.obj, "setup_module")
+ if setup_module is not None:
+ #XXX: nose compat hack, move to nose plugin
+ # if it takes a positional arg, it's probably a pytest style one
+ # so we pass the current module object
+ if _has_positional_arg(setup_module):
+ setup_module(self.obj)
+ else:
+ setup_module()
+ fin = getattr(self.obj, 'tearDownModule', None)
+ if fin is None:
+ fin = getattr(self.obj, 'teardown_module', None)
+ if fin is not None:
+ #XXX: nose compat hack, move to nose plugin
+ # if it takes a positional arg, it's probably a pytest style one
+ # so we pass the current module object
+ if _has_positional_arg(fin):
+ finalizer = lambda: fin(self.obj)
+ else:
+ finalizer = fin
+ self.addfinalizer(finalizer)
+
+
+class Class(PyCollector):
+ """ Collector for test methods. """
+ def collect(self):
+ if hasinit(self.obj):
+ self.warn("C1", "cannot collect test class %r because it has a "
+ "__init__ constructor" % self.obj.__name__)
+ return []
+ return [self._getcustomclass("Instance")(name="()", parent=self)]
+
+ def setup(self):
+ setup_class = xunitsetup(self.obj, 'setup_class')
+ if setup_class is not None:
+ setup_class = getattr(setup_class, 'im_func', setup_class)
+ setup_class = getattr(setup_class, '__func__', setup_class)
+ setup_class(self.obj)
+
+ fin_class = getattr(self.obj, 'teardown_class', None)
+ if fin_class is not None:
+ fin_class = getattr(fin_class, 'im_func', fin_class)
+ fin_class = getattr(fin_class, '__func__', fin_class)
+ self.addfinalizer(lambda: fin_class(self.obj))
+
+class Instance(PyCollector):
+ def _getobj(self):
+ obj = self.parent.obj()
+ return obj
+
+ def collect(self):
+ self.session._fixturemanager.parsefactories(self)
+ return super(Instance, self).collect()
+
+ def newinstance(self):
+ self.obj = self._getobj()
+ return self.obj
+
+class FunctionMixin(PyobjMixin):
+ """ mixin for the code common to Function and Generator.
+ """
+
+ def setup(self):
+ """ perform setup for this test function. """
+ if hasattr(self, '_preservedparent'):
+ obj = self._preservedparent
+ elif isinstance(self.parent, Instance):
+ obj = self.parent.newinstance()
+ self.obj = self._getobj()
+ else:
+ obj = self.parent.obj
+ if inspect.ismethod(self.obj):
+ setup_name = 'setup_method'
+ teardown_name = 'teardown_method'
+ else:
+ setup_name = 'setup_function'
+ teardown_name = 'teardown_function'
+ setup_func_or_method = xunitsetup(obj, setup_name)
+ if setup_func_or_method is not None:
+ setup_func_or_method(self.obj)
+ fin = getattr(obj, teardown_name, None)
+ if fin is not None:
+ self.addfinalizer(lambda: fin(self.obj))
+
+ def _prunetraceback(self, excinfo):
+ if hasattr(self, '_obj') and not self.config.option.fulltrace:
+ code = _pytest._code.Code(get_real_func(self.obj))
+ path, firstlineno = code.path, code.firstlineno
+ traceback = excinfo.traceback
+ ntraceback = traceback.cut(path=path, firstlineno=firstlineno)
+ if ntraceback == traceback:
+ ntraceback = ntraceback.cut(path=path)
+ if ntraceback == traceback:
+ #ntraceback = ntraceback.cut(excludepath=cutdir2)
+ ntraceback = ntraceback.filter(filter_traceback)
+ if not ntraceback:
+ ntraceback = traceback
+
+ excinfo.traceback = ntraceback.filter()
+ # issue364: mark all but first and last frames to
+ # only show a single-line message for each frame
+ if self.config.option.tbstyle == "auto":
+ if len(excinfo.traceback) > 2:
+ for entry in excinfo.traceback[1:-1]:
+ entry.set_repr_style('short')
+
+ def _repr_failure_py(self, excinfo, style="long"):
+ if excinfo.errisinstance(pytest.fail.Exception):
+ if not excinfo.value.pytrace:
+ return py._builtin._totext(excinfo.value)
+ return super(FunctionMixin, self)._repr_failure_py(excinfo,
+ style=style)
+
+ def repr_failure(self, excinfo, outerr=None):
+ assert outerr is None, "XXX outerr usage is deprecated"
+ style = self.config.option.tbstyle
+ if style == "auto":
+ style = "long"
+ return self._repr_failure_py(excinfo, style=style)
+
+
+class Generator(FunctionMixin, PyCollector):
+ def collect(self):
+ # test generators are seen as collectors but they also
+ # invoke setup/teardown on popular request
+ # (induced by the common "test_*" naming shared with normal tests)
+ self.session._setupstate.prepare(self)
+ # see FunctionMixin.setup and test_setupstate_is_preserved_134
+ self._preservedparent = self.parent.obj
+ l = []
+ seen = {}
+ for i, x in enumerate(self.obj()):
+ name, call, args = self.getcallargs(x)
+ if not callable(call):
+ raise TypeError("%r yielded non callable test %r" %(self.obj, call,))
+ if name is None:
+ name = "[%d]" % i
+ else:
+ name = "['%s']" % name
+ if name in seen:
+ raise ValueError("%r generated tests with non-unique name %r" %(self, name))
+ seen[name] = True
+ l.append(self.Function(name, self, args=args, callobj=call))
+ return l
+
+ def getcallargs(self, obj):
+ if not isinstance(obj, (tuple, list)):
+ obj = (obj,)
+ # explicit naming
+ if isinstance(obj[0], py.builtin._basestring):
+ name = obj[0]
+ obj = obj[1:]
+ else:
+ name = None
+ call, args = obj[0], obj[1:]
+ return name, call, args
+
+
+def hasinit(obj):
+ init = getattr(obj, '__init__', None)
+ if init:
+ if init != object.__init__:
+ return True
+
+
+
+def fillfixtures(function):
+ """ fill missing funcargs for a test function. """
+ try:
+ request = function._request
+ except AttributeError:
+ # XXX this special code path is only expected to execute
+ # with the oejskit plugin. It uses classes with funcargs
+ # and we thus have to work a bit to allow this.
+ fm = function.session._fixturemanager
+ fi = fm.getfixtureinfo(function.parent, function.obj, None)
+ function._fixtureinfo = fi
+ request = function._request = FixtureRequest(function)
+ request._fillfixtures()
+ # prune out funcargs for jstests
+ newfuncargs = {}
+ for name in fi.argnames:
+ newfuncargs[name] = function.funcargs[name]
+ function.funcargs = newfuncargs
+ else:
+ request._fillfixtures()
+
+
+_notexists = object()
+
+class CallSpec2(object):
+ def __init__(self, metafunc):
+ self.metafunc = metafunc
+ self.funcargs = {}
+ self._idlist = []
+ self.params = {}
+ self._globalid = _notexists
+ self._globalid_args = set()
+ self._globalparam = _notexists
+ self._arg2scopenum = {} # used for sorting parametrized resources
+ self.keywords = {}
+ self.indices = {}
+
+ def copy(self, metafunc):
+ cs = CallSpec2(self.metafunc)
+ cs.funcargs.update(self.funcargs)
+ cs.params.update(self.params)
+ cs.keywords.update(self.keywords)
+ cs.indices.update(self.indices)
+ cs._arg2scopenum.update(self._arg2scopenum)
+ cs._idlist = list(self._idlist)
+ cs._globalid = self._globalid
+ cs._globalid_args = self._globalid_args
+ cs._globalparam = self._globalparam
+ return cs
+
+ def _checkargnotcontained(self, arg):
+ if arg in self.params or arg in self.funcargs:
+ raise ValueError("duplicate %r" %(arg,))
+
+ def getparam(self, name):
+ try:
+ return self.params[name]
+ except KeyError:
+ if self._globalparam is _notexists:
+ raise ValueError(name)
+ return self._globalparam
+
+ @property
+ def id(self):
+ return "-".join(map(str, filter(None, self._idlist)))
+
+ def setmulti(self, valtypes, argnames, valset, id, keywords, scopenum,
+ param_index):
+ for arg,val in zip(argnames, valset):
+ self._checkargnotcontained(arg)
+ valtype_for_arg = valtypes[arg]
+ getattr(self, valtype_for_arg)[arg] = val
+ self.indices[arg] = param_index
+ self._arg2scopenum[arg] = scopenum
+ self._idlist.append(id)
+ self.keywords.update(keywords)
+
+ def setall(self, funcargs, id, param):
+ for x in funcargs:
+ self._checkargnotcontained(x)
+ self.funcargs.update(funcargs)
+ if id is not _notexists:
+ self._idlist.append(id)
+ if param is not _notexists:
+ assert self._globalparam is _notexists
+ self._globalparam = param
+ for arg in funcargs:
+ self._arg2scopenum[arg] = scopenum_function
+
+
+class FuncargnamesCompatAttr:
+ """ helper class so that Metafunc, Function and FixtureRequest
+ don't need to each define the "funcargnames" compatibility attribute.
+ """
+ @property
+ def funcargnames(self):
+ """ alias attribute for ``fixturenames`` for pre-2.3 compatibility"""
+ return self.fixturenames
+
+class Metafunc(FuncargnamesCompatAttr):
+ """
+ Metafunc objects are passed to the ``pytest_generate_tests`` hook.
+ They help to inspect a test function and to generate tests according to
+ test configuration or values specified in the class or module where a
+ test function is defined.
+
+ :ivar fixturenames: set of fixture names required by the test function
+
+ :ivar function: underlying python test function
+
+ :ivar cls: class object where the test function is defined in or ``None``.
+
+ :ivar module: the module object where the test function is defined in.
+
+ :ivar config: access to the :class:`_pytest.config.Config` object for the
+ test session.
+
+ :ivar funcargnames:
+ .. deprecated:: 2.3
+ Use ``fixturenames`` instead.
+ """
+ def __init__(self, function, fixtureinfo, config, cls=None, module=None):
+ self.config = config
+ self.module = module
+ self.function = function
+ self.fixturenames = fixtureinfo.names_closure
+ self._arg2fixturedefs = fixtureinfo.name2fixturedefs
+ self.cls = cls
+ self._calls = []
+ self._ids = py.builtin.set()
+
+ def parametrize(self, argnames, argvalues, indirect=False, ids=None,
+ scope=None):
+ """ Add new invocations to the underlying test function using the list
+ of argvalues for the given argnames. Parametrization is performed
+ during the collection phase. If you need to set up expensive resources,
+ see about setting indirect so that setup happens at test setup time
+ rather than at collection time.
+
+ :arg argnames: a comma-separated string denoting one or more argument
+ names, or a list/tuple of argument strings.
+
+ :arg argvalues: The list of argvalues determines how often a
+ test is invoked with different argument values. If only one
+ argname was specified argvalues is a list of values. If N
+ argnames were specified, argvalues must be a list of N-tuples,
+ where each tuple-element specifies a value for its respective
+ argname.
+
+ :arg indirect: a list of argument names (a subset of argnames), or a
+ boolean. If True, all argnames are treated as indirect. Each argvalue
+ corresponding to an argname in this list will be passed as
+ request.param to its respective fixture function so that it can
+ perform more expensive setups during the setup phase of a test
+ rather than at collection time.
+
+ :arg ids: list of string ids, or a callable.
+ If strings, each corresponds to an entry in argvalues so that it
+ becomes part of the test id.
+ If callable, it should take one argument (a single argvalue) and return
+ a string or return None. If None, the automatically generated id for that
+ argument will be used.
+ If no ids are provided they will be generated automatically from
+ the argvalues.
+
+ :arg scope: if specified it denotes the scope of the parameters.
+ The scope is used for grouping tests by parameter instances.
+ It will also override any fixture-function defined scope, allowing
+ a dynamic scope to be set using test context or configuration.
+ """
+
+ # individual parametrized argument sets can be wrapped in a series
+ # of markers in which case we unwrap the values and apply the mark
+ # at Function init
+ newkeywords = {}
+ unwrapped_argvalues = []
+ for i, argval in enumerate(argvalues):
+ while isinstance(argval, MarkDecorator):
+ newmark = MarkDecorator(argval.markname,
+ argval.args[:-1], argval.kwargs)
+ newmarks = newkeywords.setdefault(i, {})
+ newmarks[newmark.markname] = newmark
+ argval = argval.args[-1]
+ unwrapped_argvalues.append(argval)
+ argvalues = unwrapped_argvalues
+
+ if not isinstance(argnames, (tuple, list)):
+ argnames = [x.strip() for x in argnames.split(",") if x.strip()]
+ if len(argnames) == 1:
+ argvalues = [(val,) for val in argvalues]
+ if not argvalues:
+ argvalues = [(_notexists,) * len(argnames)]
+ # we passed an empty list to parametrize, skip that test
+ #
+ fs, lineno = getfslineno(self.function)
+ newmark = pytest.mark.skip(
+ reason="got empty parameter set %r, function %s at %s:%d" % (
+ argnames, self.function.__name__, fs, lineno))
+ newmarks = newkeywords.setdefault(0, {})
+ newmarks[newmark.markname] = newmark
+
+
+ if scope is None:
+ scope = "function"
+ scopenum = scopes.index(scope)
+ valtypes = {}
+ for arg in argnames:
+ if arg not in self.fixturenames:
+ raise ValueError("%r uses no fixture %r" %(self.function, arg))
+
+ if indirect is True:
+ valtypes = dict.fromkeys(argnames, "params")
+ elif indirect is False:
+ valtypes = dict.fromkeys(argnames, "funcargs")
+ elif isinstance(indirect, (tuple, list)):
+ valtypes = dict.fromkeys(argnames, "funcargs")
+ for arg in indirect:
+ if arg not in argnames:
+ raise ValueError("indirect given to %r: fixture %r doesn't exist" %(
+ self.function, arg))
+ valtypes[arg] = "params"
+ idfn = None
+ if callable(ids):
+ idfn = ids
+ ids = None
+ if ids and len(ids) != len(argvalues):
+ raise ValueError('%d tests specified with %d ids' %(
+ len(argvalues), len(ids)))
+ if not ids:
+ ids = idmaker(argnames, argvalues, idfn)
+ newcalls = []
+ for callspec in self._calls or [CallSpec2(self)]:
+ for param_index, valset in enumerate(argvalues):
+ assert len(valset) == len(argnames)
+ newcallspec = callspec.copy(self)
+ newcallspec.setmulti(valtypes, argnames, valset, ids[param_index],
+ newkeywords.get(param_index, {}), scopenum,
+ param_index)
+ newcalls.append(newcallspec)
+ self._calls = newcalls
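+
+ # Illustrative sketch (hypothetical test code): the usual way to reach
+ # this method is via the ``pytest.mark.parametrize`` marker, handled in
+ # pytest_generate_tests() above.
+ #
+ #     @pytest.mark.parametrize("n,expected", [(1, 2), (3, 4)])
+ #     def test_increment(n, expected):
+ #         assert n + 1 == expected
+ #
+ # This collects two items, test_increment[1-2] and test_increment[3-4].
+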
+
+ def addcall(self, funcargs=None, id=_notexists, param=_notexists):
+ """ (deprecated, use parametrize) Add a new call to the underlying
+ test function during the collection phase of a test run. Note that
+ addcall() is invoked during the test collection phase, prior to and
+ independently of actual test execution. You should only use addcall()
+ if you need to specify multiple arguments of a test function.
+
+ :arg funcargs: argument keyword dictionary used when invoking
+ the test function.
+
+ :arg id: used for reporting and identification purposes. If you
+ don't supply an `id` an automatic unique id will be generated.
+
+ :arg param: a parameter which will be exposed to a later fixture function
+ invocation through the ``request.param`` attribute.
+ """
+ assert funcargs is None or isinstance(funcargs, dict)
+ if funcargs is not None:
+ for name in funcargs:
+ if name not in self.fixturenames:
+ pytest.fail("funcarg %r not used in this function." % name)
+ else:
+ funcargs = {}
+ if id is None:
+ raise ValueError("id=None not allowed")
+ if id is _notexists:
+ id = len(self._calls)
+ id = str(id)
+ if id in self._ids:
+ raise ValueError("duplicate id %r" % id)
+ self._ids.add(id)
+
+ cs = CallSpec2(self)
+ cs.setall(funcargs, id, param)
+ self._calls.append(cs)
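+
+ # Illustrative sketch (hypothetical conftest.py) of the deprecated
+ # addcall() API used from the pytest_generate_tests hook:
+ #
+ #     def pytest_generate_tests(metafunc):
+ #         if "db" in metafunc.fixturenames:
+ #             metafunc.addcall(funcargs=dict(db="sqlite"), id="sqlite")
+ #             metafunc.addcall(funcargs=dict(db="mysql"), id="mysql")
+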
+
+
+if _PY3:
+ import codecs
+
+ def _escape_bytes(val):
+ """
+ If val is pure ascii, returns it as a str(), otherwise escapes
+ into a sequence of escaped bytes:
+ b'\xc3\xb4\xc5\xd6' -> u'\\xc3\\xb4\\xc5\\xd6'
+
+ note:
+ the obvious "v.decode('unicode-escape')" will return
+ valid utf-8 unicode if it finds them in the string, but we
+ want to return escaped bytes for any byte, even if they match
+ a utf-8 string.
+ """
+ if val:
+ # source: http://goo.gl/bGsnwC
+ encoded_bytes, _ = codecs.escape_encode(val)
+ return encoded_bytes.decode('ascii')
+ else:
+ # empty bytes crashes codecs.escape_encode (#1087)
+ return ''
+else:
+ def _escape_bytes(val):
+ """
+ In py2 bytes and str are the same type, so return it unchanged if it
+ is a full ascii string, otherwise escape it into its binary form.
+ """
+ try:
+ return val.decode('ascii')
+ except UnicodeDecodeError:
+ return val.encode('string-escape')
+
+
+def _idval(val, argname, idx, idfn):
+ if idfn:
+ try:
+ s = idfn(val)
+ if s:
+ return s
+ except Exception:
+ pass
+
+ if isinstance(val, bytes):
+ return _escape_bytes(val)
+ elif isinstance(val, (float, int, str, bool, NoneType)):
+ return str(val)
+ elif isinstance(val, REGEX_TYPE):
+ return _escape_bytes(val.pattern) if isinstance(val.pattern, bytes) else val.pattern
+ elif enum is not None and isinstance(val, enum.Enum):
+ return str(val)
+ elif isclass(val) and hasattr(val, '__name__'):
+ return val.__name__
+ elif _PY2 and isinstance(val, unicode):
+ # special case for python 2: if a unicode string is
+ # convertible to ascii, return it as an str() object instead
+ try:
+ return str(val)
+ except UnicodeError:
+ # fallthrough
+ pass
+ return str(argname)+str(idx)
+
+def _idvalset(idx, valset, argnames, idfn):
+ this_id = [_idval(val, argname, idx, idfn)
+ for val, argname in zip(valset, argnames)]
+ return "-".join(this_id)
+
+def idmaker(argnames, argvalues, idfn=None):
+ ids = [_idvalset(valindex, valset, argnames, idfn)
+ for valindex, valset in enumerate(argvalues)]
+ if len(set(ids)) < len(ids):
+ # user may have provided a bad idfn which means the ids are not unique
+ ids = [str(i) + testid for i, testid in enumerate(ids)]
+ return ids
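+
+# Illustrative sketch (not part of the original source) of id generation:
+#
+#     idmaker(("a", "b"), [(1, 2), (3, 4)])    -> ["1-2", "3-4"]
+#     idmaker(("x",), [("v",), ("v",)])        -> ["0v", "1v"]
+#
+# the second case shows the fallback that disambiguates non-unique ids by
+# prefixing the positional index.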
+
+def showfixtures(config):
+ from _pytest.main import wrap_session
+ return wrap_session(config, _showfixtures_main)
+
+def _showfixtures_main(config, session):
+ import _pytest.config
+ session.perform_collect()
+ curdir = py.path.local()
+ tw = _pytest.config.create_terminal_writer(config)
+ verbose = config.getvalue("verbose")
+
+ fm = session._fixturemanager
+
+ available = []
+ for argname, fixturedefs in fm._arg2fixturedefs.items():
+ assert fixturedefs is not None
+ if not fixturedefs:
+ continue
+ for fixturedef in fixturedefs:
+ loc = getlocation(fixturedef.func, curdir)
+ available.append((len(fixturedef.baseid),
+ fixturedef.func.__module__,
+ curdir.bestrelpath(loc),
+ fixturedef.argname, fixturedef))
+
+ available.sort()
+ currentmodule = None
+ for baseid, module, bestrel, argname, fixturedef in available:
+ if currentmodule != module:
+ if not module.startswith("_pytest."):
+ tw.line()
+ tw.sep("-", "fixtures defined from %s" %(module,))
+ currentmodule = module
+ if verbose <= 0 and argname[0] == "_":
+ continue
+ if verbose > 0:
+ funcargspec = "%s -- %s" %(argname, bestrel,)
+ else:
+ funcargspec = argname
+ tw.line(funcargspec, green=True)
+ loc = getlocation(fixturedef.func, curdir)
+ doc = fixturedef.func.__doc__ or ""
+ if doc:
+ for line in doc.strip().split("\n"):
+ tw.line(" " + line.strip())
+ else:
+ tw.line(" %s: no docstring available" %(loc,),
+ red=True)
+
+def getlocation(function, curdir):
+ import inspect
+ fn = py.path.local(inspect.getfile(function))
+ lineno = py.builtin._getcode(function).co_firstlineno
+ if fn.relto(curdir):
+ fn = fn.relto(curdir)
+ return "%s:%d" %(fn, lineno+1)
+
+# builtin pytest.raises helper
+
+def raises(expected_exception, *args, **kwargs):
+ """ assert that a code block/function call raises ``expected_exception``
+ and raise a failure exception otherwise.
+
+ This helper produces a ``ExceptionInfo()`` object (see below).
+
+ If using Python 2.5 or above, you may use this function as a
+ context manager::
+
+ >>> with raises(ZeroDivisionError):
+ ... 1/0
+
+ .. note::
+
+ When using ``pytest.raises`` as a context manager, it's worthwhile to
+ note that normal context manager rules apply and that the exception
+ raised *must* be the final line in the scope of the context manager.
+ Lines of code after that, within the scope of the context manager will
+ not be executed. For example::
+
+ >>> with raises(OSError) as exc_info:
+ assert 1 == 1 # this will execute as expected
+ raise OSError(errno.EEXIST, 'directory exists')
+ assert exc_info.value.errno == errno.EEXIST # this will not execute
+
+ Instead, the following approach must be taken (note the difference in
+ scope)::
+
+ >>> with raises(OSError) as exc_info:
+ assert 1 == 1 # this will execute as expected
+ raise OSError(errno.EEXIST, 'directory exists')
+
+ assert exc_info.value.errno == errno.EEXIST # this will now execute
+
+ Or you can specify a callable by passing a to-be-called lambda::
+
+ >>> raises(ZeroDivisionError, lambda: 1/0)
+ <ExceptionInfo ...>
+
+ or you can specify an arbitrary callable with arguments::
+
+ >>> def f(x): return 1/x
+ ...
+ >>> raises(ZeroDivisionError, f, 0)
+ <ExceptionInfo ...>
+ >>> raises(ZeroDivisionError, f, x=0)
+ <ExceptionInfo ...>
+
+ A third possibility is to use a string to be executed::
+
+ >>> raises(ZeroDivisionError, "f(0)")
+ <ExceptionInfo ...>
+
+ .. autoclass:: _pytest._code.ExceptionInfo
+ :members:
+
+ .. note::
+ Similar to caught exception objects in Python, explicitly clearing
+ local references to returned ``ExceptionInfo`` objects can
+ help the Python interpreter speed up its garbage collection.
+
+ Clearing those references breaks a reference cycle
+ (``ExceptionInfo`` --> caught exception --> frame stack raising
+ the exception --> current frame stack --> local variables -->
+ ``ExceptionInfo``) which makes Python keep all objects referenced
+ from that cycle (including all local variables in the current
+ frame) alive until the next cyclic garbage collection run. See the
+ official Python ``try`` statement documentation for more detailed
+ information.
+
+ """
+ __tracebackhide__ = True
+ if expected_exception is AssertionError:
+ # we want to catch an AssertionError
+ # replace our subclass with the builtin one
+ # see https://github.com/pytest-dev/pytest/issues/176
+ from _pytest.assertion.util import BuiltinAssertionError \
+ as expected_exception
+ msg = ("exceptions must be old-style classes or"
+ " derived from BaseException, not %s")
+ if isinstance(expected_exception, tuple):
+ for exc in expected_exception:
+ if not isclass(exc):
+ raise TypeError(msg % type(exc))
+ elif not isclass(expected_exception):
+ raise TypeError(msg % type(expected_exception))
+
+ if not args:
+ return RaisesContext(expected_exception)
+ elif isinstance(args[0], str):
+ code, = args
+ assert isinstance(code, str)
+ frame = sys._getframe(1)
+ loc = frame.f_locals.copy()
+ loc.update(kwargs)
+ #print "raises frame scope: %r" % frame.f_locals
+ try:
+ code = _pytest._code.Source(code).compile()
+ py.builtin.exec_(code, frame.f_globals, loc)
+ # XXX didn't f_globals == f_locals mean something special?
+ # this is destroyed here ...
+ except expected_exception:
+ return _pytest._code.ExceptionInfo()
+ else:
+ func = args[0]
+ try:
+ func(*args[1:], **kwargs)
+ except expected_exception:
+ return _pytest._code.ExceptionInfo()
+ pytest.fail("DID NOT RAISE {0}".format(expected_exception))
+
+class RaisesContext(object):
+ def __init__(self, expected_exception):
+ self.expected_exception = expected_exception
+ self.excinfo = None
+
+ def __enter__(self):
+ self.excinfo = object.__new__(_pytest._code.ExceptionInfo)
+ return self.excinfo
+
+ def __exit__(self, *tp):
+ __tracebackhide__ = True
+ if tp[0] is None:
+ pytest.fail("DID NOT RAISE")
+ if sys.version_info < (2, 7):
+ # py26: on __exit__() exc_value often does not contain the
+ # exception value.
+ # http://bugs.python.org/issue7853
+ if not isinstance(tp[1], BaseException):
+ exc_type, value, traceback = tp
+ tp = exc_type, exc_type(value), traceback
+ self.excinfo.__init__(tp)
+ return issubclass(self.excinfo.type, self.expected_exception)
+
+#
+# the basic pytest Function item
+#
+
+class Function(FunctionMixin, pytest.Item, FuncargnamesCompatAttr):
+ """ a Function Item is responsible for setting up and executing a
+ Python test function.
+ """
+ _genid = None
+ def __init__(self, name, parent, args=None, config=None,
+ callspec=None, callobj=NOTSET, keywords=None, session=None,
+ fixtureinfo=None):
+ super(Function, self).__init__(name, parent, config=config,
+ session=session)
+ self._args = args
+ if callobj is not NOTSET:
+ self.obj = callobj
+
+ self.keywords.update(self.obj.__dict__)
+ if callspec:
+ self.callspec = callspec
+ self.keywords.update(callspec.keywords)
+ if keywords:
+ self.keywords.update(keywords)
+
+ if fixtureinfo is None:
+ fixtureinfo = self.session._fixturemanager.getfixtureinfo(
+ self.parent, self.obj, self.cls,
+ funcargs=not self._isyieldedfunction())
+ self._fixtureinfo = fixtureinfo
+ self.fixturenames = fixtureinfo.names_closure
+ self._initrequest()
+
+ def _initrequest(self):
+ self.funcargs = {}
+ if self._isyieldedfunction():
+ assert not hasattr(self, "callspec"), (
+ "yielded functions (deprecated) cannot have funcargs")
+ else:
+ if hasattr(self, "callspec"):
+ callspec = self.callspec
+ assert not callspec.funcargs
+ self._genid = callspec.id
+ if hasattr(callspec, "param"):
+ self.param = callspec.param
+ self._request = FixtureRequest(self)
+
+ @property
+ def function(self):
+ "underlying python 'function' object"
+ return getattr(self.obj, 'im_func', self.obj)
+
+ def _getobj(self):
+ name = self.name
+ i = name.find("[") # parametrization
+ if i != -1:
+ name = name[:i]
+ return getattr(self.parent.obj, name)
+
+ @property
+ def _pyfuncitem(self):
+ "(compatonly) for code expecting pytest-2.2 style request objects"
+ return self
+
+ def _isyieldedfunction(self):
+ return getattr(self, "_args", None) is not None
+
+ def runtest(self):
+ """ execute the underlying test function. """
+ self.ihook.pytest_pyfunc_call(pyfuncitem=self)
+
+ def setup(self):
+ super(Function, self).setup()
+ fillfixtures(self)
+
+
+scope2props = dict(session=())
+scope2props["module"] = ("fspath", "module")
+scope2props["class"] = scope2props["module"] + ("cls",)
+scope2props["instance"] = scope2props["class"] + ("instance", )
+scope2props["function"] = scope2props["instance"] + ("function", "keywords")
+
+def scopeproperty(name=None, doc=None):
+ def decoratescope(func):
+ scopename = name or func.__name__
+ def provide(self):
+ if func.__name__ in scope2props[self.scope]:
+ return func(self)
+ raise AttributeError("%s not available in %s-scoped context" % (
+ scopename, self.scope))
+ return property(provide, None, None, func.__doc__)
+ return decoratescope
+
+
+class FixtureRequest(FuncargnamesCompatAttr):
+ """ A request for a fixture from a test or fixture function.
+
+ A request object gives access to the requesting test context
+ and has an optional ``param`` attribute in case
+ the fixture is parametrized indirectly.
+ """
+
+ def __init__(self, pyfuncitem):
+ self._pyfuncitem = pyfuncitem
+ #: fixture for which this request is being performed
+ self.fixturename = None
+ #: Scope string, one of "function", "class", "module", "session"
+ self.scope = "function"
+ self._funcargs = {}
+ self._fixturedefs = {}
+ fixtureinfo = pyfuncitem._fixtureinfo
+ self._arg2fixturedefs = fixtureinfo.name2fixturedefs.copy()
+ self._arg2index = {}
+ self.fixturenames = fixtureinfo.names_closure
+ self._fixturemanager = pyfuncitem.session._fixturemanager
+
+ @property
+ def node(self):
+ """ underlying collection node (depends on current request scope)"""
+ return self._getscopeitem(self.scope)
+
+
+ def _getnextfixturedef(self, argname):
+ fixturedefs = self._arg2fixturedefs.get(argname, None)
+ if fixturedefs is None:
+ # we arrive here because of a dynamic call to
+ # getfuncargvalue(argname) which was naturally
+ # not known at parsing/collection time
+ fixturedefs = self._fixturemanager.getfixturedefs(
+ argname, self._pyfuncitem.parent.nodeid)
+ self._arg2fixturedefs[argname] = fixturedefs
+ # fixturedefs list is immutable so we maintain a decreasing index
+ index = self._arg2index.get(argname, 0) - 1
+ if fixturedefs is None or (-index > len(fixturedefs)):
+ raise FixtureLookupError(argname, self)
+ self._arg2index[argname] = index
+ return fixturedefs[index]
+
+ @property
+ def config(self):
+ """ the pytest config object associated with this request. """
+ return self._pyfuncitem.config
+
+
+ @scopeproperty()
+ def function(self):
+ """ test function object if the request has a per-function scope. """
+ return self._pyfuncitem.obj
+
+ @scopeproperty("class")
+ def cls(self):
+ """ class (can be None) where the test function was collected. """
+ clscol = self._pyfuncitem.getparent(pytest.Class)
+ if clscol:
+ return clscol.obj
+
+ @property
+ def instance(self):
+ """ instance (can be None) on which test function was collected. """
+ # unittest support hack, see _pytest.unittest.TestCaseFunction
+ try:
+ return self._pyfuncitem._testcase
+ except AttributeError:
+ function = getattr(self, "function", None)
+ if function is not None:
+ return py.builtin._getimself(function)
+
+ @scopeproperty()
+ def module(self):
+ """ python module object where the test function was collected. """
+ return self._pyfuncitem.getparent(pytest.Module).obj
+
+ @scopeproperty()
+ def fspath(self):
+ """ the file system path of the test module which collected this test. """
+ return self._pyfuncitem.fspath
+
+ @property
+ def keywords(self):
+ """ keywords/markers dictionary for the underlying node. """
+ return self.node.keywords
+
+ @property
+ def session(self):
+ """ pytest session object. """
+ return self._pyfuncitem.session
+
+ def addfinalizer(self, finalizer):
+ """ add finalizer/teardown function to be called after the
+ last test within the requesting test context finished
+ execution. """
+ # XXX usually this method is shadowed by fixturedef specific ones
+ self._addfinalizer(finalizer, scope=self.scope)
+
+ def _addfinalizer(self, finalizer, scope):
+ colitem = self._getscopeitem(scope)
+ self._pyfuncitem.session._setupstate.addfinalizer(
+ finalizer=finalizer, colitem=colitem)
+
+ def applymarker(self, marker):
+ """ Apply a marker to a single test function invocation.
+ This method is useful if you don't want to have a keyword/marker
+ on all function invocations.
+
+ :arg marker: a :py:class:`_pytest.mark.MarkDecorator` object
+ created by a call to ``pytest.mark.NAME(...)``.
+ """
+ try:
+ self.node.keywords[marker.markname] = marker
+ except AttributeError:
+ raise ValueError(marker)
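+
+ # Illustrative sketch (hypothetical fixture): marking only some of the
+ # parametrized invocations from inside a fixture body.
+ #
+ #     @pytest.fixture(params=["sqlite", "mysql"])
+ #     def backend(request):
+ #         if request.param == "mysql":
+ #             request.applymarker(pytest.mark.xfail(reason="flaky backend"))
+ #         return request.param
+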
+
+ def raiseerror(self, msg):
+ """ raise a FixtureLookupError with the given message. """
+ raise self._fixturemanager.FixtureLookupError(None, self, msg)
+
+ def _fillfixtures(self):
+ item = self._pyfuncitem
+ fixturenames = getattr(item, "fixturenames", self.fixturenames)
+ for argname in fixturenames:
+ if argname not in item.funcargs:
+ item.funcargs[argname] = self.getfuncargvalue(argname)
+
+ def cached_setup(self, setup, teardown=None, scope="module", extrakey=None):
+ """ (deprecated) Return a testing resource managed by ``setup`` &
+ ``teardown`` calls. ``scope`` and ``extrakey`` determine when the
+ ``teardown`` function will be called so that subsequent calls to
+ ``setup`` would recreate the resource. With pytest-2.3 you often
+ do not need ``cached_setup()`` as you can directly declare a scope
+ on a fixture function and register a finalizer through
+ ``request.addfinalizer()``.
+
+ :arg teardown: function receiving a previously setup resource.
+ :arg setup: a no-argument function creating a resource.
+ :arg scope: a string value out of ``function``, ``class``, ``module``
+ or ``session`` indicating the caching lifecycle of the resource.
+ :arg extrakey: added to internal caching key of (funcargname, scope).
+ """
+ if not hasattr(self.config, '_setupcache'):
+ self.config._setupcache = {} # XXX weakref?
+ cachekey = (self.fixturename, self._getscopeitem(scope), extrakey)
+ cache = self.config._setupcache
+ try:
+ val = cache[cachekey]
+ except KeyError:
+ self._check_scope(self.fixturename, self.scope, scope)
+ val = setup()
+ cache[cachekey] = val
+ if teardown is not None:
+ def finalizer():
+ del cache[cachekey]
+ teardown(val)
+ self._addfinalizer(finalizer, scope=scope)
+ return val
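+
+ # Illustrative sketch (hypothetical fixture) of the deprecated
+ # cached_setup() API; ``connect`` is a made-up resource factory.
+ #
+ #     @pytest.fixture
+ #     def db(request):
+ #         return request.cached_setup(
+ #             setup=lambda: connect("sqlite://"),
+ #             teardown=lambda conn: conn.close(),
+ #             scope="session")
+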
+
+ def getfuncargvalue(self, argname):
+ """ Dynamically retrieve a named fixture function argument.
+
+ As of pytest-2.3, it is easier and usually better to access other
+ fixture values by stating them as input arguments in the fixture
+ function. If you can only decide whether to use another fixture at
+ test setup time, you may use this function to retrieve it inside a
+ fixture function body.
+ """
+ return self._get_active_fixturedef(argname).cached_result[0]
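+
+ # Illustrative sketch (hypothetical fixture): deciding at setup time
+ # whether another fixture is needed; ``use_tmp`` is a made-up boolean
+ # fixture.
+ #
+ #     @pytest.fixture
+ #     def maybe_tmp(request, use_tmp):
+ #         if use_tmp:
+ #             return request.getfuncargvalue("tmpdir")
+ #         return None
+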
+
+ def _get_active_fixturedef(self, argname):
+ try:
+ return self._fixturedefs[argname]
+ except KeyError:
+ try:
+ fixturedef = self._getnextfixturedef(argname)
+ except FixtureLookupError:
+ if argname == "request":
+ class PseudoFixtureDef:
+ cached_result = (self, [0], None)
+ scope = "function"
+ return PseudoFixtureDef
+ raise
+ # remove indent to prevent the python3 exception
+ # from leaking into the call
+ result = self._getfuncargvalue(fixturedef)
+ self._funcargs[argname] = result
+ self._fixturedefs[argname] = fixturedef
+ return fixturedef
+
+ def _get_fixturestack(self):
+ current = self
+ l = []
+ while 1:
+ fixturedef = getattr(current, "_fixturedef", None)
+ if fixturedef is None:
+ l.reverse()
+ return l
+ l.append(fixturedef)
+ current = current._parent_request
+
+ def _getfuncargvalue(self, fixturedef):
+ # prepare a subrequest object before calling fixture function
+ # (latter managed by fixturedef)
+ argname = fixturedef.argname
+ funcitem = self._pyfuncitem
+ scope = fixturedef.scope
+ try:
+ param = funcitem.callspec.getparam(argname)
+ except (AttributeError, ValueError):
+ param = NOTSET
+ param_index = 0
+ else:
+ # indices might not be set if old-style metafunc.addcall() was used
+ param_index = funcitem.callspec.indices.get(argname, 0)
+ # if a parametrize invocation set a scope it will override
+ # the static scope defined with the fixture function
+ paramscopenum = funcitem.callspec._arg2scopenum.get(argname)
+ if paramscopenum is not None:
+ scope = scopes[paramscopenum]
+
+ subrequest = SubRequest(self, scope, param, param_index, fixturedef)
+
+ # check if a higher-level scoped fixture accesses a lower level one
+ subrequest._check_scope(argname, self.scope, scope)
+
+ # clear sys.exc_info before invoking the fixture (python bug?)
+ # if it's not explicitly cleared it will leak into the call
+ exc_clear()
+ try:
+ # call the fixture function
+ val = fixturedef.execute(request=subrequest)
+ finally:
+ # if fixture function failed it might have registered finalizers
+ self.session._setupstate.addfinalizer(fixturedef.finish,
+ subrequest.node)
+ return val
+
+ def _check_scope(self, argname, invoking_scope, requested_scope):
+ if argname == "request":
+ return
+ if scopemismatch(invoking_scope, requested_scope):
+ # try to report something helpful
+ lines = self._factorytraceback()
+ pytest.fail("ScopeMismatch: You tried to access the %r scoped "
+ "fixture %r with a %r scoped request object, "
+ "involved factories\n%s" %(
+ (requested_scope, argname, invoking_scope, "\n".join(lines))),
+ pytrace=False)
+
+ def _factorytraceback(self):
+ lines = []
+ for fixturedef in self._get_fixturestack():
+ factory = fixturedef.func
+ fs, lineno = getfslineno(factory)
+ p = self._pyfuncitem.session.fspath.bestrelpath(fs)
+ args = _format_args(factory)
+ lines.append("%s:%d: def %s%s" %(
+ p, lineno, factory.__name__, args))
+ return lines
+
+ def _getscopeitem(self, scope):
+ if scope == "function":
+ # this might also be a non-function Item despite its attribute name
+ return self._pyfuncitem
+ node = get_scope_node(self._pyfuncitem, scope)
+ if node is None and scope == "class":
+ # fallback to function item itself
+ node = self._pyfuncitem
+ assert node
+ return node
+
+ def __repr__(self):
+ return "<FixtureRequest for %r>" %(self.node)
+
+
+class SubRequest(FixtureRequest):
+ """ a sub request for handling getting a fixture from a
+ test function/fixture. """
+ def __init__(self, request, scope, param, param_index, fixturedef):
+ self._parent_request = request
+ self.fixturename = fixturedef.argname
+ if param is not NOTSET:
+ self.param = param
+ self.param_index = param_index
+ self.scope = scope
+ self._fixturedef = fixturedef
+ self.addfinalizer = fixturedef.addfinalizer
+ self._pyfuncitem = request._pyfuncitem
+ self._funcargs = request._funcargs
+ self._fixturedefs = request._fixturedefs
+ self._arg2fixturedefs = request._arg2fixturedefs
+ self._arg2index = request._arg2index
+ self.fixturenames = request.fixturenames
+ self._fixturemanager = request._fixturemanager
+
+ def __repr__(self):
+ return "<SubRequest %r for %r>" % (self.fixturename, self._pyfuncitem)
+
+
+class ScopeMismatchError(Exception):
+ """ A fixture function tries to use a different fixture function which
+ which has a lower scope (e.g. a Session one calls a function one)
+ """
+
+scopes = "session module class function".split()
+scopenum_function = scopes.index("function")
+def scopemismatch(currentscope, newscope):
+ return scopes.index(newscope) > scopes.index(currentscope)
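+# For example, scopemismatch("session", "function") is True: a session-scoped
+# fixture must not depend on a function-scoped one, while the reverse
+# direction, scopemismatch("function", "session"), is fine (False).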
+
+
+class FixtureLookupError(LookupError):
+ """ could not return a requested Fixture (missing or invalid). """
+ def __init__(self, argname, request, msg=None):
+ self.argname = argname
+ self.request = request
+ self.fixturestack = request._get_fixturestack()
+ self.msg = msg
+
+ def formatrepr(self):
+ tblines = []
+ addline = tblines.append
+ stack = [self.request._pyfuncitem.obj]
+ stack.extend(map(lambda x: x.func, self.fixturestack))
+ msg = self.msg
+ if msg is not None:
+ # the last fixture raised an error, let's present
+ # it at the requesting side
+ stack = stack[:-1]
+ for function in stack:
+ fspath, lineno = getfslineno(function)
+ try:
+ lines, _ = inspect.getsourcelines(get_real_func(function))
+ except (IOError, IndexError):
+ error_msg = "file %s, line %s: source code not available"
+ addline(error_msg % (fspath, lineno+1))
+ else:
+ addline("file %s, line %s" % (fspath, lineno+1))
+ for i, line in enumerate(lines):
+ line = line.rstrip()
+ addline(" " + line)
+ if line.lstrip().startswith('def'):
+ break
+
+ if msg is None:
+ fm = self.request._fixturemanager
+ available = []
+ for name, fixturedef in fm._arg2fixturedefs.items():
+ parentid = self.request._pyfuncitem.parent.nodeid
+ faclist = list(fm._matchfactories(fixturedef, parentid))
+ if faclist:
+ available.append(name)
+ msg = "fixture %r not found" % (self.argname,)
+ msg += "\n available fixtures: %s" %(", ".join(available),)
+ msg += "\n use 'py.test --fixtures [testpath]' for help on them."
+
+ return FixtureLookupErrorRepr(fspath, lineno, tblines, msg, self.argname)
+
+class FixtureLookupErrorRepr(TerminalRepr):
+ def __init__(self, filename, firstlineno, tblines, errorstring, argname):
+ self.tblines = tblines
+ self.errorstring = errorstring
+ self.filename = filename
+ self.firstlineno = firstlineno
+ self.argname = argname
+
+ def toterminal(self, tw):
+ #tw.line("FixtureLookupError: %s" %(self.argname), red=True)
+ for tbline in self.tblines:
+ tw.line(tbline.rstrip())
+ for line in self.errorstring.split("\n"):
+ tw.line(" " + line.strip(), red=True)
+ tw.line()
+ tw.line("%s:%d" % (self.filename, self.firstlineno+1))
+
+class FixtureManager:
+ """
+    pytest fixture definitions and information are stored and managed
+    by this class.
+
+ During collection fm.parsefactories() is called multiple times to parse
+ fixture function definitions into FixtureDef objects and internal
+ data structures.
+
+ During collection of test functions, metafunc-mechanics instantiate
+ a FuncFixtureInfo object which is cached per node/func-name.
+ This FuncFixtureInfo object is later retrieved by Function nodes
+ which themselves offer a fixturenames attribute.
+
+ The FuncFixtureInfo object holds information about fixtures and FixtureDefs
+ relevant for a particular function. An initial list of fixtures is
+ assembled like this:
+
+ - ini-defined usefixtures
+ - autouse-marked fixtures along the collection chain up from the function
+ - usefixtures markers at module/class/function level
+ - test function funcargs
+
+    Subsequently the funcfixtureinfo.fixturenames attribute is computed
+    as the closure of the fixtures needed to set up the initial fixtures,
+    i.e. fixtures needed by fixture functions themselves are appended
+    to the fixturenames list.
+
+    During the test-setup phase all fixturenames are instantiated, retrieved
+    by a lookup of their FuncFixtureInfo.
+ """
+
+ _argprefix = "pytest_funcarg__"
+ FixtureLookupError = FixtureLookupError
+ FixtureLookupErrorRepr = FixtureLookupErrorRepr
+
+ def __init__(self, session):
+ self.session = session
+ self.config = session.config
+ self._arg2fixturedefs = {}
+ self._holderobjseen = set()
+ self._arg2finish = {}
+ self._nodeid_and_autousenames = [("", self.config.getini("usefixtures"))]
+ session.config.pluginmanager.register(self, "funcmanage")
+
+
+ def getfixtureinfo(self, node, func, cls, funcargs=True):
+ if funcargs and not hasattr(node, "nofuncargs"):
+ if cls is not None:
+ startindex = 1
+ else:
+ startindex = None
+ argnames = getfuncargnames(func, startindex)
+ else:
+ argnames = ()
+ usefixtures = getattr(func, "usefixtures", None)
+ initialnames = argnames
+ if usefixtures is not None:
+ initialnames = usefixtures.args + initialnames
+ fm = node.session._fixturemanager
+ names_closure, arg2fixturedefs = fm.getfixtureclosure(initialnames,
+ node)
+ return FuncFixtureInfo(argnames, names_closure, arg2fixturedefs)
+
+ def pytest_plugin_registered(self, plugin):
+ nodeid = None
+ try:
+ p = py.path.local(plugin.__file__)
+ except AttributeError:
+ pass
+ else:
+ # construct the base nodeid which is later used to check
+ # what fixtures are visible for particular tests (as denoted
+ # by their test id)
+ if p.basename.startswith("conftest.py"):
+ nodeid = p.dirpath().relto(self.config.rootdir)
+ if p.sep != "/":
+ nodeid = nodeid.replace(p.sep, "/")
+ self.parsefactories(plugin, nodeid)
+
+ def _getautousenames(self, nodeid):
+ """ return a tuple of fixture names to be used. """
+ autousenames = []
+ for baseid, basenames in self._nodeid_and_autousenames:
+ if nodeid.startswith(baseid):
+ if baseid:
+ i = len(baseid)
+ nextchar = nodeid[i:i+1]
+ if nextchar and nextchar not in ":/":
+ continue
+ autousenames.extend(basenames)
+ # make sure autousenames are sorted by scope, scopenum 0 is session
+ autousenames.sort(
+ key=lambda x: self._arg2fixturedefs[x][-1].scopenum)
+ return autousenames
+
+ def getfixtureclosure(self, fixturenames, parentnode):
+        # collect the closure of all fixtures, starting with the given
+        # fixturenames as the initial set. As we have to visit all
+        # factory definitions anyway, we also return an arg2fixturedefs
+ # mapping so that the caller can reuse it and does not have
+ # to re-discover fixturedefs again for each fixturename
+ # (discovering matching fixtures for a given name/node is expensive)
+
+ parentid = parentnode.nodeid
+ fixturenames_closure = self._getautousenames(parentid)
+ def merge(otherlist):
+ for arg in otherlist:
+ if arg not in fixturenames_closure:
+ fixturenames_closure.append(arg)
+ merge(fixturenames)
+ arg2fixturedefs = {}
+ lastlen = -1
+ while lastlen != len(fixturenames_closure):
+ lastlen = len(fixturenames_closure)
+ for argname in fixturenames_closure:
+ if argname in arg2fixturedefs:
+ continue
+ fixturedefs = self.getfixturedefs(argname, parentid)
+ if fixturedefs:
+ arg2fixturedefs[argname] = fixturedefs
+ merge(fixturedefs[-1].argnames)
+ return fixturenames_closure, arg2fixturedefs
+
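As a hypothetical illustration of the closure computed by the method above, a test requesting one fixture also pulls in everything that fixture itself requires:

    import pytest

    @pytest.fixture
    def database(tmpdir):              # 'database' itself requires 'tmpdir'
        return tmpdir.join("db.sqlite")

    def test_uses_database(database):
        # the fixturenames closure for this test ends up containing both
        # 'database' and 'tmpdir' (plus any autouse fixtures), even though
        # the test only names 'database' directly
        assert str(database).endswith("db.sqlite")
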
+ def pytest_generate_tests(self, metafunc):
+ for argname in metafunc.fixturenames:
+ faclist = metafunc._arg2fixturedefs.get(argname)
+ if faclist:
+ fixturedef = faclist[-1]
+ if fixturedef.params is not None:
+ func_params = getattr(getattr(metafunc.function, 'parametrize', None), 'args', [[None]])
+ # skip directly parametrized arguments
+ argnames = func_params[0]
+ if not isinstance(argnames, (tuple, list)):
+ argnames = [x.strip() for x in argnames.split(",") if x.strip()]
+ if argname not in func_params and argname not in argnames:
+ metafunc.parametrize(argname, fixturedef.params,
+ indirect=True, scope=fixturedef.scope,
+ ids=fixturedef.ids)
+ else:
+ continue # will raise FixtureLookupError at setup time
+
+ def pytest_collection_modifyitems(self, items):
+ # separate parametrized setups
+ items[:] = reorder_items(items)
+
+ def parsefactories(self, node_or_obj, nodeid=NOTSET, unittest=False):
+ if nodeid is not NOTSET:
+ holderobj = node_or_obj
+ else:
+ holderobj = node_or_obj.obj
+ nodeid = node_or_obj.nodeid
+ if holderobj in self._holderobjseen:
+ return
+ self._holderobjseen.add(holderobj)
+ autousenames = []
+ for name in dir(holderobj):
+ obj = getattr(holderobj, name, None)
+ # fixture functions have a pytest_funcarg__ prefix (pre-2.3 style)
+ # or are "@pytest.fixture" marked
+ marker = getfixturemarker(obj)
+ if marker is None:
+ if not name.startswith(self._argprefix):
+ continue
+ if not callable(obj):
+ continue
+ marker = defaultfuncargprefixmarker
+ name = name[len(self._argprefix):]
+ elif not isinstance(marker, FixtureFunctionMarker):
+ # magic globals with __getattr__ might have got us a wrong
+ # fixture attribute
+ continue
+ else:
+ assert not name.startswith(self._argprefix)
+ fixturedef = FixtureDef(self, nodeid, name, obj,
+ marker.scope, marker.params,
+ yieldctx=marker.yieldctx,
+ unittest=unittest, ids=marker.ids)
+ faclist = self._arg2fixturedefs.setdefault(name, [])
+ if fixturedef.has_location:
+ faclist.append(fixturedef)
+ else:
+ # fixturedefs with no location are at the front
+ # so this inserts the current fixturedef after the
+ # existing fixturedefs from external plugins but
+ # before the fixturedefs provided in conftests.
+ i = len([f for f in faclist if not f.has_location])
+ faclist.insert(i, fixturedef)
+ if marker.autouse:
+ autousenames.append(name)
+ if autousenames:
+ self._nodeid_and_autousenames.append((nodeid or '', autousenames))
+
+ def getfixturedefs(self, argname, nodeid):
+ try:
+ fixturedefs = self._arg2fixturedefs[argname]
+ except KeyError:
+ return None
+ else:
+ return tuple(self._matchfactories(fixturedefs, nodeid))
+
+ def _matchfactories(self, fixturedefs, nodeid):
+ for fixturedef in fixturedefs:
+ if nodeid.startswith(fixturedef.baseid):
+ yield fixturedef
+
+
+def fail_fixturefunc(fixturefunc, msg):
+ fs, lineno = getfslineno(fixturefunc)
+ location = "%s:%s" % (fs, lineno+1)
+ source = _pytest._code.Source(fixturefunc)
+ pytest.fail(msg + ":\n\n" + str(source.indent()) + "\n" + location,
+ pytrace=False)
+
+def call_fixture_func(fixturefunc, request, kwargs, yieldctx):
+ if yieldctx:
+ if not is_generator(fixturefunc):
+ fail_fixturefunc(fixturefunc,
+ msg="yield_fixture requires yield statement in function")
+ iter = fixturefunc(**kwargs)
+ next = getattr(iter, "__next__", None)
+ if next is None:
+ next = getattr(iter, "next")
+ res = next()
+ def teardown():
+ try:
+ next()
+ except StopIteration:
+ pass
+ else:
+ fail_fixturefunc(fixturefunc,
+ "yield_fixture function has more than one 'yield'")
+ request.addfinalizer(teardown)
+ else:
+ if is_generator(fixturefunc):
+ fail_fixturefunc(fixturefunc,
+ msg="pytest.fixture functions cannot use ``yield``. "
+ "Instead write and return an inner function/generator "
+ "and let the consumer call and iterate over it.")
+ res = fixturefunc(**kwargs)
+ return res
+
+class FixtureDef:
+ """ A container for a factory definition. """
+ def __init__(self, fixturemanager, baseid, argname, func, scope, params,
+ yieldctx, unittest=False, ids=None):
+ self._fixturemanager = fixturemanager
+ self.baseid = baseid or ''
+ self.has_location = baseid is not None
+ self.func = func
+ self.argname = argname
+ self.scope = scope
+ self.scopenum = scopes.index(scope or "function")
+ self.params = params
+ startindex = unittest and 1 or None
+ self.argnames = getfuncargnames(func, startindex=startindex)
+ self.yieldctx = yieldctx
+ self.unittest = unittest
+ self.ids = ids
+ self._finalizer = []
+
+ def addfinalizer(self, finalizer):
+ self._finalizer.append(finalizer)
+
+ def finish(self):
+ try:
+ while self._finalizer:
+ func = self._finalizer.pop()
+ func()
+ finally:
+ # even if finalization fails, we invalidate
+ # the cached fixture value
+ if hasattr(self, "cached_result"):
+ del self.cached_result
+
+ def execute(self, request):
+ # get required arguments and register our own finish()
+ # with their finalization
+ kwargs = {}
+ for argname in self.argnames:
+ fixturedef = request._get_active_fixturedef(argname)
+ result, arg_cache_key, exc = fixturedef.cached_result
+ request._check_scope(argname, request.scope, fixturedef.scope)
+ kwargs[argname] = result
+ if argname != "request":
+ fixturedef.addfinalizer(self.finish)
+
+ my_cache_key = request.param_index
+ cached_result = getattr(self, "cached_result", None)
+ if cached_result is not None:
+ result, cache_key, err = cached_result
+ if my_cache_key == cache_key:
+ if err is not None:
+ py.builtin._reraise(*err)
+ else:
+ return result
+ # we have a previous but differently parametrized fixture instance
+ # so we need to tear it down before creating a new one
+ self.finish()
+ assert not hasattr(self, "cached_result")
+
+ fixturefunc = self.func
+
+ if self.unittest:
+ if request.instance is not None:
+ # bind the unbound method to the TestCase instance
+ fixturefunc = self.func.__get__(request.instance)
+ else:
+ # the fixture function needs to be bound to the actual
+ # request.instance so that code working with "self" behaves
+ # as expected.
+ if request.instance is not None:
+ fixturefunc = getimfunc(self.func)
+ if fixturefunc != self.func:
+ fixturefunc = fixturefunc.__get__(request.instance)
+
+ try:
+ result = call_fixture_func(fixturefunc, request, kwargs,
+ self.yieldctx)
+ except Exception:
+ self.cached_result = (None, my_cache_key, sys.exc_info())
+ raise
+ self.cached_result = (result, my_cache_key, None)
+ return result
+
+ def __repr__(self):
+ return ("<FixtureDef name=%r scope=%r baseid=%r >" %
+ (self.argname, self.scope, self.baseid))
+
+def num_mock_patch_args(function):
+ """ return number of arguments used up by mock arguments (if any) """
+ patchings = getattr(function, "patchings", None)
+ if not patchings:
+ return 0
+ mock = sys.modules.get("mock", sys.modules.get("unittest.mock", None))
+ if mock is not None:
+ return len([p for p in patchings
+ if not p.attribute_name and p.new is mock.DEFAULT])
+ return len(patchings)
+
+
+def getfuncargnames(function, startindex=None):
+ # XXX merge with main.py's varnames
+ #assert not isclass(function)
+ realfunction = function
+ while hasattr(realfunction, "__wrapped__"):
+ realfunction = realfunction.__wrapped__
+ if startindex is None:
+ startindex = inspect.ismethod(function) and 1 or 0
+ if realfunction != function:
+ startindex += num_mock_patch_args(function)
+ function = realfunction
+ if isinstance(function, functools.partial):
+ argnames = inspect.getargs(_pytest._code.getrawcode(function.func))[0]
+ partial = function
+ argnames = argnames[len(partial.args):]
+ if partial.keywords:
+ for kw in partial.keywords:
+ argnames.remove(kw)
+ else:
+ argnames = inspect.getargs(_pytest._code.getrawcode(function))[0]
+ defaults = getattr(function, 'func_defaults',
+ getattr(function, '__defaults__', None)) or ()
+ numdefaults = len(defaults)
+ if numdefaults:
+ return tuple(argnames[startindex:-numdefaults])
+ return tuple(argnames[startindex:])
+
+# algorithm for sorting on a per-parametrized resource setup basis
+# it is called for scopenum==0 (session) first and then sorts
+# down to the lower scopes so as to minimize the number of "high scope"
+# setups and teardowns
+
+def reorder_items(items):
+ argkeys_cache = {}
+ for scopenum in range(0, scopenum_function):
+ argkeys_cache[scopenum] = d = {}
+ for item in items:
+ keys = set(get_parametrized_fixture_keys(item, scopenum))
+ if keys:
+ d[item] = keys
+ return reorder_items_atscope(items, set(), argkeys_cache, 0)
+
+def reorder_items_atscope(items, ignore, argkeys_cache, scopenum):
+ if scopenum >= scopenum_function or len(items) < 3:
+ return items
+ items_done = []
+ while 1:
+ items_before, items_same, items_other, newignore = \
+ slice_items(items, ignore, argkeys_cache[scopenum])
+ items_before = reorder_items_atscope(
+ items_before, ignore, argkeys_cache,scopenum+1)
+ if items_same is None:
+ # nothing to reorder in this scope
+ assert items_other is None
+ return items_done + items_before
+ items_done.extend(items_before)
+ items = items_same + items_other
+ ignore = newignore
+
+
+def slice_items(items, ignore, scoped_argkeys_cache):
+ # we pick the first item which uses a fixture instance in the
+ # requested scope and which we haven't seen yet. We slice the input
+    # items list into items_before, items_same and
+    # items_other
+ if scoped_argkeys_cache: # do we need to do work at all?
+ it = iter(items)
+ # first find a slicing key
+ for i, item in enumerate(it):
+ argkeys = scoped_argkeys_cache.get(item)
+ if argkeys is not None:
+ argkeys = argkeys.difference(ignore)
+ if argkeys: # found a slicing key
+ slicing_argkey = argkeys.pop()
+ items_before = items[:i]
+ items_same = [item]
+ items_other = []
+ # now slice the remainder of the list
+ for item in it:
+ argkeys = scoped_argkeys_cache.get(item)
+ if argkeys and slicing_argkey in argkeys and \
+ slicing_argkey not in ignore:
+ items_same.append(item)
+ else:
+ items_other.append(item)
+ newignore = ignore.copy()
+ newignore.add(slicing_argkey)
+ return (items_before, items_same, items_other, newignore)
+ return items, None, None, None
+
+def get_parametrized_fixture_keys(item, scopenum):
+ """ return list of keys for all parametrized arguments which match
+ the specified scope. """
+ assert scopenum < scopenum_function # function
+ try:
+ cs = item.callspec
+ except AttributeError:
+ pass
+ else:
+            # cs.indices.items() is in random order of argnames, but
+            # then again different functions (items) can change the order of
+            # arguments so it probably doesn't matter much
+ for argname, param_index in cs.indices.items():
+ if cs._arg2scopenum[argname] != scopenum:
+ continue
+ if scopenum == 0: # session
+ key = (argname, param_index)
+ elif scopenum == 1: # module
+ key = (argname, param_index, item.fspath)
+ elif scopenum == 2: # class
+ key = (argname, param_index, item.fspath, item.cls)
+ yield key
+
+
+def xunitsetup(obj, name):
+ meth = getattr(obj, name, None)
+ if getfixturemarker(meth) is None:
+ return meth
+
+def getfixturemarker(obj):
+ """ return fixturemarker or None if it doesn't exist or raised
+ exceptions."""
+ try:
+ return getattr(obj, "_pytestfixturefunction", None)
+ except KeyboardInterrupt:
+ raise
+ except Exception:
+ # some objects raise errors like request (from flask import request)
+ # we don't expect them to be fixture functions
+ return None
+
+scopename2class = {
+ 'class': Class,
+ 'module': Module,
+ 'function': pytest.Item,
+}
+def get_scope_node(node, scope):
+ cls = scopename2class.get(scope)
+ if cls is None:
+ if scope == "session":
+ return node.session
+ raise ValueError("unknown scope")
+ return node.getparent(cls)
diff --git a/python/pytest/_pytest/recwarn.py b/python/pytest/_pytest/recwarn.py
new file mode 100644
index 000000000..a89474c03
--- /dev/null
+++ b/python/pytest/_pytest/recwarn.py
@@ -0,0 +1,221 @@
+""" recording warnings during test function execution. """
+
+import inspect
+
+import _pytest._code
+import py
+import sys
+import warnings
+import pytest
+
+
+@pytest.yield_fixture
+def recwarn(request):
+ """Return a WarningsRecorder instance that provides these methods:
+
+    * ``pop(category=None)``: pop and return the first recorded warning matching the category.
+ * ``clear()``: clear list of warnings
+
+ See http://docs.python.org/library/warnings.html for information
+ on warning categories.
+ """
+ wrec = WarningsRecorder()
+ with wrec:
+ warnings.simplefilter('default')
+ yield wrec
+
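A minimal sketch of a test using the recwarn fixture defined above (the test body is illustrative only, not part of this module):

    import warnings

    def test_records_user_warning(recwarn):
        warnings.warn("this API is going away", UserWarning)
        assert len(recwarn) == 1
        w = recwarn.pop(UserWarning)
        assert issubclass(w.category, UserWarning)
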
+
+def pytest_namespace():
+ return {'deprecated_call': deprecated_call,
+ 'warns': warns}
+
+
+def deprecated_call(func=None, *args, **kwargs):
+ """ assert that calling ``func(*args, **kwargs)`` triggers a
+ ``DeprecationWarning`` or ``PendingDeprecationWarning``.
+
+ This function can be used as a context manager::
+
+ >>> with deprecated_call():
+ ... myobject.deprecated_method()
+
+ Note: we cannot use WarningsRecorder here because it is still subject
+ to the mechanism that prevents warnings of the same type from being
+ triggered twice for the same module. See #1190.
+ """
+ if not func:
+ return WarningsChecker(expected_warning=DeprecationWarning)
+
+ categories = []
+
+ def warn_explicit(message, category, *args, **kwargs):
+ categories.append(category)
+ old_warn_explicit(message, category, *args, **kwargs)
+
+ def warn(message, category=None, *args, **kwargs):
+ if isinstance(message, Warning):
+ categories.append(message.__class__)
+ else:
+ categories.append(category)
+ old_warn(message, category, *args, **kwargs)
+
+ old_warn = warnings.warn
+ old_warn_explicit = warnings.warn_explicit
+ warnings.warn_explicit = warn_explicit
+ warnings.warn = warn
+ try:
+ ret = func(*args, **kwargs)
+ finally:
+ warnings.warn_explicit = old_warn_explicit
+ warnings.warn = old_warn
+ deprecation_categories = (DeprecationWarning, PendingDeprecationWarning)
+ if not any(issubclass(c, deprecation_categories) for c in categories):
+ __tracebackhide__ = True
+ raise AssertionError("%r did not produce DeprecationWarning" % (func,))
+ return ret
+
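deprecated_call() supports both a context-manager form and a direct-call form; a small sketch (legacy_api is a hypothetical function):

    import warnings
    import pytest

    def legacy_api():
        warnings.warn("use new_api() instead", DeprecationWarning)

    def test_legacy_api_warns():
        with pytest.deprecated_call():
            legacy_api()
        # equivalent call form, returning the function's result:
        pytest.deprecated_call(legacy_api)
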
+
+def warns(expected_warning, *args, **kwargs):
+ """Assert that code raises a particular class of warning.
+
+ Specifically, the input @expected_warning can be a warning class or
+    tuple of warning classes, and the code under test must issue that warning
+ (if a single class) or one of those warnings (if a tuple).
+
+ This helper produces a list of ``warnings.WarningMessage`` objects,
+ one for each warning raised.
+
+ This function can be used as a context manager, or any of the other ways
+ ``pytest.raises`` can be used::
+
+ >>> with warns(RuntimeWarning):
+ ... warnings.warn("my warning", RuntimeWarning)
+ """
+ wcheck = WarningsChecker(expected_warning)
+ if not args:
+ return wcheck
+ elif isinstance(args[0], str):
+ code, = args
+ assert isinstance(code, str)
+ frame = sys._getframe(1)
+ loc = frame.f_locals.copy()
+ loc.update(kwargs)
+
+ with wcheck:
+ code = _pytest._code.Source(code).compile()
+ py.builtin.exec_(code, frame.f_globals, loc)
+ else:
+ func = args[0]
+ with wcheck:
+ return func(*args[1:], **kwargs)
+
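In addition to the docstring example above, warns() also accepts a tuple of warning classes, mirroring pytest.raises; a hypothetical sketch:

    import warnings
    import pytest

    def test_accepts_either_warning():
        with pytest.warns((RuntimeWarning, FutureWarning)):
            warnings.warn("behaviour may change", FutureWarning)
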
+
+class RecordedWarning(object):
+ def __init__(self, message, category, filename, lineno, file, line):
+ self.message = message
+ self.category = category
+ self.filename = filename
+ self.lineno = lineno
+ self.file = file
+ self.line = line
+
+
+class WarningsRecorder(object):
+ """A context manager to record raised warnings.
+
+ Adapted from `warnings.catch_warnings`.
+ """
+
+ def __init__(self, module=None):
+ self._module = sys.modules['warnings'] if module is None else module
+ self._entered = False
+ self._list = []
+
+ @property
+ def list(self):
+ """The list of recorded warnings."""
+ return self._list
+
+ def __getitem__(self, i):
+ """Get a recorded warning by index."""
+ return self._list[i]
+
+ def __iter__(self):
+ """Iterate through the recorded warnings."""
+ return iter(self._list)
+
+ def __len__(self):
+ """The number of recorded warnings."""
+ return len(self._list)
+
+ def pop(self, cls=Warning):
+ """Pop the first recorded warning, raise exception if not exists."""
+ for i, w in enumerate(self._list):
+ if issubclass(w.category, cls):
+ return self._list.pop(i)
+ __tracebackhide__ = True
+ raise AssertionError("%r not found in warning list" % cls)
+
+ def clear(self):
+ """Clear the list of recorded warnings."""
+ self._list[:] = []
+
+ def __enter__(self):
+ if self._entered:
+ __tracebackhide__ = True
+ raise RuntimeError("Cannot enter %r twice" % self)
+ self._entered = True
+ self._filters = self._module.filters
+ self._module.filters = self._filters[:]
+ self._showwarning = self._module.showwarning
+
+ def showwarning(message, category, filename, lineno,
+ file=None, line=None):
+ self._list.append(RecordedWarning(
+ message, category, filename, lineno, file, line))
+
+ # still perform old showwarning functionality
+ self._showwarning(
+ message, category, filename, lineno, file=file, line=line)
+
+ self._module.showwarning = showwarning
+
+ # allow the same warning to be raised more than once
+
+ self._module.simplefilter('always')
+ return self
+
+ def __exit__(self, *exc_info):
+ if not self._entered:
+ __tracebackhide__ = True
+ raise RuntimeError("Cannot exit %r without entering first" % self)
+ self._module.filters = self._filters
+ self._module.showwarning = self._showwarning
+
+
+class WarningsChecker(WarningsRecorder):
+ def __init__(self, expected_warning=None, module=None):
+ super(WarningsChecker, self).__init__(module=module)
+
+ msg = ("exceptions must be old-style classes or "
+ "derived from Warning, not %s")
+ if isinstance(expected_warning, tuple):
+ for exc in expected_warning:
+ if not inspect.isclass(exc):
+ raise TypeError(msg % type(exc))
+ elif inspect.isclass(expected_warning):
+ expected_warning = (expected_warning,)
+ elif expected_warning is not None:
+ raise TypeError(msg % type(expected_warning))
+
+ self.expected_warning = expected_warning
+
+ def __exit__(self, *exc_info):
+ super(WarningsChecker, self).__exit__(*exc_info)
+
+ # only check if we're not currently handling an exception
+ if all(a is None for a in exc_info):
+ if self.expected_warning is not None:
+ if not any(r.category in self.expected_warning for r in self):
+ __tracebackhide__ = True
+ pytest.fail("DID NOT WARN")
diff --git a/python/pytest/_pytest/resultlog.py b/python/pytest/_pytest/resultlog.py
new file mode 100644
index 000000000..3670f0214
--- /dev/null
+++ b/python/pytest/_pytest/resultlog.py
@@ -0,0 +1,104 @@
+""" log machine-parseable test session result information in a plain
+text file.
+"""
+
+import py
+import os
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting", "resultlog plugin options")
+ group.addoption('--resultlog', '--result-log', action="store",
+ metavar="path", default=None,
+ help="path for machine-readable result log.")
+
+def pytest_configure(config):
+ resultlog = config.option.resultlog
+ # prevent opening resultlog on slave nodes (xdist)
+ if resultlog and not hasattr(config, 'slaveinput'):
+ dirname = os.path.dirname(os.path.abspath(resultlog))
+ if not os.path.isdir(dirname):
+ os.makedirs(dirname)
+ logfile = open(resultlog, 'w', 1) # line buffered
+ config._resultlog = ResultLog(config, logfile)
+ config.pluginmanager.register(config._resultlog)
+
+def pytest_unconfigure(config):
+ resultlog = getattr(config, '_resultlog', None)
+ if resultlog:
+ resultlog.logfile.close()
+ del config._resultlog
+ config.pluginmanager.unregister(resultlog)
+
+def generic_path(item):
+ chain = item.listchain()
+ gpath = [chain[0].name]
+ fspath = chain[0].fspath
+ fspart = False
+ for node in chain[1:]:
+ newfspath = node.fspath
+ if newfspath == fspath:
+ if fspart:
+ gpath.append(':')
+ fspart = False
+ else:
+ gpath.append('.')
+ else:
+ gpath.append('/')
+ fspart = True
+ name = node.name
+ if name[0] in '([':
+ gpath.pop()
+ gpath.append(name)
+ fspath = newfspath
+ return ''.join(gpath)
+
+class ResultLog(object):
+ def __init__(self, config, logfile):
+ self.config = config
+ self.logfile = logfile # preferably line buffered
+
+ def write_log_entry(self, testpath, lettercode, longrepr):
+ py.builtin.print_("%s %s" % (lettercode, testpath), file=self.logfile)
+ for line in longrepr.splitlines():
+ py.builtin.print_(" %s" % line, file=self.logfile)
+
+ def log_outcome(self, report, lettercode, longrepr):
+ testpath = getattr(report, 'nodeid', None)
+ if testpath is None:
+ testpath = report.fspath
+ self.write_log_entry(testpath, lettercode, longrepr)
+
+ def pytest_runtest_logreport(self, report):
+ if report.when != "call" and report.passed:
+ return
+ res = self.config.hook.pytest_report_teststatus(report=report)
+ code = res[1]
+ if code == 'x':
+ longrepr = str(report.longrepr)
+ elif code == 'X':
+ longrepr = ''
+ elif report.passed:
+ longrepr = ""
+ elif report.failed:
+ longrepr = str(report.longrepr)
+ elif report.skipped:
+ longrepr = str(report.longrepr[2])
+ self.log_outcome(report, code, longrepr)
+
+ def pytest_collectreport(self, report):
+ if not report.passed:
+ if report.failed:
+ code = "F"
+ longrepr = str(report.longrepr)
+ else:
+ assert report.skipped
+ code = "S"
+ longrepr = "%s:%d: %s" % report.longrepr
+ self.log_outcome(report, code, longrepr)
+
+ def pytest_internalerror(self, excrepr):
+ reprcrash = getattr(excrepr, 'reprcrash', None)
+ path = getattr(reprcrash, "path", None)
+ if path is None:
+ path = "cwd:%s" % py.path.local()
+ self.write_log_entry(path, '!', str(excrepr))
diff --git a/python/pytest/_pytest/runner.py b/python/pytest/_pytest/runner.py
new file mode 100644
index 000000000..cde94c8c8
--- /dev/null
+++ b/python/pytest/_pytest/runner.py
@@ -0,0 +1,515 @@
+""" basic collect and runtest protocol implementations """
+import bdb
+import sys
+from time import time
+
+import py
+import pytest
+from _pytest._code.code import TerminalRepr, ExceptionInfo
+
+
+def pytest_namespace():
+ return {
+ 'fail' : fail,
+ 'skip' : skip,
+ 'importorskip' : importorskip,
+ 'exit' : exit,
+ }
+
+#
+# pytest plugin hooks
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting", "reporting", after="general")
+ group.addoption('--durations',
+ action="store", type=int, default=None, metavar="N",
+ help="show N slowest setup/test durations (N=0 for all)."),
+
+def pytest_terminal_summary(terminalreporter):
+ durations = terminalreporter.config.option.durations
+ if durations is None:
+ return
+ tr = terminalreporter
+ dlist = []
+ for replist in tr.stats.values():
+ for rep in replist:
+ if hasattr(rep, 'duration'):
+ dlist.append(rep)
+ if not dlist:
+ return
+ dlist.sort(key=lambda x: x.duration)
+ dlist.reverse()
+ if not durations:
+ tr.write_sep("=", "slowest test durations")
+ else:
+ tr.write_sep("=", "slowest %s test durations" % durations)
+ dlist = dlist[:durations]
+
+ for rep in dlist:
+ nodeid = rep.nodeid.replace("::()::", "::")
+ tr.write_line("%02.2fs %-8s %s" %
+ (rep.duration, rep.when, nodeid))
+
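The --durations report above is driven from the command line; a hypothetical invocation through the public pytest.main() API (the "tests/" path is an example):

    import pytest

    # report the 10 slowest setup/call/teardown phases after the run
    pytest.main(["--durations=10", "tests/"])
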
+def pytest_sessionstart(session):
+ session._setupstate = SetupState()
+def pytest_sessionfinish(session):
+ session._setupstate.teardown_all()
+
+class NodeInfo:
+ def __init__(self, location):
+ self.location = location
+
+def pytest_runtest_protocol(item, nextitem):
+ item.ihook.pytest_runtest_logstart(
+ nodeid=item.nodeid, location=item.location,
+ )
+ runtestprotocol(item, nextitem=nextitem)
+ return True
+
+def runtestprotocol(item, log=True, nextitem=None):
+ hasrequest = hasattr(item, "_request")
+ if hasrequest and not item._request:
+ item._initrequest()
+ rep = call_and_report(item, "setup", log)
+ reports = [rep]
+ if rep.passed:
+ reports.append(call_and_report(item, "call", log))
+ reports.append(call_and_report(item, "teardown", log,
+ nextitem=nextitem))
+ # after all teardown hooks have been called
+ # want funcargs and request info to go away
+ if hasrequest:
+ item._request = False
+ item.funcargs = None
+ return reports
+
+def pytest_runtest_setup(item):
+ item.session._setupstate.prepare(item)
+
+def pytest_runtest_call(item):
+ try:
+ item.runtest()
+ except Exception:
+ # Store trace info to allow postmortem debugging
+ type, value, tb = sys.exc_info()
+ tb = tb.tb_next # Skip *this* frame
+ sys.last_type = type
+ sys.last_value = value
+ sys.last_traceback = tb
+ del tb # Get rid of it in this namespace
+ raise
+
+def pytest_runtest_teardown(item, nextitem):
+ item.session._setupstate.teardown_exact(item, nextitem)
+
+def pytest_report_teststatus(report):
+ if report.when in ("setup", "teardown"):
+ if report.failed:
+ # category, shortletter, verbose-word
+ return "error", "E", "ERROR"
+ elif report.skipped:
+ return "skipped", "s", "SKIPPED"
+ else:
+ return "", "", ""
+
+
+#
+# Implementation
+
+def call_and_report(item, when, log=True, **kwds):
+ call = call_runtest_hook(item, when, **kwds)
+ hook = item.ihook
+ report = hook.pytest_runtest_makereport(item=item, call=call)
+ if log:
+ hook.pytest_runtest_logreport(report=report)
+ if check_interactive_exception(call, report):
+ hook.pytest_exception_interact(node=item, call=call, report=report)
+ return report
+
+def check_interactive_exception(call, report):
+ return call.excinfo and not (
+ hasattr(report, "wasxfail") or
+ call.excinfo.errisinstance(skip.Exception) or
+ call.excinfo.errisinstance(bdb.BdbQuit))
+
+def call_runtest_hook(item, when, **kwds):
+ hookname = "pytest_runtest_" + when
+ ihook = getattr(item.ihook, hookname)
+ return CallInfo(lambda: ihook(item=item, **kwds), when=when)
+
+class CallInfo:
+ """ Result/Exception info a function invocation. """
+ #: None or ExceptionInfo object.
+ excinfo = None
+ def __init__(self, func, when):
+ #: context of invocation: one of "setup", "call",
+ #: "teardown", "memocollect"
+ self.when = when
+ self.start = time()
+ try:
+ self.result = func()
+ except KeyboardInterrupt:
+ self.stop = time()
+ raise
+ except:
+ self.excinfo = ExceptionInfo()
+ self.stop = time()
+
+ def __repr__(self):
+ if self.excinfo:
+ status = "exception: %s" % str(self.excinfo.value)
+ else:
+ status = "result: %r" % (self.result,)
+ return "<CallInfo when=%r %s>" % (self.when, status)
+
+def getslaveinfoline(node):
+ try:
+ return node._slaveinfocache
+ except AttributeError:
+ d = node.slaveinfo
+ ver = "%s.%s.%s" % d['version_info'][:3]
+ node._slaveinfocache = s = "[%s] %s -- Python %s %s" % (
+ d['id'], d['sysplatform'], ver, d['executable'])
+ return s
+
+class BaseReport(object):
+
+ def __init__(self, **kw):
+ self.__dict__.update(kw)
+
+ def toterminal(self, out):
+ if hasattr(self, 'node'):
+ out.line(getslaveinfoline(self.node))
+
+ longrepr = self.longrepr
+ if longrepr is None:
+ return
+
+ if hasattr(longrepr, 'toterminal'):
+ longrepr.toterminal(out)
+ else:
+ try:
+ out.line(longrepr)
+ except UnicodeEncodeError:
+ out.line("<unprintable longrepr>")
+
+ def get_sections(self, prefix):
+ for name, content in self.sections:
+ if name.startswith(prefix):
+ yield prefix, content
+
+ passed = property(lambda x: x.outcome == "passed")
+ failed = property(lambda x: x.outcome == "failed")
+ skipped = property(lambda x: x.outcome == "skipped")
+
+ @property
+ def fspath(self):
+ return self.nodeid.split("::")[0]
+
+def pytest_runtest_makereport(item, call):
+ when = call.when
+ duration = call.stop-call.start
+ keywords = dict([(x,1) for x in item.keywords])
+ excinfo = call.excinfo
+ sections = []
+ if not call.excinfo:
+ outcome = "passed"
+ longrepr = None
+ else:
+ if not isinstance(excinfo, ExceptionInfo):
+ outcome = "failed"
+ longrepr = excinfo
+ elif excinfo.errisinstance(pytest.skip.Exception):
+ outcome = "skipped"
+ r = excinfo._getreprcrash()
+ longrepr = (str(r.path), r.lineno, r.message)
+ else:
+ outcome = "failed"
+ if call.when == "call":
+ longrepr = item.repr_failure(excinfo)
+ else: # exception in setup or teardown
+ longrepr = item._repr_failure_py(excinfo,
+ style=item.config.option.tbstyle)
+ for rwhen, key, content in item._report_sections:
+ sections.append(("Captured %s %s" %(key, rwhen), content))
+ return TestReport(item.nodeid, item.location,
+ keywords, outcome, longrepr, when,
+ sections, duration)
+
+class TestReport(BaseReport):
+ """ Basic test report object (also used for setup and teardown calls if
+ they fail).
+ """
+ def __init__(self, nodeid, location, keywords, outcome,
+ longrepr, when, sections=(), duration=0, **extra):
+ #: normalized collection node id
+ self.nodeid = nodeid
+
+ #: a (filesystempath, lineno, domaininfo) tuple indicating the
+ #: actual location of a test item - it might be different from the
+ #: collected one e.g. if a method is inherited from a different module.
+ self.location = location
+
+ #: a name -> value dictionary containing all keywords and
+ #: markers associated with a test invocation.
+ self.keywords = keywords
+
+ #: test outcome, always one of "passed", "failed", "skipped".
+ self.outcome = outcome
+
+ #: None or a failure representation.
+ self.longrepr = longrepr
+
+ #: one of 'setup', 'call', 'teardown' to indicate runtest phase.
+ self.when = when
+
+ #: list of (secname, data) extra information which needs to
+        #: be marshallable
+ self.sections = list(sections)
+
+ #: time it took to run just the test
+ self.duration = duration
+
+ self.__dict__.update(extra)
+
+ def __repr__(self):
+ return "<TestReport %r when=%r outcome=%r>" % (
+ self.nodeid, self.when, self.outcome)
+
+class TeardownErrorReport(BaseReport):
+ outcome = "failed"
+ when = "teardown"
+ def __init__(self, longrepr, **extra):
+ self.longrepr = longrepr
+ self.sections = []
+ self.__dict__.update(extra)
+
+def pytest_make_collect_report(collector):
+ call = CallInfo(collector._memocollect, "memocollect")
+ longrepr = None
+ if not call.excinfo:
+ outcome = "passed"
+ else:
+ from _pytest import nose
+ skip_exceptions = (Skipped,) + nose.get_skip_exceptions()
+ if call.excinfo.errisinstance(skip_exceptions):
+ outcome = "skipped"
+ r = collector._repr_failure_py(call.excinfo, "line").reprcrash
+ longrepr = (str(r.path), r.lineno, r.message)
+ else:
+ outcome = "failed"
+ errorinfo = collector.repr_failure(call.excinfo)
+ if not hasattr(errorinfo, "toterminal"):
+ errorinfo = CollectErrorRepr(errorinfo)
+ longrepr = errorinfo
+ rep = CollectReport(collector.nodeid, outcome, longrepr,
+ getattr(call, 'result', None))
+ rep.call = call # see collect_one_node
+ return rep
+
+
+class CollectReport(BaseReport):
+ def __init__(self, nodeid, outcome, longrepr, result,
+ sections=(), **extra):
+ self.nodeid = nodeid
+ self.outcome = outcome
+ self.longrepr = longrepr
+ self.result = result or []
+ self.sections = list(sections)
+ self.__dict__.update(extra)
+
+ @property
+ def location(self):
+ return (self.fspath, None, self.fspath)
+
+ def __repr__(self):
+ return "<CollectReport %r lenresult=%s outcome=%r>" % (
+ self.nodeid, len(self.result), self.outcome)
+
+class CollectErrorRepr(TerminalRepr):
+ def __init__(self, msg):
+ self.longrepr = msg
+ def toterminal(self, out):
+ out.line(self.longrepr, red=True)
+
+class SetupState(object):
+ """ shared state for setting up/tearing down test items or collectors. """
+ def __init__(self):
+ self.stack = []
+ self._finalizers = {}
+
+ def addfinalizer(self, finalizer, colitem):
+ """ attach a finalizer to the given colitem.
+ if colitem is None, this will add a finalizer that
+ is called at the end of teardown_all().
+ """
+ assert colitem and not isinstance(colitem, tuple)
+ assert py.builtin.callable(finalizer)
+ #assert colitem in self.stack # some unit tests don't setup stack :/
+ self._finalizers.setdefault(colitem, []).append(finalizer)
+
+ def _pop_and_teardown(self):
+ colitem = self.stack.pop()
+ self._teardown_with_finalization(colitem)
+
+ def _callfinalizers(self, colitem):
+ finalizers = self._finalizers.pop(colitem, None)
+ exc = None
+ while finalizers:
+ fin = finalizers.pop()
+ try:
+ fin()
+ except Exception:
+ # XXX Only first exception will be seen by user,
+ # ideally all should be reported.
+ if exc is None:
+ exc = sys.exc_info()
+ if exc:
+ py.builtin._reraise(*exc)
+
+ def _teardown_with_finalization(self, colitem):
+ self._callfinalizers(colitem)
+ if hasattr(colitem, "teardown"):
+ colitem.teardown()
+ for colitem in self._finalizers:
+ assert colitem is None or colitem in self.stack \
+ or isinstance(colitem, tuple)
+
+ def teardown_all(self):
+ while self.stack:
+ self._pop_and_teardown()
+ for key in list(self._finalizers):
+ self._teardown_with_finalization(key)
+ assert not self._finalizers
+
+ def teardown_exact(self, item, nextitem):
+ needed_collectors = nextitem and nextitem.listchain() or []
+ self._teardown_towards(needed_collectors)
+
+ def _teardown_towards(self, needed_collectors):
+ while self.stack:
+ if self.stack == needed_collectors[:len(self.stack)]:
+ break
+ self._pop_and_teardown()
+
+ def prepare(self, colitem):
+ """ setup objects along the collector chain to the test-method
+ and teardown previously setup objects."""
+ needed_collectors = colitem.listchain()
+ self._teardown_towards(needed_collectors)
+
+ # check if the last collection node has raised an error
+ for col in self.stack:
+ if hasattr(col, '_prepare_exc'):
+ py.builtin._reraise(*col._prepare_exc)
+ for col in needed_collectors[len(self.stack):]:
+ self.stack.append(col)
+ try:
+ col.setup()
+ except Exception:
+ col._prepare_exc = sys.exc_info()
+ raise
+
+def collect_one_node(collector):
+ ihook = collector.ihook
+ ihook.pytest_collectstart(collector=collector)
+ rep = ihook.pytest_make_collect_report(collector=collector)
+ call = rep.__dict__.pop("call", None)
+ if call and check_interactive_exception(call, rep):
+ ihook.pytest_exception_interact(node=collector, call=call, report=rep)
+ return rep
+
+
+# =============================================================
+# Test OutcomeExceptions and helpers for creating them.
+
+
+class OutcomeException(Exception):
+ """ OutcomeException and its subclass instances indicate and
+ contain info about test and collection outcomes.
+ """
+ def __init__(self, msg=None, pytrace=True):
+ Exception.__init__(self, msg)
+ self.msg = msg
+ self.pytrace = pytrace
+
+ def __repr__(self):
+ if self.msg:
+ val = self.msg
+ if isinstance(val, bytes):
+ val = py._builtin._totext(val, errors='replace')
+ return val
+ return "<%s instance>" %(self.__class__.__name__,)
+ __str__ = __repr__
+
+class Skipped(OutcomeException):
+ # XXX hackish: on 3k we fake to live in the builtins
+ # in order to have Skipped exception printing shorter/nicer
+ __module__ = 'builtins'
+
+class Failed(OutcomeException):
+ """ raised from an explicit call to pytest.fail() """
+ __module__ = 'builtins'
+
+class Exit(KeyboardInterrupt):
+ """ raised for immediate program exits (no tracebacks/summaries)"""
+ def __init__(self, msg="unknown reason"):
+ self.msg = msg
+ KeyboardInterrupt.__init__(self, msg)
+
+# exposed helper methods
+
+def exit(msg):
+ """ exit testing process as if KeyboardInterrupt was triggered. """
+ __tracebackhide__ = True
+ raise Exit(msg)
+
+exit.Exception = Exit
+
+def skip(msg=""):
+ """ skip an executing test with the given message. Note: it's usually
+ better to use the pytest.mark.skipif marker to declare a test to be
+ skipped under certain conditions like mismatching platforms or
+ dependencies. See the pytest_skipping plugin for details.
+ """
+ __tracebackhide__ = True
+ raise Skipped(msg=msg)
+skip.Exception = Skipped
+
+def fail(msg="", pytrace=True):
+ """ explicitly fail an currently-executing test with the given Message.
+
+ :arg pytrace: if false the msg represents the full failure information
+ and no python traceback will be reported.
+ """
+ __tracebackhide__ = True
+ raise Failed(msg=msg, pytrace=pytrace)
+fail.Exception = Failed
+
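A small sketch of how the skip() and fail() helpers exposed above are typically reached from test code (the conditions are illustrative only):

    import sys
    import pytest

    def test_posix_only_behaviour():
        if sys.platform.startswith("win"):
            pytest.skip("covers POSIX-only behaviour")
        if not hasattr(sys, "getrefcount"):
            pytest.fail("expected a CPython interpreter", pytrace=False)
        assert True
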
+
+def importorskip(modname, minversion=None):
+ """ return imported module if it has at least "minversion" as its
+    __version__ attribute. If no minversion is specified, a skip
+    is only triggered if the module cannot be imported.
+ """
+ __tracebackhide__ = True
+ compile(modname, '', 'eval') # to catch syntaxerrors
+ try:
+ __import__(modname)
+ except ImportError:
+ skip("could not import %r" %(modname,))
+ mod = sys.modules[modname]
+ if minversion is None:
+ return mod
+ verattr = getattr(mod, '__version__', None)
+ if minversion is not None:
+ try:
+ from pkg_resources import parse_version as pv
+ except ImportError:
+ skip("we have a required version for %r but can not import "
+ "no pkg_resources to parse version strings." %(modname,))
+ if verattr is None or pv(verattr) < pv(minversion):
+ skip("module %r has __version__ %r, required is: %r" %(
+ modname, verattr, minversion))
+ return mod
+
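A minimal sketch of importorskip() used at module import time ("yaml" and the version are just example values):

    import pytest

    # skip the whole module unless the (example) dependency is importable
    yaml = pytest.importorskip("yaml", minversion="3.0")

    def test_round_trip():
        assert yaml.safe_load(yaml.dump({"a": 1})) == {"a": 1}
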
diff --git a/python/pytest/_pytest/skipping.py b/python/pytest/_pytest/skipping.py
new file mode 100644
index 000000000..18e038d2c
--- /dev/null
+++ b/python/pytest/_pytest/skipping.py
@@ -0,0 +1,361 @@
+""" support for skip/xfail functions and markers. """
+import os
+import sys
+import traceback
+
+import py
+import pytest
+from _pytest.mark import MarkInfo, MarkDecorator
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("general")
+ group.addoption('--runxfail',
+ action="store_true", dest="runxfail", default=False,
+ help="run tests even if they are marked xfail")
+
+ parser.addini("xfail_strict", "default for the strict parameter of xfail "
+ "markers when not given explicitly (default: "
+ "False)",
+ default=False,
+ type="bool")
+
+
+def pytest_configure(config):
+ if config.option.runxfail:
+ old = pytest.xfail
+ config._cleanup.append(lambda: setattr(pytest, "xfail", old))
+ def nop(*args, **kwargs):
+ pass
+ nop.Exception = XFailed
+ setattr(pytest, "xfail", nop)
+
+ config.addinivalue_line("markers",
+ "skip(reason=None): skip the given test function with an optional reason. "
+ "Example: skip(reason=\"no way of currently testing this\") skips the "
+ "test."
+ )
+ config.addinivalue_line("markers",
+ "skipif(condition): skip the given test function if eval(condition) "
+ "results in a True value. Evaluation happens within the "
+ "module global context. Example: skipif('sys.platform == \"win32\"') "
+ "skips the test if we are on the win32 platform. see "
+ "http://pytest.org/latest/skipping.html"
+ )
+ config.addinivalue_line("markers",
+ "xfail(condition, reason=None, run=True, raises=None, strict=False): "
+ "mark the the test function as an expected failure if eval(condition) "
+ "has a True value. Optionally specify a reason for better reporting "
+ "and run=False if you don't even want to execute the test function. "
+ "If only specific exception(s) are expected, you can list them in "
+ "raises, and if the test fails in other ways, it will be reported as "
+ "a true failure. See http://pytest.org/latest/skipping.html"
+ )
+
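The marker descriptions registered above correspond to test-side usage along these lines (conditions and reasons are illustrative):

    import sys
    import pytest

    @pytest.mark.skipif('sys.platform == "win32"',
                        reason="exercises POSIX-only behaviour")
    def test_posix_behaviour():
        assert sys.platform != "win32"

    @pytest.mark.xfail(raises=NotImplementedError,
                       reason="feature not implemented yet")
    def test_unfinished_feature():
        raise NotImplementedError("pending")
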
+
+def pytest_namespace():
+ return dict(xfail=xfail)
+
+
+class XFailed(pytest.fail.Exception):
+ """ raised from an explicit call to pytest.xfail() """
+
+
+def xfail(reason=""):
+ """ xfail an executing test or setup functions with the given reason."""
+ __tracebackhide__ = True
+ raise XFailed(reason)
+xfail.Exception = XFailed
+
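xfail() above is the imperative counterpart of the @pytest.mark.xfail marker; a hypothetical sketch of calling it from within a running test:

    import sys
    import pytest

    def test_needs_newer_interpreter():
        if sys.version_info < (3, 5):
            pytest.xfail("needs a Python 3.5+ only code path")
        assert sys.version_info >= (3, 5)
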
+
+class MarkEvaluator:
+ def __init__(self, item, name):
+ self.item = item
+ self.name = name
+
+ @property
+ def holder(self):
+ return self.item.keywords.get(self.name)
+
+ def __bool__(self):
+ return bool(self.holder)
+ __nonzero__ = __bool__
+
+ def wasvalid(self):
+ return not hasattr(self, 'exc')
+
+ def invalidraise(self, exc):
+ raises = self.get('raises')
+ if not raises:
+ return
+ return not isinstance(exc, raises)
+
+ def istrue(self):
+ try:
+ return self._istrue()
+ except Exception:
+ self.exc = sys.exc_info()
+ if isinstance(self.exc[1], SyntaxError):
+ msg = [" " * (self.exc[1].offset + 4) + "^",]
+ msg.append("SyntaxError: invalid syntax")
+ else:
+ msg = traceback.format_exception_only(*self.exc[:2])
+ pytest.fail("Error evaluating %r expression\n"
+ " %s\n"
+ "%s"
+ %(self.name, self.expr, "\n".join(msg)),
+ pytrace=False)
+
+ def _getglobals(self):
+ d = {'os': os, 'sys': sys, 'config': self.item.config}
+ func = self.item.obj
+ try:
+ d.update(func.__globals__)
+ except AttributeError:
+ d.update(func.func_globals)
+ return d
+
+ def _istrue(self):
+ if hasattr(self, 'result'):
+ return self.result
+ if self.holder:
+ d = self._getglobals()
+ if self.holder.args or 'condition' in self.holder.kwargs:
+ self.result = False
+ # "holder" might be a MarkInfo or a MarkDecorator; only
+ # MarkInfo keeps track of all parameters it received in an
+ # _arglist attribute
+ if hasattr(self.holder, '_arglist'):
+ arglist = self.holder._arglist
+ else:
+ arglist = [(self.holder.args, self.holder.kwargs)]
+ for args, kwargs in arglist:
+ if 'condition' in kwargs:
+ args = (kwargs['condition'],)
+ for expr in args:
+ self.expr = expr
+ if isinstance(expr, py.builtin._basestring):
+ result = cached_eval(self.item.config, expr, d)
+ else:
+ if "reason" not in kwargs:
+ # XXX better be checked at collection time
+ msg = "you need to specify reason=STRING " \
+ "when using booleans as conditions."
+ pytest.fail(msg)
+ result = bool(expr)
+ if result:
+ self.result = True
+ self.reason = kwargs.get('reason', None)
+ self.expr = expr
+ return self.result
+ else:
+ self.result = True
+ return getattr(self, 'result', False)
+
+ def get(self, attr, default=None):
+ return self.holder.kwargs.get(attr, default)
+
+ def getexplanation(self):
+ expl = getattr(self, 'reason', None) or self.get('reason', None)
+ if not expl:
+ if not hasattr(self, 'expr'):
+ return ""
+ else:
+ return "condition: " + str(self.expr)
+ return expl
+
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_setup(item):
+ # Check if skip or skipif are specified as pytest marks
+
+ skipif_info = item.keywords.get('skipif')
+ if isinstance(skipif_info, (MarkInfo, MarkDecorator)):
+ eval_skipif = MarkEvaluator(item, 'skipif')
+ if eval_skipif.istrue():
+ item._evalskip = eval_skipif
+ pytest.skip(eval_skipif.getexplanation())
+
+ skip_info = item.keywords.get('skip')
+ if isinstance(skip_info, (MarkInfo, MarkDecorator)):
+ item._evalskip = True
+ if 'reason' in skip_info.kwargs:
+ pytest.skip(skip_info.kwargs['reason'])
+ elif skip_info.args:
+ pytest.skip(skip_info.args[0])
+ else:
+ pytest.skip("unconditional skip")
+
+ item._evalxfail = MarkEvaluator(item, 'xfail')
+ check_xfail_no_run(item)
+
+
+@pytest.mark.hookwrapper
+def pytest_pyfunc_call(pyfuncitem):
+ check_xfail_no_run(pyfuncitem)
+ outcome = yield
+ passed = outcome.excinfo is None
+ if passed:
+ check_strict_xfail(pyfuncitem)
+
+
+def check_xfail_no_run(item):
+ """check xfail(run=False)"""
+ if not item.config.option.runxfail:
+ evalxfail = item._evalxfail
+ if evalxfail.istrue():
+ if not evalxfail.get('run', True):
+ pytest.xfail("[NOTRUN] " + evalxfail.getexplanation())
+
+
+def check_strict_xfail(pyfuncitem):
+ """check xfail(strict=True) for the given PASSING test"""
+ evalxfail = pyfuncitem._evalxfail
+ if evalxfail.istrue():
+ strict_default = pyfuncitem.config.getini('xfail_strict')
+ is_strict_xfail = evalxfail.get('strict', strict_default)
+ if is_strict_xfail:
+ del pyfuncitem._evalxfail
+ explanation = evalxfail.getexplanation()
+ pytest.fail('[XPASS(strict)] ' + explanation, pytrace=False)
+
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_makereport(item, call):
+ outcome = yield
+ rep = outcome.get_result()
+ evalxfail = getattr(item, '_evalxfail', None)
+ evalskip = getattr(item, '_evalskip', None)
+    # unittest special case, see setting of _unexpectedsuccess
+ if hasattr(item, '_unexpectedsuccess') and rep.when == "call":
+ # we need to translate into how pytest encodes xpass
+ rep.wasxfail = "reason: " + repr(item._unexpectedsuccess)
+ rep.outcome = "failed"
+ elif item.config.option.runxfail:
+        pass   # don't interfere
+ elif call.excinfo and call.excinfo.errisinstance(pytest.xfail.Exception):
+ rep.wasxfail = "reason: " + call.excinfo.value.msg
+ rep.outcome = "skipped"
+ elif evalxfail and not rep.skipped and evalxfail.wasvalid() and \
+ evalxfail.istrue():
+ if call.excinfo:
+ if evalxfail.invalidraise(call.excinfo.value):
+ rep.outcome = "failed"
+ else:
+ rep.outcome = "skipped"
+ rep.wasxfail = evalxfail.getexplanation()
+ elif call.when == "call":
+ rep.outcome = "failed" # xpass outcome
+ rep.wasxfail = evalxfail.getexplanation()
+ elif evalskip is not None and rep.skipped and type(rep.longrepr) is tuple:
+ # skipped by mark.skipif; change the location of the failure
+ # to point to the item definition, otherwise it will display
+ # the location of where the skip exception was raised within pytest
+ filename, line, reason = rep.longrepr
+ filename, line = item.location[:2]
+ rep.longrepr = filename, line, reason
+
+# called by terminalreporter progress reporting
+def pytest_report_teststatus(report):
+ if hasattr(report, "wasxfail"):
+ if report.skipped:
+ return "xfailed", "x", "xfail"
+ elif report.failed:
+ return "xpassed", "X", ("XPASS", {'yellow': True})
+
+# called by the terminalreporter instance/plugin
+def pytest_terminal_summary(terminalreporter):
+ tr = terminalreporter
+ if not tr.reportchars:
+ #for name in "xfailed skipped failed xpassed":
+ # if not tr.stats.get(name, 0):
+ # tr.write_line("HINT: use '-r' option to see extra "
+ # "summary info about tests")
+ # break
+ return
+
+ lines = []
+ for char in tr.reportchars:
+ if char == "x":
+ show_xfailed(terminalreporter, lines)
+ elif char == "X":
+ show_xpassed(terminalreporter, lines)
+ elif char in "fF":
+ show_simple(terminalreporter, lines, 'failed', "FAIL %s")
+ elif char in "sS":
+ show_skipped(terminalreporter, lines)
+ elif char == "E":
+ show_simple(terminalreporter, lines, 'error', "ERROR %s")
+ elif char == 'p':
+ show_simple(terminalreporter, lines, 'passed', "PASSED %s")
+
+ if lines:
+ tr._tw.sep("=", "short test summary info")
+ for line in lines:
+ tr._tw.line(line)
+
+def show_simple(terminalreporter, lines, stat, format):
+ failed = terminalreporter.stats.get(stat)
+ if failed:
+ for rep in failed:
+ pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+ lines.append(format %(pos,))
+
+def show_xfailed(terminalreporter, lines):
+ xfailed = terminalreporter.stats.get("xfailed")
+ if xfailed:
+ for rep in xfailed:
+ pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+ reason = rep.wasxfail
+ lines.append("XFAIL %s" % (pos,))
+ if reason:
+ lines.append(" " + str(reason))
+
+def show_xpassed(terminalreporter, lines):
+ xpassed = terminalreporter.stats.get("xpassed")
+ if xpassed:
+ for rep in xpassed:
+ pos = terminalreporter.config.cwd_relative_nodeid(rep.nodeid)
+ reason = rep.wasxfail
+ lines.append("XPASS %s %s" %(pos, reason))
+
+def cached_eval(config, expr, d):
+ if not hasattr(config, '_evalcache'):
+ config._evalcache = {}
+ try:
+ return config._evalcache[expr]
+ except KeyError:
+ import _pytest._code
+ exprcode = _pytest._code.compile(expr, mode="eval")
+ config._evalcache[expr] = x = eval(exprcode, d)
+ return x
+
+
+def folded_skips(skipped):
+ d = {}
+ for event in skipped:
+ key = event.longrepr
+ assert len(key) == 3, (event, key)
+ d.setdefault(key, []).append(event)
+ l = []
+ for key, events in d.items():
+ l.append((len(events),) + key)
+ return l
+
+def show_skipped(terminalreporter, lines):
+ tr = terminalreporter
+ skipped = tr.stats.get('skipped', [])
+ if skipped:
+ #if not tr.hasopt('skipped'):
+ # tr.write_line(
+ # "%d skipped tests, specify -rs for more info" %
+ # len(skipped))
+ # return
+ fskips = folded_skips(skipped)
+ if fskips:
+ #tr.write_sep("_", "skipped test summary")
+ for num, fspath, lineno, reason in fskips:
+ if reason.startswith("Skipped: "):
+ reason = reason[9:]
+ lines.append("SKIP [%d] %s:%d: %s" %
+ (num, fspath, lineno, reason))
diff --git a/python/pytest/_pytest/standalonetemplate.py b/python/pytest/_pytest/standalonetemplate.py
new file mode 100755
index 000000000..484d5d1b2
--- /dev/null
+++ b/python/pytest/_pytest/standalonetemplate.py
@@ -0,0 +1,89 @@
+#! /usr/bin/env python
+
+# Hi There!
+# You may be wondering what this giant blob of binary data here is, you might
+# even be worried that we're up to something nefarious (good for you for being
+# paranoid!). This is a base64 encoding of a zip file; this zip file contains
+# a fully functional basic pytest script.
+#
+# Pytest is a thing that tests packages, pytest itself is a package that some-
+# one might want to install, especially if they're looking to run tests inside
+# some package they want to install. Pytest has a lot of code to collect and
+# execute tests, and other such sort of "tribal knowledge" that has been en-
+# coded in its code base.  Because of this we include a basic copy
+# of pytest inside this blob.  We do this because it lets you, as a maintainer
+# or application developer, give people who don't deal with Python much a way
+# to easily run tests without installing the complete pytest package.
+#
+# If you're wondering how this is created: you can create it yourself if you
+# have a complete pytest installation by using this command on the command-
+# line: ``py.test --genscript=runtests.py``.
+
+sources = """
+@SOURCES@"""
+
+import sys
+import base64
+import zlib
+
+class DictImporter(object):
+ def __init__(self, sources):
+ self.sources = sources
+
+ def find_module(self, fullname, path=None):
+ if fullname == "argparse" and sys.version_info >= (2,7):
+ # we were generated with <python2.7 (which pulls in argparse)
+ # but we are running now on a stdlib which has it, so use that.
+ return None
+ if fullname in self.sources:
+ return self
+ if fullname + '.__init__' in self.sources:
+ return self
+ return None
+
+ def load_module(self, fullname):
+ # print "load_module:", fullname
+ from types import ModuleType
+ try:
+ s = self.sources[fullname]
+ is_pkg = False
+ except KeyError:
+ s = self.sources[fullname + '.__init__']
+ is_pkg = True
+
+ co = compile(s, fullname, 'exec')
+ module = sys.modules.setdefault(fullname, ModuleType(fullname))
+ module.__file__ = "%s/%s" % (__file__, fullname)
+ module.__loader__ = self
+ if is_pkg:
+ module.__path__ = [fullname]
+
+ do_exec(co, module.__dict__) # noqa
+ return sys.modules[fullname]
+
+ def get_source(self, name):
+ res = self.sources.get(name)
+ if res is None:
+ res = self.sources.get(name + '.__init__')
+ return res
+
+if __name__ == "__main__":
+ try:
+ import pkg_resources # noqa
+ except ImportError:
+ sys.stderr.write("ERROR: setuptools not installed\n")
+ sys.exit(2)
+ if sys.version_info >= (3, 0):
+ exec("def do_exec(co, loc): exec(co, loc)\n")
+ import pickle
+ sources = sources.encode("ascii") # ensure bytes
+ sources = pickle.loads(zlib.decompress(base64.decodebytes(sources)))
+ else:
+ import cPickle as pickle
+ exec("def do_exec(co, loc): exec co in loc\n")
+ sources = pickle.loads(zlib.decompress(base64.decodestring(sources)))
+
+ importer = DictImporter(sources)
+ sys.meta_path.insert(0, importer)
+ entry = "@ENTRY@"
+ do_exec(entry, locals()) # noqa
diff --git a/python/pytest/_pytest/terminal.py b/python/pytest/_pytest/terminal.py
new file mode 100644
index 000000000..825f553ef
--- /dev/null
+++ b/python/pytest/_pytest/terminal.py
@@ -0,0 +1,593 @@
+""" terminal reporting of the full testing process.
+
+This is a good source for looking at the various reporting hooks.
+"""
+from _pytest.main import EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, \
+ EXIT_USAGEERROR, EXIT_NOTESTSCOLLECTED
+import pytest
+import py
+import sys
+import time
+import platform
+
+import _pytest._pluggy as pluggy
+
+
+def pytest_addoption(parser):
+ group = parser.getgroup("terminal reporting", "reporting", after="general")
+ group._addoption('-v', '--verbose', action="count",
+ dest="verbose", default=0, help="increase verbosity."),
+ group._addoption('-q', '--quiet', action="count",
+ dest="quiet", default=0, help="decrease verbosity."),
+ group._addoption('-r',
+ action="store", dest="reportchars", default=None, metavar="chars",
+ help="show extra test summary info as specified by chars (f)ailed, "
+ "(E)error, (s)skipped, (x)failed, (X)passed (w)pytest-warnings "
+ "(p)passed, (P)passed with output, (a)all except pP.")
+ group._addoption('-l', '--showlocals',
+ action="store_true", dest="showlocals", default=False,
+ help="show locals in tracebacks (disabled by default).")
+ group._addoption('--report',
+ action="store", dest="report", default=None, metavar="opts",
+ help="(deprecated, use -r)")
+ group._addoption('--tb', metavar="style",
+ action="store", dest="tbstyle", default='auto',
+ choices=['auto', 'long', 'short', 'no', 'line', 'native'],
+ help="traceback print mode (auto/long/short/line/native/no).")
+ group._addoption('--fulltrace', '--full-trace',
+ action="store_true", default=False,
+ help="don't cut any tracebacks (default is to cut).")
+ group._addoption('--color', metavar="color",
+ action="store", dest="color", default='auto',
+ choices=['yes', 'no', 'auto'],
+ help="color terminal output (yes/no/auto).")
+
+def pytest_configure(config):
+ config.option.verbose -= config.option.quiet
+ reporter = TerminalReporter(config, sys.stdout)
+ config.pluginmanager.register(reporter, 'terminalreporter')
+ if config.option.debug or config.option.traceconfig:
+ def mywriter(tags, args):
+ msg = " ".join(map(str, args))
+ reporter.write_line("[traceconfig] " + msg)
+ config.trace.root.setprocessor("pytest:config", mywriter)
+
+def getreportopt(config):
+ reportopts = ""
+ optvalue = config.option.report
+ if optvalue:
+ py.builtin.print_("DEPRECATED: use -r instead of --report option.",
+ file=sys.stderr)
+ if optvalue:
+ for setting in optvalue.split(","):
+ setting = setting.strip()
+ if setting == "skipped":
+ reportopts += "s"
+ elif setting == "xfailed":
+ reportopts += "x"
+ reportchars = config.option.reportchars
+ if reportchars:
+ for char in reportchars:
+ if char not in reportopts and char != 'a':
+ reportopts += char
+ elif char == 'a':
+ reportopts = 'fEsxXw'
+ return reportopts
+
+def pytest_report_teststatus(report):
+ if report.passed:
+ letter = "."
+ elif report.skipped:
+ letter = "s"
+ elif report.failed:
+ letter = "F"
+ if report.when != "call":
+ letter = "f"
+ return report.outcome, letter, report.outcome.upper()
+
+class WarningReport:
+ def __init__(self, code, message, nodeid=None, fslocation=None):
+ self.code = code
+ self.message = message
+ self.nodeid = nodeid
+ self.fslocation = fslocation
+
+
+class TerminalReporter:
+ def __init__(self, config, file=None):
+ import _pytest.config
+ self.config = config
+ self.verbosity = self.config.option.verbose
+ self.showheader = self.verbosity >= 0
+ self.showfspath = self.verbosity >= 0
+ self.showlongtestinfo = self.verbosity > 0
+ self._numcollected = 0
+
+ self.stats = {}
+ self.startdir = py.path.local()
+ if file is None:
+ file = sys.stdout
+ self._tw = self.writer = _pytest.config.create_terminal_writer(config,
+ file)
+ self.currentfspath = None
+ self.reportchars = getreportopt(config)
+ self.hasmarkup = self._tw.hasmarkup
+ self.isatty = file.isatty()
+
+ def hasopt(self, char):
+ char = {'xfailed': 'x', 'skipped': 's'}.get(char, char)
+ return char in self.reportchars
+
+ def write_fspath_result(self, nodeid, res):
+ fspath = self.config.rootdir.join(nodeid.split("::")[0])
+ if fspath != self.currentfspath:
+ self.currentfspath = fspath
+ fspath = self.startdir.bestrelpath(fspath)
+ self._tw.line()
+ self._tw.write(fspath + " ")
+ self._tw.write(res)
+
+ def write_ensure_prefix(self, prefix, extra="", **kwargs):
+ if self.currentfspath != prefix:
+ self._tw.line()
+ self.currentfspath = prefix
+ self._tw.write(prefix)
+ if extra:
+ self._tw.write(extra, **kwargs)
+ self.currentfspath = -2
+
+ def ensure_newline(self):
+ if self.currentfspath:
+ self._tw.line()
+ self.currentfspath = None
+
+ def write(self, content, **markup):
+ self._tw.write(content, **markup)
+
+ def write_line(self, line, **markup):
+ if not py.builtin._istext(line):
+ line = py.builtin.text(line, errors="replace")
+ self.ensure_newline()
+ self._tw.line(line, **markup)
+
+ def rewrite(self, line, **markup):
+ line = str(line)
+ self._tw.write("\r" + line, **markup)
+
+ def write_sep(self, sep, title=None, **markup):
+ self.ensure_newline()
+ self._tw.sep(sep, title, **markup)
+
+ def section(self, title, sep="=", **kw):
+ self._tw.sep(sep, title, **kw)
+
+ def line(self, msg, **kw):
+ self._tw.line(msg, **kw)
+
+ def pytest_internalerror(self, excrepr):
+ for line in py.builtin.text(excrepr).split("\n"):
+ self.write_line("INTERNALERROR> " + line)
+ return 1
+
+ def pytest_logwarning(self, code, fslocation, message, nodeid):
+ warnings = self.stats.setdefault("warnings", [])
+ if isinstance(fslocation, tuple):
+ fslocation = "%s:%d" % fslocation
+ warning = WarningReport(code=code, fslocation=fslocation,
+ message=message, nodeid=nodeid)
+ warnings.append(warning)
+
+ def pytest_plugin_registered(self, plugin):
+ if self.config.option.traceconfig:
+ msg = "PLUGIN registered: %s" % (plugin,)
+ # XXX this event may happen during setup/teardown time
+ # which unfortunately captures our output here
+ # which garbles our output if we use self.write_line
+ self.write_line(msg)
+
+ def pytest_deselected(self, items):
+ self.stats.setdefault('deselected', []).extend(items)
+
+ def pytest_runtest_logstart(self, nodeid, location):
+ # ensure that the path is printed before the
+ # 1st test of a module starts running
+ if self.showlongtestinfo:
+ line = self._locationline(nodeid, *location)
+ self.write_ensure_prefix(line, "")
+ elif self.showfspath:
+ fsid = nodeid.split("::")[0]
+ self.write_fspath_result(fsid, "")
+
+ def pytest_runtest_logreport(self, report):
+ rep = report
+ res = self.config.hook.pytest_report_teststatus(report=rep)
+ cat, letter, word = res
+ self.stats.setdefault(cat, []).append(rep)
+ self._tests_ran = True
+ if not letter and not word:
+ # probably passed setup/teardown
+ return
+ if self.verbosity <= 0:
+ if not hasattr(rep, 'node') and self.showfspath:
+ self.write_fspath_result(rep.nodeid, letter)
+ else:
+ self._tw.write(letter)
+ else:
+ if isinstance(word, tuple):
+ word, markup = word
+ else:
+ if rep.passed:
+ markup = {'green':True}
+ elif rep.failed:
+ markup = {'red':True}
+ elif rep.skipped:
+ markup = {'yellow':True}
+ line = self._locationline(rep.nodeid, *rep.location)
+ if not hasattr(rep, 'node'):
+ self.write_ensure_prefix(line, word, **markup)
+ #self._tw.write(word, **markup)
+ else:
+ self.ensure_newline()
+ if hasattr(rep, 'node'):
+ self._tw.write("[%s] " % rep.node.gateway.id)
+ self._tw.write(word, **markup)
+ self._tw.write(" " + line)
+ self.currentfspath = -2
+
+ def pytest_collection(self):
+ if not self.isatty and self.config.option.verbose >= 1:
+ self.write("collecting ... ", bold=True)
+
+ def pytest_collectreport(self, report):
+ if report.failed:
+ self.stats.setdefault("error", []).append(report)
+ elif report.skipped:
+ self.stats.setdefault("skipped", []).append(report)
+ items = [x for x in report.result if isinstance(x, pytest.Item)]
+ self._numcollected += len(items)
+ if self.isatty:
+ #self.write_fspath_result(report.nodeid, 'E')
+ self.report_collect()
+
+ def report_collect(self, final=False):
+ if self.config.option.verbose < 0:
+ return
+
+ errors = len(self.stats.get('error', []))
+ skipped = len(self.stats.get('skipped', []))
+ if final:
+ line = "collected "
+ else:
+ line = "collecting "
+ line += str(self._numcollected) + " items"
+ if errors:
+ line += " / %d errors" % errors
+ if skipped:
+ line += " / %d skipped" % skipped
+ if self.isatty:
+ if final:
+ line += " \n"
+ self.rewrite(line, bold=True)
+ else:
+ self.write_line(line)
+
+ def pytest_collection_modifyitems(self):
+ self.report_collect(True)
+
+ @pytest.hookimpl(trylast=True)
+ def pytest_sessionstart(self, session):
+ self._sessionstarttime = time.time()
+ if not self.showheader:
+ return
+ self.write_sep("=", "test session starts", bold=True)
+ verinfo = platform.python_version()
+ msg = "platform %s -- Python %s" % (sys.platform, verinfo)
+ if hasattr(sys, 'pypy_version_info'):
+ verinfo = ".".join(map(str, sys.pypy_version_info[:3]))
+ msg += "[pypy-%s-%s]" % (verinfo, sys.pypy_version_info[3])
+ msg += ", pytest-%s, py-%s, pluggy-%s" % (
+ pytest.__version__, py.__version__, pluggy.__version__)
+ if self.verbosity > 0 or self.config.option.debug or \
+ getattr(self.config.option, 'pastebin', None):
+ msg += " -- " + str(sys.executable)
+ self.write_line(msg)
+ lines = self.config.hook.pytest_report_header(
+ config=self.config, startdir=self.startdir)
+ lines.reverse()
+ for line in flatten(lines):
+ self.write_line(line)
+
+ def pytest_report_header(self, config):
+ inifile = ""
+ if config.inifile:
+ inifile = config.rootdir.bestrelpath(config.inifile)
+ lines = ["rootdir: %s, inifile: %s" %(config.rootdir, inifile)]
+
+ plugininfo = config.pluginmanager.list_plugin_distinfo()
+ if plugininfo:
+
+ lines.append(
+ "plugins: %s" % ", ".join(_plugin_nameversions(plugininfo)))
+ return lines
+
+ def pytest_collection_finish(self, session):
+ if self.config.option.collectonly:
+ self._printcollecteditems(session.items)
+ if self.stats.get('failed'):
+ self._tw.sep("!", "collection failures")
+ for rep in self.stats.get('failed'):
+ rep.toterminal(self._tw)
+ return 1
+ return 0
+ if not self.showheader:
+ return
+ #for i, testarg in enumerate(self.config.args):
+ # self.write_line("test path %d: %s" %(i+1, testarg))
+
+ def _printcollecteditems(self, items):
+ # to print out items and their parent collectors
+ # we take care to leave out Instances aka ()
+ # because later versions are going to get rid of them anyway
+ if self.config.option.verbose < 0:
+ if self.config.option.verbose < -1:
+ counts = {}
+ for item in items:
+ name = item.nodeid.split('::', 1)[0]
+ counts[name] = counts.get(name, 0) + 1
+ for name, count in sorted(counts.items()):
+ self._tw.line("%s: %d" % (name, count))
+ else:
+ for item in items:
+ nodeid = item.nodeid
+ nodeid = nodeid.replace("::()::", "::")
+ self._tw.line(nodeid)
+ return
+ stack = []
+ indent = ""
+ for item in items:
+ needed_collectors = item.listchain()[1:] # strip root node
+ while stack:
+ if stack == needed_collectors[:len(stack)]:
+ break
+ stack.pop()
+ for col in needed_collectors[len(stack):]:
+ stack.append(col)
+ #if col.name == "()":
+ # continue
+ indent = (len(stack) - 1) * " "
+ self._tw.line("%s%s" % (indent, col))
+
+ @pytest.hookimpl(hookwrapper=True)
+ def pytest_sessionfinish(self, exitstatus):
+ outcome = yield
+ outcome.get_result()
+ self._tw.line("")
+ summary_exit_codes = (
+ EXIT_OK, EXIT_TESTSFAILED, EXIT_INTERRUPTED, EXIT_USAGEERROR,
+ EXIT_NOTESTSCOLLECTED)
+ if exitstatus in summary_exit_codes:
+ self.config.hook.pytest_terminal_summary(terminalreporter=self)
+ self.summary_errors()
+ self.summary_failures()
+ self.summary_warnings()
+ self.summary_passes()
+ if exitstatus == EXIT_INTERRUPTED:
+ self._report_keyboardinterrupt()
+ del self._keyboardinterrupt_memo
+ self.summary_deselected()
+ self.summary_stats()
+
+ def pytest_keyboard_interrupt(self, excinfo):
+ self._keyboardinterrupt_memo = excinfo.getrepr(funcargs=True)
+
+ def pytest_unconfigure(self):
+ if hasattr(self, '_keyboardinterrupt_memo'):
+ self._report_keyboardinterrupt()
+
+ def _report_keyboardinterrupt(self):
+ excrepr = self._keyboardinterrupt_memo
+ msg = excrepr.reprcrash.message
+ self.write_sep("!", msg)
+ if "KeyboardInterrupt" in msg:
+ if self.config.option.fulltrace:
+ excrepr.toterminal(self._tw)
+ else:
+ self._tw.line("to show a full traceback on KeyboardInterrupt use --fulltrace", yellow=True)
+ excrepr.reprcrash.toterminal(self._tw)
+
+ def _locationline(self, nodeid, fspath, lineno, domain):
+ def mkrel(nodeid):
+ line = self.config.cwd_relative_nodeid(nodeid)
+ if domain and line.endswith(domain):
+ line = line[:-len(domain)]
+ l = domain.split("[")
+ l[0] = l[0].replace('.', '::') # don't replace '.' in params
+ line += "[".join(l)
+ return line
+ # collect_fspath comes from testid which has a "/"-normalized path
+
+ if fspath:
+ res = mkrel(nodeid).replace("::()", "") # parens-normalization
+ if nodeid.split("::")[0] != fspath.replace("\\", "/"):
+ res += " <- " + self.startdir.bestrelpath(fspath)
+ else:
+ res = "[location]"
+ return res + " "
+
+ def _getfailureheadline(self, rep):
+ if hasattr(rep, 'location'):
+ fspath, lineno, domain = rep.location
+ return domain
+ else:
+ return "test session" # XXX?
+
+ def _getcrashline(self, rep):
+ try:
+ return str(rep.longrepr.reprcrash)
+ except AttributeError:
+ try:
+ return str(rep.longrepr)[:50]
+ except AttributeError:
+ return ""
+
+ #
+ # summaries for sessionfinish
+ #
+ def getreports(self, name):
+ l = []
+ for x in self.stats.get(name, []):
+ if not hasattr(x, '_pdbshown'):
+ l.append(x)
+ return l
+
+ def summary_warnings(self):
+ if self.hasopt("w"):
+ warnings = self.stats.get("warnings")
+ if not warnings:
+ return
+ self.write_sep("=", "pytest-warning summary")
+ for w in warnings:
+ self._tw.line("W%s %s %s" % (w.code,
+ w.fslocation, w.message))
+
+ def summary_passes(self):
+ if self.config.option.tbstyle != "no":
+ if self.hasopt("P"):
+ reports = self.getreports('passed')
+ if not reports:
+ return
+ self.write_sep("=", "PASSES")
+ for rep in reports:
+ msg = self._getfailureheadline(rep)
+ self.write_sep("_", msg)
+ self._outrep_summary(rep)
+
+ def summary_failures(self):
+ if self.config.option.tbstyle != "no":
+ reports = self.getreports('failed')
+ if not reports:
+ return
+ self.write_sep("=", "FAILURES")
+ for rep in reports:
+ if self.config.option.tbstyle == "line":
+ line = self._getcrashline(rep)
+ self.write_line(line)
+ else:
+ msg = self._getfailureheadline(rep)
+ markup = {'red': True, 'bold': True}
+ self.write_sep("_", msg, **markup)
+ self._outrep_summary(rep)
+
+ def summary_errors(self):
+ if self.config.option.tbstyle != "no":
+ reports = self.getreports('error')
+ if not reports:
+ return
+ self.write_sep("=", "ERRORS")
+ for rep in self.stats['error']:
+ msg = self._getfailureheadline(rep)
+ if not hasattr(rep, 'when'):
+ # collect
+ msg = "ERROR collecting " + msg
+ elif rep.when == "setup":
+ msg = "ERROR at setup of " + msg
+ elif rep.when == "teardown":
+ msg = "ERROR at teardown of " + msg
+ self.write_sep("_", msg)
+ self._outrep_summary(rep)
+
+ def _outrep_summary(self, rep):
+ rep.toterminal(self._tw)
+ for secname, content in rep.sections:
+ self._tw.sep("-", secname)
+ if content[-1:] == "\n":
+ content = content[:-1]
+ self._tw.line(content)
+
+ def summary_stats(self):
+ session_duration = time.time() - self._sessionstarttime
+ (line, color) = build_summary_stats_line(self.stats)
+ msg = "%s in %.2f seconds" % (line, session_duration)
+ markup = {color: True, 'bold': True}
+
+ if self.verbosity >= 0:
+ self.write_sep("=", msg, **markup)
+ if self.verbosity == -1:
+ self.write_line(msg, **markup)
+
+ def summary_deselected(self):
+ if 'deselected' in self.stats:
+ l = []
+ k = self.config.option.keyword
+ if k:
+ l.append("-k%s" % k)
+ m = self.config.option.markexpr
+ if m:
+ l.append("-m %r" % m)
+ if l:
+ self.write_sep("=", "%d tests deselected by %r" % (
+ len(self.stats['deselected']), " ".join(l)), bold=True)
+
+def repr_pythonversion(v=None):
+ if v is None:
+ v = sys.version_info
+ try:
+ return "%s.%s.%s-%s-%s" % v
+ except (TypeError, ValueError):
+ return str(v)
+
+def flatten(l):
+ for x in l:
+ if isinstance(x, (list, tuple)):
+ for y in flatten(x):
+ yield y
+ else:
+ yield x
+
+def build_summary_stats_line(stats):
+ keys = ("failed passed skipped deselected "
+ "xfailed xpassed warnings error").split()
+ key_translation = {'warnings': 'pytest-warnings'}
+ unknown_key_seen = False
+ for key in stats.keys():
+ if key not in keys:
+ if key: # setup/teardown reports have an empty key, ignore them
+ keys.append(key)
+ unknown_key_seen = True
+ parts = []
+ for key in keys:
+ val = stats.get(key, None)
+ if val:
+ key_name = key_translation.get(key, key)
+ parts.append("%d %s" % (len(val), key_name))
+
+ if parts:
+ line = ", ".join(parts)
+ else:
+ line = "no tests ran"
+
+ if 'failed' in stats or 'error' in stats:
+ color = 'red'
+ elif 'warnings' in stats or unknown_key_seen:
+ color = 'yellow'
+ elif 'passed' in stats:
+ color = 'green'
+ else:
+ color = 'yellow'
+
+ return (line, color)
+
+
+def _plugin_nameversions(plugininfo):
+ l = []
+ for plugin, dist in plugininfo:
+ # gets us name and version!
+ name = '{dist.project_name}-{dist.version}'.format(dist=dist)
+ # questionable convenience, but it keeps things short
+ if name.startswith("pytest-"):
+ name = name[7:]
+        # we decided to print python package names
+        # since they can have more than one plugin
+ if name not in l:
+ l.append(name)
+ return l
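As a quick illustration of the summary-line logic above, here is what ``build_summary_stats_line`` produces for a hypothetical stats dict (in a real run the values are report objects; only their count matters here, and the import assumes the ``_pytest`` package is importable):

```
from _pytest.terminal import build_summary_stats_line

# placeholder values -- only len() of each list is used
stats = {"passed": ["r1", "r2", "r3"], "failed": ["r4"], "skipped": ["r5"]}

line, color = build_summary_stats_line(stats)
assert line == "1 failed, 3 passed, 1 skipped"
assert color == "red"   # any 'failed' or 'error' entries turn the line red
```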
diff --git a/python/pytest/_pytest/tmpdir.py b/python/pytest/_pytest/tmpdir.py
new file mode 100644
index 000000000..ebc48dbe5
--- /dev/null
+++ b/python/pytest/_pytest/tmpdir.py
@@ -0,0 +1,123 @@
+""" support for providing temporary directories to test functions. """
+import re
+
+import pytest
+import py
+from _pytest.monkeypatch import monkeypatch
+
+
+class TempdirFactory:
+ """Factory for temporary directories under the common base temp directory.
+
+ The base directory can be configured using the ``--basetemp`` option.
+ """
+
+ def __init__(self, config):
+ self.config = config
+ self.trace = config.trace.get("tmpdir")
+
+ def ensuretemp(self, string, dir=1):
+ """ (deprecated) return temporary directory path with
+ the given string as the trailing part. It is usually
+ better to use the 'tmpdir' function argument which
+        provides a unique, empty, per-test-invocation directory.
+ """
+ #py.log._apiwarn(">1.1", "use tmpdir function argument")
+ return self.getbasetemp().ensure(string, dir=dir)
+
+ def mktemp(self, basename, numbered=True):
+ """Create a subdirectory of the base temporary directory and return it.
+ If ``numbered``, ensure the directory is unique by adding a number
+ prefix greater than any existing one.
+ """
+ basetemp = self.getbasetemp()
+ if not numbered:
+ p = basetemp.mkdir(basename)
+ else:
+ p = py.path.local.make_numbered_dir(prefix=basename,
+ keep=0, rootdir=basetemp, lock_timeout=None)
+ self.trace("mktemp", p)
+ return p
+
+ def getbasetemp(self):
+ """ return base temporary directory. """
+ try:
+ return self._basetemp
+ except AttributeError:
+ basetemp = self.config.option.basetemp
+ if basetemp:
+ basetemp = py.path.local(basetemp)
+ if basetemp.check():
+ basetemp.remove()
+ basetemp.mkdir()
+ else:
+ temproot = py.path.local.get_temproot()
+ user = get_user()
+ if user:
+                    # use a sub-directory in the temproot to speed up
+                    # the make_numbered_dir() call
+ rootdir = temproot.join('pytest-of-%s' % user)
+ else:
+ rootdir = temproot
+ rootdir.ensure(dir=1)
+ basetemp = py.path.local.make_numbered_dir(prefix='pytest-',
+ rootdir=rootdir)
+ self._basetemp = t = basetemp.realpath()
+ self.trace("new basetemp", t)
+ return t
+
+ def finish(self):
+ self.trace("finish")
+
+
+def get_user():
+ """Return the current user name, or None if getuser() does not work
+ in the current environment (see #1010).
+ """
+ import getpass
+ try:
+ return getpass.getuser()
+ except (ImportError, KeyError):
+ return None
+
+# backward compatibility
+TempdirHandler = TempdirFactory
+
+
+def pytest_configure(config):
+ """Create a TempdirFactory and attach it to the config object.
+
+ This is to comply with existing plugins which expect the handler to be
+ available at pytest_configure time, but ideally should be moved entirely
+ to the tmpdir_factory session fixture.
+ """
+ mp = monkeypatch()
+ t = TempdirFactory(config)
+ config._cleanup.extend([mp.undo, t.finish])
+ mp.setattr(config, '_tmpdirhandler', t, raising=False)
+ mp.setattr(pytest, 'ensuretemp', t.ensuretemp, raising=False)
+
+
+@pytest.fixture(scope='session')
+def tmpdir_factory(request):
+ """Return a TempdirFactory instance for the test session.
+ """
+ return request.config._tmpdirhandler
+
+
+@pytest.fixture
+def tmpdir(request, tmpdir_factory):
+ """return a temporary directory path object
+ which is unique to each test function invocation,
+    created as a subdirectory of the base temporary
+ directory. The returned object is a `py.path.local`_
+ path object.
+ """
+ name = request.node.name
+ name = re.sub("[\W]", "_", name)
+ MAXVAL = 30
+ if len(name) > MAXVAL:
+ name = name[:MAXVAL]
+ x = tmpdir_factory.mktemp(name, numbered=True)
+ return x
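A hypothetical test module showing how the two fixtures defined above are consumed:

```
# test_tmp_example.py

def test_create_file(tmpdir):
    # ``tmpdir`` is a py.path.local unique to this test invocation
    p = tmpdir.join("hello.txt")
    p.write("content")
    assert p.read() == "content"


def test_shared_dir(tmpdir_factory):
    # ``tmpdir_factory.mktemp()`` creates a numbered subdirectory of the
    # base temp directory and can also be used from session-scoped fixtures
    d = tmpdir_factory.mktemp("data")
    assert d.check(dir=1)
```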
diff --git a/python/pytest/_pytest/unittest.py b/python/pytest/_pytest/unittest.py
new file mode 100644
index 000000000..8120e94fb
--- /dev/null
+++ b/python/pytest/_pytest/unittest.py
@@ -0,0 +1,205 @@
+""" discovery and running of std-library "unittest" style tests. """
+from __future__ import absolute_import
+
+import sys
+import traceback
+
+import pytest
+# for transferring markers
+import _pytest._code
+from _pytest.python import transfer_markers
+from _pytest.skipping import MarkEvaluator
+
+
+def pytest_pycollect_makeitem(collector, name, obj):
+ # has unittest been imported and is obj a subclass of its TestCase?
+ try:
+ if not issubclass(obj, sys.modules["unittest"].TestCase):
+ return
+ except Exception:
+ return
+ # yes, so let's collect it
+ return UnitTestCase(name, parent=collector)
+
+
+class UnitTestCase(pytest.Class):
+    # marker for fixturemanager.getfixtureinfo()
+ # to declare that our children do not support funcargs
+ nofuncargs = True
+
+ def setup(self):
+ cls = self.obj
+ if getattr(cls, '__unittest_skip__', False):
+ return # skipped
+ setup = getattr(cls, 'setUpClass', None)
+ if setup is not None:
+ setup()
+ teardown = getattr(cls, 'tearDownClass', None)
+ if teardown is not None:
+ self.addfinalizer(teardown)
+ super(UnitTestCase, self).setup()
+
+ def collect(self):
+ from unittest import TestLoader
+ cls = self.obj
+ if not getattr(cls, "__test__", True):
+ return
+ self.session._fixturemanager.parsefactories(self, unittest=True)
+ loader = TestLoader()
+ module = self.getparent(pytest.Module).obj
+ foundsomething = False
+ for name in loader.getTestCaseNames(self.obj):
+ x = getattr(self.obj, name)
+ funcobj = getattr(x, 'im_func', x)
+ transfer_markers(funcobj, cls, module)
+ yield TestCaseFunction(name, parent=self)
+ foundsomething = True
+
+ if not foundsomething:
+ runtest = getattr(self.obj, 'runTest', None)
+ if runtest is not None:
+ ut = sys.modules.get("twisted.trial.unittest", None)
+ if ut is None or runtest != ut.TestCase.runTest:
+ yield TestCaseFunction('runTest', parent=self)
+
+
+
+class TestCaseFunction(pytest.Function):
+ _excinfo = None
+
+ def setup(self):
+ self._testcase = self.parent.obj(self.name)
+ self._fix_unittest_skip_decorator()
+ self._obj = getattr(self._testcase, self.name)
+ if hasattr(self._testcase, 'setup_method'):
+ self._testcase.setup_method(self._obj)
+ if hasattr(self, "_request"):
+ self._request._fillfixtures()
+
+ def _fix_unittest_skip_decorator(self):
+ """
+ The @unittest.skip decorator calls functools.wraps(self._testcase)
+ The call to functools.wraps() fails unless self._testcase
+ has a __name__ attribute. This is usually automatically supplied
+        if the test is a function or method, but we need to add it
+        manually here.
+
+ See issue #1169
+ """
+ if sys.version_info[0] == 2:
+ setattr(self._testcase, "__name__", self.name)
+
+ def teardown(self):
+ if hasattr(self._testcase, 'teardown_method'):
+ self._testcase.teardown_method(self._obj)
+
+ def startTest(self, testcase):
+ pass
+
+ def _addexcinfo(self, rawexcinfo):
+ # unwrap potential exception info (see twisted trial support below)
+ rawexcinfo = getattr(rawexcinfo, '_rawexcinfo', rawexcinfo)
+ try:
+ excinfo = _pytest._code.ExceptionInfo(rawexcinfo)
+ except TypeError:
+ try:
+ try:
+ l = traceback.format_exception(*rawexcinfo)
+ l.insert(0, "NOTE: Incompatible Exception Representation, "
+ "displaying natively:\n\n")
+ pytest.fail("".join(l), pytrace=False)
+ except (pytest.fail.Exception, KeyboardInterrupt):
+ raise
+ except:
+ pytest.fail("ERROR: Unknown Incompatible Exception "
+ "representation:\n%r" %(rawexcinfo,), pytrace=False)
+ except KeyboardInterrupt:
+ raise
+ except pytest.fail.Exception:
+ excinfo = _pytest._code.ExceptionInfo()
+ self.__dict__.setdefault('_excinfo', []).append(excinfo)
+
+ def addError(self, testcase, rawexcinfo):
+ self._addexcinfo(rawexcinfo)
+ def addFailure(self, testcase, rawexcinfo):
+ self._addexcinfo(rawexcinfo)
+
+ def addSkip(self, testcase, reason):
+ try:
+ pytest.skip(reason)
+ except pytest.skip.Exception:
+ self._evalskip = MarkEvaluator(self, 'SkipTest')
+ self._evalskip.result = True
+ self._addexcinfo(sys.exc_info())
+
+ def addExpectedFailure(self, testcase, rawexcinfo, reason=""):
+ try:
+ pytest.xfail(str(reason))
+ except pytest.xfail.Exception:
+ self._addexcinfo(sys.exc_info())
+
+ def addUnexpectedSuccess(self, testcase, reason=""):
+ self._unexpectedsuccess = reason
+
+ def addSuccess(self, testcase):
+ pass
+
+ def stopTest(self, testcase):
+ pass
+
+ def runtest(self):
+ self._testcase(result=self)
+
+ def _prunetraceback(self, excinfo):
+ pytest.Function._prunetraceback(self, excinfo)
+ traceback = excinfo.traceback.filter(
+ lambda x:not x.frame.f_globals.get('__unittest'))
+ if traceback:
+ excinfo.traceback = traceback
+
+@pytest.hookimpl(tryfirst=True)
+def pytest_runtest_makereport(item, call):
+ if isinstance(item, TestCaseFunction):
+ if item._excinfo:
+ call.excinfo = item._excinfo.pop(0)
+ try:
+ del call.result
+ except AttributeError:
+ pass
+
+# twisted trial support
+
+@pytest.hookimpl(hookwrapper=True)
+def pytest_runtest_protocol(item):
+ if isinstance(item, TestCaseFunction) and \
+ 'twisted.trial.unittest' in sys.modules:
+ ut = sys.modules['twisted.python.failure']
+ Failure__init__ = ut.Failure.__init__
+ check_testcase_implements_trial_reporter()
+ def excstore(self, exc_value=None, exc_type=None, exc_tb=None,
+ captureVars=None):
+ if exc_value is None:
+ self._rawexcinfo = sys.exc_info()
+ else:
+ if exc_type is None:
+ exc_type = type(exc_value)
+ self._rawexcinfo = (exc_type, exc_value, exc_tb)
+ try:
+ Failure__init__(self, exc_value, exc_type, exc_tb,
+ captureVars=captureVars)
+ except TypeError:
+ Failure__init__(self, exc_value, exc_type, exc_tb)
+ ut.Failure.__init__ = excstore
+ yield
+ ut.Failure.__init__ = Failure__init__
+ else:
+ yield
+
+
+def check_testcase_implements_trial_reporter(done=[]):
+ if done:
+ return
+ from zope.interface import classImplements
+ from twisted.trial.itrial import IReporter
+ classImplements(TestCaseFunction, IReporter)
+ done.append(1)
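For context, a hypothetical std-library test case of the kind this module discovers (via ``UnitTestCase``) and runs (via ``TestCaseFunction``):

```
# test_legacy.py
import unittest


class TestExample(unittest.TestCase):
    def setUp(self):
        self.value = 41

    def test_value(self):
        self.assertEqual(self.value + 1, 42)

    @unittest.skip("reported through addSkip() above")
    def test_skipped(self):
        self.fail("never runs")
```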
diff --git a/python/pytest/_pytest/vendored_packages/README.md b/python/pytest/_pytest/vendored_packages/README.md
new file mode 100644
index 000000000..eab7c714f
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/README.md
@@ -0,0 +1,13 @@
+This directory vendors the `pluggy` module.
+
+For a more detailed discussion of the reasons for vendoring this
+package, please see [this issue](https://github.com/pytest-dev/pytest/issues/944).
+
+To update the current version, execute:
+
+```
+$ pip install -U pluggy==<version> --no-compile --target=_pytest/vendored_packages
+```
+
+And commit the modified files. The `pluggy-<version>.dist-info` directory
+created by `pip` should be ignored.
diff --git a/python/pytest/_pytest/vendored_packages/__init__.py b/python/pytest/_pytest/vendored_packages/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/__init__.py
diff --git a/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst
new file mode 100644
index 000000000..aa3bbf812
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/DESCRIPTION.rst
@@ -0,0 +1,10 @@
+Plugin registration and hook calling for Python
+===============================================
+
+This is the plugin manager as used by pytest but stripped
+of pytest specific details.
+
+During the 0.x series this plugin does not have much documentation
+except extensive docstrings in the pluggy.py module.
+
+
diff --git a/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA
new file mode 100644
index 000000000..ec81f0a6b
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/METADATA
@@ -0,0 +1,39 @@
+Metadata-Version: 2.0
+Name: pluggy
+Version: 0.3.1
+Summary: plugin and hook calling mechanisms for python
+Home-page: UNKNOWN
+Author: Holger Krekel
+Author-email: holger at merlinux.eu
+License: MIT license
+Platform: unix
+Platform: linux
+Platform: osx
+Platform: win32
+Classifier: Development Status :: 4 - Beta
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: MIT License
+Classifier: Operating System :: POSIX
+Classifier: Operating System :: Microsoft :: Windows
+Classifier: Operating System :: MacOS :: MacOS X
+Classifier: Topic :: Software Development :: Testing
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Utilities
+Classifier: Programming Language :: Python :: 2
+Classifier: Programming Language :: Python :: 2.6
+Classifier: Programming Language :: Python :: 2.7
+Classifier: Programming Language :: Python :: 3
+Classifier: Programming Language :: Python :: 3.3
+Classifier: Programming Language :: Python :: 3.4
+Classifier: Programming Language :: Python :: 3.5
+
+Plugin registration and hook calling for Python
+===============================================
+
+This is the plugin manager as used by pytest but stripped
+of pytest specific details.
+
+During the 0.x series this plugin does not have much documentation
+except extensive docstrings in the pluggy.py module.
+
+
diff --git a/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD
new file mode 100644
index 000000000..9626673c4
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/RECORD
@@ -0,0 +1,8 @@
+pluggy.py,sha256=v_RfWzyW6DPU1cJu_EFoL_OHq3t13qloVdR6UaMCXQA,29862
+pluggy-0.3.1.dist-info/top_level.txt,sha256=xKSCRhai-v9MckvMuWqNz16c1tbsmOggoMSwTgcpYHE,7
+pluggy-0.3.1.dist-info/pbr.json,sha256=xX3s6__wOcAyF-AZJX1sdZyW6PUXT-FkfBlM69EEUCg,47
+pluggy-0.3.1.dist-info/RECORD,,
+pluggy-0.3.1.dist-info/metadata.json,sha256=nLKltOT78dMV-00uXD6Aeemp4xNsz2q59j6ORSDeLjw,1027
+pluggy-0.3.1.dist-info/METADATA,sha256=1b85Ho2u4iK30M099k7axMzcDDhLcIMb-A82JUJZnSo,1334
+pluggy-0.3.1.dist-info/WHEEL,sha256=AvR0WeTpDaxT645bl5FQxUK6NPsTls2ttpcGJg3j1Xg,110
+pluggy-0.3.1.dist-info/DESCRIPTION.rst,sha256=P5Akh1EdIBR6CeqtV2P8ZwpGSpZiTKPw0NyS7jEiD-g,306
diff --git a/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL
new file mode 100644
index 000000000..9dff69d86
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/WHEEL
@@ -0,0 +1,6 @@
+Wheel-Version: 1.0
+Generator: bdist_wheel (0.24.0)
+Root-Is-Purelib: true
+Tag: py2-none-any
+Tag: py3-none-any
+
diff --git a/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json
new file mode 100644
index 000000000..426a3a7ad
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/metadata.json
@@ -0,0 +1 @@
+{"license": "MIT license", "name": "pluggy", "metadata_version": "2.0", "generator": "bdist_wheel (0.24.0)", "summary": "plugin and hook calling mechanisms for python", "platform": "unix", "version": "0.3.1", "extensions": {"python.details": {"document_names": {"description": "DESCRIPTION.rst"}, "contacts": [{"role": "author", "email": "holger at merlinux.eu", "name": "Holger Krekel"}]}}, "classifiers": ["Development Status :: 4 - Beta", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Operating System :: POSIX", "Operating System :: Microsoft :: Windows", "Operating System :: MacOS :: MacOS X", "Topic :: Software Development :: Testing", "Topic :: Software Development :: Libraries", "Topic :: Utilities", "Programming Language :: Python :: 2", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: 3.5"]} \ No newline at end of file
diff --git a/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json
new file mode 100644
index 000000000..d6b798640
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/pbr.json
@@ -0,0 +1 @@
+{"is_release": false, "git_version": "7d4c9cd"} \ No newline at end of file
diff --git a/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt
new file mode 100644
index 000000000..11bdb5c1f
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/pluggy-0.3.1.dist-info/top_level.txt
@@ -0,0 +1 @@
+pluggy
diff --git a/python/pytest/_pytest/vendored_packages/pluggy.py b/python/pytest/_pytest/vendored_packages/pluggy.py
new file mode 100644
index 000000000..2f848b23d
--- /dev/null
+++ b/python/pytest/_pytest/vendored_packages/pluggy.py
@@ -0,0 +1,777 @@
+"""
+PluginManager, basic initialization and tracing.
+
+pluggy is the crystallized core of plugin management as used
+by some 150 plugins for pytest.
+
+Pluggy uses semantic versioning. Breaking changes are only foreseen for
+Major releases (incremented X in "X.Y.Z"). If you want to use pluggy in
+your project you should thus use a dependency restriction like
+"pluggy>=0.1.0,<1.0" to avoid surprises.
+
+pluggy is concerned with hook specification, hook implementations and hook
+calling. For any given hook specification a hook call invokes up to N implementations.
+A hook implementation can influence its position and type of execution:
+if attributed "tryfirst" or "trylast" it will be executed as early or
+as late as possible. If attributed "hookwrapper" an implementation
+can wrap all calls to non-hookwrapper implementations. A hookwrapper
+can thus execute some code before and after the execution of other hooks.
+
+Hook specification is done by way of a regular python function where
+both the function name and the names of all its arguments are significant.
+Each hook implementation function is verified against the original specification
+function, including the names of all its arguments. To allow hook specifications
+to evolve over the lifetime of a project, hook implementations can
+accept fewer arguments. One can thus add new arguments and semantics to
+a hook specification by adding another argument, typically without breaking
+existing hook implementations.
+
+The chosen approach is meant to let a hook designer think carefully about
+which objects are needed by an extension writer. By contrast, subclass-based
+extension mechanisms often expose a lot more state and behaviour than needed,
+thus restricting future developments.
+
+Pluggy currently consists of functionality for:
+
+- a way to register new hook specifications. Without a hook
+ specification no hook calling can be performed.
+
+- a registry of plugins which contain hook implementation functions. It
+ is possible to register plugins for which a hook specification is not yet
+ known and validate all hooks when the system is in a more referentially
+  consistent state. Setting an "optionalhook" attribute on a hook
+  implementation will avoid PluginValidationErrors if a specification
+  is missing. This allows for optional integration between plugins.
+
+- a "hook" relay object from which you can launch 1:N calls to
+ registered hook implementation functions
+
+- a mechanism for ordering hook implementation functions
+
+- mechanisms for two different types of 1:N calls: "firstresult", where
+  the call stops as soon as the first implementation returns a non-None
+  result, and the (default) way of guaranteeing that all hook implementations
+  will be called and their non-None results collected.
+
+- mechanisms for "historic" extension points such that all newly
+ registered functions will receive all hook calls that happened
+ before their registration.
+
+- a mechanism for discovering plugin objects which are based on
+ setuptools based entry points.
+
+- a simple tracing mechanism, including tracing of plugin calls and
+ their arguments.
+
+"""
+import sys
+import inspect
+
+__version__ = '0.3.1'
+__all__ = ["PluginManager", "PluginValidationError",
+ "HookspecMarker", "HookimplMarker"]
+
+_py3 = sys.version_info > (3, 0)
+
+
+class HookspecMarker:
+ """ Decorator helper class for marking functions as hook specifications.
+
+ You can instantiate it with a project_name to get a decorator.
+ Calling PluginManager.add_hookspecs later will discover all marked functions
+ if the PluginManager uses the same project_name.
+ """
+
+ def __init__(self, project_name):
+ self.project_name = project_name
+
+ def __call__(self, function=None, firstresult=False, historic=False):
+ """ if passed a function, directly sets attributes on the function
+ which will make it discoverable to add_hookspecs(). If passed no
+ function, returns a decorator which can be applied to a function
+ later using the attributes supplied.
+
+ If firstresult is True the 1:N hook call (N being the number of registered
+ hook implementation functions) will stop at I<=N when the I'th function
+ returns a non-None result.
+
+ If historic is True calls to a hook will be memorized and replayed
+ on later registered plugins.
+
+ """
+ def setattr_hookspec_opts(func):
+ if historic and firstresult:
+ raise ValueError("cannot have a historic firstresult hook")
+ setattr(func, self.project_name + "_spec",
+ dict(firstresult=firstresult, historic=historic))
+ return func
+
+ if function is not None:
+ return setattr_hookspec_opts(function)
+ else:
+ return setattr_hookspec_opts
+
+
+class HookimplMarker:
+ """ Decorator helper class for marking functions as hook implementations.
+
+    You can instantiate it with a project_name to get a decorator.
+ Calling PluginManager.register later will discover all marked functions
+ if the PluginManager uses the same project_name.
+ """
+ def __init__(self, project_name):
+ self.project_name = project_name
+
+ def __call__(self, function=None, hookwrapper=False, optionalhook=False,
+ tryfirst=False, trylast=False):
+
+ """ if passed a function, directly sets attributes on the function
+ which will make it discoverable to register(). If passed no function,
+ returns a decorator which can be applied to a function later using
+ the attributes supplied.
+
+ If optionalhook is True a missing matching hook specification will not result
+ in an error (by default it is an error if no matching spec is found).
+
+ If tryfirst is True this hook implementation will run as early as possible
+        in the chain of N hook implementations for a specification.
+
+ If trylast is True this hook implementation will run as late as possible
+ in the chain of N hook implementations.
+
+        If hookwrapper is True the hook implementation needs to execute exactly
+        one "yield". The code before the yield is run early, before any non-hookwrapper
+        function is run. The code after the yield is run after all non-hookwrapper
+        functions have run. The yield receives an ``_CallOutcome`` object representing
+ the exception or result outcome of the inner calls (including other hookwrapper
+ calls).
+
+ """
+ def setattr_hookimpl_opts(func):
+ setattr(func, self.project_name + "_impl",
+ dict(hookwrapper=hookwrapper, optionalhook=optionalhook,
+ tryfirst=tryfirst, trylast=trylast))
+ return func
+
+ if function is None:
+ return setattr_hookimpl_opts
+ else:
+ return setattr_hookimpl_opts(function)
+
+
+def normalize_hookimpl_opts(opts):
+ opts.setdefault("tryfirst", False)
+ opts.setdefault("trylast", False)
+ opts.setdefault("hookwrapper", False)
+ opts.setdefault("optionalhook", False)
+
+
+class _TagTracer:
+ def __init__(self):
+ self._tag2proc = {}
+ self.writer = None
+ self.indent = 0
+
+ def get(self, name):
+ return _TagTracerSub(self, (name,))
+
+ def format_message(self, tags, args):
+ if isinstance(args[-1], dict):
+ extra = args[-1]
+ args = args[:-1]
+ else:
+ extra = {}
+
+ content = " ".join(map(str, args))
+ indent = " " * self.indent
+
+ lines = [
+ "%s%s [%s]\n" % (indent, content, ":".join(tags))
+ ]
+
+ for name, value in extra.items():
+ lines.append("%s %s: %s\n" % (indent, name, value))
+ return lines
+
+ def processmessage(self, tags, args):
+ if self.writer is not None and args:
+ lines = self.format_message(tags, args)
+ self.writer(''.join(lines))
+ try:
+ self._tag2proc[tags](tags, args)
+ except KeyError:
+ pass
+
+ def setwriter(self, writer):
+ self.writer = writer
+
+ def setprocessor(self, tags, processor):
+ if isinstance(tags, str):
+ tags = tuple(tags.split(":"))
+ else:
+ assert isinstance(tags, tuple)
+ self._tag2proc[tags] = processor
+
+
+class _TagTracerSub:
+ def __init__(self, root, tags):
+ self.root = root
+ self.tags = tags
+
+ def __call__(self, *args):
+ self.root.processmessage(self.tags, args)
+
+ def setmyprocessor(self, processor):
+ self.root.setprocessor(self.tags, processor)
+
+ def get(self, name):
+ return self.__class__(self.root, self.tags + (name,))
+
+
+def _raise_wrapfail(wrap_controller, msg):
+ co = wrap_controller.gi_code
+ raise RuntimeError("wrap_controller at %r %s:%d %s" %
+ (co.co_name, co.co_filename, co.co_firstlineno, msg))
+
+
+def _wrapped_call(wrap_controller, func):
+ """ Wrap calling to a function with a generator which needs to yield
+ exactly once. The yield point will trigger calling the wrapped function
+ and return its _CallOutcome to the yield point. The generator then needs
+ to finish (raise StopIteration) in order for the wrapped call to complete.
+ """
+ try:
+ next(wrap_controller) # first yield
+ except StopIteration:
+ _raise_wrapfail(wrap_controller, "did not yield")
+ call_outcome = _CallOutcome(func)
+ try:
+ wrap_controller.send(call_outcome)
+ _raise_wrapfail(wrap_controller, "has second yield")
+ except StopIteration:
+ pass
+ return call_outcome.get_result()
+
+
+class _CallOutcome:
+ """ Outcome of a function call, either an exception or a proper result.
+ Calling the ``get_result`` method will return the result or reraise
+ the exception raised when the function was called. """
+ excinfo = None
+
+ def __init__(self, func):
+ try:
+ self.result = func()
+ except BaseException:
+ self.excinfo = sys.exc_info()
+
+ def force_result(self, result):
+ self.result = result
+ self.excinfo = None
+
+ def get_result(self):
+ if self.excinfo is None:
+ return self.result
+ else:
+ ex = self.excinfo
+ if _py3:
+ raise ex[1].with_traceback(ex[2])
+ _reraise(*ex) # noqa
+
+if not _py3:
+ exec("""
+def _reraise(cls, val, tb):
+ raise cls, val, tb
+""")
+
+
+class _TracedHookExecution:
+ def __init__(self, pluginmanager, before, after):
+ self.pluginmanager = pluginmanager
+ self.before = before
+ self.after = after
+ self.oldcall = pluginmanager._inner_hookexec
+ assert not isinstance(self.oldcall, _TracedHookExecution)
+ self.pluginmanager._inner_hookexec = self
+
+ def __call__(self, hook, hook_impls, kwargs):
+ self.before(hook.name, hook_impls, kwargs)
+ outcome = _CallOutcome(lambda: self.oldcall(hook, hook_impls, kwargs))
+ self.after(outcome, hook.name, hook_impls, kwargs)
+ return outcome.get_result()
+
+ def undo(self):
+ self.pluginmanager._inner_hookexec = self.oldcall
+
+
+class PluginManager(object):
+ """ Core Pluginmanager class which manages registration
+ of plugin objects and 1:N hook calling.
+
+    You can register new hooks by calling ``add_hookspecs(module_or_class)``.
+    You can register plugin objects (which contain hooks) by calling
+    ``register(plugin)``. The PluginManager can be initialized with an
+    ``implprefix``; attribute names of registered plugin objects which
+    start with that prefix are recognized as hook implementations even
+    without an explicit hookimpl marker.
+
+ For debugging purposes you can call ``enable_tracing()``
+ which will subsequently send debug information to the trace helper.
+ """
+
+ def __init__(self, project_name, implprefix=None):
+ """ if implprefix is given implementation functions
+ will be recognized if their name matches the implprefix. """
+ self.project_name = project_name
+ self._name2plugin = {}
+ self._plugin2hookcallers = {}
+ self._plugin_distinfo = []
+ self.trace = _TagTracer().get("pluginmanage")
+ self.hook = _HookRelay(self.trace.root.get("hook"))
+ self._implprefix = implprefix
+ self._inner_hookexec = lambda hook, methods, kwargs: \
+ _MultiCall(methods, kwargs, hook.spec_opts).execute()
+
+ def _hookexec(self, hook, methods, kwargs):
+ # called from all hookcaller instances.
+ # enable_tracing will set its own wrapping function at self._inner_hookexec
+ return self._inner_hookexec(hook, methods, kwargs)
+
+ def register(self, plugin, name=None):
+ """ Register a plugin and return its canonical name or None if the name
+ is blocked from registering. Raise a ValueError if the plugin is already
+ registered. """
+ plugin_name = name or self.get_canonical_name(plugin)
+
+ if plugin_name in self._name2plugin or plugin in self._plugin2hookcallers:
+ if self._name2plugin.get(plugin_name, -1) is None:
+ return # blocked plugin, return None to indicate no registration
+ raise ValueError("Plugin already registered: %s=%s\n%s" %
+ (plugin_name, plugin, self._name2plugin))
+
+ # XXX if an error happens we should make sure no state has been
+ # changed at point of return
+ self._name2plugin[plugin_name] = plugin
+
+ # register matching hook implementations of the plugin
+ self._plugin2hookcallers[plugin] = hookcallers = []
+ for name in dir(plugin):
+ hookimpl_opts = self.parse_hookimpl_opts(plugin, name)
+ if hookimpl_opts is not None:
+ normalize_hookimpl_opts(hookimpl_opts)
+ method = getattr(plugin, name)
+ hookimpl = HookImpl(plugin, plugin_name, method, hookimpl_opts)
+ hook = getattr(self.hook, name, None)
+ if hook is None:
+ hook = _HookCaller(name, self._hookexec)
+ setattr(self.hook, name, hook)
+ elif hook.has_spec():
+ self._verify_hook(hook, hookimpl)
+ hook._maybe_apply_history(hookimpl)
+ hook._add_hookimpl(hookimpl)
+ hookcallers.append(hook)
+ return plugin_name
+
+ def parse_hookimpl_opts(self, plugin, name):
+ method = getattr(plugin, name)
+ res = getattr(method, self.project_name + "_impl", None)
+ if res is not None and not isinstance(res, dict):
+ # false positive
+ res = None
+ elif res is None and self._implprefix and name.startswith(self._implprefix):
+ res = {}
+ return res
+
+ def unregister(self, plugin=None, name=None):
+ """ unregister a plugin object and all its contained hook implementations
+ from internal data structures. """
+ if name is None:
+ assert plugin is not None, "one of name or plugin needs to be specified"
+ name = self.get_name(plugin)
+
+ if plugin is None:
+ plugin = self.get_plugin(name)
+
+ # if self._name2plugin[name] == None registration was blocked: ignore
+ if self._name2plugin.get(name):
+ del self._name2plugin[name]
+
+ for hookcaller in self._plugin2hookcallers.pop(plugin, []):
+ hookcaller._remove_plugin(plugin)
+
+ return plugin
+
+ def set_blocked(self, name):
+ """ block registrations of the given name, unregister if already registered. """
+ self.unregister(name=name)
+ self._name2plugin[name] = None
+
+ def is_blocked(self, name):
+ """ return True if the name blogs registering plugins of that name. """
+ return name in self._name2plugin and self._name2plugin[name] is None
+
+ def add_hookspecs(self, module_or_class):
+ """ add new hook specifications defined in the given module_or_class.
+ Functions are recognized if they have been decorated accordingly. """
+ names = []
+ for name in dir(module_or_class):
+ spec_opts = self.parse_hookspec_opts(module_or_class, name)
+ if spec_opts is not None:
+ hc = getattr(self.hook, name, None)
+ if hc is None:
+ hc = _HookCaller(name, self._hookexec, module_or_class, spec_opts)
+ setattr(self.hook, name, hc)
+ else:
+ # plugins registered this hook without knowing the spec
+ hc.set_specification(module_or_class, spec_opts)
+ for hookfunction in (hc._wrappers + hc._nonwrappers):
+ self._verify_hook(hc, hookfunction)
+ names.append(name)
+
+ if not names:
+ raise ValueError("did not find any %r hooks in %r" %
+ (self.project_name, module_or_class))
+
+ def parse_hookspec_opts(self, module_or_class, name):
+ method = getattr(module_or_class, name)
+ return getattr(method, self.project_name + "_spec", None)
+
+ def get_plugins(self):
+ """ return the set of registered plugins. """
+ return set(self._plugin2hookcallers)
+
+ def is_registered(self, plugin):
+ """ Return True if the plugin is already registered. """
+ return plugin in self._plugin2hookcallers
+
+ def get_canonical_name(self, plugin):
+ """ Return canonical name for a plugin object. Note that a plugin
+ may be registered under a different name which was specified
+ by the caller of register(plugin, name). To obtain the name
+        of a registered plugin use ``get_name(plugin)`` instead."""
+ return getattr(plugin, "__name__", None) or str(id(plugin))
+
+ def get_plugin(self, name):
+ """ Return a plugin or None for the given name. """
+ return self._name2plugin.get(name)
+
+ def get_name(self, plugin):
+ """ Return name for registered plugin or None if not registered. """
+ for name, val in self._name2plugin.items():
+ if plugin == val:
+ return name
+
+ def _verify_hook(self, hook, hookimpl):
+ if hook.is_historic() and hookimpl.hookwrapper:
+ raise PluginValidationError(
+ "Plugin %r\nhook %r\nhistoric incompatible to hookwrapper" %
+ (hookimpl.plugin_name, hook.name))
+
+ for arg in hookimpl.argnames:
+ if arg not in hook.argnames:
+ raise PluginValidationError(
+ "Plugin %r\nhook %r\nargument %r not available\n"
+ "plugin definition: %s\n"
+ "available hookargs: %s" %
+ (hookimpl.plugin_name, hook.name, arg,
+ _formatdef(hookimpl.function), ", ".join(hook.argnames)))
+
+ def check_pending(self):
+ """ Verify that all hooks which have not been verified against
+ a hook specification are optional, otherwise raise PluginValidationError"""
+ for name in self.hook.__dict__:
+ if name[0] != "_":
+ hook = getattr(self.hook, name)
+ if not hook.has_spec():
+ for hookimpl in (hook._wrappers + hook._nonwrappers):
+ if not hookimpl.optionalhook:
+ raise PluginValidationError(
+ "unknown hook %r in plugin %r" %
+ (name, hookimpl.plugin))
+
+ def load_setuptools_entrypoints(self, entrypoint_name):
+ """ Load modules from querying the specified setuptools entrypoint name.
+ Return the number of loaded plugins. """
+ from pkg_resources import iter_entry_points, DistributionNotFound
+ for ep in iter_entry_points(entrypoint_name):
+ # is the plugin registered or blocked?
+ if self.get_plugin(ep.name) or self.is_blocked(ep.name):
+ continue
+ try:
+ plugin = ep.load()
+ except DistributionNotFound:
+ continue
+ self.register(plugin, name=ep.name)
+ self._plugin_distinfo.append((plugin, ep.dist))
+ return len(self._plugin_distinfo)
+
+ def list_plugin_distinfo(self):
+ """ return list of distinfo/plugin tuples for all setuptools registered
+ plugins. """
+ return list(self._plugin_distinfo)
+
+ def list_name_plugin(self):
+ """ return list of name/plugin pairs. """
+ return list(self._name2plugin.items())
+
+ def get_hookcallers(self, plugin):
+ """ get all hook callers for the specified plugin. """
+ return self._plugin2hookcallers.get(plugin)
+
+ def add_hookcall_monitoring(self, before, after):
+ """ add before/after tracing functions for all hooks
+ and return an undo function which, when called,
+ will remove the added tracers.
+
+ ``before(hook_name, hook_impls, kwargs)`` will be called ahead
+ of all hook calls and receive a hookcaller instance, a list
+ of HookImpl instances and the keyword arguments for the hook call.
+
+ ``after(outcome, hook_name, hook_impls, kwargs)`` receives the
+        same arguments as ``before`` but also a :py:class:`_CallOutcome` object
+ which represents the result of the overall hook call.
+ """
+ return _TracedHookExecution(self, before, after).undo
+
+ def enable_tracing(self):
+ """ enable tracing of hook calls and return an undo function. """
+ hooktrace = self.hook._trace
+
+ def before(hook_name, methods, kwargs):
+ hooktrace.root.indent += 1
+ hooktrace(hook_name, kwargs)
+
+ def after(outcome, hook_name, methods, kwargs):
+ if outcome.excinfo is None:
+ hooktrace("finish", hook_name, "-->", outcome.result)
+ hooktrace.root.indent -= 1
+
+ return self.add_hookcall_monitoring(before, after)
+
+ def subset_hook_caller(self, name, remove_plugins):
+ """ Return a new _HookCaller instance for the named method
+ which manages calls to all registered plugins except the
+ ones from remove_plugins. """
+ orig = getattr(self.hook, name)
+ plugins_to_remove = [plug for plug in remove_plugins if hasattr(plug, name)]
+ if plugins_to_remove:
+ hc = _HookCaller(orig.name, orig._hookexec, orig._specmodule_or_class,
+ orig.spec_opts)
+ for hookimpl in (orig._wrappers + orig._nonwrappers):
+ plugin = hookimpl.plugin
+ if plugin not in plugins_to_remove:
+ hc._add_hookimpl(hookimpl)
+ # we also keep track of this hook caller so it
+ # gets properly removed on plugin unregistration
+ self._plugin2hookcallers.setdefault(plugin, []).append(hc)
+ return hc
+ return orig
+
+
+class _MultiCall:
+ """ execute a call into multiple python functions/methods. """
+
+ # XXX note that the __multicall__ argument is supported only
+ # for pytest compatibility reasons. It was never officially
+ # supported there and is explicitly deprecated since 2.8
+ # so we can remove it soon, allowing to avoid the below recursion
+ # in execute() and simplify/speed up the execute loop.
+
+ def __init__(self, hook_impls, kwargs, specopts={}):
+ self.hook_impls = hook_impls
+ self.kwargs = kwargs
+ self.kwargs["__multicall__"] = self
+ self.specopts = specopts
+
+ def execute(self):
+ all_kwargs = self.kwargs
+ self.results = results = []
+ firstresult = self.specopts.get("firstresult")
+
+ while self.hook_impls:
+ hook_impl = self.hook_impls.pop()
+ args = [all_kwargs[argname] for argname in hook_impl.argnames]
+ if hook_impl.hookwrapper:
+ return _wrapped_call(hook_impl.function(*args), self.execute)
+ res = hook_impl.function(*args)
+ if res is not None:
+ if firstresult:
+ return res
+ results.append(res)
+
+ if not firstresult:
+ return results
+
+ def __repr__(self):
+ status = "%d meths" % (len(self.hook_impls),)
+ if hasattr(self, "results"):
+ status = ("%d results, " % len(self.results)) + status
+ return "<_MultiCall %s, kwargs=%r>" % (status, self.kwargs)
+
+
+def varnames(func, startindex=None):
+ """ return argument name tuple for a function, method, class or callable.
+
+ In case of a class, its "__init__" method is considered.
+ For methods the "self" parameter is not included unless you are passing
+    an unbound method with Python3 (which has no support for unbound methods)
+ """
+ cache = getattr(func, "__dict__", {})
+ try:
+ return cache["_varnames"]
+ except KeyError:
+ pass
+ if inspect.isclass(func):
+ try:
+ func = func.__init__
+ except AttributeError:
+ return ()
+ startindex = 1
+ else:
+ if not inspect.isfunction(func) and not inspect.ismethod(func):
+ func = getattr(func, '__call__', func)
+ if startindex is None:
+ startindex = int(inspect.ismethod(func))
+
+ try:
+ rawcode = func.__code__
+ except AttributeError:
+ return ()
+ try:
+ x = rawcode.co_varnames[startindex:rawcode.co_argcount]
+ except AttributeError:
+ x = ()
+ else:
+ defaults = func.__defaults__
+ if defaults:
+ x = x[:-len(defaults)]
+ try:
+ cache["_varnames"] = x
+ except TypeError:
+ pass
+ return x
+
+
+class _HookRelay:
+ """ hook holder object for performing 1:N hook calls where N is the number
+ of registered plugins.
+
+ """
+
+ def __init__(self, trace):
+ self._trace = trace
+
+
+class _HookCaller(object):
+ def __init__(self, name, hook_execute, specmodule_or_class=None, spec_opts=None):
+ self.name = name
+ self._wrappers = []
+ self._nonwrappers = []
+ self._hookexec = hook_execute
+ if specmodule_or_class is not None:
+ assert spec_opts is not None
+ self.set_specification(specmodule_or_class, spec_opts)
+
+ def has_spec(self):
+ return hasattr(self, "_specmodule_or_class")
+
+ def set_specification(self, specmodule_or_class, spec_opts):
+ assert not self.has_spec()
+ self._specmodule_or_class = specmodule_or_class
+ specfunc = getattr(specmodule_or_class, self.name)
+ argnames = varnames(specfunc, startindex=inspect.isclass(specmodule_or_class))
+ assert "self" not in argnames # sanity check
+ self.argnames = ["__multicall__"] + list(argnames)
+ self.spec_opts = spec_opts
+ if spec_opts.get("historic"):
+ self._call_history = []
+
+ def is_historic(self):
+ return hasattr(self, "_call_history")
+
+ def _remove_plugin(self, plugin):
+ def remove(wrappers):
+ for i, method in enumerate(wrappers):
+ if method.plugin == plugin:
+ del wrappers[i]
+ return True
+ if remove(self._wrappers) is None:
+ if remove(self._nonwrappers) is None:
+ raise ValueError("plugin %r not found" % (plugin,))
+
+ def _add_hookimpl(self, hookimpl):
+ if hookimpl.hookwrapper:
+ methods = self._wrappers
+ else:
+ methods = self._nonwrappers
+
+ if hookimpl.trylast:
+ methods.insert(0, hookimpl)
+ elif hookimpl.tryfirst:
+ methods.append(hookimpl)
+ else:
+ # find last non-tryfirst method
+ i = len(methods) - 1
+ while i >= 0 and methods[i].tryfirst:
+ i -= 1
+ methods.insert(i + 1, hookimpl)
+
+ def __repr__(self):
+ return "<_HookCaller %r>" % (self.name,)
+
+ def __call__(self, **kwargs):
+ assert not self.is_historic()
+ return self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
+
+ def call_historic(self, proc=None, kwargs=None):
+ self._call_history.append((kwargs or {}, proc))
+ # historizing hooks don't return results
+ self._hookexec(self, self._nonwrappers + self._wrappers, kwargs)
+
+ def call_extra(self, methods, kwargs):
+ """ Call the hook with some additional temporarily participating
+ methods using the specified kwargs as call parameters. """
+ old = list(self._nonwrappers), list(self._wrappers)
+ for method in methods:
+ opts = dict(hookwrapper=False, trylast=False, tryfirst=False)
+ hookimpl = HookImpl(None, "<temp>", method, opts)
+ self._add_hookimpl(hookimpl)
+ try:
+ return self(**kwargs)
+ finally:
+ self._nonwrappers, self._wrappers = old
+
+ def _maybe_apply_history(self, method):
+ if self.is_historic():
+ for kwargs, proc in self._call_history:
+ res = self._hookexec(self, [method], kwargs)
+ if res and proc is not None:
+ proc(res[0])
+
+
+class HookImpl:
+ def __init__(self, plugin, plugin_name, function, hook_impl_opts):
+ self.function = function
+ self.argnames = varnames(self.function)
+ self.plugin = plugin
+ self.opts = hook_impl_opts
+ self.plugin_name = plugin_name
+ self.__dict__.update(hook_impl_opts)
+
+
+class PluginValidationError(Exception):
+ """ plugin failed validation. """
+
+
+if hasattr(inspect, 'signature'):
+ def _formatdef(func):
+ return "%s%s" % (
+ func.__name__,
+ str(inspect.signature(func))
+ )
+else:
+ def _formatdef(func):
+ return "%s%s" % (
+ func.__name__,
+ inspect.formatargspec(*inspect.getargspec(func))
+ )
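To make the hook machinery above concrete, here is a minimal, self-contained sketch of defining a hook specification, registering two implementations (one of them a hookwrapper), and performing a 1:N call. The project name "myproject" is illustrative, and the import assumes a standalone ``pluggy`` install; inside pytest the same module is reached as ``_pytest._pluggy``.

```
from pluggy import PluginManager, HookspecMarker, HookimplMarker

hookspec = HookspecMarker("myproject")
hookimpl = HookimplMarker("myproject")


class Spec:
    @hookspec
    def myhook(self, arg):
        """The function name and argument names form the specification."""


class Plugin1:
    @hookimpl
    def myhook(self, arg):
        return arg + 1


class Wrapper:
    @hookimpl(hookwrapper=True)
    def myhook(self, arg):
        # runs before the non-wrapper implementations
        outcome = yield
        # runs after them; ``outcome`` is a _CallOutcome
        outcome.get_result()


pm = PluginManager("myproject")
pm.add_hookspecs(Spec)
pm.register(Plugin1())
pm.register(Wrapper())

# default 1:N call: all non-None results are collected into a list
print(pm.hook.myhook(arg=41))   # -> [42]
```

With ``firstresult=True`` on the specification, the same call would instead return ``42`` directly, stopping at the first non-None result.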