summaryrefslogtreecommitdiffstats
path: root/scripts/external_libs/nose-1.3.4/python2/nose/plugins
diff options
context:
space:
mode:
Diffstat (limited to 'scripts/external_libs/nose-1.3.4/python2/nose/plugins')
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/__init__.py190
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/allmodules.py45
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/attrib.py286
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/base.py725
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/builtin.py34
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/capture.py115
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/collect.py94
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/cover.py253
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/debug.py67
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/deprecated.py45
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/doctests.py455
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/errorclass.py210
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/failuredetail.py49
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/isolate.py103
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/logcapture.py245
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/manager.py460
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/multiprocess.py835
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/plugintest.py416
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/prof.py154
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/skip.py63
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/testid.py306
-rwxr-xr-xscripts/external_libs/nose-1.3.4/python2/nose/plugins/xunit.py329
22 files changed, 5479 insertions, 0 deletions
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/__init__.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/__init__.py
new file mode 100755
index 00000000..08ee8f32
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/__init__.py
@@ -0,0 +1,190 @@
+"""
+Writing Plugins
+---------------
+
+nose supports plugins for test collection, selection, observation and
+reporting. There are two basic rules for plugins:
+
+* Plugin classes should subclass :class:`nose.plugins.Plugin`.
+
+* Plugins may implement any of the methods described in the class
+ :doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
+ this class is for documentary purposes only; plugins may not subclass
+ IPluginInterface.
+
+Hello World
+===========
+
+Here's a basic plugin. It doesn't do much so read on for more ideas or dive
+into the :doc:`IPluginInterface <interface>` to see all available hooks.
+
+.. code-block:: python
+
+ import logging
+ import os
+
+ from nose.plugins import Plugin
+
+ log = logging.getLogger('nose.plugins.helloworld')
+
+ class HelloWorld(Plugin):
+ name = 'helloworld'
+
+ def options(self, parser, env=os.environ):
+ super(HelloWorld, self).options(parser, env=env)
+
+ def configure(self, options, conf):
+ super(HelloWorld, self).configure(options, conf)
+ if not self.enabled:
+ return
+
+ def finalize(self, result):
+ log.info('Hello pluginized world!')
+
+Registering
+===========
+
+.. Note::
+ Important note: the following applies only to the default
+ plugin manager. Other plugin managers may use different means to
+ locate and load plugins.
+
+For nose to find a plugin, it must be part of a package that uses
+setuptools_, and the plugin must be included in the entry points defined
+in the setup.py for the package:
+
+.. code-block:: python
+
+ setup(name='Some plugin',
+ # ...
+ entry_points = {
+ 'nose.plugins.0.10': [
+ 'someplugin = someplugin:SomePlugin'
+ ]
+ },
+ # ...
+ )
+
+Once the package is installed with install or develop, nose will be able
+to load the plugin.
+
+.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
+
+Registering a plugin without setuptools
+=======================================
+
+It is currently possible to register a plugin programmatically by
+creating a custom nose runner like this :
+
+.. code-block:: python
+
+ import nose
+ from yourplugin import YourPlugin
+
+ if __name__ == '__main__':
+ nose.main(addplugins=[YourPlugin()])
+
+Defining options
+================
+
+All plugins must implement the methods ``options(self, parser, env)``
+and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
+that want the standard options should call the superclass methods.
+
+nose uses optparse.OptionParser from the standard library to parse
+arguments. A plugin's ``options()`` method receives a parser
+instance. It's good form for a plugin to use that instance only to add
+additional arguments that take only long arguments (--like-this). Most
+of nose's built-in arguments get their default value from an environment
+variable.
+
+A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
+object, as well as the current config object. Plugins should configure their
+behavior based on the user-selected settings, and may raise exceptions
+if the configured behavior is nonsensical.
+
+Logging
+=======
+
+nose uses the logging classes from the standard library. To enable users
+to view debug messages easily, plugins should use ``logging.getLogger()`` to
+acquire a logger in the ``nose.plugins`` namespace.
+
+Recipes
+=======
+
+* Writing a plugin that monitors or controls test result output
+
+ Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
+ results. If you also want to monitor output, implement
+ ``setOutputStream`` and keep a reference to the output stream. If you
+ want to prevent the builtin ``TextTestResult`` output, implement
+   ``setOutputStream`` and *return a dummy stream*. The default output will go
+ to the dummy stream, while you send your desired output to the real stream.
+
+ Example: `examples/html_plugin/htmlplug.py`_
+
+* Writing a plugin that handles exceptions
+
+ Subclass :doc:`ErrorClassPlugin <errorclasses>`.
+
+ Examples: :doc:`nose.plugins.deprecated <deprecated>`,
+ :doc:`nose.plugins.skip <skip>`
+
+* Writing a plugin that adds detail to error reports
+
+ Implement ``formatError`` and/or ``formatFailure``. The error tuple
+ you return (error class, error message, traceback) will replace the
+ original error tuple.
+
+ Examples: :doc:`nose.plugins.capture <capture>`,
+ :doc:`nose.plugins.failuredetail <failuredetail>`
+
+* Writing a plugin that loads tests from files other than python modules
+
+ Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
+ return True for files that you want to examine for tests. In
+ ``loadTestsFromFile``, for those files, return an iterable
+ containing TestCases (or yield them as you find them;
+ ``loadTestsFromFile`` may also be a generator).
+
+ Example: :doc:`nose.plugins.doctests <doctests>`
+
+* Writing a plugin that prints a report
+
+ Implement ``begin`` if you need to perform setup before testing
+ begins. Implement ``report`` and output your report to the provided stream.
+
+ Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
+
+* Writing a plugin that selects or rejects tests
+
+ Implement any or all ``want*`` methods. Return False to reject the test
+ candidate, True to accept it -- which means that the test candidate
+ will pass through the rest of the system, so you must be prepared to
+ load tests from it if tests can't be loaded by the core loader or
+ another plugin -- and None if you don't care.
+
+ Examples: :doc:`nose.plugins.attrib <attrib>`,
+ :doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
+
+
+More Examples
+=============
+
+See any builtin plugin or example plugin in the examples_ directory in
+the nose source distribution. There is a list of third-party plugins
+`on jottit`_.
+
+.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
+.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
+.. _on jottit: http://nose-plugins.jottit.com/
+
+"""
+from nose.plugins.base import Plugin
+from nose.plugins.manager import *
+from nose.plugins.plugintest import PluginTester
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/allmodules.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/allmodules.py
new file mode 100755
index 00000000..1ccd7773
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/allmodules.py
@@ -0,0 +1,45 @@
+"""Use the AllModules plugin by passing ``--all-modules`` or setting the
+NOSE_ALL_MODULES environment variable to enable collection and execution of
+tests in all python modules. Normal nose behavior is to look for tests only in
+modules that match testMatch.
+
+More information: :doc:`../doc_tests/test_allmodules/test_allmodules`
+
+.. warning ::
+
+ This plugin can have surprising interactions with plugins that load tests
+ from what nose normally considers non-test modules, such as
+ the :doc:`doctest plugin <doctests>`. This is because any given
+ object in a module can't be loaded both by a plugin and the normal nose
+ :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions
+ or classes in non-test modules that look like tests but aren't, you will
+ likely see errors as nose attempts to run them as tests.
+
+"""
+
+import os
+from nose.plugins.base import Plugin
+
+class AllModules(Plugin):
+ """Collect tests from all python modules.
+ """
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ env_opt = 'NOSE_ALL_MODULES'
+ parser.add_option('--all-modules',
+ action="store_true",
+ dest=self.enableOpt,
+ default=env.get(env_opt),
+ help="Enable plugin %s: %s [%s]" %
+ (self.__class__.__name__, self.help(), env_opt))
+
+ def wantFile(self, file):
+ """Override to return True for all files ending with .py"""
+ # always want .py files
+ if file.endswith('.py'):
+ return True
+
+ def wantModule(self, module):
+        """Override to return True for all modules."""
+ return True
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/attrib.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/attrib.py
new file mode 100755
index 00000000..3d4422a2
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/attrib.py
@@ -0,0 +1,286 @@
+"""Attribute selector plugin.
+
+Oftentimes when testing you will want to select tests based on
+criteria rather than simply by filename. For example, you might want
+to run all tests except for the slow ones. You can do this with the
+Attribute selector plugin by setting attributes on your test methods.
+Here is an example:
+
+.. code-block:: python
+
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+ test_big_download.slow = 1
+
+Once you've assigned an attribute ``slow = 1`` you can exclude that
+test and all other tests having the slow attribute by running ::
+
+ $ nosetests -a '!slow'
+
+There is also a decorator available for you that will set attributes.
+Here's how to set ``slow=1`` like above with the decorator:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr('slow')
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+And here's how to set an attribute with a specific value:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr(speed='slow')
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+This test could be run with ::
+
+ $ nosetests -a speed=slow
+
+In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes
+on all its test methods at once. For example:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr(speed='slow')
+ class MyTestCase:
+ def test_long_integration(self):
+ pass
+ def test_end_to_end_something(self):
+ pass
+
+Below is a reference to the different syntaxes available.
+
+Simple syntax
+-------------
+
+Examples of using the ``-a`` and ``--attr`` options:
+
+* ``nosetests -a status=stable``
+ Only runs tests with attribute "status" having value "stable"
+
+* ``nosetests -a priority=2,status=stable``
+ Runs tests having both attributes and values
+
+* ``nosetests -a priority=2 -a slow``
+ Runs tests that match either attribute
+
+* ``nosetests -a tags=http``
+ If a test's ``tags`` attribute was a list and it contained the value
+ ``http`` then it would be run
+
+* ``nosetests -a slow``
+ Runs tests with the attribute ``slow`` if its value does not equal False
+ (False, [], "", etc...)
+
+* ``nosetests -a '!slow'``
+ Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
+ attribute that is equal to False
+ **NOTE**:
+ if your shell (like bash) interprets '!' as a special character make sure to
+ put single quotes around it.
+
+Expression Evaluation
+---------------------
+
+Examples using the ``-A`` and ``--eval-attr`` options:
+
+* ``nosetests -A "not slow"``
+ Evaluates the Python expression "not slow" and runs the test if True
+
+* ``nosetests -A "(priority > 5) and not slow"``
+ Evaluates a complex Python expression and runs the test if True
+
+"""
+import inspect
+import logging
+import os
+import sys
+from inspect import isfunction
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins.attrib')
+compat_24 = sys.version_info >= (2, 4)
+
+def attr(*args, **kwargs):
+ """Decorator that adds attributes to classes or functions
+ for use with the Attribute (-a) plugin.
+ """
+ def wrap_ob(ob):
+ for name in args:
+ setattr(ob, name, True)
+ for name, value in kwargs.iteritems():
+ setattr(ob, name, value)
+ return ob
+ return wrap_ob
+
+def get_method_attr(method, cls, attr_name, default = False):
+ """Look up an attribute on a method/ function.
+    If the attribute isn't found there, it is looked up on the
+ method's class, if any.
+ """
+ Missing = object()
+ value = getattr(method, attr_name, Missing)
+ if value is Missing and cls is not None:
+ value = getattr(cls, attr_name, Missing)
+ if value is Missing:
+ return default
+ return value
+
+
+class ContextHelper:
+ """Object that can act as context dictionary for eval and looks up
+ names as attributes on a method/ function and its class.
+ """
+ def __init__(self, method, cls):
+ self.method = method
+ self.cls = cls
+
+ def __getitem__(self, name):
+ return get_method_attr(self.method, self.cls, name)
+
+
+class AttributeSelector(Plugin):
+ """Selects test cases to be run based on their attributes.
+ """
+
+ def __init__(self):
+ Plugin.__init__(self)
+ self.attribs = []
+
+ def options(self, parser, env):
+ """Register command line options"""
+ parser.add_option("-a", "--attr",
+ dest="attr", action="append",
+ default=env.get('NOSE_ATTR'),
+ metavar="ATTR",
+ help="Run only tests that have attributes "
+ "specified by ATTR [NOSE_ATTR]")
+ # disable in < 2.4: eval can't take needed args
+ if compat_24:
+ parser.add_option("-A", "--eval-attr",
+ dest="eval_attr", metavar="EXPR", action="append",
+ default=env.get('NOSE_EVAL_ATTR'),
+ help="Run only tests for whose attributes "
+ "the Python expression EXPR evaluates "
+ "to True [NOSE_EVAL_ATTR]")
+
+ def configure(self, options, config):
+ """Configure the plugin and system, based on selected options.
+
+ attr and eval_attr may each be lists.
+
+ self.attribs will be a list of lists of tuples. In that list, each
+ list is a group of attributes, all of which must match for the rule to
+ match.
+ """
+ self.attribs = []
+
+ # handle python eval-expression parameter
+ if compat_24 and options.eval_attr:
+ eval_attr = tolist(options.eval_attr)
+ for attr in eval_attr:
+ # "<python expression>"
+ # -> eval(expr) in attribute context must be True
+ def eval_in_context(expr, obj, cls):
+ return eval(expr, None, ContextHelper(obj, cls))
+ self.attribs.append([(attr, eval_in_context)])
+
+ # attribute requirements are a comma separated list of
+ # 'key=value' pairs
+ if options.attr:
+ std_attr = tolist(options.attr)
+ for attr in std_attr:
+ # all attributes within an attribute group must match
+ attr_group = []
+ for attrib in attr.strip().split(","):
+ # don't die on trailing comma
+ if not attrib:
+ continue
+ items = attrib.split("=", 1)
+ if len(items) > 1:
+ # "name=value"
+ # -> 'str(obj.name) == value' must be True
+ key, value = items
+ else:
+ key = items[0]
+ if key[0] == "!":
+ # "!name"
+ # 'bool(obj.name)' must be False
+ key = key[1:]
+ value = False
+ else:
+ # "name"
+ # -> 'bool(obj.name)' must be True
+ value = True
+ attr_group.append((key, value))
+ self.attribs.append(attr_group)
+ if self.attribs:
+ self.enabled = True
+
+ def validateAttrib(self, method, cls = None):
+        """Verify whether a method has the required attributes.
+        The method is considered a match if it matches all attributes
+        for any attribute group.
+        """
+ # TODO: is there a need for case-sensitive value comparison?
+ any = False
+ for group in self.attribs:
+ match = True
+ for key, value in group:
+ attr = get_method_attr(method, cls, key)
+ if callable(value):
+ if not value(key, method, cls):
+ match = False
+ break
+ elif value is True:
+ # value must exist and be True
+ if not bool(attr):
+ match = False
+ break
+ elif value is False:
+ # value must not exist or be False
+ if bool(attr):
+ match = False
+ break
+ elif type(attr) in (list, tuple):
+ # value must be found in the list attribute
+ if not str(value).lower() in [str(x).lower()
+ for x in attr]:
+ match = False
+ break
+ else:
+ # value must match, convert to string and compare
+ if (value != attr
+ and str(value).lower() != str(attr).lower()):
+ match = False
+ break
+ any = any or match
+ if any:
+ # not True because we don't want to FORCE the selection of the
+ # item, only say that it is acceptable
+ return None
+ return False
+
+ def wantFunction(self, function):
+ """Accept the function if its attributes match.
+ """
+ return self.validateAttrib(function)
+
+ def wantMethod(self, method):
+ """Accept the method if its attributes match.
+ """
+ try:
+ cls = method.im_class
+ except AttributeError:
+ return False
+ return self.validateAttrib(method, cls)
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/base.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/base.py
new file mode 100755
index 00000000..f09beb69
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/base.py
@@ -0,0 +1,725 @@
+import os
+import textwrap
+from optparse import OptionConflictError
+from warnings import warn
+from nose.util import tolist
+
+class Plugin(object):
+ """Base class for nose plugins. It's recommended but not *necessary* to
+ subclass this class to create a plugin, but all plugins *must* implement
+ `options(self, parser, env)` and `configure(self, options, conf)`, and
+ must have the attributes `enabled`, `name` and `score`. The `name`
+ attribute may contain hyphens ('-').
+
+ Plugins should not be enabled by default.
+
+ Subclassing Plugin (and calling the superclass methods in
+ __init__, configure, and options, if you override them) will give
+ your plugin some friendly default behavior:
+
+ * A --with-$name option will be added to the command line interface
+ to enable the plugin, and a corresponding environment variable
+ will be used as the default value. The plugin class's docstring
+ will be used as the help for this option.
+ * The plugin will not be enabled unless this option is selected by
+ the user.
+ """
+ can_configure = False
+ enabled = False
+ enableOpt = None
+ name = None
+ score = 100
+
+ def __init__(self):
+ if self.name is None:
+ self.name = self.__class__.__name__.lower()
+ if self.enableOpt is None:
+ self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
+
+ def addOptions(self, parser, env=None):
+ """Add command-line options for this plugin.
+
+ The base plugin class adds --with-$name by default, used to enable the
+ plugin.
+
+ .. warning :: Don't implement addOptions unless you want to override
+ all default option handling behavior, including
+ warnings for conflicting options. Implement
+ :meth:`options
+ <nose.plugins.base.IPluginInterface.options>`
+ instead.
+ """
+ self.add_options(parser, env)
+
+ def add_options(self, parser, env=None):
+ """Non-camel-case version of func name for backwards compatibility.
+
+ .. warning ::
+
+ DEPRECATED: Do not use this method,
+ use :meth:`options <nose.plugins.base.IPluginInterface.options>`
+ instead.
+
+ """
+ # FIXME raise deprecation warning if wasn't called by wrapper
+ if env is None:
+ env = os.environ
+ try:
+ self.options(parser, env)
+ self.can_configure = True
+ except OptionConflictError, e:
+ warn("Plugin %s has conflicting option string: %s and will "
+ "be disabled" % (self, e), RuntimeWarning)
+ self.enabled = False
+ self.can_configure = False
+
+ def options(self, parser, env):
+ """Register commandline options.
+
+ Implement this method for normal options behavior with protection from
+ OptionConflictErrors. If you override this method and want the default
+ --with-$name option to be registered, be sure to call super().
+ """
+ env_opt = 'NOSE_WITH_%s' % self.name.upper()
+ env_opt = env_opt.replace('-', '_')
+ parser.add_option("--with-%s" % self.name,
+ action="store_true",
+ dest=self.enableOpt,
+ default=env.get(env_opt),
+ help="Enable plugin %s: %s [%s]" %
+ (self.__class__.__name__, self.help(), env_opt))
+
+ def configure(self, options, conf):
+ """Configure the plugin and system, based on selected options.
+
+ The base plugin class sets the plugin to enabled if the enable option
+ for the plugin (self.enableOpt) is true.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ if hasattr(options, self.enableOpt):
+ self.enabled = getattr(options, self.enableOpt)
+
+ def help(self):
+ """Return help for this plugin. This will be output as the help
+ section of the --with-$name option that enables the plugin.
+ """
+ if self.__class__.__doc__:
+ # doc sections are often indented; compress the spaces
+ return textwrap.dedent(self.__class__.__doc__)
+ return "(no help available)"
+
+    # Compatibility shim
+ def tolist(self, val):
+ warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
+ DeprecationWarning)
+ return tolist(val)
+
+
+class IPluginInterface(object):
+ """
+ IPluginInterface describes the plugin API. Do not subclass or use this
+ class directly.
+ """
+ def __new__(cls, *arg, **kw):
+ raise TypeError("IPluginInterface class is for documentation only")
+
+ def addOptions(self, parser, env):
+ """Called to allow plugin to register command-line options with the
+ parser. DO NOT return a value from this method unless you want to stop
+ all other plugins from setting their options.
+
+ .. warning ::
+
+ DEPRECATED -- implement
+ :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
+ """
+ pass
+ add_options = addOptions
+ add_options.deprecated = True
+
+ def addDeprecated(self, test):
+ """Called when a deprecated test is seen. DO NOT return a value
+ unless you want to stop other plugins from seeing the deprecated
+ test.
+
+ .. warning :: DEPRECATED -- check error class in addError instead
+ """
+ pass
+ addDeprecated.deprecated = True
+
+ def addError(self, test, err):
+ """Called when a test raises an uncaught exception. DO NOT return a
+ value unless you want to stop other plugins from seeing that the
+ test has raised an error.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ addError.changed = True
+
+ def addFailure(self, test, err):
+ """Called when a test fails. DO NOT return a value unless you
+ want to stop other plugins from seeing that the test has failed.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: 3-tuple
+ :type err: sys.exc_info() tuple
+ """
+ pass
+ addFailure.changed = True
+
+ def addSkip(self, test):
+ """Called when a test is skipped. DO NOT return a value unless
+ you want to stop other plugins from seeing the skipped test.
+
+ .. warning:: DEPRECATED -- check error class in addError instead
+ """
+ pass
+ addSkip.deprecated = True
+
+ def addSuccess(self, test):
+ """Called when a test passes. DO NOT return a value unless you
+ want to stop other plugins from seeing the passing test.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ addSuccess.changed = True
+
+ def afterContext(self):
+ """Called after a context (generally a module) has been
+ lazy-loaded, imported, setup, had its tests loaded and
+ executed, and torn down.
+ """
+ pass
+ afterContext._new = True
+
+ def afterDirectory(self, path):
+ """Called after all tests have been loaded from directory at path
+ and run.
+
+ :param path: the directory that has finished processing
+ :type path: string
+ """
+ pass
+ afterDirectory._new = True
+
+ def afterImport(self, filename, module):
+ """Called after module is imported from filename. afterImport
+ is called even if the import failed.
+
+ :param filename: The file that was loaded
+ :type filename: string
+ :param module: The name of the module
+ :type module: string
+ """
+ pass
+ afterImport._new = True
+
+ def afterTest(self, test):
+ """Called after the test has been run and the result recorded
+ (after stopTest).
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ afterTest._new = True
+
+ def beforeContext(self):
+ """Called before a context (generally a module) is
+ examined. Because the context is not yet loaded, plugins don't
+ get to know what the context is; so any context operations
+ should use a stack that is pushed in `beforeContext` and popped
+ in `afterContext` to ensure they operate symmetrically.
+
+ `beforeContext` and `afterContext` are mainly useful for tracking
+ and restoring global state around possible changes from within a
+ context, whatever the context may be. If you need to operate on
+ contexts themselves, see `startContext` and `stopContext`, which
+ are passed the context in question, but are called after
+ it has been loaded (imported in the module case).
+ """
+ pass
+ beforeContext._new = True
+
+ def beforeDirectory(self, path):
+ """Called before tests are loaded from directory at path.
+
+ :param path: the directory that is about to be processed
+ """
+ pass
+ beforeDirectory._new = True
+
+ def beforeImport(self, filename, module):
+ """Called before module is imported from filename.
+
+ :param filename: The file that will be loaded
+ :param module: The name of the module found in file
+ :type module: string
+ """
+ beforeImport._new = True
+
+ def beforeTest(self, test):
+ """Called before the test is run (before startTest).
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ beforeTest._new = True
+
+ def begin(self):
+ """Called before any tests are collected or run. Use this to
+ perform any setup needed before testing begins.
+ """
+ pass
+
+ def configure(self, options, conf):
+ """Called after the command line has been parsed, with the
+ parsed options and the config container. Here, implement any
+ config storage or changes to state or operation that are set
+ by command line options.
+
+ DO NOT return a value from this method unless you want to
+ stop all other plugins from being configured.
+ """
+ pass
+
+ def finalize(self, result):
+ """Called after all report output, including output from all
+ plugins, has been sent to the stream. Use this to print final
+ test results or perform final cleanup. Return None to allow
+ other plugins to continue printing, or any other value to stop
+ them.
+
+ :param result: test result object
+
+ .. Note:: When tests are run under a test runner other than
+ :class:`nose.core.TextTestRunner`, such as
+ via ``python setup.py test``, this method may be called
+ **before** the default report output is sent.
+ """
+ pass
+
+ def describeTest(self, test):
+ """Return a test description.
+
+ Called by :meth:`nose.case.Test.shortDescription`.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ describeTest._new = True
+
+ def formatError(self, test, err):
+ """Called in result.addError, before plugin.addError. If you
+ want to replace or modify the error tuple, return a new error
+ tuple, otherwise return err, the original error tuple.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ formatError._new = True
+ formatError.chainable = True
+ # test arg is not chainable
+ formatError.static_args = (True, False)
+
+ def formatFailure(self, test, err):
+ """Called in result.addFailure, before plugin.addFailure. If you
+ want to replace or modify the error tuple, return a new error
+ tuple, otherwise return err, the original error tuple.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ formatFailure._new = True
+ formatFailure.chainable = True
+ # test arg is not chainable
+ formatFailure.static_args = (True, False)
+
+ def handleError(self, test, err):
+ """Called on addError. To handle the error yourself and prevent normal
+ error processing, return a true value.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ handleError._new = True
+
+ def handleFailure(self, test, err):
+ """Called on addFailure. To handle the failure yourself and
+ prevent normal failure processing, return a true value.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ handleFailure._new = True
+
+ def loadTestsFromDir(self, path):
+ """Return iterable of tests from a directory. May be a
+ generator. Each item returned must be a runnable
+ unittest.TestCase (or subclass) instance or suite instance.
+ Return None if your plugin cannot collect any tests from
+ directory.
+
+ :param path: The path to the directory.
+ """
+ pass
+ loadTestsFromDir.generative = True
+ loadTestsFromDir._new = True
+
+ def loadTestsFromModule(self, module, path=None):
+ """Return iterable of tests in a module. May be a
+ generator. Each item returned must be a runnable
+ unittest.TestCase (or subclass) instance.
+ Return None if your plugin cannot
+ collect any tests from module.
+
+ :param module: The module object
+ :type module: python module
+ :param path: the path of the module to search, to distinguish from
+ namespace package modules
+
+ .. note::
+
+ NEW. The ``path`` parameter will only be passed by nose 0.11
+ or above.
+ """
+ pass
+ loadTestsFromModule.generative = True
+
+ def loadTestsFromName(self, name, module=None, importPath=None):
+ """Return tests in this file or module. Return None if you are not able
+ to load any tests, or an iterable if you are. May be a
+ generator.
+
+ :param name: The test name. May be a file or module name plus a test
+ callable. Use split_test_name to split into parts. Or it might
+ be some crazy name of your own devising, in which case, do
+ whatever you want.
+ :param module: Module from which the name is to be loaded
+ :param importPath: Path from which file (must be a python module) was
+ found
+
+ .. warning:: DEPRECATED: this argument will NOT be passed.
+ """
+ pass
+ loadTestsFromName.generative = True
+
+    def loadTestsFromNames(self, names, module=None):
+        """Return a tuple of (tests loaded, remaining names). Return
+        None if you are not able to load any tests. Multiple plugins
+        may implement loadTestsFromNames; the remaining name list from
+        each will be passed to the next as input.
+
+        :param names: List of test names.
+        :type names: iterable
+        :param module: Module from which the names are to be loaded
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    # chainable: each plugin's returned "remaining names" feed the next
+    # plugin's input (see docstring above).
+    loadTestsFromNames._new = True
+    loadTestsFromNames.chainable = True
+
+    def loadTestsFromFile(self, filename):
+        """Return tests in this file. Return None if you are not
+        interested in loading any tests, or an iterable if you are and
+        can load some. May be a generator. *If you are interested in
+        loading tests from the file and encounter no errors, but find
+        no tests, yield False or return [False].*
+
+        .. Note:: This method replaces loadTestsFromPath from the 0.9
+                  API.
+
+        :param filename: The full path to the file or directory.
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # generative: hook may yield tests; _new: replaces the 0.9-era
+    # loadTestsFromPath hook (see note in docstring).
+    loadTestsFromFile.generative = True
+    loadTestsFromFile._new = True
+
+    def loadTestsFromPath(self, path):
+        """
+        .. warning:: DEPRECATED -- use loadTestsFromFile instead
+        """
+        # Interface stub retained only for the deprecation warning above.
+        pass
+    # deprecated: superseded by loadTestsFromFile.
+    loadTestsFromPath.deprecated = True
+
+    def loadTestsFromTestCase(self, cls):
+        """Return tests in this test case class. Return None if you are
+        not able to load any tests, or an iterable if you are. May be a
+        generator.
+
+        :param cls: The test case class. Must be subclass of
+           :class:`unittest.TestCase`.
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # generative: hook may yield tests rather than return a list.
+    loadTestsFromTestCase.generative = True
+
+    def loadTestsFromTestClass(self, cls):
+        """Return tests in this test class. Class will *not* be a
+        unittest.TestCase subclass. Return None if you are not able to
+        load any tests, an iterable if you are. May be a generator.
+
+        :param cls: The test case class. Must be **not** be subclass of
+           :class:`unittest.TestCase`.
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API; generative: may yield.
+    loadTestsFromTestClass._new = True
+    loadTestsFromTestClass.generative = True
+
+    def makeTest(self, obj, parent):
+        """Given an object and its parent, return or yield one or more
+        test cases. Each test must be a unittest.TestCase (or subclass)
+        instance. This is called before default test loading to allow
+        plugins to load an alternate test case or cases for an
+        object. May be a generator.
+
+        :param obj: The object to be made into a test
+        :param parent: The parent of obj (eg, for a method, the class)
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API; generative: may yield.
+    makeTest._new = True
+    makeTest.generative = True
+
+    def options(self, parser, env):
+        """Called to allow plugin to register command line
+        options with the parser.
+
+        DO NOT return a value from this method unless you want to stop
+        all other plugins from setting their options.
+
+        :param parser: options parser instance
+        :type parser: :class:`ConfigParser.ConfigParser`
+        :param env: environment, default is os.environ
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    options._new = True
+
+    def prepareTest(self, test):
+        """Called before the test is run by the test runner. Please
+        note the article *the* in the previous sentence: prepareTest
+        is called *only once*, and is passed the test case or test
+        suite that the test runner will execute. It is *not* called
+        for each individual test case. If you return a non-None value,
+        that return value will be run as the test. Use this hook to
+        wrap or decorate the test with another function. If you need
+        to modify or wrap individual test cases, use `prepareTestCase`
+        instead.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def prepareTestCase(self, test):
+        """Prepare or wrap an individual test case. Called before
+        execution of the test. The test passed here is a
+        nose.case.Test instance; the case to be executed is in the
+        test attribute of the passed case. To modify the test to be
+        run, you should return a callable that takes one argument (the
+        test result object) -- it is recommended that you *do not*
+        side-effect the nose.case.Test instance you have been passed.
+
+        Keep in mind that when you replace the test callable you are
+        replacing the run() method of the test case -- including the
+        exception handling and result calls, etc.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    prepareTestCase._new = True
+
+    def prepareTestLoader(self, loader):
+        """Called before tests are loaded. To replace the test loader,
+        return a test loader. To allow other plugins to process the
+        test loader, return None. Only one plugin may replace the test
+        loader. Only valid when using nose.TestProgram.
+
+        :param loader: :class:`nose.loader.TestLoader`
+             (or other loader) instance
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    prepareTestLoader._new = True
+
+    def prepareTestResult(self, result):
+        """Called before the first test is run. To use a different
+        test result handler for all tests than the given result,
+        return a test result handler. NOTE however that this handler
+        will only be seen by tests, that is, inside of the result
+        proxy system. The TestRunner and TestProgram -- whether nose's
+        or other -- will continue to see the original result
+        handler. For this reason, it is usually better to monkeypatch
+        the result (for instance, if you want to handle some
+        exceptions in a unique way). Only one plugin may replace the
+        result, but many may monkeypatch it. If you want to
+        monkeypatch and stop other plugins from doing so, monkeypatch
+        and return the patched result.
+
+        :param result: :class:`nose.result.TextTestResult`
+             (or other result) instance
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    prepareTestResult._new = True
+
+    def prepareTestRunner(self, runner):
+        """Called before tests are run. To replace the test runner,
+        return a test runner. To allow other plugins to process the
+        test runner, return None. Only valid when using nose.TestProgram.
+
+        :param runner: :class:`nose.core.TextTestRunner`
+             (or other runner) instance
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    prepareTestRunner._new = True
+
+    def report(self, stream):
+        """Called after all error output has been printed. Print your
+        plugin's report to the provided stream. Return None to allow
+        other plugins to print reports, any other value to stop them.
+
+        :param stream: stream object; send your output here
+        :type stream: file-like object
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def setOutputStream(self, stream):
+        """Called before test output begins. To direct test output to a
+        new stream, return a stream object, which must implement a
+        `write(msg)` method. If you only want to note the stream, not
+        capture or redirect it, then return None.
+
+        :param stream: stream object; send your output here
+        :type stream: file-like object
+        """
+        # Docstring-only stub (implicitly returns None, i.e. "don't
+        # redirect"); concrete plugins override this hook.
+
+    def startContext(self, context):
+        """Called before context setup and the running of tests in the
+        context. Note that tests have already been *loaded* from the
+        context before this call.
+
+        :param context: the context about to be setup. May be a module or
+            class, or any other object that contains tests.
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    startContext._new = True
+
+    def startTest(self, test):
+        """Called before each test is run. DO NOT return a value unless
+        you want to stop other plugins from seeing the test start.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def stopContext(self, context):
+        """Called after the tests in a context have run and the
+        context has been torn down.
+
+        :param context: the context that has been torn down. May be a module or
+            class, or any other object that contains tests.
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    stopContext._new = True
+
+    def stopTest(self, test):
+        """Called after each test is run. DO NOT return a value unless
+        you want to stop other plugins from seeing that the test has stopped.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def testName(self, test):
+        """Return a short test name. Called by `nose.case.Test.__str__`.
+
+        :param test: the test case
+        :type test: :class:`nose.case.Test`
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+    # _new: hook added after the 0.9 plugin API.
+    testName._new = True
+
+    def wantClass(self, cls):
+        """Return true if you want the main test selector to collect
+        tests from this class, false if you don't, and None if you don't
+        care.
+
+        :param cls: The class being examined by the selector
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def wantDirectory(self, dirname):
+        """Return true if you want test collection to descend into this
+        directory, false if you do not, and None if you don't care.
+
+        :param dirname: Full path to directory being examined by the selector
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def wantFile(self, file):
+        """Return true if you want to collect tests from this file,
+        false if you do not and None if you don't care.
+
+        Change from 0.9: The optional package parameter is no longer passed.
+
+        :param file: Full path to file being examined by the selector
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def wantFunction(self, function):
+        """Return true to collect this function as a test, false to
+        prevent it from being collected, and None if you don't care.
+
+        :param function: The function object being examined by the selector
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def wantMethod(self, method):
+        """Return true to collect this method as a test, false to
+        prevent it from being collected, and None if you don't care.
+
+        :param method: The method object being examined by the selector
+        :type method: unbound method
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def wantModule(self, module):
+        """Return true if you want to collection to descend into this
+        module, false to prevent the collector from descending into the
+        module, and None if you don't care.
+
+        :param module: The module object being examined by the selector
+        :type module: python module
+        """
+        # Interface stub: concrete plugins override this hook.
+        pass
+
+    def wantModuleTests(self, module):
+        """
+        .. warning:: DEPRECATED -- this method will not be called, it has
+                     been folded into wantModule.
+        """
+        # Interface stub retained only for the deprecation warning above.
+        pass
+    # deprecated: per the docstring, this hook is never called.
+    wantModuleTests.deprecated = True
+
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/builtin.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/builtin.py
new file mode 100755
index 00000000..4fcc0018
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/builtin.py
@@ -0,0 +1,34 @@
+"""
+Lists builtin plugins.
+"""
+# Classes successfully imported below; consumed by the plugin manager.
+plugins = []
+# (module, class) pairs naming every builtin plugin shipped with nose.
+builtins = (
+    ('nose.plugins.attrib', 'AttributeSelector'),
+    ('nose.plugins.capture', 'Capture'),
+    ('nose.plugins.logcapture', 'LogCapture'),
+    ('nose.plugins.cover', 'Coverage'),
+    ('nose.plugins.debug', 'Pdb'),
+    ('nose.plugins.deprecated', 'Deprecated'),
+    ('nose.plugins.doctests', 'Doctest'),
+    ('nose.plugins.isolate', 'IsolationPlugin'),
+    ('nose.plugins.failuredetail', 'FailureDetail'),
+    ('nose.plugins.prof', 'Profile'),
+    ('nose.plugins.skip', 'Skip'),
+    ('nose.plugins.testid', 'TestId'),
+    ('nose.plugins.multiprocess', 'MultiProcess'),
+    ('nose.plugins.xunit', 'Xunit'),
+    ('nose.plugins.allmodules', 'AllModules'),
+    ('nose.plugins.collect', 'CollectOnly'),
+    )
+
+for module, cls in builtins:
+    try:
+        # fromlist=[cls] makes __import__ return the leaf module.
+        plugmod = __import__(module, globals(), locals(), [cls])
+    except KeyboardInterrupt:
+        raise
+    except:
+        # NOTE(review): bare except silently skips any plugin whose
+        # import fails (e.g. missing optional dependency such as
+        # coverage or profile) -- no logging, so failures are invisible.
+        continue
+    plug = getattr(plugmod, cls)
+    plugins.append(plug)
+    # Re-export each plugin class at this module's top level.
+    globals()[cls] = plug
+
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/capture.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/capture.py
new file mode 100755
index 00000000..fa4e5dca
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/capture.py
@@ -0,0 +1,115 @@
+"""
+This plugin captures stdout during test execution. If the test fails
+or raises an error, the captured output will be appended to the error
+or failure output. It is enabled by default but can be disabled with
+the options ``-s`` or ``--nocapture``.
+
+:Options:
+ ``--nocapture``
+ Don't capture stdout (any stdout output will be printed immediately)
+
+"""
+import logging
+import os
+import sys
+from nose.plugins.base import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.util import ln
+from StringIO import StringIO
+
+
+log = logging.getLogger(__name__)
+
+class Capture(Plugin):
+    """
+    Output capture plugin. Enabled by default. Disable with ``-s`` or
+    ``--nocapture``. This plugin captures stdout during test execution,
+    appending any output captured to the error or failure output,
+    should the test fail or raise an error.
+    """
+    enabled = True
+    env_opt = 'NOSE_NOCAPTURE'
+    name = 'capture'
+    # High score so capture runs before most other plugins' formatters.
+    score = 1600
+
+    def __init__(self):
+        # NOTE(review): does not chain to Plugin.__init__ -- presumably
+        # intentional since configure() sets self.conf, but confirm.
+        # self.stdout is a stack of saved sys.stdout objects (start/end
+        # push and pop it); self._buf is the active capture buffer.
+        self.stdout = []
+        self._buf = None
+
+    def options(self, parser, env):
+        """Register commandline options
+        """
+        parser.add_option(
+            "-s", "--nocapture", action="store_false",
+            default=not env.get(self.env_opt), dest="capture",
+            help="Don't capture stdout (any stdout output "
+            "will be printed immediately) [NOSE_NOCAPTURE]")
+
+    def configure(self, options, conf):
+        """Configure plugin. Plugin is enabled by default.
+        """
+        self.conf = conf
+        if not options.capture:
+            self.enabled = False
+
+    def afterTest(self, test):
+        """Clear capture buffer.
+        """
+        self.end()
+        self._buf = None
+
+    def begin(self):
+        """Replace sys.stdout with capture buffer.
+        """
+        self.start() # get an early handle on sys.stdout
+
+    def beforeTest(self, test):
+        """Flush capture buffer.
+        """
+        self.start()
+
+    def formatError(self, test, err):
+        """Add captured output to error report.
+        """
+        # Stash captured output on the test for other plugins (e.g. xunit).
+        test.capturedOutput = output = self.buffer
+        self._buf = None
+        if not output:
+            # Don't return None as that will prevent other
+            # formatters from formatting and remove earlier formatters
+            # formats, instead return the err we got
+            return err
+        ec, ev, tb = err
+        return (ec, self.addCaptureToErr(ev, output), tb)
+
+    def formatFailure(self, test, err):
+        """Add captured output to failure report.
+        """
+        return self.formatError(test, err)
+
+    def addCaptureToErr(self, ev, output):
+        # Append the captured stdout, framed by banner lines, to the
+        # exception value text. Unicode coercion avoids mixed-type joins.
+        ev = exc_to_unicode(ev)
+        output = force_unicode(output)
+        return u'\n'.join([ev, ln(u'>> begin captured stdout <<'),
+                           output, ln(u'>> end captured stdout <<')])
+
+    def start(self):
+        # Push the current stdout and install a fresh capture buffer.
+        self.stdout.append(sys.stdout)
+        self._buf = StringIO()
+        sys.stdout = self._buf
+
+    def end(self):
+        # Pop the most recently saved stdout, if any.
+        if self.stdout:
+            sys.stdout = self.stdout.pop()
+
+    def finalize(self, result):
+        """Restore stdout.
+        """
+        # Unwind the whole stack in case start() was called more often
+        # than end().
+        while self.stdout:
+            self.end()
+
+    def _get_buffer(self):
+        # Returns None when no capture is active.
+        if self._buf is not None:
+            return self._buf.getvalue()
+
+    buffer = property(_get_buffer, None, None,
+                      """Captured stdout output.""")
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/collect.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/collect.py
new file mode 100755
index 00000000..6f9f0faa
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/collect.py
@@ -0,0 +1,94 @@
+"""
+This plugin bypasses the actual execution of tests, and instead just collects
+test names. Fixtures are also bypassed, so running nosetests with the
+collection plugin enabled should be very quick.
+
+This plugin is useful in combination with the testid plugin (``--with-id``).
+Run both together to get an indexed list of all tests, which will enable you to
+run individual tests by index number.
+
+This plugin is also useful for counting tests in a test suite, and making
+people watching your demo think all of your tests pass.
+"""
+from nose.plugins.base import Plugin
+from nose.case import Test
+import logging
+import unittest
+
+log = logging.getLogger(__name__)
+
+
+class CollectOnly(Plugin):
+    """
+    Collect and output test names only, don't run any tests.
+    """
+    name = "collect-only"
+    enableOpt = 'collect_only'
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        parser.add_option('--collect-only',
+                          action='store_true',
+                          dest=self.enableOpt,
+                          default=env.get('NOSE_COLLECT_ONLY'),
+                          help="Enable collect-only: %s [COLLECT_ONLY]" %
+                          (self.help()))
+
+    def prepareTestLoader(self, loader):
+        """Install collect-only suite class in TestLoader.
+        """
+        # Disable context awareness
+        log.debug("Preparing test loader")
+        loader.suiteClass = TestSuiteFactory(self.conf)
+
+    def prepareTestCase(self, test):
+        """Replace actual test with dummy that always passes.
+        """
+        # Return something that always passes
+        log.debug("Preparing test case %s", test)
+        if not isinstance(test, Test):
+            return
+        def run(result):
+            # We need to make these plugin calls because there won't be
+            # a result proxy, due to using a stripped-down test suite
+            self.conf.plugins.startTest(test)
+            result.startTest(test)
+            self.conf.plugins.addSuccess(test)
+            result.addSuccess(test)
+            self.conf.plugins.stopTest(test)
+            result.stopTest(test)
+        return run
+
+
+class TestSuiteFactory:
+    """
+    Factory for producing configured test suites.
+    """
+    def __init__(self, conf):
+        # Held nose config, forwarded to every suite this factory builds.
+        self.conf = conf
+
+    def __call__(self, tests=(), **kw):
+        # Drop-in replacement for a TestLoader.suiteClass callable;
+        # extra keyword args from the loader are deliberately ignored.
+        return TestSuite(tests, conf=self.conf)
+
+
+class TestSuite(unittest.TestSuite):
+    """
+    Basic test suite that bypasses most proxy and plugin calls, but does
+    wrap tests in a nose.case.Test so prepareTestCase will be called.
+    """
+    def __init__(self, tests=(), conf=None):
+        self.conf = conf
+        # Exec lazy suites: makes discovery depth-first
+        if callable(tests):
+            tests = tests()
+        log.debug("TestSuite(%r)", tests)
+        unittest.TestSuite.__init__(self, tests)
+
+    def addTest(self, test):
+        """Add a test, wrapping non-suite items in nose.case.Test."""
+        log.debug("Add test %s", test)
+        if isinstance(test, unittest.TestSuite):
+            self._tests.append(test)
+        else:
+            # Wrap so that prepareTestCase fires for each leaf test.
+            self._tests.append(Test(test, config=self.conf))
+
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/cover.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/cover.py
new file mode 100755
index 00000000..551f3320
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/cover.py
@@ -0,0 +1,253 @@
+"""If you have Ned Batchelder's coverage_ module installed, you may activate a
+coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
+environment variable. The coverage report will cover any python source module
+imported after the start of the test run, excluding modules that match
+testMatch. If you want to include those modules too, use the ``--cover-tests``
+switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
+restrict the coverage report to modules from a particular package or packages,
+use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment
+variable.
+
+.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
+"""
+import logging
+import re
+import sys
+import StringIO
+from nose.plugins.base import Plugin
+from nose.util import src, tolist
+
+log = logging.getLogger(__name__)
+
+
+class Coverage(Plugin):
+    """
+    Activate a coverage report using Ned Batchelder's coverage module.
+    """
+    coverTests = False
+    coverPackages = None
+    coverInstance = None
+    coverErase = False
+    coverMinPercentage = None
+    score = 200
+    # NOTE(review): class-level mutable dict, shared by all instances;
+    # 'active' key tracks whether coverage was successfully enabled.
+    status = {}
+
+    def options(self, parser, env):
+        """
+        Add options to command line.
+        """
+        super(Coverage, self).options(parser, env)
+        parser.add_option("--cover-package", action="append",
+                          default=env.get('NOSE_COVER_PACKAGE'),
+                          metavar="PACKAGE",
+                          dest="cover_packages",
+                          help="Restrict coverage output to selected packages "
+                          "[NOSE_COVER_PACKAGE]")
+        parser.add_option("--cover-erase", action="store_true",
+                          default=env.get('NOSE_COVER_ERASE'),
+                          dest="cover_erase",
+                          help="Erase previously collected coverage "
+                          "statistics before run")
+        parser.add_option("--cover-tests", action="store_true",
+                          dest="cover_tests",
+                          default=env.get('NOSE_COVER_TESTS'),
+                          help="Include test modules in coverage report "
+                          "[NOSE_COVER_TESTS]")
+        parser.add_option("--cover-min-percentage", action="store",
+                          dest="cover_min_percentage",
+                          default=env.get('NOSE_COVER_MIN_PERCENTAGE'),
+                          help="Minimum percentage of coverage for tests "
+                          "to pass [NOSE_COVER_MIN_PERCENTAGE]")
+        parser.add_option("--cover-inclusive", action="store_true",
+                          dest="cover_inclusive",
+                          default=env.get('NOSE_COVER_INCLUSIVE'),
+                          help="Include all python files under working "
+                          "directory in coverage report.  Useful for "
+                          "discovering holes in test coverage if not all "
+                          "files are imported by the test suite. "
+                          "[NOSE_COVER_INCLUSIVE]")
+        parser.add_option("--cover-html", action="store_true",
+                          default=env.get('NOSE_COVER_HTML'),
+                          dest='cover_html',
+                          help="Produce HTML coverage information")
+        parser.add_option('--cover-html-dir', action='store',
+                          default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
+                          dest='cover_html_dir',
+                          metavar='DIR',
+                          help='Produce HTML coverage information in dir')
+        parser.add_option("--cover-branches", action="store_true",
+                          default=env.get('NOSE_COVER_BRANCHES'),
+                          dest="cover_branches",
+                          help="Include branch coverage in coverage report "
+                          "[NOSE_COVER_BRANCHES]")
+        parser.add_option("--cover-xml", action="store_true",
+                          default=env.get('NOSE_COVER_XML'),
+                          dest="cover_xml",
+                          help="Produce XML coverage information")
+        parser.add_option("--cover-xml-file", action="store",
+                          default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'),
+                          dest="cover_xml_file",
+                          metavar="FILE",
+                          help="Produce XML coverage information in file")
+
+    def configure(self, options, conf):
+        """
+        Configure plugin.
+        """
+        # Reset shared activation flag from any previous configure pass.
+        try:
+            self.status.pop('active')
+        except KeyError:
+            pass
+        super(Coverage, self).configure(options, conf)
+        # Multiprocess worker processes inherit config; don't re-init here.
+        if conf.worker:
+            return
+        if self.enabled:
+            try:
+                import coverage
+                if not hasattr(coverage, 'coverage'):
+                    raise ImportError("Unable to import coverage module")
+            except ImportError:
+                log.error("Coverage not available: "
+                          "unable to import coverage module")
+                self.enabled = False
+                return
+        self.conf = conf
+        self.coverErase = options.cover_erase
+        self.coverTests = options.cover_tests
+        self.coverPackages = []
+        if options.cover_packages:
+            # Accept both a single string and a list of (possibly
+            # comma/space separated) package specs.
+            if isinstance(options.cover_packages, (list, tuple)):
+                cover_packages = options.cover_packages
+            else:
+                cover_packages = [options.cover_packages]
+            for pkgs in [tolist(x) for x in cover_packages]:
+                self.coverPackages.extend(pkgs)
+        self.coverInclusive = options.cover_inclusive
+        if self.coverPackages:
+            log.info("Coverage report will include only packages: %s",
+                     self.coverPackages)
+        self.coverHtmlDir = None
+        if options.cover_html:
+            self.coverHtmlDir = options.cover_html_dir
+            log.debug('Will put HTML coverage report in %s', self.coverHtmlDir)
+        self.coverBranches = options.cover_branches
+        self.coverXmlFile = None
+        if options.cover_min_percentage:
+            # Tolerate a trailing '%' on the commandline value.
+            self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%'))
+        if options.cover_xml:
+            self.coverXmlFile = options.cover_xml_file
+            log.debug('Will put XML coverage report in %s', self.coverXmlFile)
+        if self.enabled:
+            self.status['active'] = True
+            self.coverInstance = coverage.coverage(auto_data=False,
+                branch=self.coverBranches, data_suffix=None,
+                source=self.coverPackages)
+
+    def begin(self):
+        """
+        Begin recording coverage information.
+        """
+        log.debug("Coverage begin")
+        # Snapshot modules imported before coverage starts, so they can
+        # be excluded from the report. NOTE(review): keys()[:] is
+        # Python 2 only (keys() is a view in Python 3).
+        self.skipModules = sys.modules.keys()[:]
+        if self.coverErase:
+            log.debug("Clearing previously collected coverage statistics")
+            self.coverInstance.combine()
+            self.coverInstance.erase()
+        self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
+        self.coverInstance.load()
+        self.coverInstance.start()
+
+    def report(self, stream):
+        """
+        Output code coverage report.
+        """
+        log.debug("Coverage report")
+        self.coverInstance.stop()
+        self.coverInstance.combine()
+        self.coverInstance.save()
+        modules = [module
+                   for name, module in sys.modules.items()
+                   if self.wantModuleCoverage(name, module)]
+        log.debug("Coverage report will cover modules: %s", modules)
+        self.coverInstance.report(modules, file=stream)
+
+        import coverage
+        if self.coverHtmlDir:
+            log.debug("Generating HTML coverage report")
+            try:
+                self.coverInstance.html_report(modules, self.coverHtmlDir)
+            except coverage.misc.CoverageException, e:
+                log.warning("Failed to generate HTML report: %s" % str(e))
+
+        if self.coverXmlFile:
+            log.debug("Generating XML coverage report")
+            try:
+                self.coverInstance.xml_report(modules, self.coverXmlFile)
+            except coverage.misc.CoverageException, e:
+                log.warning("Failed to generate XML report: %s" % str(e))
+
+        # make sure we have minimum required coverage
+        if self.coverMinPercentage:
+            # Re-render the text report and scrape the TOTAL percentage
+            # from it (coverage's API of this era exposed no total).
+            f = StringIO.StringIO()
+            self.coverInstance.report(modules, file=f)
+
+            multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+                              r'\s+(\d+)%\s+\d*\s{0,1}$')
+            singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+                               r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$')
+
+            m = re.search(multiPackageRe, f.getvalue())
+            if m is None:
+                m = re.search(singlePackageRe, f.getvalue())
+
+            if m:
+                percentage = int(m.groups()[0])
+                if percentage < self.coverMinPercentage:
+                    log.error('TOTAL Coverage did not reach minimum '
+                              'required: %d%%' % self.coverMinPercentage)
+                    sys.exit(1)
+            else:
+                log.error("No total percentage was found in coverage output, "
+                          "something went wrong.")
+
+
+    def wantModuleCoverage(self, name, module):
+        """Decide whether *module* should appear in the coverage report."""
+        if not hasattr(module, '__file__'):
+            log.debug("no coverage of %s: no __file__", name)
+            return False
+        # src() maps .pyc/.pyo back to the .py source path.
+        module_file = src(module.__file__)
+        if not module_file or not module_file.endswith('.py'):
+            log.debug("no coverage of %s: not a python file", name)
+            return False
+        if self.coverPackages:
+            for package in self.coverPackages:
+                if (re.findall(r'^%s\b' % re.escape(package), name)
+                    and (self.coverTests
+                         or not self.conf.testMatch.search(name))):
+                    log.debug("coverage for %s", name)
+                    return True
+        if name in self.skipModules:
+            log.debug("no coverage for %s: loaded before coverage start",
+                      name)
+            return False
+        if self.conf.testMatch.search(name) and not self.coverTests:
+            log.debug("no coverage for %s: is a test", name)
+            return False
+        # accept any package that passed the previous tests, unless
+        # coverPackages is on -- in that case, if we wanted this
+        # module, we would have already returned True
+        return not self.coverPackages
+
+    def wantFile(self, file, package=None):
+        """If inclusive coverage enabled, return true for all source files
+        in wanted packages.
+        """
+        if self.coverInclusive:
+            if file.endswith(".py"):
+                if package and self.coverPackages:
+                    for want in self.coverPackages:
+                        if package.startswith(want):
+                            return True
+                else:
+                    return True
+        # None == "no opinion"; lets other plugins/selectors decide.
+        return None
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/debug.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/debug.py
new file mode 100755
index 00000000..78243e60
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/debug.py
@@ -0,0 +1,67 @@
+"""
+This plugin provides ``--pdb`` and ``--pdb-failures`` options. The ``--pdb``
+option will drop the test runner into pdb when it encounters an error. To
+drop into pdb on failure, use ``--pdb-failures``.
+"""
+
+import pdb
+from nose.plugins.base import Plugin
+
+class Pdb(Plugin):
+    """
+    Provides --pdb and --pdb-failures options that cause the test runner to
+    drop into pdb if it encounters an error or failure, respectively.
+    """
+    enabled_for_errors = False
+    enabled_for_failures = False
+    score = 5 # run last, among builtins
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        parser.add_option(
+            "--pdb", action="store_true", dest="debugBoth",
+            default=env.get('NOSE_PDB', False),
+            help="Drop into debugger on failures or errors")
+        parser.add_option(
+            "--pdb-failures", action="store_true",
+            dest="debugFailures",
+            default=env.get('NOSE_PDB_FAILURES', False),
+            help="Drop into debugger on failures")
+        parser.add_option(
+            "--pdb-errors", action="store_true",
+            dest="debugErrors",
+            default=env.get('NOSE_PDB_ERRORS', False),
+            help="Drop into debugger on errors")
+
+    def configure(self, options, conf):
+        """Configure which kinds of exceptions trigger plugin.
+        """
+        self.conf = conf
+        # --pdb implies both; the narrower flags enable one kind each.
+        self.enabled_for_errors = options.debugErrors or options.debugBoth
+        self.enabled_for_failures = options.debugFailures or options.debugBoth
+        self.enabled = self.enabled_for_failures or self.enabled_for_errors
+
+    def addError(self, test, err):
+        """Enter pdb if configured to debug errors.
+        """
+        if not self.enabled_for_errors:
+            return
+        self.debug(err)
+
+    def addFailure(self, test, err):
+        """Enter pdb if configured to debug failures.
+        """
+        if not self.enabled_for_failures:
+            return
+        self.debug(err)
+
+    def debug(self, err):
+        """Post-mortem debug the traceback in *err* (an exc_info tuple)."""
+        import sys # FIXME why is this import here?
+        ec, ev, tb = err
+        # Temporarily undo any stdout capture so pdb is interactive.
+        stdout = sys.stdout
+        sys.stdout = sys.__stdout__
+        try:
+            pdb.post_mortem(tb)
+        finally:
+            sys.stdout = stdout
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/deprecated.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/deprecated.py
new file mode 100755
index 00000000..461a26be
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/deprecated.py
@@ -0,0 +1,45 @@
+"""
+This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
+exception. When :class:`DeprecatedTest` is raised, the exception will be logged
+in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
+will be output, and the exception will not be counted as an error or failure.
+It is enabled by default, but can be turned off by using ``--no-deprecated``.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+class DeprecatedTest(Exception):
+    """Raise this exception to mark a test as deprecated.
+    """
+    # Marker exception only; the Deprecated plugin maps it to the
+    # DEPRECATED error class instead of a failure.
+    pass
+
+
+class Deprecated(ErrorClassPlugin):
+    """
+    Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
+    by default.
+    """
+    enabled = True
+    # isfailure=False: DeprecatedTest does not count as an error/failure.
+    deprecated = ErrorClass(DeprecatedTest,
+                            label='DEPRECATED',
+                            isfailure=False)
+
+    def options(self, parser, env):
+        """Register commandline options.
+        """
+        env_opt = 'NOSE_WITHOUT_DEPRECATED'
+        parser.add_option('--no-deprecated', action='store_true',
+                          dest='noDeprecated', default=env.get(env_opt, False),
+                          help="Disable special handling of DeprecatedTest "
+                          "exceptions.")
+
+    def configure(self, options, conf):
+        """Configure plugin.
+        """
+        if not self.can_configure:
+            return
+        self.conf = conf
+        # Enabled by default; --no-deprecated (or the env var) opts out.
+        disable = getattr(options, 'noDeprecated', False)
+        if disable:
+            self.enabled = False
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/doctests.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/doctests.py
new file mode 100755
index 00000000..5ef65799
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/doctests.py
@@ -0,0 +1,455 @@
+"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST
+environment variable to enable collection and execution of :mod:`doctests
+<doctest>`. Because doctests are usually included in the tested package
+(instead of being grouped into packages or modules of their own), nose only
+looks for them in the non-test packages it discovers in the working directory.
+
+Doctests may also be placed into files other than python modules, in which
+case they can be collected and executed by using the ``--doctest-extension``
+switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file
+extension(s) to load.
+
+When loading doctests from non-module files, use the ``--doctest-fixtures``
+switch to specify how to find modules containing fixtures for the tests. A
+module name will be produced by appending the value of that switch to the base
+name of each doctest file loaded. For example, a doctest file "widgets.rst"
+with the switch ``--doctest-fixtures=_fixt`` will load fixtures from the module
+``widgets_fixt.py``.
+
+A fixtures module may define any or all of the following functions:
+
+* setup([module]) or setup_module([module])
+
+ Called before the test runs. You may raise SkipTest to skip all tests.
+
+* teardown([module]) or teardown_module([module])
+
+ Called after the test runs, if setup/setup_module did not raise an
+ unhandled exception.
+
+* setup_test(test)
+
+ Called before the test. NOTE: the argument passed is a
+ doctest.DocTest instance, *not* a unittest.TestCase.
+
+* teardown_test(test)
+
+ Called after the test, if setup_test did not raise an exception. NOTE: the
+ argument passed is a doctest.DocTest instance, *not* a unittest.TestCase.
+
+Doctests are run like any other test, with the exception that output
+capture does not work; doctest does its own output capture while running a
+test.
+
+.. note ::
+
+ See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for
+ additional documentation and examples.
+
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+from inspect import getmodule
+from nose.plugins.base import Plugin
+from nose.suite import ContextList
+from nose.util import anyp, getpackage, test_address, resolve_name, \
+ src, tolist, isproperty
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+import sys
+import __builtin__ as builtin_mod
+
+log = logging.getLogger(__name__)
+
+try:
+ import doctest
+ doctest.DocTestCase
+ # system version of doctest is acceptable, but needs a monkeypatch
+except (ImportError, AttributeError):
+ # system version is too old
+ import nose.ext.dtcompat as doctest
+
+
+#
+# Doctest and coverage don't get along, so we need to create
+# a monkeypatch that will replace the part of doctest that
+# interferes with coverage reports.
+#
+# The monkeypatch is based on this zope patch:
+# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
+#
+_orp = doctest._OutputRedirectingPdb
+
class NoseOutputRedirectingPdb(_orp):
    """_OutputRedirectingPdb variant that only restores the trace function
    (via set_continue) when the debugger was actually entered, so doctest
    runs do not clobber coverage's sys.settrace hook.
    """
    def __init__(self, out):
        # Records whether set_trace() was ever invoked on this instance.
        self.__debugger_used = False
        _orp.__init__(self, out)

    def set_trace(self):
        self.__debugger_used = True
        # Start debugging in the caller's frame, not inside this method.
        _orp.set_trace(self, sys._getframe().f_back)

    def set_continue(self):
        # Calling set_continue unconditionally would break unit test
        # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
        if self.__debugger_used:
            _orp.set_continue(self)
# Install the coverage-friendly pdb in place of doctest's own.
doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb
+
+
class DoctestSuite(unittest.TestSuite):
    """
    Doctest suites are parallelizable at the module or file level only,
    since they may be attached to objects that are not individually
    addressable (like properties). This suite subclass is used when
    loading doctests from a module to ensure that behavior.

    This class is used only if the plugin is not fully prepared;
    in normal use, the loader's suiteClass is used.

    """
    # Class-level default; instances may override via the constructor.
    can_split = False

    def __init__(self, tests=(), context=None, can_split=False):
        self.can_split = can_split
        self.context = context
        super(DoctestSuite, self).__init__(tests=tests)

    def address(self):
        # Delegate to nose's address resolution for the owning context.
        return test_address(self.context)

    def __iter__(self):
        # Yield the stored tests directly (Python 2.3 compatibility).
        for test in self._tests:
            yield test

    def __str__(self):
        return '%s' % (self._tests,)
+
+
class Doctest(Plugin):
    """
    Activate doctest plugin to find and run doctests in non-test modules.
    """
    # Extra file extensions (beyond .py) to scan for doctests; populated
    # from --doctest-extension in configure().
    extension = None
    # Fallback suite class; replaced by the loader's suiteClass in
    # prepareTestLoader() during normal runs.
    suiteClass = DoctestSuite

    def options(self, parser, env):
        """Register commmandline options.
        """
        Plugin.options(self, parser, env)
        parser.add_option('--doctest-tests', action='store_true',
                          dest='doctest_tests',
                          default=env.get('NOSE_DOCTEST_TESTS'),
                          help="Also look for doctests in test modules. "
                          "Note that classes, methods and functions should "
                          "have either doctests or non-doctest tests, "
                          "not both. [NOSE_DOCTEST_TESTS]")
        parser.add_option('--doctest-extension', action="append",
                          dest="doctestExtension",
                          metavar="EXT",
                          help="Also look for doctests in files with "
                          "this extension [NOSE_DOCTEST_EXTENSION]")
        parser.add_option('--doctest-result-variable',
                          dest='doctest_result_var',
                          default=env.get('NOSE_DOCTEST_RESULT_VAR'),
                          metavar="VAR",
                          help="Change the variable name set to the result of "
                          "the last interpreter command from the default '_'. "
                          "Can be used to avoid conflicts with the _() "
                          "function used for text translation. "
                          "[NOSE_DOCTEST_RESULT_VAR]")
        parser.add_option('--doctest-fixtures', action="store",
                          dest="doctestFixtures",
                          metavar="SUFFIX",
                          help="Find fixtures for a doctest file in module "
                          "with this name appended to the base name "
                          "of the doctest file")
        parser.add_option('--doctest-options', action="append",
                          dest="doctestOptions",
                          metavar="OPTIONS",
                          help="Specify options to pass to doctest. " +
                          "Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'")
        # Set the default as a list, if given in env; otherwise
        # an additional value set on the command line will cause
        # an error.
        env_setting = env.get('NOSE_DOCTEST_EXTENSION')
        if env_setting is not None:
            parser.set_defaults(doctestExtension=tolist(env_setting))

    def configure(self, options, config):
        """Configure plugin.

        Parses --doctest-options strings like '+ELLIPSIS,-NORMALIZE_WHITESPACE'
        into a doctest optionflags bitmask; raises ValueError for malformed
        or unknown option names.
        """
        Plugin.configure(self, options, config)
        self.doctest_result_var = options.doctest_result_var
        self.doctest_tests = options.doctest_tests
        self.extension = tolist(options.doctestExtension)
        self.fixtures = options.doctestFixtures
        self.finder = doctest.DocTestFinder()
        self.optionflags = 0
        if options.doctestOptions:
            flags = ",".join(options.doctestOptions).split(',')
            for flag in flags:
                if not flag or flag[0] not in '+-':
                    raise ValueError(
                        "Must specify doctest options with starting " +
                        "'+' or '-'. Got %s" % (flag,))
                mode, option_name = flag[0], flag[1:]
                option_flag = doctest.OPTIONFLAGS_BY_NAME.get(option_name)
                if not option_flag:
                    raise ValueError("Unknown doctest option %s" %
                                     (option_name,))
                # '+' sets the flag bit, '-' clears it.
                if mode == '+':
                    self.optionflags |= option_flag
                elif mode == '-':
                    self.optionflags &= ~option_flag

    def prepareTestLoader(self, loader):
        """Capture loader's suiteClass.

        This is used to create test suites from doctest files.

        """
        self.suiteClass = loader.suiteClass

    def loadTestsFromModule(self, module):
        """Load doctests from the module.

        Generator: yields a single suite of DocTestCases for the module,
        or nothing when the module is excluded or has no examples.
        """
        log.debug("loading from %s", module)
        if not self.matches(module.__name__):
            log.debug("Doctest doesn't want module %s", module)
            return
        try:
            tests = self.finder.find(module)
        except AttributeError:
            log.exception("Attribute error loading from %s", module)
            # nose allows module.__test__ = False; doctest does not and throws
            # AttributeError
            return
        if not tests:
            log.debug("No tests found in %s", module)
            return
        tests.sort()
        # Map compiled (.pyc/.pyo) paths back to the .py source file.
        module_file = src(module.__file__)
        # FIXME this breaks the id plugin somehow (tests probably don't
        # get wrapped in result proxy or something)
        cases = []
        for test in tests:
            if not test.examples:
                continue
            if not test.filename:
                test.filename = module_file
            cases.append(DocTestCase(test,
                                     optionflags=self.optionflags,
                                     result_var=self.doctest_result_var))
        if cases:
            # Doctest suites must not be split below module level.
            yield self.suiteClass(cases, context=module, can_split=False)

    def loadTestsFromFile(self, filename):
        """Load doctests from the file.

        Tests are loaded only if filename's extension matches
        configured doctest extension.

        """
        if self.extension and anyp(filename.endswith, self.extension):
            name = os.path.basename(filename)
            dh = open(filename)
            try:
                doc = dh.read()
            finally:
                dh.close()

            fixture_context = None
            globs = {'__file__': filename}
            if self.fixtures:
                # Import "<basename><suffix>" from the file's directory as
                # the fixture module (see --doctest-fixtures).
                base, ext = os.path.splitext(name)
                dirname = os.path.dirname(filename)
                sys.path.append(dirname)
                fixt_mod = base + self.fixtures
                try:
                    fixture_context = __import__(
                        fixt_mod, globals(), locals(), ["nop"])
                except ImportError, e:
                    # Best-effort: missing fixture module is not an error.
                    log.debug(
                        "Could not import %s: %s (%s)", fixt_mod, e, sys.path)
                log.debug("Fixture module %s resolved to %s",
                          fixt_mod, fixture_context)
                if hasattr(fixture_context, 'globs'):
                    # Fixture module may rewrite the doctest globals.
                    globs = fixture_context.globs(globs)
            parser = doctest.DocTestParser()
            test = parser.get_doctest(
                doc, globs=globs, name=name,
                filename=filename, lineno=0)
            if test.examples:
                case = DocFileCase(
                    test,
                    optionflags=self.optionflags,
                    setUp=getattr(fixture_context, 'setup_test', None),
                    tearDown=getattr(fixture_context, 'teardown_test', None),
                    result_var=self.doctest_result_var)
                if fixture_context:
                    # Wrap in a ContextList so module-level fixtures run.
                    yield ContextList((case,), context=fixture_context)
                else:
                    yield case
            else:
                yield False # no tests to load

    def makeTest(self, obj, parent):
        """Look for doctests in the given object, which will be a
        function, method or class.
        """
        name = getattr(obj, '__name__', 'Unnammed %s' % type(obj))
        doctests = self.finder.find(obj, module=getmodule(parent), name=name)
        if doctests:
            for test in doctests:
                if len(test.examples) == 0:
                    continue
                # Pass obj as the address hint for DocTestCase.address().
                yield DocTestCase(test, obj=obj, optionflags=self.optionflags,
                                  result_var=self.doctest_result_var)

    def matches(self, name):
        # FIXME this seems wrong -- nothing is ever going to
        # fail this test, since we're given a module NAME not FILE
        if name == '__init__.py':
            return False
        # FIXME don't think we need include/exclude checks here?
        # NOTE: filter() truthiness below relies on Python 2 semantics
        # (filter returns a list there).
        return ((self.doctest_tests or not self.conf.testMatch.search(name)
                 or (self.conf.include
                     and filter(None,
                                [inc.search(name)
                                 for inc in self.conf.include])))
                and (not self.conf.exclude
                     or not filter(None,
                                   [exc.search(name)
                                    for exc in self.conf.exclude])))

    def wantFile(self, file):
        """Override to select all modules and any file ending with
        configured doctest extension.
        """
        # always want .py files
        if file.endswith('.py'):
            return True
        # also want files that match my extension
        if (self.extension
            and anyp(file.endswith, self.extension)
            and (not self.conf.exclude
                 or not filter(None,
                               [exc.search(file)
                                for exc in self.conf.exclude]))):
            return True
        return None
+
+
class DocTestCase(doctest.DocTestCase):
    """Overrides DocTestCase to
    provide an address() method that returns the correct address for
    the doctest case. To provide hints for address(), an obj may also
    be passed -- this will be used as the test object for purposes of
    determining the test address, if it is provided.
    """
    def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
                 checker=None, obj=None, result_var='_'):
        # result_var: name of the builtin that receives the value of the
        # last interpreter command (doctest's `_` equivalent).
        self._result_var = result_var
        # obj: optional hint used by address() instead of resolving the
        # doctest's dotted name.
        self._nose_obj = obj
        super(DocTestCase, self).__init__(
            test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
            checker=checker)

    def address(self):
        # Prefer the explicit hint object when one was supplied.
        if self._nose_obj is not None:
            return test_address(self._nose_obj)
        obj = resolve_name(self._dt_test.name)

        if isproperty(obj):
            # properties have no connection to the class they are in
            # so we can't just look 'em up, we have to first look up
            # the class, then stick the prop on the end
            parts = self._dt_test.name.split('.')
            class_name = '.'.join(parts[:-1])
            cls = resolve_name(class_name)
            base_addr = test_address(cls)
            return (base_addr[0], base_addr[1],
                    '.'.join([base_addr[2], parts[-1]]))
        else:
            return test_address(obj)

    # doctests loaded via find(obj) omit the module name
    # so we need to override id, __repr__ and shortDescription
    # bonus: this will squash a 2.3 vs 2.4 incompatiblity
    def id(self):
        name = self._dt_test.name
        filename = self._dt_test.filename
        if filename is not None:
            # Prefix the id with the containing package when resolvable.
            pk = getpackage(filename)
            if pk is None:
                return name
            if not name.startswith(pk):
                name = "%s.%s" % (pk, name)
        return name

    def __repr__(self):
        name = self.id()
        name = name.split('.')
        return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
    __str__ = __repr__

    def shortDescription(self):
        return 'Doctest: %s' % self.id()

    def setUp(self):
        # Install our displayhook so result_var tracks the last result.
        if self._result_var is not None:
            self._old_displayhook = sys.displayhook
            sys.displayhook = self._displayhook
        super(DocTestCase, self).setUp()

    def _displayhook(self, value):
        # Mimic the default displayhook: skip None, store the value in the
        # configured builtin, then print its repr.
        if value is None:
            return
        setattr(builtin_mod, self._result_var, value)
        print repr(value)

    def tearDown(self):
        super(DocTestCase, self).tearDown()
        if self._result_var is not None:
            # Restore the original displayhook and remove the builtin.
            sys.displayhook = self._old_displayhook
            delattr(builtin_mod, self._result_var)
+
+
+class DocFileCase(doctest.DocFileCase):
+ """Overrides to provide address() method that returns the correct
+ address for the doc file case.
+ """
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, result_var='_'):
+ self._result_var = result_var
+ super(DocFileCase, self).__init__(
+ test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+ checker=None)
+
+ def address(self):
+ return (self._dt_test.filename, None, None)
+
+ def setUp(self):
+ if self._result_var is not None:
+ self._old_displayhook = sys.displayhook
+ sys.displayhook = self._displayhook
+ super(DocFileCase, self).setUp()
+
+ def _displayhook(self, value):
+ if value is None:
+ return
+ setattr(builtin_mod, self._result_var, value)
+ print repr(value)
+
+ def tearDown(self):
+ super(DocFileCase, self).tearDown()
+ if self._result_var is not None:
+ sys.displayhook = self._old_displayhook
+ delattr(builtin_mod, self._result_var)
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/errorclass.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/errorclass.py
new file mode 100755
index 00000000..d1540e00
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/errorclass.py
@@ -0,0 +1,210 @@
+"""
+ErrorClass Plugins
+------------------
+
+ErrorClass plugins provide an easy way to add support for custom
+handling of particular classes of exceptions.
+
+An ErrorClass plugin defines one or more ErrorClasses and how each is
+handled and reported on. Each error class is stored in a different
+attribute on the result, and reported separately. Each error class must
+indicate the exceptions that fall under that class, the label to use
+for reporting, and whether exceptions of the class should be
+considered as failures for the whole test run.
+
+ErrorClasses use a declarative syntax. Assign an ErrorClass to the
+attribute you wish to add to the result object, defining the
+exceptions, label and isfailure attributes. For example, to declare an
+ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError)
+as an error class with the label 'TODO' that is considered a failure,
+do this:
+
+ >>> class Todo(Exception):
+ ... pass
+ >>> class TodoError(ErrorClassPlugin):
+ ... todo = ErrorClass(Todo, label='TODO', isfailure=True)
+
+The MetaErrorClass metaclass translates the ErrorClass declarations
+into the tuples used by the error handling and reporting functions in
+the result. This is an internal format and subject to change; you
+should always use the declarative syntax for attaching ErrorClasses to
+an ErrorClass plugin.
+
+ >>> TodoError.errorClasses # doctest: +ELLIPSIS
+ ((<class ...Todo...>, ('todo', 'TODO', True)),)
+
+Let's see the plugin in action. First some boilerplate.
+
+ >>> import sys
+ >>> import unittest
+ >>> try:
+ ... # 2.7+
+ ... from unittest.runner import _WritelnDecorator
+ ... except ImportError:
+ ... from unittest import _WritelnDecorator
+ ...
+ >>> buf = _WritelnDecorator(sys.stdout)
+
+Now define a test case that raises a Todo.
+
+ >>> class TestTodo(unittest.TestCase):
+ ... def runTest(self):
+ ... raise Todo("I need to test something")
+ >>> case = TestTodo()
+
+Prepare the result using our plugin. Normally this happens during the
+course of test execution within nose -- you won't be doing this
+yourself. For the purposes of this testing document, I'm stepping
+through the internal process of nose so you can see what happens at
+each step.
+
+ >>> plugin = TodoError()
+ >>> from nose.result import _TextTestResult
+ >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2)
+ >>> plugin.prepareTestResult(result)
+
+Now run the test. TODO is printed.
+
+ >>> _ = case(result) # doctest: +ELLIPSIS
+ runTest (....TestTodo) ... TODO: I need to test something
+
+Errors and failures are empty, but todo has our test:
+
+ >>> result.errors
+ []
+ >>> result.failures
+ []
+ >>> result.todo # doctest: +ELLIPSIS
+ [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')]
+ >>> result.printErrors() # doctest: +ELLIPSIS
+ <BLANKLINE>
+ ======================================================================
+ TODO: runTest (....TestTodo)
+ ----------------------------------------------------------------------
+ Traceback (most recent call last):
+ ...
+ ...Todo: I need to test something
+ <BLANKLINE>
+
+Since we defined a Todo as a failure, the run was not successful.
+
+ >>> result.wasSuccessful()
+ False
+"""
+
+from nose.pyversion import make_instancemethod
+from nose.plugins.base import Plugin
+from nose.result import TextTestResult
+from nose.util import isclass
+
class MetaErrorClass(type):
    """Metaclass for ErrorClassPlugins that allows error classes to be
    set up in a declarative manner.

    Collects every ErrorClass declared in the class body into the
    ``errorClasses`` tuple of (exception_class, (attr, label, isfailure))
    entries, removing the declarations from the class namespace.
    """
    def __init__(self, name, bases, attr):
        errorClasses = []
        # Iterate over a snapshot: attr is mutated (pop) inside the loop.
        # BUG FIX: the loop variable was previously also called `name`,
        # shadowing the class-name parameter, so type.__init__ below was
        # handed an attribute name instead of the class name.
        for attr_name, detail in list(attr.items()):
            if isinstance(detail, ErrorClass):
                attr.pop(attr_name)
                for cls in detail:
                    errorClasses.append(
                        (cls, (attr_name, detail.label, detail.isfailure)))
        super(MetaErrorClass, self).__init__(name, bases, attr)
        self.errorClasses = tuple(errorClasses)
+
+
class ErrorClass(object):
    """Declarative description of one error class: the exception types it
    covers, the report label, and whether it counts as a failure.

    Both ``label`` and ``isfailure`` must be passed as keyword arguments;
    omitting either raises TypeError.
    """
    def __init__(self, *errorClasses, **kw):
        self.errorClasses = errorClasses
        for key in ('label', 'isfailure'):
            if key not in kw:
                raise TypeError("%r is a required named argument for ErrorClass"
                                % key)
            setattr(self, key, kw.pop(key))

    def __iter__(self):
        # Yield each covered exception class in declaration order.
        for cls in self.errorClasses:
            yield cls
+
+
class ErrorClassPlugin(Plugin):
    """
    Base class for ErrorClass plugins. Subclass this class and declare the
    exceptions that you wish to handle as attributes of the subclass.
    """
    # MetaErrorClass collects ErrorClass declarations on subclasses into
    # the errorClasses tuple below (Python 2 metaclass hook).
    __metaclass__ = MetaErrorClass
    # High score so error-class handling runs before most other plugins.
    score = 1000
    # Tuple of (exception_class, (storage_attr, label, isfailure)) entries,
    # filled in by the metaclass.
    errorClasses = ()

    def addError(self, test, err):
        """Claim the error (return True) when its exception class is one
        of, or a subclass of, our declared error classes, so the patched
        result routes it to error-class storage instead of errors/failures.
        """
        err_cls, a, b = err
        if not isclass(err_cls):
            return
        classes = [e[0] for e in self.errorClasses]
        # NOTE: relies on Python 2 filter() returning a (possibly empty)
        # list; the truthiness test means "any declared class matched".
        if filter(lambda c: issubclass(err_cls, c), classes):
            return True

    def prepareTestResult(self, result):
        """Patch the result (once) and register our error classes on it."""
        if not hasattr(result, 'errorClasses'):
            self.patchResult(result)
        for cls, (storage_attr, label, isfail) in self.errorClasses:
            if cls not in result.errorClasses:
                # Reuse an existing storage list if another plugin (or a
                # prior run) already created the attribute.
                storage = getattr(result, storage_attr, [])
                setattr(result, storage_attr, storage)
                result.errorClasses[cls] = (storage, label, isfail)

    def patchResult(self, result):
        """Replace the result's reporting methods with errorclass-aware
        versions, keeping the originals under _orig_* attributes.
        """
        result.printLabel = print_label_patch(result)
        result._orig_addError, result.addError = \
            result.addError, add_error_patch(result)
        result._orig_wasSuccessful, result.wasSuccessful = \
            result.wasSuccessful, wassuccessful_patch(result)
        # printErrors/addSkip only exist on some result implementations.
        if hasattr(result, 'printErrors'):
            result._orig_printErrors, result.printErrors = \
                result.printErrors, print_errors_patch(result)
        if hasattr(result, 'addSkip'):
            result._orig_addSkip, result.addSkip = \
                result.addSkip, add_skip_patch(result)
        result.errorClasses = {}
+
+
def add_error_patch(result):
    """Create a new addError method to patch into a result instance
    that recognizes the errorClasses attribute and deals with
    errorclasses correctly.
    """
    # Bind nose's errorclass-aware TextTestResult.addError to this result.
    return make_instancemethod(TextTestResult.addError, result)
+
+
def print_errors_patch(result):
    """Create a new printErrors method that prints errorClasses items
    as well.
    """
    # Bind nose's errorclass-aware TextTestResult.printErrors to this result.
    return make_instancemethod(TextTestResult.printErrors, result)
+
+
def print_label_patch(result):
    """Create a printLabel method bound to the result that writes the
    error-class label (e.g. DEPRECATED) for a test.
    """
    return make_instancemethod(TextTestResult.printLabel, result)
+
+
def wassuccessful_patch(result):
    """Create a new wasSuccessful method that checks errorClasses for
    exceptions that were put into other slots than error or failure
    but that still count as not success.
    """
    # Bind nose's errorclass-aware TextTestResult.wasSuccessful to result.
    return make_instancemethod(TextTestResult.wasSuccessful, result)
+
+
def add_skip_patch(result):
    """Create a new addSkip method to patch into a result instance
    that delegates to addError.
    """
    # Needed so unittest2-style skips flow through errorclass handling.
    return make_instancemethod(TextTestResult.addSkip, result)
+
+
if __name__ == '__main__':
    # Run the examples in this module's docstring as doctests.
    import doctest
    doctest.testmod()
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/failuredetail.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/failuredetail.py
new file mode 100755
index 00000000..6462865d
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/failuredetail.py
@@ -0,0 +1,49 @@
+"""
+This plugin provides assert introspection. When the plugin is enabled
+and a test failure occurs, the traceback is displayed with extra context
+around the line in which the exception was raised. Simple variable
+substitution is also performed in the context output to provide more
+debugging information.
+"""
+
+from nose.plugins import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.inspector import inspect_traceback
+
class FailureDetail(Plugin):
    """
    Plugin that provides extra information in tracebacks of test failures.
    """
    # Run before the output-capture plugin so its detail is included.
    score = 1600 # before capture

    def options(self, parser, env):
        """Register commmandline options.
        """
        parser.add_option(
            "-d", "--detailed-errors", "--failure-detail",
            action="store_true",
            default=env.get('NOSE_DETAILED_ERRORS'),
            dest="detailedErrors", help="Add detail to error"
            " output by attempting to evaluate failed"
            " asserts [NOSE_DETAILED_ERRORS]")

    def configure(self, options, conf):
        """Configure plugin.
        """
        if not self.can_configure:
            return
        self.conf = conf
        self.enabled = options.detailedErrors

    def formatFailure(self, test, err):
        """Add detail from traceback inspection to error message of a failure.
        """
        exc_class, exc_value, tb = err
        message = exc_to_unicode(exc_value)
        detail = None
        if tb:
            # Append the inspected-traceback detail to the message.
            detail = force_unicode(inspect_traceback(tb))
            message = '\n'.join([message, detail])
        # Expose the detail (or None) on the test for other plugins.
        test.tbinfo = detail
        return (exc_class, message, tb)
+
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/isolate.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/isolate.py
new file mode 100755
index 00000000..13235dfb
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/isolate.py
@@ -0,0 +1,103 @@
+"""The isolation plugin resets the contents of sys.modules after running
+each test module or package. Use it by setting ``--with-isolation`` or the
+NOSE_WITH_ISOLATION environment variable.
+
+The effects are similar to wrapping the following functions around the
+import and execution of each test module::
+
+ def setup(module):
+ module._mods = sys.modules.copy()
+
+ def teardown(module):
+ to_del = [ m for m in sys.modules.keys() if m not in
+ module._mods ]
+ for mod in to_del:
+ del sys.modules[mod]
+ sys.modules.update(module._mods)
+
+Isolation works only during lazy loading. In normal use, this is only
+during discovery of modules within a directory, where the process of
+importing, loading tests and running tests from each module is
+encapsulated in a single loadTestsFromName call. This plugin
+implements loadTestsFromNames to force the same lazy-loading there,
+which allows isolation to work in directed mode as well as discovery,
+at the cost of some efficiency: lazy-loading names forces full context
+setup and teardown to run for each name, defeating the grouping that
+is normally used to ensure that context setup and teardown are run the
+fewest possible times for a given set of names.
+
+.. warning ::
+
+ This plugin should not be used in conjunction with other plugins
+ that assume that modules, once imported, will stay imported; for
+ instance, it may cause very odd results when used with the coverage
+ plugin.
+
+"""
+
+import logging
+import sys
+
+from nose.plugins import Plugin
+
+
+log = logging.getLogger('nose.plugins.isolation')
+
class IsolationPlugin(Plugin):
    """
    Activate the isolation plugin to isolate changes to external
    modules to a single test module or package. The isolation plugin
    resets the contents of sys.modules after each test module or
    package runs to its state before the test. PLEASE NOTE that this
    plugin should not be used with the coverage plugin, or in any other case
    where module reloading may produce undesirable side-effects.
    """
    score = 10 # I want to be last
    name = 'isolation'

    def configure(self, options, conf):
        """Configure plugin.
        """
        Plugin.configure(self, options, conf)
        # Stack of sys.modules snapshots, one per entered context.
        self._mod_stack = []

    def beforeContext(self):
        """Copy sys.modules onto my mod stack
        """
        mods = sys.modules.copy()
        self._mod_stack.append(mods)

    def afterContext(self):
        """Pop my mod stack and restore sys.modules to the state
        it was in when mod stack was pushed.
        """
        mods = self._mod_stack.pop()
        # Drop modules imported during the context...
        to_del = [ m for m in sys.modules.keys() if m not in mods ]
        if to_del:
            log.debug('removing sys modules entries: %s', to_del)
            for mod in to_del:
                del sys.modules[mod]
        # ...and restore any entries the context replaced.
        sys.modules.update(mods)

    def loadTestsFromNames(self, names, module=None):
        """Create a lazy suite that calls beforeContext and afterContext
        around each name. The side-effect of this is that full context
        fixtures will be set up and torn down around each test named.
        """
        # Fast path for when we don't care
        if not names or len(names) == 1:
            return
        loader = self.loader
        plugins = self.conf.plugins
        def lazy():
            # Generator: context enter/exit brackets each lazily loaded name.
            for name in names:
                plugins.beforeContext()
                yield loader.loadTestsFromName(name, module=module)
                plugins.afterContext()
        return (loader.suiteClass(lazy), [])

    def prepareTestLoader(self, loader):
        """Get handle on test loader so we can use it in loadTestsFromNames.
        """
        self.loader = loader
+
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/logcapture.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/logcapture.py
new file mode 100755
index 00000000..4c9a79f6
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/logcapture.py
@@ -0,0 +1,245 @@
+"""
+This plugin captures logging statements issued during test execution. When an
+error or failure occurs, the captured log messages are attached to the running
+test in the test.capturedLogging attribute, and displayed with the error or failure
+output. It is enabled by default but can be turned off with the option
+``--nologcapture``.
+
+You can filter captured logging statements with the ``--logging-filter`` option.
+If set, it specifies which logger(s) will be captured; loggers that do not match
+will be passed. Example: specifying ``--logging-filter=sqlalchemy,myapp``
+will ensure that only statements logged via sqlalchemy.engine, myapp
+or myapp.foo.bar logger will be logged.
+
+You can remove other installed logging handlers with the
+``--logging-clear-handlers`` option.
+"""
+
+import logging
+from logging import Handler
+import threading
+
+from nose.plugins.base import Plugin
+from nose.util import anyp, ln, safe_str
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
class FilterSet(object):
    """Splits logger-name filter components into inclusive and exclusive
    name lists and decides which log records may be captured.

    A component prefixed with '-' excludes matching loggers; all other
    components include them.
    """
    def __init__(self, filter_components):
        self.inclusive, self.exclusive = self._partition(filter_components)

    # @staticmethod
    def _partition(components):
        """Split components into (inclusive, exclusive) name lists."""
        inclusive, exclusive = [], []
        for component in components:
            if component.startswith('-'):
                exclusive.append(component[1:])
            else:
                inclusive.append(component)
        return inclusive, exclusive
    _partition = staticmethod(_partition)

    def allow(self, record):
        """returns whether this record should be printed"""
        # BUG FIX: this previously tested `if not self:`, which is never
        # true for a plain object, so the empty-filter short-circuit was
        # dead code (correct only because _allow/_deny handle empty lists).
        if not (self.inclusive or self.exclusive):
            # nothing to filter
            return True
        return self._allow(record) and not self._deny(record)

    # @staticmethod
    def _any_match(matchers, record):
        """return the bool of whether `record` starts with
        any item in `matchers`"""
        # A name matches a key exactly or as a dotted descendant
        # (e.g. key 'myapp' matches 'myapp' and 'myapp.sub').
        return any(record == key or record.startswith(key + '.')
                   for key in matchers)
    _any_match = staticmethod(_any_match)

    def _allow(self, record):
        if not self.inclusive:
            return True
        return self._any_match(self.inclusive, record)

    def _deny(self, record):
        if not self.exclusive:
            return False
        return self._any_match(self.exclusive, record)
+
+
class MyMemoryHandler(Handler):
    """Logging handler that buffers formatted log records in memory so
    they can be attached to a failing test's output.
    """
    def __init__(self, logformat, logdatefmt, filters):
        Handler.__init__(self)
        fmt = logging.Formatter(logformat, logdatefmt)
        self.setFormatter(fmt)
        self.filterset = FilterSet(filters)
        # Formatted record strings captured so far.
        self.buffer = []
    def emit(self, record):
        # Store the formatted record; nothing is written anywhere.
        self.buffer.append(self.format(record))
    def flush(self):
        pass # do nothing
    def truncate(self):
        # Drop everything captured so far (called between tests).
        self.buffer = []
    def filter(self, record):
        # Apply the name filterset first, then standard Handler filtering;
        # records denied by the filterset fall through to None (falsy).
        if self.filterset.allow(record.name):
            return Handler.filter(self, record)
    def __getstate__(self):
        # The handler's lock is not picklable (multiprocess plugin support).
        state = self.__dict__.copy()
        del state['lock']
        return state
    def __setstate__(self, state):
        self.__dict__.update(state)
        # Recreate the lock dropped in __getstate__.
        self.lock = threading.RLock()
+
+
class LogCapture(Plugin):
    """
    Log capture plugin. Enabled by default. Disable with --nologcapture.
    This plugin captures logging statements issued during test execution,
    appending any output captured to the error or failure output,
    should the test fail or raise an error.
    """
    enabled = True
    env_opt = 'NOSE_NOLOGCAPTURE'
    name = 'logcapture'
    score = 500
    # Defaults; overridden from command line / environment in configure().
    logformat = '%(name)s: %(levelname)s: %(message)s'
    logdatefmt = None
    clear = False
    # Default filter: drop nose's own log messages.
    filters = ['-nose']

    def options(self, parser, env):
        """Register commandline options.
        """
        parser.add_option(
            "--nologcapture", action="store_false",
            default=not env.get(self.env_opt), dest="logcapture",
            help="Disable logging capture plugin. "
                 "Logging configuration will be left intact."
                 " [NOSE_NOLOGCAPTURE]")
        parser.add_option(
            "--logging-format", action="store", dest="logcapture_format",
            default=env.get('NOSE_LOGFORMAT') or self.logformat,
            metavar="FORMAT",
            help="Specify custom format to print statements. "
                 "Uses the same format as used by standard logging handlers."
                 " [NOSE_LOGFORMAT]")
        parser.add_option(
            "--logging-datefmt", action="store", dest="logcapture_datefmt",
            default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
            metavar="FORMAT",
            help="Specify custom date/time format to print statements. "
                 "Uses the same format as used by standard logging handlers."
                 " [NOSE_LOGDATEFMT]")
        parser.add_option(
            "--logging-filter", action="store", dest="logcapture_filters",
            default=env.get('NOSE_LOGFILTER'),
            metavar="FILTER",
            help="Specify which statements to filter in/out. "
                 "By default, everything is captured. If the output is too"
                 " verbose,\nuse this option to filter out needless output.\n"
                 "Example: filter=foo will capture statements issued ONLY to\n"
                 " foo or foo.what.ever.sub but not foobar or other logger.\n"
                 "Specify multiple loggers with comma: filter=foo,bar,baz.\n"
                 "If any logger name is prefixed with a minus, eg filter=-foo,\n"
                 "it will be excluded rather than included. Default: "
                 "exclude logging messages from nose itself (-nose)."
                 " [NOSE_LOGFILTER]\n")
        parser.add_option(
            "--logging-clear-handlers", action="store_true",
            default=False, dest="logcapture_clear",
            help="Clear all other logging handlers")
        parser.add_option(
            "--logging-level", action="store",
            default='NOTSET', dest="logcapture_level",
            help="Set the log level to capture")

    def configure(self, options, conf):
        """Configure plugin.
        """
        self.conf = conf
        # Disable if explicitly disabled, or if logging is
        # configured via logging config file
        if not options.logcapture or conf.loggingConfig:
            self.enabled = False
        self.logformat = options.logcapture_format
        self.logdatefmt = options.logcapture_datefmt
        self.clear = options.logcapture_clear
        self.loglevel = options.logcapture_level
        if options.logcapture_filters:
            self.filters = options.logcapture_filters.split(',')

    def setupLoghandler(self):
        """Attach our memory handler to the root logger, optionally
        clearing all other handlers first.
        """
        # setup our handler with root logger
        root_logger = logging.getLogger()
        if self.clear:
            if hasattr(root_logger, "handlers"):
                for handler in root_logger.handlers:
                    root_logger.removeHandler(handler)
            for logger in logging.Logger.manager.loggerDict.values():
                if hasattr(logger, "handlers"):
                    for handler in logger.handlers:
                        logger.removeHandler(handler)
        # make sure there isn't one already
        # you can't simply use "if self.handler not in root_logger.handlers"
        # since at least in unit tests this doesn't work --
        # LogCapture() is instantiated for each test case while root_logger
        # is module global
        # so we always add new MyMemoryHandler instance
        for handler in root_logger.handlers[:]:
            if isinstance(handler, MyMemoryHandler):
                root_logger.handlers.remove(handler)
        root_logger.addHandler(self.handler)
        # to make sure everything gets captured
        loglevel = getattr(self, "loglevel", "NOTSET")
        root_logger.setLevel(getattr(logging, loglevel))

    def begin(self):
        """Set up logging handler before test run begins.
        """
        self.start()

    def start(self):
        # Fresh handler per run; buffers formatted records in memory.
        self.handler = MyMemoryHandler(self.logformat, self.logdatefmt,
                                       self.filters)
        self.setupLoghandler()

    def end(self):
        pass

    def beforeTest(self, test):
        """Clear buffers and handlers before test.
        """
        self.setupLoghandler()

    def afterTest(self, test):
        """Clear buffers after test.
        """
        self.handler.truncate()

    def formatFailure(self, test, err):
        """Add captured log messages to failure output.
        """
        return self.formatError(test, err)

    def formatError(self, test, err):
        """Add captured log messages to error output.
        """
        # logic flow copied from Capture.formatError
        test.capturedLogging = records = self.formatLogRecords()
        if not records:
            return err
        ec, ev, tb = err
        return (ec, self.addCaptureToErr(ev, records), tb)

    def formatLogRecords(self):
        # NOTE: Python 2 map() returns a list of stringified records.
        return map(safe_str, self.handler.buffer)

    def addCaptureToErr(self, ev, records):
        # Wrap the captured records in begin/end marker lines below the
        # original error message.
        return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
                         records + \
                         [ln('>> end captured logging <<')])
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/manager.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/manager.py
new file mode 100755
index 00000000..4d2ed22b
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/manager.py
@@ -0,0 +1,460 @@
+"""
+Plugin Manager
+--------------
+
+A plugin manager class is used to load plugins, manage the list of
+loaded plugins, and proxy calls to those plugins.
+
+The plugin managers provided with nose are:
+
+:class:`PluginManager`
+ This manager doesn't implement loadPlugins, so it can only work
+ with a static list of plugins.
+
+:class:`BuiltinPluginManager`
+ This manager loads plugins referenced in ``nose.plugins.builtin``.
+
+:class:`EntryPointPluginManager`
+ This manager uses setuptools entrypoints to load plugins.
+
+:class:`ExtraPluginsPluginManager`
+ This manager loads extra plugins specified with the keyword
+ `addplugins`.
+
+:class:`DefaultPluginManager`
+ This is the manager class that will be used by default. If
+ setuptools is installed, it is a subclass of
+ :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
+ otherwise, an alias to :class:`BuiltinPluginManager`.
+
+:class:`RestrictedPluginManager`
+ This manager is for use in test runs where some plugin calls are
+ not available, such as runs started with ``python setup.py test``,
+ where the test runner is the default unittest :class:`TextTestRunner`. It
+ is a subclass of :class:`DefaultPluginManager`.
+
+Writing a plugin manager
+========================
+
+If you want to load plugins via some other means, you can write a
+plugin manager and pass an instance of your plugin manager class when
+instantiating the :class:`nose.config.Config` instance that you pass to
+:class:`TestProgram` (or :func:`main` or :func:`run`).
+
+To implement your plugin loading scheme, implement ``loadPlugins()``,
+and in that method, call ``addPlugin()`` with an instance of each plugin
+you wish to make available. Make sure to call
+``super(YourManager, self).loadPlugins()`` as well if you have subclassed a manager
+other than ``PluginManager``.
+
+"""
+import inspect
+import logging
+import os
+import sys
+from itertools import chain as iterchain
+from warnings import warn
+import nose.config
+from nose.failure import Failure
+from nose.plugins.base import IPluginInterface
+from nose.pyversion import sort_list
+
+try:
+ import cPickle as pickle
+except:
+ import pickle
+try:
+ from cStringIO import StringIO
+except:
+ from StringIO import StringIO
+
+
+__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
+ 'BuiltinPluginManager', 'RestrictedPluginManager']
+
+log = logging.getLogger(__name__)
+
+
class PluginProxy(object):
    """Proxy for plugin calls. Essentially a closure bound to the
    given call and plugin list.

    The plugin proxy also must be bound to a particular plugin
    interface specification, so that it knows what calls are available
    and any special handling that is required for each call.
    """
    # Interface class whose method attributes (generative / chainable /
    # static_args flags) determine how each call is dispatched.
    interface = IPluginInterface
    def __init__(self, call, plugins):
        try:
            self.method = getattr(self.interface, call)
        except AttributeError:
            raise AttributeError("%s is not a valid %s method"
                                 % (call, self.interface.__name__))
        self.call = self.makeCall(call)
        self.plugins = []
        for p in plugins:
            self.addPlugin(p, call)

    def __call__(self, *arg, **kw):
        return self.call(*arg, **kw)

    def addPlugin(self, plugin, call):
        """Add plugin to my list of plugins to call, if it has the attribute
        I'm bound to.
        """
        meth = getattr(plugin, call, None)
        if meth is not None:
            if call == 'loadTestsFromModule' and \
                    len(inspect.getargspec(meth)[0]) == 2:
                # Back-compat: adapt an old-style loadTestsFromModule that
                # takes only (self, module) to the newer (module, path)
                # signature by discarding the extra arguments.
                orig_meth = meth
                meth = lambda module, path, **kwargs: orig_meth(module)
            self.plugins.append((plugin, meth))

    def makeCall(self, call):
        """Choose the dispatch strategy for *call* based on the flags set
        on the corresponding interface method."""
        if call == 'loadTestsFromNames':
            # special case -- load tests from names behaves somewhat differently
            # from other chainable calls, because plugins return a tuple, only
            # part of which can be chained to the next plugin.
            return self._loadTestsFromNames

        meth = self.method
        if getattr(meth, 'generative', False):
            # call all plugins and yield a flattened iterator of their results
            return lambda *arg, **kw: list(self.generate(*arg, **kw))
        elif getattr(meth, 'chainable', False):
            return self.chain
        else:
            # return a value from the first plugin that returns non-None
            return self.simple

    def chain(self, *arg, **kw):
        """Call plugins in a chain, where the result of each plugin call is
        sent to the next plugin as input. The final output result is returned.
        """
        result = None
        # extract the static arguments (if any) from arg so they can
        # be passed to each plugin call in the chain
        static = [a for (static, a)
                  in zip(getattr(self.method, 'static_args', []), arg)
                  if static]
        for p, meth in self.plugins:
            result = meth(*arg, **kw)
            arg = static[:]
            arg.append(result)
        return result

    def generate(self, *arg, **kw):
        """Call all plugins, yielding each item in each non-None result.
        """
        for p, meth in self.plugins:
            result = None
            try:
                result = meth(*arg, **kw)
                if result is not None:
                    for r in result:
                        yield r
            except (KeyboardInterrupt, SystemExit):
                raise
            except:
                # A failing plugin is turned into a Failure test case so the
                # error is reported in the run instead of aborting collection.
                exc = sys.exc_info()
                yield Failure(*exc)
                continue

    def simple(self, *arg, **kw):
        """Call all plugins, returning the first non-None result.
        """
        for p, meth in self.plugins:
            result = meth(*arg, **kw)
            if result is not None:
                return result

    def _loadTestsFromNames(self, names, module=None):
        """Chainable but not quite normal. Plugins return a tuple of
        (tests, names) after processing the names. The tests are added
        to a suite that is accumulated throughout the full call, while
        names are input for the next plugin in the chain.
        """
        suite = []
        for p, meth in self.plugins:
            result = meth(names, module=module)
            if result is not None:
                suite_part, names = result
                if suite_part:
                    suite.extend(suite_part)
        return suite, names
+
+
class NoPlugins(object):
    """Null plugin manager that has no plugins.

    Stands in for a real plugin manager when plugin support is disabled:
    every interface call is a no-op (or an empty iterator, for generative
    calls), and attempts to add plugins raise NotImplementedError.
    """
    interface = IPluginInterface
    def __init__(self):
        self._plugins = self.plugins = ()

    def __iter__(self):
        # BUG FIX: __iter__ must return an *iterator*; the previous
        # implementation returned the empty tuple itself, which made
        # iter(self) raise "TypeError: iter() returned non-iterator".
        return iter(())

    def _doNothing(self, *args, **kwds):
        pass

    def _emptyIterator(self, *args, **kwds):
        # An empty tuple is fine here: callers iterate the *result*,
        # and a tuple is itself iterable.
        return ()

    def __getattr__(self, call):
        # Dispatch unknown attribute access through the interface:
        # generative calls must return an iterable; everything else no-ops.
        method = getattr(self.interface, call)
        if getattr(method, "generative", False):
            return self._emptyIterator
        else:
            return self._doNothing

    def addPlugin(self, plug):
        raise NotImplementedError()

    def addPlugins(self, plugins):
        raise NotImplementedError()

    def configure(self, options, config):
        pass

    def loadPlugins(self):
        pass

    def sort(self):
        pass
+
+
class PluginManager(object):
    """Base class for plugin managers. PluginManager is intended to be
    used only with a static list of plugins. The loadPlugins() implementation
    only reloads plugins from _extraplugins to prevent those from being
    overridden by a subclass.

    The basic functionality of a plugin manager is to proxy all unknown
    attributes through a ``PluginProxy`` to a list of plugins.

    Note that the list of plugins *may not* be changed after the first plugin
    call.
    """
    # Class used to build per-call proxies; may be overridden per instance.
    proxyClass = PluginProxy

    def __init__(self, plugins=(), proxyClass=None):
        self._plugins = []
        self._extraplugins = ()
        self._proxies = {}
        if plugins:
            self.addPlugins(plugins)
        if proxyClass is not None:
            self.proxyClass = proxyClass

    def __getattr__(self, call):
        # Proxies are cached per call name, bound to the plugin list as it
        # exists now -- this is why the plugin list must not change after
        # the first proxied call.
        try:
            return self._proxies[call]
        except KeyError:
            proxy = self.proxyClass(call, self._plugins)
            self._proxies[call] = proxy
            return proxy

    def __iter__(self):
        return iter(self.plugins)

    def addPlugin(self, plug):
        # allow, for instance, plugins loaded via entry points to
        # supplant builtin plugins.
        new_name = getattr(plug, 'name', object())
        self._plugins[:] = [p for p in self._plugins
                            if getattr(p, 'name', None) != new_name]
        self._plugins.append(plug)

    def addPlugins(self, plugins=(), extraplugins=()):
        """extraplugins are maintained in a separate list and
        re-added by loadPlugins() to prevent their being overwritten
        by plugins added by a subclass of PluginManager
        """
        self._extraplugins = extraplugins
        for plug in iterchain(plugins, extraplugins):
            self.addPlugin(plug)

    def configure(self, options, config):
        """Configure the set of plugins with the given options
        and config instance. After configuration, disabled plugins
        are removed from the plugins list.
        """
        log.debug("Configuring plugins")
        self.config = config
        cfg = PluginProxy('configure', self._plugins)
        cfg(options, config)
        enabled = [plug for plug in self._plugins if plug.enabled]
        # NOTE(review): assigning through the `plugins` property calls
        # addPlugins(), whose default resets _extraplugins to () --
        # confirm dropping the extras here is intended.
        self.plugins = enabled
        self.sort()
        log.debug("Plugins enabled: %s", enabled)

    def loadPlugins(self):
        # Re-register the extras so plugins loaded by a subclass cannot
        # supplant them (see addPlugins docstring).
        for plug in self._extraplugins:
            self.addPlugin(plug)

    def sort(self):
        # Highest score first.
        return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)

    def _get_plugins(self):
        return self._plugins

    def _set_plugins(self, plugins):
        self._plugins = []
        self.addPlugins(plugins)

    plugins = property(_get_plugins, _set_plugins, None,
                       """Access the list of plugins managed by
                       this plugin manager""")
+
+
class ZeroNinePlugin:
    """Adapter that lets a nose 0.9-era plugin run under the 0.10 API.

    Wraps the old plugin and translates each 0.10-style call (which
    receives nose.case.Test wrappers) into the 0.9 calling convention
    (bare tests plus captured output / traceback arguments). Attribute
    access not handled here falls through to the wrapped plugin.
    """
    def __init__(self, plugin):
        self.plugin = plugin

    def options(self, parser, env=os.environ):
        # 0.9 plugins registered options via add_options()
        self.plugin.add_options(parser, env)

    def addError(self, test, err):
        if not hasattr(self.plugin, 'addError'):
            return
        # Route skips and deprecations to their dedicated 0.9 hooks.
        from nose.exc import SkipTest, DeprecatedTest
        ec, ev, tb = err
        for exc_type, hook_name in ((SkipTest, 'addSkip'),
                                    (DeprecatedTest, 'addDeprecated')):
            if issubclass(ec, exc_type):
                if not hasattr(self.plugin, hook_name):
                    return
                return getattr(self.plugin, hook_name)(test.test)
        # 0.9 addError expects the captured output as a third argument.
        capt = test.capturedOutput
        return self.plugin.addError(test.test, err, capt)

    def loadTestsFromFile(self, filename):
        # the 0.9 name for this hook was loadTestsFromPath
        if hasattr(self.plugin, 'loadTestsFromPath'):
            return self.plugin.loadTestsFromPath(filename)

    def addFailure(self, test, err):
        if not hasattr(self.plugin, 'addFailure'):
            return
        # 0.9 addFailure expects captured output and traceback info too.
        capt = test.capturedOutput
        tbinfo = test.tbinfo
        return self.plugin.addFailure(test.test, err, capt, tbinfo)

    def addSuccess(self, test):
        if not hasattr(self.plugin, 'addSuccess'):
            return
        # note: intentionally does not return the plugin's result
        capt = test.capturedOutput
        self.plugin.addSuccess(test.test, capt)

    def startTest(self, test):
        if not hasattr(self.plugin, 'startTest'):
            return
        return self.plugin.startTest(test.test)

    def stopTest(self, test):
        if not hasattr(self.plugin, 'stopTest'):
            return
        return self.plugin.stopTest(test.test)

    def __getattr__(self, val):
        return getattr(self.plugin, val)
+
+
class EntryPointPluginManager(PluginManager):
    """Plugin manager that loads plugins from the `nose.plugins` and
    `nose.plugins.0.10` entry points.
    """
    # (entry point group, adapter) pairs: 0.9-era plugins registered under
    # `nose.plugins` are wrapped in ZeroNinePlugin; 0.10 plugins are not.
    entry_points = (('nose.plugins.0.10', None),
                    ('nose.plugins', ZeroNinePlugin))

    def loadPlugins(self):
        """Load plugins by iterating the `nose.plugins` entry point.
        """
        from pkg_resources import iter_entry_points
        loaded = {}
        for entry_point, adapt in self.entry_points:
            for ep in iter_entry_points(entry_point):
                # first registration of a name wins, so a 0.10 plugin
                # shadows a 0.9 plugin with the same entry point name
                if ep.name in loaded:
                    continue
                loaded[ep.name] = True
                log.debug('%s load plugin %s', self.__class__.__name__, ep)
                try:
                    plugcls = ep.load()
                except KeyboardInterrupt:
                    raise
                except Exception, e:
                    # never want a plugin load to kill the test run
                    # but we can't log here because the logger is not yet
                    # configured
                    warn("Unable to load plugin %s: %s" % (ep, e),
                         RuntimeWarning)
                    continue
                if adapt:
                    plug = adapt(plugcls())
                else:
                    plug = plugcls()
                self.addPlugin(plug)
        super(EntryPointPluginManager, self).loadPlugins()
+
+
class BuiltinPluginManager(PluginManager):
    """Plugin manager that loads plugins from the list in
    `nose.plugins.builtin`.
    """
    def loadPlugins(self):
        """Instantiate and register every builtin nose plugin, then let
        the base class re-add any extra plugins."""
        from nose.plugins import builtin
        for plugin_class in builtin.plugins:
            self.addPlugin(plugin_class())
        super(BuiltinPluginManager, self).loadPlugins()
+
try:
    # If setuptools/pkg_resources is available, the default manager can
    # discover both builtin plugins and entry-point plugins.
    import pkg_resources
    class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
        pass

except ImportError:
    # No setuptools: fall back to builtin plugins only.
    class DefaultPluginManager(BuiltinPluginManager):
        pass
+
class RestrictedPluginManager(DefaultPluginManager):
    """Plugin manager that restricts the plugin list to those not
    excluded by a list of exclude methods. Any plugin that implements
    an excluded method will be removed from the manager's plugin list
    after plugins are loaded.
    """
    def __init__(self, plugins=(), exclude=(), load=True):
        DefaultPluginManager.__init__(self, plugins)
        self.load = load
        self.exclude = exclude
        self.excluded = []
        self._excludedOpts = None

    def excludedOption(self, name):
        """Return the optparse Option for ``--<name>`` as registered by
        the excluded plugins (the option parser is built lazily)."""
        if self._excludedOpts is None:
            from optparse import OptionParser
            self._excludedOpts = OptionParser(add_help_option=False)
            for plugin in self.excluded:
                plugin.options(self._excludedOpts, env={})
        return self._excludedOpts.get_option('--' + name)

    def loadPlugins(self):
        """Load plugins, then partition out any that implement one of the
        excluded methods."""
        if self.load:
            DefaultPluginManager.loadPlugins(self)
        allowed = []
        for plugin in self.plugins:
            if any(hasattr(plugin, method) for method in self.exclude):
                self.excluded.append(plugin)
            else:
                allowed.append(plugin)
        self.plugins = allowed
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/multiprocess.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/multiprocess.py
new file mode 100755
index 00000000..2cae744a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/multiprocess.py
@@ -0,0 +1,835 @@
+"""
+Overview
+========
+
+The multiprocess plugin enables you to distribute your test run among a set of
+worker processes that run tests in parallel. This can speed up CPU-bound test
+runs (as long as the number of worker processes is around the number of
+processors or cores available), but is mainly useful for IO-bound tests that
+spend most of their time waiting for data to arrive from someplace else.
+
+.. note ::
+
+ See :doc:`../doc_tests/test_multiprocess/multiprocess` for
+ additional documentation and examples. Use of this plugin on python
+ 2.5 or earlier requires the multiprocessing_ module, also available
+ from PyPI.
+
+.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
+
+How tests are distributed
+=========================
+
+The ideal case would be to dispatch each test to a worker process
+separately. This ideal is not attainable in all cases, however, because many
+test suites depend on context (class, module or package) fixtures.
+
+The plugin can't know (unless you tell it -- see below!) if a context fixture
+can be called many times concurrently (is re-entrant), or if it can be shared
+among tests running in different processes. Therefore, if a context has
+fixtures, the default behavior is to dispatch the entire suite to a worker as
+a unit.
+
+Controlling distribution
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two context-level variables that you can use to control this default
+behavior.
+
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
+in the context, and the plugin will dispatch tests in suites bound to that
+context as if the context had no fixtures. This means that the fixtures will
+execute concurrently and multiple times, typically once per test.
+
+If a context's fixtures can be shared by tests running in different processes
+-- such as a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. These fixtures will then execute in the primary nose process, and
+tests in those contexts will be individually dispatched to run in parallel.
+
+How results are collected and reported
+======================================
+
+As each test or suite executes in a worker process, results (failures, errors,
+and specially handled exceptions like SkipTest) are collected in that
+process. When the worker process finishes, it returns results to the main
+nose process. There, any progress output is printed (dots!), and the
+results from the test run are combined into a consolidated result
+set. When results have been received for all dispatched tests, or all
+workers have died, the result summary is output as normal.
+
+Beware!
+=======
+
+Not all test suites will benefit from, or even operate correctly using, this
+plugin. For example, CPU-bound tests will run more slowly if you don't have
+multiple processors. There are also some differences in plugin
+interactions and behaviors due to the way in which tests are dispatched and
+loaded. In general, test loading under this plugin operates as if it were
+always in directed mode instead of discovered mode. For instance, doctests
+in test modules will always be found when using this plugin with the doctest
+plugin.
+
+But the biggest issue you will face is probably concurrency. Unless you
+have kept your tests as religiously pure unit tests, with no side-effects, no
+ordering issues, and no external dependencies, chances are you will experience
+odd, intermittent and unexplainable failures and errors when using this
+plugin. This doesn't necessarily mean the plugin is broken; it may mean that
+your test suite is not safe for concurrency.
+
+New Features in 1.1.0
+=====================
+
+* functions generated by test generators are now added to the worker queue
+ making them multi-threaded.
+* fixed timeout functionality, now functions will be terminated with a
+ TimedOutException exception when they exceed their execution time. The
+ worker processes are not terminated.
+* added ``--process-restartworker`` option to restart workers once they are
+ done, this helps control memory usage. Sometimes memory leaks can accumulate
+ making long runs very difficult.
+* added global _instantiate_plugins to configure which plugins are started
+ on the worker processes.
+
+"""
+
+import logging
+import os
+import sys
+import time
+import traceback
+import unittest
+import pickle
+import signal
+import nose.case
+from nose.core import TextTestRunner
+from nose import failure
+from nose import loader
+from nose.plugins.base import Plugin
+from nose.pyversion import bytes_
+from nose.result import TextTestResult
+from nose.suite import ContextSuite
+from nose.util import test_address
+try:
+ # 2.7+
+ from unittest.runner import _WritelnDecorator
+except ImportError:
+ from unittest import _WritelnDecorator
+from Queue import Empty
+from warnings import warn
+try:
+ from cStringIO import StringIO
+except ImportError:
+ import StringIO
+
+# this is a list of plugin classes that will be checked for and created inside
+# each worker process
+_instantiate_plugins = None
+
+log = logging.getLogger(__name__)
+
+Process = Queue = Pool = Event = Value = Array = None
+
+# have to inherit KeyboardInterrupt so it will interrupt the process properly
class TimedOutException(KeyboardInterrupt):
    """Raised in a worker (via the SIGILL handler) when a test exceeds
    its allotted execution time.

    Subclasses KeyboardInterrupt so that raising it reliably interrupts
    the worker process.
    """
    def __init__(self, value="Timed Out"):
        self.value = value

    def __str__(self):
        return repr(self.value)
+
def _import_mp():
    """Lazily bind multiprocessing primitives into module globals.

    Populates Process/Queue/Pool/Event/Value/Array from a Manager
    instance. On ImportError the globals are left as None and a
    RuntimeWarning is issued instead of raising.
    """
    global Process, Queue, Pool, Event, Value, Array
    try:
        from multiprocessing import Manager, Process
        #prevent the server process created in the manager which holds Python
        #objects and allows other processes to manipulate them using proxies
        #to interrupt on SIGINT (keyboardinterrupt) so that the communication
        #channel between subprocesses and main process is still usable after
        #ctrl+C is received in the main process.
        old=signal.signal(signal.SIGINT, signal.SIG_IGN)
        m = Manager()
        #reset it back so main process will receive a KeyboardInterrupt
        #exception on ctrl+c
        signal.signal(signal.SIGINT, old)
        Queue, Pool, Event, Value, Array = (
            m.Queue, m.Pool, m.Event, m.Value, m.Array
        )
    except ImportError:
        warn("multiprocessing module is not available, multiprocess plugin "
             "cannot be used", RuntimeWarning)
+
+
class TestLet:
    """Picklable snapshot of a test case's identifying information.

    Captures the id, short description and string form of *case* at
    construction time, so the data can be shipped across process
    boundaries without pickling the test case object itself.
    """
    def __init__(self, case):
        try:
            self._id = case.id()
        except AttributeError:
            # some cases have no id(); leave _id unset
            pass
        self._short_description = case.shortDescription()
        self._str = str(case)

    def __str__(self):
        return self._str

    def id(self):
        return self._id

    def shortDescription(self):
        return self._short_description
+
class MultiProcess(Plugin):
    """
    Run tests in multiple processes. Requires processing module.
    """
    # High score so this plugin's prepareTestRunner wins over other plugins'.
    score = 1000
    # Class-level (shared) status dict; 'active' is set once configured on.
    status = {}

    def options(self, parser, env):
        """
        Register command-line options.
        """
        parser.add_option("--processes", action="store",
                          default=env.get('NOSE_PROCESSES', 0),
                          dest="multiprocess_workers",
                          metavar="NUM",
                          help="Spread test run among this many processes. "
                          "Set a number equal to the number of processors "
                          "or cores in your machine for best results. "
                          "Pass a negative number to have the number of "
                          "processes automatically set to the number of "
                          "cores. Passing 0 means to disable parallel "
                          "testing. Default is 0 unless NOSE_PROCESSES is "
                          "set. "
                          "[NOSE_PROCESSES]")
        parser.add_option("--process-timeout", action="store",
                          default=env.get('NOSE_PROCESS_TIMEOUT', 10),
                          dest="multiprocess_timeout",
                          metavar="SECONDS",
                          help="Set timeout for return of results from each "
                          "test runner process. Default is 10. "
                          "[NOSE_PROCESS_TIMEOUT]")
        parser.add_option("--process-restartworker", action="store_true",
                          default=env.get('NOSE_PROCESS_RESTARTWORKER', False),
                          dest="multiprocess_restartworker",
                          help="If set, will restart each worker process once"
                          " their tests are done, this helps control memory "
                          "leaks from killing the system. "
                          "[NOSE_PROCESS_RESTARTWORKER]")

    def configure(self, options, config):
        """
        Configure plugin.
        """
        # Reset the shared 'active' flag from any previous configuration.
        try:
            self.status.pop('active')
        except KeyError:
            pass
        if not hasattr(options, 'multiprocess_workers'):
            self.enabled = False
            return
        # don't start inside of a worker process
        if config.worker:
            return
        self.config = config
        try:
            workers = int(options.multiprocess_workers)
        except (TypeError, ValueError):
            workers = 0
        if workers:
            _import_mp()
            # _import_mp leaves Process as None when multiprocessing is
            # unavailable.
            if Process is None:
                self.enabled = False
                return
            # Negative number of workers will cause multiprocessing to hang.
            # Set the number of workers to the CPU count to avoid this.
            if workers < 0:
                try:
                    import multiprocessing
                    workers = multiprocessing.cpu_count()
                except NotImplementedError:
                    self.enabled = False
                    return
            self.enabled = True
            self.config.multiprocess_workers = workers
            t = float(options.multiprocess_timeout)
            self.config.multiprocess_timeout = t
            r = int(options.multiprocess_restartworker)
            self.config.multiprocess_restartworker = r
            self.status['active'] = True

    def prepareTestLoader(self, loader):
        """Remember loader class so MultiProcessTestRunner can instantiate
        the right loader.
        """
        self.loaderClass = loader.__class__

    def prepareTestRunner(self, runner):
        """Replace test runner with MultiProcessTestRunner.
        """
        # replace with our runner class
        return MultiProcessTestRunner(stream=runner.stream,
                                      verbosity=self.config.verbosity,
                                      config=self.config,
                                      loaderClass=self.loaderClass)
+
def signalhandler(sig, frame):
    """Signal handler installed around worker start-up: converts the
    signal into a TimedOutException so a hung test is interrupted."""
    raise TimedOutException()
+
+class MultiProcessTestRunner(TextTestRunner):
+ waitkilltime = 5.0 # max time to wait to terminate a process that does not
+ # respond to SIGILL
    def __init__(self, **kw):
        """Accept an optional ``loaderClass`` keyword (defaulting to nose's
        defaultTestLoader); all other keywords go to TextTestRunner."""
        self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader)
        super(MultiProcessTestRunner, self).__init__(**kw)
+
    def collect(self, test, testQueue, tasks, to_teardown, result):
        """Recursively walk *test*, queueing dispatchable batches.

        Failure placeholders are run immediately in-process; suites with
        shared fixtures get their setUp run here (their teardowns are
        collected in *to_teardown*) and are then recursed into; everything
        else is queued for a worker via addtask().
        """
        # dispatch and collect results
        # put indexes only on queue because tests aren't picklable
        for case in self.nextBatch(test):
            log.debug("Next batch %s (%s)", case, type(case))
            if (isinstance(case, nose.case.Test) and
                isinstance(case.test, failure.Failure)):
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            # handle shared fixtures
            if isinstance(case, ContextSuite) and case.context is failure.Failure:
                log.debug("Case is a Failure")
                case(result) # run here to capture the failure
                continue
            elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
                log.debug("%s has shared fixtures", case)
                try:
                    case.setUp()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    log.debug("%s setup failed", sys.exc_info())
                    result.addError(case, sys.exc_info())
                else:
                    to_teardown.append(case)
                    if case.factory:
                        # Mark shared ancestors as splittable so their
                        # children can be dispatched individually.
                        ancestors=case.factory.context.get(case, [])
                        for an in ancestors[:2]:
                            #log.debug('reset ancestor %s', an)
                            if getattr(an, '_multiprocess_shared_', False):
                                an._multiprocess_can_split_=True
                            #an._multiprocess_shared_=False
                    self.collect(case, testQueue, tasks, to_teardown, result)

            else:
                test_addr = self.addtask(testQueue,tasks,case)
                log.debug("Queued test %s (%s) to %s",
                          len(tasks), test_addr, testQueue)
+
    def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result):
        """Start one worker process and return it.

        Shared state attached to the process object lets the parent
        monitor it: currentaddr (address of the test being run, '' when
        idle), currentstart (wall-clock start time of the current test)
        and keyboardCaught (set by the worker after it catches our
        interrupt signal).
        """
        currentaddr = Value('c',bytes_(''))
        currentstart = Value('d',time.time())
        keyboardCaught = Event()
        p = Process(target=runner,
                   args=(iworker, testQueue,
                         resultQueue,
                         currentaddr,
                         currentstart,
                         keyboardCaught,
                         shouldStop,
                         self.loaderClass,
                         result.__class__,
                         pickle.dumps(self.config)))
        p.currentaddr = currentaddr
        p.currentstart = currentstart
        p.keyboardCaught = keyboardCaught
        # Install the SIGILL -> TimedOutException handler around start()
        # so the child inherits it, then restore the parent's handler.
        old = signal.signal(signal.SIGILL, signalhandler)
        p.start()
        signal.signal(signal.SIGILL, old)
        return p
+
    def run(self, test):
        """
        Execute the test (which may be a test suite). If the test is a suite,
        distribute it out among as many processes as have been configured, at
        as fine a level as is possible given the context fixtures defined in
        the suite or any sub-suites.

        """
        log.debug("%s.run(%s) (%s)", self, test, os.getpid())
        wrapper = self.config.plugins.prepareTest(test)
        if wrapper is not None:
            test = wrapper

        # plugins can decorate or capture the output stream
        wrapped = self.config.plugins.setOutputStream(self.stream)
        if wrapped is not None:
            self.stream = wrapped

        testQueue = Queue()
        resultQueue = Queue()
        tasks = []
        completed = []
        workers = []
        to_teardown = []
        shouldStop = Event()

        result = self._makeResult()
        start = time.time()

        # Fill testQueue/tasks (and run shared-fixture setUps) up front.
        self.collect(test, testQueue, tasks, to_teardown, result)

        log.debug("Starting %s workers", self.config.multiprocess_workers)
        for i in range(self.config.multiprocess_workers):
            p = self.startProcess(i, testQueue, resultQueue, shouldStop, result)
            workers.append(p)
            log.debug("Started worker process %s", i+1)

        total_tasks = len(tasks)
        # need to keep track of the next time to check for timeouts in case
        # more than one process times out at the same time.
        nexttimeout=self.config.multiprocess_timeout
        thrownError = None

        try:
            # Drain results until every dispatched task is accounted for
            # (or all workers die / stop is requested).
            while tasks:
                log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
                          len(completed), total_tasks,nexttimeout)
                try:
                    iworker, addr, newtask_addrs, batch_result = resultQueue.get(
                                                        timeout=nexttimeout)
                    log.debug('Results received for worker %d, %s, new tasks: %d',
                              iworker,addr,len(newtask_addrs))
                    try:
                        try:
                            tasks.remove(addr)
                        except ValueError:
                            log.warn('worker %s failed to remove from tasks: %s',
                                     iworker,addr)
                        # workers may report newly generated tests (e.g. from
                        # test generators) to be tracked as pending tasks
                        total_tasks += len(newtask_addrs)
                        tasks.extend(newtask_addrs)
                    except KeyError:
                        log.debug("Got result for unknown task? %s", addr)
                        log.debug("current: %s",str(list(tasks)[0]))
                    else:
                        completed.append([addr,batch_result])
                        self.consolidate(result, batch_result)
                        if (self.config.stopOnError
                            and not result.wasSuccessful()):
                            # set the stop condition
                            shouldStop.set()
                            break
                    if self.config.multiprocess_restartworker:
                        log.debug('joining worker %s',iworker)
                        # wait for working, but not that important if worker
                        # cannot be joined in fact, for workers that add to
                        # testQueue, they will not terminate until all their
                        # items are read
                        workers[iworker].join(timeout=1)
                        if not shouldStop.is_set() and not testQueue.empty():
                            log.debug('starting new process on worker %s',iworker)
                            workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
                except Empty:
                    # No result within the timeout: inspect each worker for
                    # hangs and time out any that exceeded the limit.
                    log.debug("Timed out with %s tasks pending "
                              "(empty testQueue=%r): %s",
                              len(tasks),testQueue.empty(),str(tasks))
                    any_alive = False
                    for iworker, w in enumerate(workers):
                        if w.is_alive():
                            worker_addr = bytes_(w.currentaddr.value,'ascii')
                            timeprocessing = time.time() - w.currentstart.value
                            if ( len(worker_addr) == 0
                                    and timeprocessing > self.config.multiprocess_timeout-0.1):
                                log.debug('worker %d has finished its work item, '
                                          'but is not exiting? do we wait for it?',
                                          iworker)
                            else:
                                any_alive = True
                            if (len(worker_addr) > 0
                                and timeprocessing > self.config.multiprocess_timeout-0.1):
                                log.debug('timed out worker %s: %s',
                                          iworker,worker_addr)
                                w.currentaddr.value = bytes_('')
                                # If the process is in C++ code, sending a SIGILL
                                # might not send a python KeybordInterrupt exception
                                # therefore, send multiple signals until an
                                # exception is caught. If this takes too long, then
                                # terminate the process
                                w.keyboardCaught.clear()
                                startkilltime = time.time()
                                while not w.keyboardCaught.is_set() and w.is_alive():
                                    if time.time()-startkilltime > self.waitkilltime:
                                        # have to terminate...
                                        log.error("terminating worker %s",iworker)
                                        w.terminate()
                                        # there is a small probability that the
                                        # terminated process might send a result,
                                        # which has to be specially handled or
                                        # else processes might get orphaned.
                                        workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
                                        break
                                    os.kill(w.pid, signal.SIGILL)
                                    time.sleep(0.1)
                    if not any_alive and testQueue.empty():
                        log.debug("All workers dead")
                        break
                # Recompute the wait so multiple in-flight tests can time
                # out close to their own deadlines.
                nexttimeout=self.config.multiprocess_timeout
                for w in workers:
                    if w.is_alive() and len(w.currentaddr.value) > 0:
                        timeprocessing = time.time()-w.currentstart.value
                        if timeprocessing <= self.config.multiprocess_timeout:
                            nexttimeout = min(nexttimeout,
                                self.config.multiprocess_timeout-timeprocessing)
            log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))

        except (KeyboardInterrupt, SystemExit), e:
            log.info('parent received ctrl-c when waiting for test results')
            thrownError = e
            #resultQueue.get(False)

            result.addError(test, sys.exc_info())

        try:
            # Tear down shared fixtures set up during collect().
            for case in to_teardown:
                log.debug("Tearing down shared fixtures for %s", case)
                try:
                    case.tearDown()
                except (KeyboardInterrupt, SystemExit):
                    raise
                except:
                    result.addError(case, sys.exc_info())

            stop = time.time()

            # first write since can freeze on shutting down processes
            result.printErrors()
            result.printSummary(start, stop)
            self.config.plugins.finalize(result)

            if thrownError is None:
                log.debug("Tell all workers to stop")
                for w in workers:
                    if w.is_alive():
                        testQueue.put('STOP', block=False)

            # wait for the workers to end
            for iworker,worker in enumerate(workers):
                if worker.is_alive():
                    log.debug('joining worker %s',iworker)
                    worker.join()
                    if worker.is_alive():
                        log.debug('failed to join worker %s',iworker)
        except (KeyboardInterrupt, SystemExit):
            log.info('parent received ctrl-c when shutting down: stop all processes')
            for worker in workers:
                if worker.is_alive():
                    worker.terminate()

            if thrownError: raise thrownError
            else: raise

        return result
+
    def addtask(testQueue,tasks,case):
        """Queue *case* for execution by a worker.

        Puts (address, generator-arg) on *testQueue* and records the
        arg-qualified address in *tasks* (when given) so results can be
        matched back to dispatched work. Returns the recorded address.
        """
        arg = None
        if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'):
            # this removes the top level descriptor and allows real function
            # name to be returned
            case.test.descriptor = None
            arg = case.test.arg
        test_addr = MultiProcessTestRunner.address(case)
        testQueue.put((test_addr,arg), block=False)
        if arg is not None:
            # distinguish individual generated cases that share one address
            test_addr += str(arg)
        if tasks is not None:
            tasks.append(test_addr)
        return test_addr
    addtask = staticmethod(addtask)
+
+ def address(case):
+ if hasattr(case, 'address'):
+ file, mod, call = case.address()
+ elif hasattr(case, 'context'):
+ file, mod, call = test_address(case.context)
+ else:
+ raise Exception("Unable to convert %s to address" % case)
+ parts = []
+ if file is None:
+ if mod is None:
+ raise Exception("Unaddressable case %s" % case)
+ else:
+ parts.append(mod)
+ else:
+ # strip __init__.py(c) from end of file part
+ # if present, having it there confuses loader
+ dirname, basename = os.path.split(file)
+ if basename.startswith('__init__'):
+ file = dirname
+ parts.append(file)
+ if call is not None:
+ parts.append(call)
+ return ':'.join(map(str, parts))
+ address = staticmethod(address)
+
    def nextBatch(self, test):
        """Yield the dispatchable units contained in *test*.

        Suites whose context forbids multiprocess execution are skipped
        entirely; suites with non-reentrant fixtures (and plain test
        cases) are yielded whole; fixture-free suites are recursed into so
        each contained case can be dispatched to a worker independently.
        """
        # allows tests or suites to mark themselves as not safe
        # for multiprocess execution
        if hasattr(test, 'context'):
            if not getattr(test.context, '_multiprocess_', True):
                return

        if ((isinstance(test, ContextSuite)
             and test.hasFixtures(self.checkCanSplit))
            or not getattr(test, 'can_split', True)
            or not isinstance(test, unittest.TestSuite)):
            # regular test case, or a suite with context fixtures

            # special case: when run like nosetests path/to/module.py
            # the top-level suite has only one item, and it shares
            # the same context as that item. In that case, we want the
            # item, not the top-level suite
            if isinstance(test, ContextSuite):
                contained = list(test)
                if (len(contained) == 1
                    and getattr(contained[0],
                                'context', None) == test.context):
                    test = contained[0]
            yield test
        else:
            # Suite is without fixtures at this level; but it may have
            # fixtures at any deeper level, so we need to examine it all
            # the way down to the case level
            for case in test:
                for batch in self.nextBatch(case):
                    yield batch
+
+ def checkCanSplit(context, fixt):
+ """
+ Callback that we use to check whether the fixtures found in a
+ context or ancestor are ones we care about.
+
+ Contexts can tell us that their fixtures are reentrant by setting
+ _multiprocess_can_split_. So if we see that, we return False to
+ disregard those fixtures.
+ """
+ if not fixt:
+ return False
+ if getattr(context, '_multiprocess_can_split_', False):
+ return False
+ return True
+ checkCanSplit = staticmethod(checkCanSplit)
+
+ def sharedFixtures(self, case):
+ context = getattr(case, 'context', None)
+ if not context:
+ return False
+ return getattr(context, '_multiprocess_shared_', False)
+
    def consolidate(self, result, batch_result):
        """Merge one worker's summarized result into the master *result*.

        ``batch_result`` is the 5-tuple built by the worker's batch()
        helper: (output, testsRun, failures, errors, errorClasses).
        """
        log.debug("batch result is %s" , batch_result)
        try:
            output, testsRun, failures, errors, errorClasses = batch_result
        except ValueError:
            # Malformed worker payload: record it against the run instead
            # of crashing the master process.
            log.debug("result in unexpected format %s", batch_result)
            failure.Failure(*sys.exc_info())(result)
            return
        self.stream.write(output)
        result.testsRun += testsRun
        result.failures.extend(failures)
        result.errors.extend(errors)
        for key, (storage, label, isfail) in errorClasses.items():
            if key not in result.errorClasses:
                # Ordinarily storage is result attribute
                # but it's only processed through the errorClasses
                # dict, so it's ok to fake it here
                result.errorClasses[key] = ([], label, isfail)
            mystorage, _junk, _junk = result.errorClasses[key]
            mystorage.extend(storage)
        log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun)
+
+
def runner(ix, testQueue, resultQueue, currentaddr, currentstart,
           keyboardCaught, shouldStop, loaderClass, resultClass, config):
    """Worker-process entry point.

    Delegates to __runner() and converts the two expected shutdown
    conditions into log messages: KeyboardInterrupt (ctrl-c forwarded to
    the worker) and Empty (no task arrived within the queue timeout).
    The nesting is deliberate: Empty raised anywhere, including from the
    inner handler, is still caught by the outer try.
    """
    try:
        try:
            return __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
                    keyboardCaught, shouldStop, loaderClass, resultClass, config)
        except KeyboardInterrupt:
            log.debug('Worker %s keyboard interrupt, stopping',ix)
    except Empty:
        log.debug("Worker %s timed out waiting for tasks", ix)
+
def __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
             keyboardCaught, shouldStop, loaderClass, resultClass, config):
    """Main loop of worker *ix*.

    Rebuilds the nose configuration from its pickled form, re-instantiates
    plugins, then consumes (test_addr, arg) tasks from testQueue until the
    'STOP' sentinel arrives, running each test and pushing a picklable
    summary tuple onto resultQueue.
    """
    config = pickle.loads(config)
    dummy_parser = config.parserClass()
    if _instantiate_plugins is not None:
        for pluginclass in _instantiate_plugins:
            plugin = pluginclass()
            plugin.addOptions(dummy_parser,{})
            config.plugins.addPlugin(plugin)
    config.plugins.configure(config.options,config)
    config.plugins.begin()
    log.debug("Worker %s executing, pid=%d", ix,os.getpid())
    loader = loaderClass(config=config)
    # Suites loaded inside the worker must not re-run fixtures that the
    # parent process already executed as shared.
    loader.suiteClass.suiteClass = NoSharedFixtureContextSuite

    def get():
        # Blocks for at most multiprocess_timeout; Empty propagates up to
        # runner(), which treats it as a normal shutdown.
        return testQueue.get(timeout=config.multiprocess_timeout)

    def makeResult():
        # One fresh result per task, writing into an in-memory stream that
        # batch() ships back to the parent.
        stream = _WritelnDecorator(StringIO())
        result = resultClass(stream, descriptions=1,
                             verbosity=config.verbosity,
                             config=config)
        plug_result = config.plugins.prepareTestResult(result)
        if plug_result:
            return plug_result
        return result

    def batch(result):
        # Reduce the result to a picklable 5-tuple; TestLet captures just
        # the identifying strings of each recorded test.
        failures = [(TestLet(c), err) for c, err in result.failures]
        errors = [(TestLet(c), err) for c, err in result.errors]
        errorClasses = {}
        for key, (storage, label, isfail) in result.errorClasses.items():
            errorClasses[key] = ([(TestLet(c), err) for c, err in storage],
                                 label, isfail)
        return (
            result.stream.getvalue(),
            result.testsRun,
            failures,
            errors,
            errorClasses)
    for test_addr, arg in iter(get, 'STOP'):
        if shouldStop.is_set():
            log.exception('Worker %d STOPPED',ix)
            break
        result = makeResult()
        test = loader.loadTestsFromNames([test_addr])
        test.testQueue = testQueue
        test.tasks = []
        test.arg = arg
        log.debug("Worker %s Test is %s (%s)", ix, test_addr, test)
        try:
            if arg is not None:
                test_addr = test_addr + str(arg)
            # Publish the running test and its start time so the parent
            # can detect a hung or timed-out worker.
            currentaddr.value = bytes_(test_addr)
            currentstart.value = time.time()
            test(result)
            currentaddr.value = bytes_('')
            resultQueue.put((ix, test_addr, test.tasks, batch(result)))
        except KeyboardInterrupt, e: #TimedOutException:
            # TimedOutException arrives on this path as well; distinguish
            # a timeout from a real ctrl-c.
            timeout = isinstance(e, TimedOutException)
            if timeout:
                keyboardCaught.set()
            if len(currentaddr.value):
                # A test was mid-run: fail it and report what we have.
                if timeout:
                    msg = 'Worker %s timed out, failing current test %s'
                else:
                    msg = 'Worker %s keyboard interrupt, failing current test %s'
                log.exception(msg,ix,test_addr)
                currentaddr.value = bytes_('')
                failure.Failure(*sys.exc_info())(result)
                resultQueue.put((ix, test_addr, test.tasks, batch(result)))
            else:
                if timeout:
                    msg = 'Worker %s test %s timed out'
                else:
                    msg = 'Worker %s test %s keyboard interrupt'
                log.debug(msg,ix,test_addr)
                resultQueue.put((ix, test_addr, test.tasks, batch(result)))
            if not timeout:
                raise
        except SystemExit:
            currentaddr.value = bytes_('')
            log.exception('Worker %s system exit',ix)
            raise
        except:
            currentaddr.value = bytes_('')
            log.exception("Worker %s error running test or returning "
                            "results",ix)
            failure.Failure(*sys.exc_info())(result)
            resultQueue.put((ix, test_addr, test.tasks, batch(result)))
        if config.multiprocess_restartworker:
            break
    log.debug("Worker %s ending", ix)
+
+
class NoSharedFixtureContextSuite(ContextSuite):
    """
    Context suite that never fires shared fixtures.

    When a context sets _multiprocess_shared_, fixtures in that context
    are executed by the main process. Using this suite class prevents them
    from executing in the runner process as well.

    """
    # Injected by __runner() before the suite is executed:
    testQueue = None  # queue on which dynamically generated tasks are put
    tasks = None      # list collecting the addresses of queued tasks
    arg = None        # generator argument propagated to contained tests
    def setupContext(self, context):
        # Shared fixtures were already set up by the parent process.
        if getattr(context, '_multiprocess_shared_', False):
            return
        super(NoSharedFixtureContextSuite, self).setupContext(context)

    def teardownContext(self, context):
        # Shared fixtures are torn down by the parent process.
        if getattr(context, '_multiprocess_shared_', False):
            return
        super(NoSharedFixtureContextSuite, self).teardownContext(context)
    def run(self, result):
        """Run tests in suite inside of suite fixtures.
        """
        # proxy the result for myself
        log.debug("suite %s (%s) run called, tests: %s",
                  id(self), self, self._tests)
        if self.resultProxy:
            result, orig = self.resultProxy(result, self), result
        else:
            result, orig = result, result
        try:
            #log.debug('setUp for %s', id(self));
            self.setUp()
        except KeyboardInterrupt:
            raise
        except:
            self.error_context = 'setup'
            result.addError(self, self._exc_info())
            return
        try:
            for test in self._tests:
                # Propagate the worker-injected attributes down to each
                # contained test before running it.
                if (isinstance(test,nose.case.Test)
                    and self.arg is not None):
                    test.test.arg = self.arg
                else:
                    test.arg = self.arg
                test.testQueue = self.testQueue
                test.tasks = self.tasks
                if result.shouldStop:
                    log.debug("stopping")
                    break
                # each nose.case.Test will create its own result proxy
                # so the cases need the original result, to avoid proxy
                # chains
                #log.debug('running test %s in suite %s', test, self);
                try:
                    test(orig)
                except KeyboardInterrupt, e:
                    # TimedOutException is a KeyboardInterrupt subclass on
                    # this path; only a genuine ctrl-c is re-raised.
                    timeout = isinstance(e, TimedOutException)
                    if timeout:
                        msg = 'Timeout when running test %s in suite %s'
                    else:
                        msg = 'KeyboardInterrupt when running test %s in suite %s'
                    log.debug(msg, test, self)
                    err = (TimedOutException,TimedOutException(str(test)),
                           sys.exc_info()[2])
                    test.config.plugins.addError(test,err)
                    orig.addError(test,err)
                    if not timeout:
                        raise
        finally:
            self.has_run = True
            try:
                #log.debug('tearDown for %s', id(self));
                self.tearDown()
            except KeyboardInterrupt:
                raise
            except:
                self.error_context = 'teardown'
                result.addError(self, self._exc_info())
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/plugintest.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/plugintest.py
new file mode 100755
index 00000000..76d0d2c4
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/plugintest.py
@@ -0,0 +1,416 @@
+"""
+Testing Plugins
+===============
+
+The plugin interface is well-tested enough to safely unit test your
+use of its hooks with some level of confidence. However, there is also
+a mixin for unittest.TestCase called PluginTester that's designed to
+test plugins in their native runtime environment.
+
+Here's a simple example with a do-nothing plugin and a composed suite.
+
+ >>> import unittest
+ >>> from nose.plugins import Plugin, PluginTester
+ >>> class FooPlugin(Plugin):
+ ... pass
+ >>> class TestPluginFoo(PluginTester, unittest.TestCase):
+ ... activate = '--with-foo'
+ ... plugins = [FooPlugin()]
+ ... def test_foo(self):
+ ... for line in self.output:
+ ... # i.e. check for patterns
+ ... pass
+ ...
+ ... # or check for a line containing ...
+ ... assert "ValueError" in self.output
+ ... def makeSuite(self):
+ ... class TC(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("I hate foo")
+ ... return [TC('runTest')]
+ ...
+ >>> res = unittest.TestResult()
+ >>> case = TestPluginFoo('test_foo')
+ >>> _ = case(res)
+ >>> res.errors
+ []
+ >>> res.failures
+ []
+ >>> res.wasSuccessful()
+ True
+ >>> res.testsRun
+ 1
+
+And here is a more complex example of testing a plugin that has extra
+arguments and reads environment variables.
+
+ >>> import unittest, os
+ >>> from nose.plugins import Plugin, PluginTester
+ >>> class FancyOutputter(Plugin):
+ ... name = "fancy"
+ ... def configure(self, options, conf):
+ ... Plugin.configure(self, options, conf)
+ ... if not self.enabled:
+ ... return
+ ... self.fanciness = 1
+ ... if options.more_fancy:
+ ... self.fanciness = 2
+ ... if 'EVEN_FANCIER' in self.env:
+ ... self.fanciness = 3
+ ...
+ ... def options(self, parser, env=os.environ):
+ ... self.env = env
+ ... parser.add_option('--more-fancy', action='store_true')
+ ... Plugin.options(self, parser, env=env)
+ ...
+ ... def report(self, stream):
+ ... stream.write("FANCY " * self.fanciness)
+ ...
+ >>> class TestFancyOutputter(PluginTester, unittest.TestCase):
+ ... activate = '--with-fancy' # enables the plugin
+ ... plugins = [FancyOutputter()]
+ ... args = ['--more-fancy']
+ ... env = {'EVEN_FANCIER': '1'}
+ ...
+ ... def test_fancy_output(self):
+ ... assert "FANCY FANCY FANCY" in self.output, (
+ ... "got: %s" % self.output)
+ ... def makeSuite(self):
+ ... class TC(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("I hate fancy stuff")
+ ... return [TC('runTest')]
+ ...
+ >>> res = unittest.TestResult()
+ >>> case = TestFancyOutputter('test_fancy_output')
+ >>> _ = case(res)
+ >>> res.errors
+ []
+ >>> res.failures
+ []
+ >>> res.wasSuccessful()
+ True
+ >>> res.testsRun
+ 1
+
+"""
+
+import re
+import sys
+from warnings import warn
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__all__ = ['PluginTester', 'run']
+
+from os import getpid
class MultiProcessFile(object):
    """
    helper for testing multiprocessing

    multiprocessing poses a problem for doctests, since the strategy
    of replacing sys.stdout/stderr with file-like objects then
    inspecting the results won't work: the child processes will
    write to the objects, but the data will not be reflected
    in the parent doctest-ing process.

    The solution is to create file-like objects which will interact with
    multiprocessing in a more desirable way.

    All processes can write to this object, but only the creator can read.
    This allows the testing system to see a unified picture of I/O.
    """
    def __init__(self):
        # per advice at:
        # http://docs.python.org/library/multiprocessing.html#all-platforms
        self.__master = getpid()          # only this pid may read/drain
        self.__queue = Manager().Queue()  # cross-process transport
        self.__buffer = StringIO()        # master-side accumulated output
        self.softspace = 0                # file-protocol attr (py2 print)

    def buffer(self):
        """Drain the shared queue into the local buffer (master only)."""
        if getpid() != self.__master:
            return

        from Queue import Empty
        from collections import defaultdict
        cache = defaultdict(str)
        while True:
            try:
                pid, data = self.__queue.get_nowait()
            except Empty:
                break
            if pid == ():
                #show parent output after children
                #this is what users see, usually
                pid = ( 1e100, ) # googol!
            cache[pid] += data
        # Flush in sorted pid order so each process's output is contiguous.
        for pid in sorted(cache):
            #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG
            self.__buffer.write( cache[pid] )
    def write(self, data):
        # note that these pids are in the form of current_process()._identity
        # rather than OS pids
        from multiprocessing import current_process
        pid = current_process()._identity
        self.__queue.put((pid, data))
    def __iter__(self):
        "getattr doesn't work for iter()"
        self.buffer()
        return self.__buffer
    def seek(self, offset, whence=0):
        self.buffer()
        return self.__buffer.seek(offset, whence)
    def getvalue(self):
        self.buffer()
        return self.__buffer.getvalue()
    def __getattr__(self, attr):
        # Everything else (read, flush, ...) delegates to the buffer.
        return getattr(self.__buffer, attr)
+
# Pick the output-buffer implementation: the multiprocessing-aware file
# when a Manager is available, otherwise a plain in-memory StringIO.
try:
    from multiprocessing import Manager
    Buffer = MultiProcessFile
except ImportError:
    Buffer = StringIO
+
class PluginTester(object):
    """A mixin for testing nose plugins in their runtime environment.

    Subclass this and mix in unittest.TestCase to run integration/functional
    tests on your plugin. When setUp() is called, the stub test suite is
    executed with your plugin so that during an actual test you can inspect the
    artifacts of how your plugin interacted with the stub test suite.

    - activate

      - the argument to send nosetests to activate the plugin

    - suitepath

      - if set, this is the path of the suite to test. Otherwise, you
        will need to use the hook, makeSuite()

    - plugins

      - the list of plugins to make available during the run. Note
        that this does not mean these plugins will be *enabled* during
        the run -- only the plugins enabled by the activate argument
        or other settings in argv or env will be enabled.

    - args

      - a list of arguments to add to the nosetests command, in addition to
        the activate argument

    - env

      - optional dict of environment variables to send nosetests

    """
    # Subclass configuration hooks (see class docstring):
    activate = None     # command-line switch that enables the plugin
    suitepath = None    # optional path of a suite to run
    args = None         # extra command-line arguments
    env = {}            # environment passed to the nose Config
    argv = None         # full argv; built by setUp()
    plugins = []        # plugin instances made available to the run
    ignoreFiles = None  # optional override for Config.ignoreFiles

    def makeSuite(self):
        """returns a suite object of tests to run (unittest.TestSuite())

        If self.suitepath is None, this must be implemented. The returned suite
        object will be executed with all plugins activated.  It may return
        None.

        Here is an example of a basic suite object you can return ::

            >>> import unittest
            >>> class SomeTest(unittest.TestCase):
            ...     def runTest(self):
            ...         raise ValueError("Now do something, plugin!")
            ...
            >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS
            <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]>

        """
        raise NotImplementedError

    def _execPlugin(self):
        """execute the plugin on the internal test suite.
        """
        from nose.config import Config
        from nose.core import TestProgram
        from nose.plugins.manager import PluginManager

        suite = None
        # Captured run output goes into a Buffer (multiprocess-safe when
        # available) rather than the real stdout.
        stream = Buffer()
        conf = Config(env=self.env,
                      stream=stream,
                      plugins=PluginManager(plugins=self.plugins))
        if self.ignoreFiles is not None:
            conf.ignoreFiles = self.ignoreFiles
        if not self.suitepath:
            suite = self.makeSuite()

        # exit=False so a failing stub suite does not abort the host test.
        self.nose = TestProgram(argv=self.argv, config=conf, suite=suite,
                                exit=False)
        # Expose captured output for assertions in the host test case.
        self.output = AccessDecorator(stream)

    def setUp(self):
        """runs nosetests with the specified test suite, all plugins
        activated.
        """
        self.argv = ['nosetests', self.activate]
        if self.args:
            self.argv.extend(self.args)
        if self.suitepath:
            self.argv.append(self.suitepath)

        self._execPlugin()
+
+
class AccessDecorator(object):
    """Convenience wrapper around a captured output stream.

    The stream's full contents are read once at construction, so the
    captured output supports ``in`` membership tests and ``str()`` while
    iteration delegates to the (rewound) underlying stream.
    """
    stream = None
    _buf = None

    def __init__(self, stream):
        self.stream = stream
        stream.seek(0)
        self._buf = stream.read()
        # Leave the stream rewound so __iter__ starts from the beginning.
        stream.seek(0)

    def __contains__(self, val):
        return self._buf.find(val) != -1

    def __iter__(self):
        return iter(self.stream)

    def __str__(self):
        return self._buf
+
+
def blankline_separated_blocks(text):
    "a bunch of === characters is also considered a blank line"
    current = []
    for raw in text.splitlines(True):
        current.append(raw)
        stripped = raw.strip()
        # A separator is a blank line or a ruler of three-or-more '='s.
        is_ruler = stripped.startswith('===') and stripped.strip('=') == ''
        if stripped == '' or is_ruler:
            yield "".join(current)
            current = []
    if current:
        yield "".join(current)
+
+
def remove_stack_traces(out):
    """Collapse each traceback in *out* to 'header / ... / Exception: msg'."""
    # this regexp taken from Python 2.5's doctest
    traceback_re = re.compile(r"""
        # Grab the traceback header. Different versions of Python have
        # said different things on the first traceback line.
        ^(?P<hdr> Traceback\ \(
            (?: most\ recent\ call\ last
            | innermost\ last
            ) \) :
        )
        \s* $ # toss trailing whitespace on the header.
        (?P<stack> .*?) # don't blink: absorb stuff until...
        ^(?=\w) # a line *starts* with alphanum.
        .*?(?P<exception> \w+ ) # exception name
        (?P<msg> [:\n] .*) # the rest
        """, re.VERBOSE | re.MULTILINE | re.DOTALL)
    # Process block by block so one traceback cannot swallow text from a
    # following block.
    return "".join(
        traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block)
        for block in blankline_separated_blocks(out))
+
+
def simplify_warnings(out):
    """Reduce each warning (file:line prefix plus stack-frame line) in
    *out* to just 'Category: detail'."""
    warning_re = re.compile(r"""
        # Cut the file and line no, up to the warning name
        ^.*:\d+:\s
        (?P<category>\w+): \s+ # warning category
        (?P<detail>.+) $ \n? # warning message
        ^ .* $ # stack frame
        """, re.VERBOSE | re.MULTILINE)
    simplified = warning_re.sub(r"\g<category>: \g<detail>", out)
    return simplified
+
+
def remove_timings(out):
    """Replace the variable elapsed time in a nose summary line ('Ran N
    tests in X.XXXs') with an ellipsis, for stable doctest output."""
    timing_re = re.compile(r"Ran (\d+ tests?) in [0-9.]+s")
    return timing_re.sub(r"Ran \1 in ...s", out)
+
+
def munge_nose_output_for_doctest(out):
    """Modify nose output to make it easy to use in doctests."""
    # Same pipeline order as always: collapse tracebacks, then warnings,
    # then timings, and finally strip surrounding whitespace.
    for transform in (remove_stack_traces, simplify_warnings, remove_timings):
        out = transform(out)
    return out.strip()
+
+
def run(*arg, **kw):
    """
    Specialized version of nose.run for use inside of doctests that
    test test runs.

    This version of run() prints the result output to stdout.  Before
    printing, the output is processed by replacing the timing
    information with an ellipsis (...), removing traceback stacks, and
    removing trailing whitespace.

    Use this version of run wherever you are writing a doctest that
    tests nose (or unittest) test result output.

    Note: do not use doctest: +ELLIPSIS when testing nose output,
    since ellipses ("test_foo ... ok") in your expected test runner
    output may match multiple lines of output, causing spurious test
    passes!
    """
    from nose import run
    from nose.config import Config
    from nose.plugins.manager import PluginManager

    buffer = Buffer()
    if 'config' not in kw:
        # Build a default config with any caller-supplied plugins/env.
        plugins = kw.pop('plugins', [])
        if isinstance(plugins, list):
            plugins = PluginManager(plugins=plugins)
        env = kw.pop('env', {})
        kw['config'] = Config(env=env, plugins=plugins)
    if 'argv' not in kw:
        kw['argv'] = ['nosetests', '-v']
    kw['config'].stream = buffer

    # Set up buffering so that all output goes to our buffer,
    # or warn user if deprecated behavior is active. If this is not
    # done, prints and warnings will either be out of place or
    # disappear.
    stderr = sys.stderr
    stdout = sys.stdout
    if kw.pop('buffer_all', False):
        sys.stdout = sys.stderr = buffer
        restore = True
    else:
        restore = False
        warn("The behavior of nose.plugins.plugintest.run() will change in "
             "the next release of nose. The current behavior does not "
             "correctly account for output to stdout and stderr. To enable "
             "correct behavior, use run_buffered() instead, or pass "
             "the keyword argument buffer_all=True to run().",
             DeprecationWarning, stacklevel=2)
    try:
        run(*arg, **kw)
    finally:
        # Always restore the real streams, even if the run raised.
        if restore:
            sys.stderr = stderr
            sys.stdout = stdout
    out = buffer.getvalue()
    # Normalize timings/tracebacks/warnings so doctests can match exactly.
    print munge_nose_output_for_doctest(out)
+
+
def run_buffered(*arg, **kw):
    """Equivalent to run() with buffer_all forced on, so stdout/stderr
    are captured into the doctest buffer as well."""
    kw.update(buffer_all=True)
    run(*arg, **kw)
+
if __name__ == '__main__':
    # Run this module's own doctests when executed directly.
    import doctest
    doctest.testmod()
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/prof.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/prof.py
new file mode 100755
index 00000000..4d304a93
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/prof.py
@@ -0,0 +1,154 @@
+"""This plugin will run tests using the hotshot profiler, which is part
+of the standard library. To turn it on, use the ``--with-profile`` option
+or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
+controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
+and the profiler output file may be changed with ``--profile-stats-file``.
+
+See the `hotshot documentation`_ in the standard library documentation for
+more details on the various output options.
+
+.. _hotshot documentation: http://docs.python.org/library/hotshot.html
+"""
+
+try:
+ import hotshot
+ from hotshot import stats
+except ImportError:
+ hotshot, stats = None, None
+import logging
+import os
+import sys
+import tempfile
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins')
+
class Profile(Plugin):
    """
    Use this plugin to run tests using the hotshot profiler.
    """
    # Stats file path; None until configure()/begin() choose one.
    pfile = None
    # Whether finalize() should delete pfile (True for auto temp files).
    clean_stats_file = False
    def options(self, parser, env):
        """Register commandline options.
        """
        # No options at all when hotshot is unavailable.
        if not self.available():
            return
        Plugin.options(self, parser, env)
        parser.add_option('--profile-sort', action='store', dest='profile_sort',
                          default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
                          metavar="SORT",
                          help="Set sort order for profiler output")
        parser.add_option('--profile-stats-file', action='store',
                          dest='profile_stats_file',
                          metavar="FILE",
                          default=env.get('NOSE_PROFILE_STATS_FILE'),
                          help='Profiler stats file; default is a new '
                          'temp file on each run')
        parser.add_option('--profile-restrict', action='append',
                          dest='profile_restrict',
                          metavar="RESTRICT",
                          default=env.get('NOSE_PROFILE_RESTRICT'),
                          help="Restrict profiler output. See help for "
                          "pstats.Stats for details")

    def available(cls):
        # True only when the (optional, py2-only) hotshot module imported.
        return hotshot is not None
    available = classmethod(available)

    def begin(self):
        """Create profile stats file and load profiler.
        """
        if not self.available():
            return
        self._create_pfile()
        self.prof = hotshot.Profile(self.pfile)

    def configure(self, options, conf):
        """Configure plugin.
        """
        if not self.available():
            self.enabled = False
            return
        Plugin.configure(self, options, conf)
        self.conf = conf
        if options.profile_stats_file:
            # User-supplied file: keep it around after the run.
            self.pfile = options.profile_stats_file
            self.clean_stats_file = False
        else:
            # Temp file to be created lazily and removed in finalize().
            self.pfile = None
            self.clean_stats_file = True
        self.fileno = None
        self.sort = options.profile_sort
        self.restrict = tolist(options.profile_restrict)

    def prepareTest(self, test):
        """Wrap entire test run in :func:`prof.runcall`.
        """
        if not self.available():
            return
        log.debug('preparing test %s' % test)
        # Default args bind the current profiler/test at definition time.
        def run_and_profile(result, prof=self.prof, test=test):
            self._create_pfile()
            prof.runcall(test, result)
        return run_and_profile

    def report(self, stream):
        """Output profiler report.
        """
        log.debug('printing profiler report')
        self.prof.close()
        prof_stats = stats.load(self.pfile)
        prof_stats.sort_stats(self.sort)

        # 2.5 has completely different stream handling from 2.4 and earlier.
        # Before 2.5, stats objects have no stream attribute; in 2.5 and later
        # a reference sys.stdout is stored before we can tweak it.
        compat_25 = hasattr(prof_stats, 'stream')
        if compat_25:
            tmp = prof_stats.stream
            prof_stats.stream = stream
        else:
            tmp = sys.stdout
            sys.stdout = stream
        try:
            if self.restrict:
                log.debug('setting profiler restriction to %s', self.restrict)
                prof_stats.print_stats(*self.restrict)
            else:
                prof_stats.print_stats()
        finally:
            # Restore whichever stream we redirected above.
            if compat_25:
                prof_stats.stream = tmp
            else:
                sys.stdout = tmp

    def finalize(self, result):
        """Clean up stats file, if configured to do so.
        """
        if not self.available():
            return
        try:
            self.prof.close()
        except AttributeError:
            # TODO: is this trying to catch just the case where not
            # hasattr(self.prof, "close")? If so, the function call should be
            # moved out of the try: suite.
            pass
        if self.clean_stats_file:
            if self.fileno:
                try:
                    os.close(self.fileno)
                except OSError:
                    pass
            try:
                os.unlink(self.pfile)
            except OSError:
                pass
        return None

    def _create_pfile(self):
        # Lazily allocate a temp stats file when none was configured.
        if not self.pfile:
            self.fileno, self.pfile = tempfile.mkstemp()
            self.clean_stats_file = True
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/skip.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/skip.py
new file mode 100755
index 00000000..9d1ac8f6
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/skip.py
@@ -0,0 +1,63 @@
+"""
+This plugin installs a SKIP error class for the SkipTest exception.
+When SkipTest is raised, the exception will be logged in the skipped
+attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
+the exception will not be counted as an error or failure. This plugin
+is enabled by default but may be disabled with the ``--no-skip`` option.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+# on SkipTest:
+# - unittest SkipTest is first preference, but it's only available
+# for >= 2.7
+# - unittest2 SkipTest is second preference for older pythons. This
+# mirrors logic for choosing SkipTest exception in testtools
+# - if none of the above, provide custom class
+try:
+ from unittest.case import SkipTest
+except ImportError:
+ try:
+ from unittest2.case import SkipTest
+ except ImportError:
+ class SkipTest(Exception):
+ """Raise this exception to mark a test as skipped.
+ """
+ pass
+
+
class Skip(ErrorClassPlugin):
    """
    Plugin that installs a SKIP error class for the SkipTest
    exception. When SkipTest is raised, the exception will be logged
    in the skipped attribute of the result, 'S' or 'SKIP' (verbose)
    will be output, and the exception will not be counted as an error
    or failure.
    """
    # On by default; --no-skip turns it off.
    enabled = True
    # ErrorClassPlugin machinery: route SkipTest into result.skipped,
    # labelled SKIP, not counted as a failure.
    skipped = ErrorClass(SkipTest,
                         label='SKIP',
                         isfailure=False)

    def options(self, parser, env):
        """
        Add my options to command line.
        """
        # Note: no --with-skip switch -- the plugin is enabled by default
        # and only exposes an off switch.
        env_opt = 'NOSE_WITHOUT_SKIP'
        parser.add_option('--no-skip', action='store_true',
                          dest='noSkip', default=env.get(env_opt, False),
                          help="Disable special handling of SkipTest "
                          "exceptions.")

    def configure(self, options, conf):
        """
        Configure plugin. Skip plugin is enabled by default.
        """
        if not self.can_configure:
            return
        self.conf = conf
        disable = getattr(options, 'noSkip', False)
        if disable:
            self.enabled = False
+
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/testid.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/testid.py
new file mode 100755
index 00000000..49fff9b1
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/testid.py
@@ -0,0 +1,306 @@
+"""
+This plugin adds a test id (like #1) to each test name output. After
+you've run once to generate test ids, you can re-run individual
+tests by activating the plugin and passing the ids (with or
+without the # prefix) instead of test names.
+
+For example, if your normal test run looks like::
+
+ % nosetests -v
+ tests.test_a ... ok
+ tests.test_b ... ok
+ tests.test_c ... ok
+
+When adding ``--with-id`` you'll see::
+
+ % nosetests -v --with-id
+ #1 tests.test_a ... ok
+ #2 tests.test_b ... ok
+ #3 tests.test_c ... ok
+
+Then you can re-run individual tests by supplying just an id number::
+
+ % nosetests -v --with-id 2
+ #2 tests.test_b ... ok
+
+You can also pass multiple id numbers::
+
+ % nosetests -v --with-id 2 3
+ #2 tests.test_b ... ok
+ #3 tests.test_c ... ok
+
+Since most shells consider '#' a special character, you can leave it out when
+specifying a test id.
+
+Note that when run without the -v switch, no special output is displayed, but
+the ids file is still written.
+
+Looping over failed tests
+-------------------------
+
+This plugin also adds a mode that will direct the test runner to record
+failed tests. Subsequent test runs will then run only the tests that failed
+last time. Activate this mode with the ``--failed`` switch::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+ #4 test.test_d ... ok
+
+On the second run, only tests #2 and #3 will run::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+
+As you correct errors and tests pass, they'll drop out of subsequent runs.
+
+First::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ok
+ #3 test.test_c ... FAILED
+
+Second::
+
+ % nosetests -v --failed
+ #3 test.test_c ... FAILED
+
+When all tests pass, the full set will run on the next invocation.
+
+First::
+
+ % nosetests -v --failed
+ #3 test.test_c ... ok
+
+Second::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ok
+ #3 test.test_c ... ok
+ #4 test.test_d ... ok
+
+.. note ::
+
+ If you expect to use ``--failed`` regularly, it's a good idea to always run
+ using the ``--with-id`` option. This will ensure that an id file is always
+ created, allowing you to add ``--failed`` to the command line as soon as
+ you have failing tests. Otherwise, your first run using ``--failed`` will
+ (perhaps surprisingly) run *all* tests, because there won't be an id file
+ containing the record of failed tests from your previous run.
+
+"""
# Keep test collectors (nose itself, doctest) from treating this
# plugin module as a test module.
__test__ = False

import logging
import os
from nose.plugins import Plugin
from nose.util import src, set

# cPickle is the faster C implementation available on Python 2.
try:
    from cPickle import dump, load
except ImportError:
    from pickle import dump, load

log = logging.getLogger(__name__)
+
+
class TestId(Plugin):
    """
    Activate to add a test id (like #1) to each test name output. Activate
    with --failed to rerun failing tests only.
    """
    name = 'id'
    # Path of the pickled ids file; resolved in configure().
    idfile = None
    # True while assigning fresh ids to tests as they run; set to False
    # when running tests mapped from a previously saved ids file.
    collecting = True
    # True when --failed was given: rerun only last run's failures.
    loopOnFailed = False

    def options(self, parser, env):
        """Register commandline options.
        """
        Plugin.options(self, parser, env)
        parser.add_option('--id-file', action='store', dest='testIdFile',
                          default='.noseids', metavar="FILE",
                          help="Store test ids found in test runs in this "
                          "file. Default is the file .noseids in the "
                          "working directory.")
        parser.add_option('--failed', action='store_true',
                          dest='failed', default=False,
                          help="Run the tests that failed in the last "
                          "test run.")

    def configure(self, options, conf):
        """Configure plugin.

        Resolves the ids file path (relative paths are anchored at the
        configured working directory) and resets all run state.
        """
        Plugin.configure(self, options, conf)
        if options.failed:
            # --failed implies the plugin itself, even without --with-id
            self.enabled = True
            self.loopOnFailed = True
            log.debug("Looping on failed tests")
        self.idfile = os.path.expanduser(options.testIdFile)
        if not os.path.isabs(self.idfile):
            self.idfile = os.path.join(conf.workingDir, self.idfile)
        self.id = 1
        # Ids and tests are mirror images: ids are {id: test address} and
        # tests are {test address: id}
        self.ids = {}
        self.tests = {}
        self.failed = []
        self.source_names = []
        # used to track ids seen when tests is filled from
        # loaded ids file
        self._seen = {}
        # only echo '#N ' markers to the stream in verbose mode
        self._write_hashes = conf.verbosity >= 2

    def finalize(self, result):
        """Save new ids file, if needed.

        Clears the recorded failures when the whole run passed, so the
        next --failed run falls back to the full test set.
        """
        if result.wasSuccessful():
            self.failed = []
        if self.collecting:
            # invert the {address: id} map collected during this run
            ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
        else:
            ids = self.ids
        fh = open(self.idfile, 'wb')
        dump({'ids': ids,
              'failed': self.failed,
              'source_names': self.source_names}, fh)
        fh.close()
        log.debug('Saved test ids: %s, failed %s to %s',
                  ids, self.failed, self.idfile)

    def loadTestsFromNames(self, names, module=None):
        """Translate ids in the list of requested names into their
        test addresses, if they are found in my dict of tests.
        """
        log.debug('ltfn %s %s', names, module)
        try:
            fh = open(self.idfile, 'rb')
            data = load(fh)
            if 'ids' in data:
                self.ids = data['ids']
                self.failed = data['failed']
                self.source_names = data['source_names']
            else:
                # old ids field
                self.ids = data
                self.failed = []
                self.source_names = names
            if self.ids:
                self.id = max(self.ids) + 1
                # rebuild the reverse {address: id} map from {id: address}
                self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
            else:
                self.id = 1
            log.debug(
                'Loaded test ids %s tests %s failed %s sources %s from %s',
                self.ids, self.tests, self.failed, self.source_names,
                self.idfile)
            fh.close()
        except IOError:
            # no ids file yet: collect ids during this run instead
            log.debug('IO error reading %s', self.idfile)

        if self.loopOnFailed and self.failed:
            # replace the requested names with last run's failures
            self.collecting = False
            names = self.failed
            self.failed = []
        # I don't load any tests myself, only translate names like '#2'
        # into the associated test addresses
        translated = []
        new_source = []
        really_new = []
        for name in names:
            trans = self.tr(name)
            if trans != name:
                translated.append(trans)
            else:
                new_source.append(name)
        # names that are not ids and that are not in the current
        # list of source names go into the list for next time
        if new_source:
            new_set = set(new_source)
            old_set = set(self.source_names)
            log.debug("old: %s new: %s", old_set, new_set)
            really_new = [s for s in new_source
                          if not s in old_set]
            if really_new:
                # remember new sources
                self.source_names.extend(really_new)
            if not translated:
                # new set of source names, no translations
                # means "run the requested tests"
                names = new_source
        else:
            # no new names to translate and add to id set
            self.collecting = False
        log.debug("translated: %s new sources %s names %s",
                  translated, really_new, names)
        return (None, translated + really_new or names)

    def makeName(self, addr):
        """Build a loadable test name string from a
        (filename, module, call) test address tuple.
        """
        log.debug("Make name %s", addr)
        filename, module, call = addr
        if filename is not None:
            # prefer the source file; src() maps compiled files to .py
            head = src(filename)
        else:
            head = module
        if call is not None:
            return "%s:%s" % (head, call)
        return head

    def setOutputStream(self, stream):
        """Get handle on output stream so the plugin can print id #s
        """
        self.stream = stream

    def startTest(self, test):
        """Maybe output an id # before the test name.

        Example output::

          #1 test.test ... ok
          #2 test.test_two ... ok

        """
        adr = test.address()
        log.debug('start test %s (%s)', adr, adr in self.tests)
        if adr in self.tests:
            if adr in self._seen:
                # id already printed for this address during this run
                self.write(' ')
            else:
                self.write('#%s ' % self.tests[adr])
                self._seen[adr] = 1
            return
        # new test: assign it the next free id
        self.tests[adr] = self.id
        self.write('#%s ' % self.id)
        self.id += 1

    def afterTest(self, test):
        """Record the id of a failed or errored test for --failed mode.
        """
        # None means test never ran, False means failed/err
        if test.passed is False:
            try:
                key = str(self.tests[test.address()])
            except KeyError:
                # never saw this test -- startTest didn't run
                pass
            else:
                if key not in self.failed:
                    self.failed.append(key)

    def tr(self, name):
        """Translate an id like '#2' (or bare '2') into a test name;
        names that are not ids are returned unchanged.
        """
        log.debug("tr '%s'", name)
        try:
            key = int(name.replace('#', ''))
        except ValueError:
            # not an id: pass the name through untranslated
            return name
        log.debug("Got key %s", key)
        # I'm running tests mapped from the ids file,
        # not collecting new ones
        if key in self.ids:
            return self.makeName(self.ids[key])
        return name

    def write(self, output):
        """Write to the output stream, but only in verbose mode."""
        if self._write_hashes:
            self.stream.write(output)
diff --git a/scripts/external_libs/nose-1.3.4/python2/nose/plugins/xunit.py b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/xunit.py
new file mode 100755
index 00000000..e1ec0e1d
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/python2/nose/plugins/xunit.py
@@ -0,0 +1,329 @@
+"""This plugin provides test results in the standard XUnit XML format.
+
+It's designed for the `Jenkins`_ (previously Hudson) continuous build
+system, but will probably work for anything else that understands an
+XUnit-formatted XML representation of test results.
+
+Add this shell command to your builder ::
+
+ nosetests --with-xunit
+
+And by default a file named nosetests.xml will be written to the
+working directory.
+
+In a Jenkins builder, tick the box named "Publish JUnit test result report"
+under the Post-build Actions and enter this value for Test report XMLs::
+
+ **/nosetests.xml
+
+If you need to change the name or location of the file, you can set the
+``--xunit-file`` option.
+
+Here is an abbreviated version of what an XML test report might look like::
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
+ <testcase classname="path_to_test_suite.TestSomething"
+ name="test_it" time="0">
+ <error type="exceptions.TypeError" message="oops, wrong type">
+ Traceback (most recent call last):
+ ...
+ TypeError: oops, wrong type
+ </error>
+ </testcase>
+ </testsuite>
+
+.. _Jenkins: http://jenkins-ci.org/
+
+"""
+import codecs
+import doctest
+import os
+import sys
+import traceback
+import re
+import inspect
+from StringIO import StringIO
+from time import time
+from xml.sax import saxutils
+
+from nose.plugins.base import Plugin
+from nose.exc import SkipTest
+from nose.pyversion import force_unicode, format_exception
+
# Invalid XML characters, control characters 0-31 sans \t, \n and \r
CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")

# Matches test ids of the form "dotted.name(args...)"; group 1 is the
# dotted name, group 2 the parenthesized argument portion.
TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
+
def xml_safe(value):
    """Return *value* with characters invalid in XML replaced by '?'.

    Invalid characters are the control characters 0-31 other than
    tab, newline and carriage return.
    """
    cleaned = CONTROL_CHARACTERS.sub('?', value)
    return cleaned
+
def escape_cdata(cdata):
    """Escape *cdata* for safe inclusion in an XML CDATA section.

    The terminator ']]>' may not appear inside a CDATA section, so any
    occurrence is split across two adjacent sections.
    """
    safe = xml_safe(cdata)
    return safe.replace(']]>', ']]>]]&gt;<![CDATA[')
+
def id_split(idval):
    """Split a test id into a [classname, testname] pair.

    Ids of the form ``dotted.path.name(args)`` keep the parenthesized
    argument portion attached to the test name; otherwise the id is
    split on its last dot. An id with no dot yields a single-element
    list in the plain case, and an empty classname in the
    parenthesized case.
    """
    m = re.match(r'^(.*?)(\(.*\))$', idval)
    if m:
        name, fargs = m.groups()
        # rpartition, not rsplit: a parenthesized id with no dotted
        # prefix (e.g. "test_it(1,)") would make rsplit return a
        # single element and the two-name unpack would raise
        # ValueError.
        head, _, tail = name.rpartition(".")
        return [head, tail + fargs]
    return idval.rsplit(".", 1)
+
+def nice_classname(obj):
+ """Returns a nice name for class object or class instance.
+
+ >>> nice_classname(Exception()) # doctest: +ELLIPSIS
+ '...Exception'
+ >>> nice_classname(Exception) # doctest: +ELLIPSIS
+ '...Exception'
+
+ """
+ if inspect.isclass(obj):
+ cls_name = obj.__name__
+ else:
+ cls_name = obj.__class__.__name__
+ mod = inspect.getmodule(obj)
+ if mod:
+ name = mod.__name__
+ # jython
+ if name.startswith('org.python.core.'):
+ name = name[len('org.python.core.'):]
+ return "%s.%s" % (name, cls_name)
+ else:
+ return cls_name
+
def exc_message(exc_info):
    """Return the exception's message as XML-safe unicode text.

    ``exc_info`` is a standard (type, value, traceback) triple.
    """
    exc = exc_info[1]
    if exc is None:
        # str exception
        result = exc_info[0]
    else:
        try:
            result = str(exc)
        except UnicodeEncodeError:
            try:
                # Python 2 fallback for non-ASCII messages
                result = unicode(exc)
            except UnicodeError:
                # Fallback to args as neither str nor
                # unicode(Exception(u'\xe6')) work in Python < 2.6
                result = exc.args[0]
        result = force_unicode(result, 'UTF-8')
    return xml_safe(result)
+
class Tee(object):
    """File-like object that duplicates every write to several streams.

    Used to capture test output while still echoing it to the real
    stdout/stderr.
    """

    def __init__(self, encoding, *args):
        self._encoding = encoding
        self._streams = args

    def write(self, data):
        """Write *data* (coerced to unicode) to every stream."""
        data = force_unicode(data, self._encoding)
        for stream in self._streams:
            stream.write(data)

    def writelines(self, lines):
        """Write each line via write()."""
        for line in lines:
            self.write(line)

    def flush(self):
        """Flush every underlying stream."""
        for stream in self._streams:
            stream.flush()

    def isatty(self):
        # Never a terminal, even if an underlying stream is one.
        return False
+
+
class Xunit(Plugin):
    """This plugin provides test results in the standard XUnit XML format."""
    name = 'xunit'
    # high score: run after most other plugins have seen the events
    score = 1500
    encoding = 'UTF-8'
    # file handle for the XML report; opened in report()
    error_report_file = None

    def __init__(self):
        super(Xunit, self).__init__()
        # stack of saved (stdout, stderr) pairs, one per capture level
        self._capture_stack = []
        # StringIO buffers holding output captured for the current test
        self._currentStdout = None
        self._currentStderr = None

    def _timeTaken(self):
        # Seconds elapsed since beforeTest() started the timer.
        if hasattr(self, '_timer'):
            taken = time() - self._timer
        else:
            # test died before it ran (probably error in setup())
            # or success/failure added before test started probably
            # due to custom TestResult munging
            taken = 0.0
        return taken

    def _quoteattr(self, attr):
        """Escape an XML attribute. Value can be unicode."""
        attr = xml_safe(attr)
        return saxutils.quoteattr(attr)

    def options(self, parser, env):
        """Sets additional command line options."""
        Plugin.options(self, parser, env)
        parser.add_option(
            '--xunit-file', action='store',
            dest='xunit_file', metavar="FILE",
            default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
            help=("Path to xml file to store the xunit report in. "
                  "Default is nosetests.xml in the working directory "
                  "[NOSE_XUNIT_FILE]"))

    def configure(self, options, config):
        """Configures the xunit plugin."""
        Plugin.configure(self, options, config)
        self.config = config
        if self.enabled:
            # per-outcome counters for the <testsuite> attributes
            self.stats = {'errors': 0,
                          'failures': 0,
                          'passes': 0,
                          'skipped': 0
                          }
            # accumulated <testcase> XML fragments, written in report()
            self.errorlist = []
            self.error_report_file_name = os.path.realpath(options.xunit_file)

    def report(self, stream):
        """Writes an Xunit-formatted XML file

        The file includes a report of test errors and failures.

        """
        self.error_report_file = codecs.open(self.error_report_file_name, 'w',
                                             self.encoding, 'replace')
        self.stats['encoding'] = self.encoding
        self.stats['total'] = (self.stats['errors'] + self.stats['failures']
                               + self.stats['passes'] + self.stats['skipped'])
        self.error_report_file.write(
            u'<?xml version="1.0" encoding="%(encoding)s"?>'
            u'<testsuite name="nosetests" tests="%(total)d" '
            u'errors="%(errors)d" failures="%(failures)d" '
            u'skip="%(skipped)d">' % self.stats)
        self.error_report_file.write(u''.join([force_unicode(e, self.encoding)
                                               for e in self.errorlist]))
        self.error_report_file.write(u'</testsuite>')
        self.error_report_file.close()
        if self.config.verbosity > 1:
            stream.writeln("-" * 70)
            stream.writeln("XML: %s" % self.error_report_file.name)

    def _startCapture(self):
        # Save the current streams, then tee writes into fresh buffers
        # so captured output can be embedded in the testcase element.
        self._capture_stack.append((sys.stdout, sys.stderr))
        self._currentStdout = StringIO()
        self._currentStderr = StringIO()
        sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout)
        sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr)

    def startContext(self, context):
        """Begin capturing stdout/stderr for a test context."""
        self._startCapture()

    def stopContext(self, context):
        """Restore the streams saved when the context started."""
        self._endCapture()

    def beforeTest(self, test):
        """Initializes a timer before starting a test."""
        self._timer = time()
        self._startCapture()

    def _endCapture(self):
        # Restore the previous stdout/stderr pair, if any.
        if self._capture_stack:
            sys.stdout, sys.stderr = self._capture_stack.pop()

    def afterTest(self, test):
        """Stop capturing and drop this test's output buffers."""
        self._endCapture()
        self._currentStdout = None
        self._currentStderr = None

    def finalize(self, test):
        """Unwind any capture levels still left on the stack."""
        while self._capture_stack:
            self._endCapture()

    def _getCapturedStdout(self):
        # Captured stdout as a <system-out> fragment, or '' if none.
        if self._currentStdout:
            value = self._currentStdout.getvalue()
            if value:
                return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(
                        value)
        return ''

    def _getCapturedStderr(self):
        # Captured stderr as a <system-err> fragment, or '' if none.
        if self._currentStderr:
            value = self._currentStderr.getvalue()
            if value:
                return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(
                        value)
        return ''

    def addError(self, test, err, capt=None):
        """Add error output to Xunit report.
        """
        taken = self._timeTaken()

        # SkipTest arrives through addError but is reported (and
        # counted) as a skip, not an error.
        if issubclass(err[0], SkipTest):
            type = 'skipped'
            self.stats['skipped'] += 1
        else:
            type = 'error'
            self.stats['errors'] += 1

        tb = format_exception(err, self.encoding)
        id = test.id()

        self.errorlist.append(
            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
            u'<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
            u'</%(type)s>%(systemout)s%(systemerr)s</testcase>' %
            {'cls': self._quoteattr(id_split(id)[0]),
             'name': self._quoteattr(id_split(id)[-1]),
             'taken': taken,
             'type': type,
             'errtype': self._quoteattr(nice_classname(err[0])),
             'message': self._quoteattr(exc_message(err)),
             'tb': escape_cdata(tb),
             'systemout': self._getCapturedStdout(),
             'systemerr': self._getCapturedStderr(),
             })

    def addFailure(self, test, err, capt=None, tb_info=None):
        """Add failure output to Xunit report.
        """
        taken = self._timeTaken()
        tb = format_exception(err, self.encoding)
        self.stats['failures'] += 1
        id = test.id()

        self.errorlist.append(
            u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
            u'<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
            u'</failure>%(systemout)s%(systemerr)s</testcase>' %
            {'cls': self._quoteattr(id_split(id)[0]),
             'name': self._quoteattr(id_split(id)[-1]),
             'taken': taken,
             'errtype': self._quoteattr(nice_classname(err[0])),
             'message': self._quoteattr(exc_message(err)),
             'tb': escape_cdata(tb),
             'systemout': self._getCapturedStdout(),
             'systemerr': self._getCapturedStderr(),
             })

    def addSuccess(self, test, capt=None):
        """Add success output to Xunit report.
        """
        taken = self._timeTaken()
        self.stats['passes'] += 1
        id = test.id()
        self.errorlist.append(
            '<testcase classname=%(cls)s name=%(name)s '
            'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' %
            {'cls': self._quoteattr(id_split(id)[0]),
             'name': self._quoteattr(id_split(id)[-1]),
             'taken': taken,
             'systemout': self._getCapturedStdout(),
             'systemerr': self._getCapturedStderr(),
             })