author    Yaroslav Brustinov <ybrustin@cisco.com>  2016-01-05 15:22:22 +0200
committer Yaroslav Brustinov <ybrustin@cisco.com>  2016-01-05 15:22:22 +0200
commit    823b8294539f2e55db09795a7fff03d7be6b6346 (patch)
tree      149cdce761ead614409829d9ab1c2d9cdf680c4a /scripts/external_libs
parent    fecdb3ea73b380e01a8877c8e88ce61e853000bc (diff)
move regression to trex-core

slight fixes of hltapi + vm in packet builder
update yaml lib version from 3.01 to 3.11
Diffstat (limited to 'scripts/external_libs')
-rw-r--r--  scripts/external_libs/PyYAML-3.01/LICENSE | 19
-rw-r--r--  scripts/external_libs/PyYAML-3.01/PKG-INFO | 28
-rw-r--r--  scripts/external_libs/PyYAML-3.01/README | 18
-rw-r--r--  scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py | 484
-rw-r--r--  scripts/external_libs/PyYAML-3.01/setup.py | 52
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/AUTHORS | 27
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/PKG-INFO | 38
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/lgpl.txt | 504
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/__init__.py | 15
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/__main__.py | 8
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/case.py | 397
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/commands.py | 172
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/config.py | 661
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/core.py | 341
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/exc.py | 9
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/ext/__init__.py | 3
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py | 2272
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/failure.py | 42
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/importer.py | 167
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/inspector.py | 207
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/loader.py | 619
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py | 190
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py | 45
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py | 286
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/base.py | 725
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py | 34
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/capture.py | 115
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/collect.py | 94
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/cover.py | 253
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/debug.py | 67
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py | 45
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py | 455
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py | 210
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py | 49
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py | 103
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py | 245
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/manager.py | 460
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py | 835
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py | 416
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/prof.py | 154
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/skip.py | 63
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/testid.py | 306
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py | 329
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/proxy.py | 188
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/pyversion.py | 214
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/result.py | 200
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/selector.py | 251
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py | 1
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py | 189
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/suite.py | 609
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/tools/__init__.py | 15
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py | 151
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/tools/trivial.py | 54
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/twistedtools.py | 173
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/usage.txt | 115
-rwxr-xr-x  scripts/external_libs/nose-1.3.4/nose/util.py | 660
-rwxr-xr-x  scripts/external_libs/progressbar-2.2/LICENSE | 16
-rwxr-xr-x  scripts/external_libs/progressbar-2.2/MANIFEST | 6
-rwxr-xr-x  scripts/external_libs/progressbar-2.2/MANIFEST.in | 3
-rwxr-xr-x  scripts/external_libs/progressbar-2.2/PKG-INFO | 38
-rwxr-xr-x  scripts/external_libs/progressbar-2.2/README | 18
-rwxr-xr-x  scripts/external_libs/progressbar-2.2/progressbar.py | 381
-rwxr-xr-x  scripts/external_libs/progressbar-2.2/setup.py | 31
-rwxr-xr-x  scripts/external_libs/rednose-0.4.1/rednose.py | 387
-rwxr-xr-x  scripts/external_libs/rednose-0.4.1/setup.py | 29
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/__init__.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/__init__.py) | 117
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/composer.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/composer.py) | 48
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/constructor.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/constructor.py) | 317
-rwxr-xr-x  scripts/external_libs/yaml-3.11/yaml/cyaml.py | 85
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/dumper.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/dumper.py) | 2
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/emitter.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/emitter.py) | 258
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/error.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/error.py) | 2
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/events.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/events.py) | 2
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/loader.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/loader.py) | 0
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/nodes.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/nodes.py) | 2
-rwxr-xr-x  scripts/external_libs/yaml-3.11/yaml/parser.py | 589
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/reader.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/reader.py) | 58
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/representer.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/representer.py) | 207
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/resolver.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/resolver.py) | 47
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/scanner.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/scanner.py) | 57
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/serializer.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/serializer.py) | 26
-rwxr-xr-x [-rw-r--r--]  scripts/external_libs/yaml-3.11/yaml/tokens.py (renamed from scripts/external_libs/PyYAML-3.01/lib/yaml/tokens.py) | 2
82 files changed, 15947 insertions, 1163 deletions
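Note: the commit swaps the vendored PyYAML tree from 3.01 (nested under
PyYAML-3.01/lib/) to a flat yaml-3.11/yaml/ package, and adds vendored copies
of nose 1.3.4, progressbar 2.2 and rednose 0.4.1. A minimal sketch of how a
script would pick up such a bundled copy ahead of any system-wide install; the
helper name and call site are illustrative assumptions, not code from this
commit:

    import os
    import sys

    EXTERNAL_LIBS = os.path.join(os.path.dirname(os.path.abspath(__file__)),
                                 'external_libs')

    def use_vendored(lib_dir):
        # Prepend so the bundled package shadows any system-wide one.
        path = os.path.join(EXTERNAL_LIBS, lib_dir)
        if path not in sys.path:
            sys.path.insert(0, path)

    use_vendored('yaml-3.11')   # this directory contains the yaml/ package
    import yaml                 # resolves to the bundled copy
    print(yaml.__version__)     # expected: 3.11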
diff --git a/scripts/external_libs/PyYAML-3.01/LICENSE b/scripts/external_libs/PyYAML-3.01/LICENSE
deleted file mode 100644
index 050ced23..00000000
--- a/scripts/external_libs/PyYAML-3.01/LICENSE
+++ /dev/null
@@ -1,19 +0,0 @@
-Copyright (c) 2006 Kirill Simonov
-
-Permission is hereby granted, free of charge, to any person obtaining a copy of
-this software and associated documentation files (the "Software"), to deal in
-the Software without restriction, including without limitation the rights to
-use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies
-of the Software, and to permit persons to whom the Software is furnished to do
-so, subject to the following conditions:
-
-The above copyright notice and this permission notice shall be included in all
-copies or substantial portions of the Software.
-
-THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
-IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
-FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
-AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
-LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
-OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
-SOFTWARE.
diff --git a/scripts/external_libs/PyYAML-3.01/PKG-INFO b/scripts/external_libs/PyYAML-3.01/PKG-INFO
deleted file mode 100644
index 6ec73b1f..00000000
--- a/scripts/external_libs/PyYAML-3.01/PKG-INFO
+++ /dev/null
@@ -1,28 +0,0 @@
-Metadata-Version: 1.0
-Name: PyYAML
-Version: 3.01
-Summary: YAML parser and emitter for Python
-Home-page: http://pyyaml.org/wiki/PyYAML
-Author: Kirill Simonov
-Author-email: xi@resolvent.net
-License: MIT
-Download-URL: http://pyyaml.org/download/pyyaml/PyYAML-3.01.tar.gz
-Description: YAML is a data serialization format designed for human readability and
- interaction with scripting languages. PyYAML is a YAML parser and
- emitter for Python.
-
- PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
- support, capable extension API, and sensible error messages. PyYAML
- supports standard YAML tags and provides Python-specific tags that allow
- to represent an arbitrary Python object.
-
- PyYAML is applicable for a broad range of tasks from complex
- configuration files to object serialization and persistance.
-Platform: Any
-Classifier: Development Status :: 4 - Beta
-Classifier: Intended Audience :: Developers
-Classifier: License :: OSI Approved :: MIT License
-Classifier: Operating System :: OS Independent
-Classifier: Programming Language :: Python
-Classifier: Topic :: Software Development :: Libraries :: Python Modules
-Classifier: Topic :: Text Processing :: Markup
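Note: the PKG-INFO above describes PyYAML's parser/emitter API. A minimal
round trip with the replacement vendored version (3.11) looks like this; a
sketch, assuming the yaml-3.11 directory is importable as shown earlier:

    import yaml

    # safe_load restricts construction to standard YAML tags, avoiding the
    # arbitrary-Python-object tags the description above mentions.
    data = yaml.safe_load("name: trex\nports: [0, 1]")
    assert data == {'name': 'trex', 'ports': [0, 1]}
    print(yaml.dump(data, default_flow_style=False))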
diff --git a/scripts/external_libs/PyYAML-3.01/README b/scripts/external_libs/PyYAML-3.01/README
deleted file mode 100644
index 8a6dec77..00000000
--- a/scripts/external_libs/PyYAML-3.01/README
+++ /dev/null
@@ -1,18 +0,0 @@
-PyYAML 3000 - The next generation YAML parser and emitter for Python.
-
-To install, type 'python setup.py install'.
-
-For more information, check the PyYAML homepage:
-'http://pyyaml.org/wiki/PyYAML'.
-
-Documentation (rough and incomplete though):
-'http://pyyaml.org/wiki/PyYAMLDocumentation'.
-
-Post your questions and opinions to the YAML-Core mailing list:
-'http://lists.sourceforge.net/lists/listinfo/yaml-core'.
-
-Submit bug reports and feature requests to the PyYAML bug tracker:
-'http://pyyaml.org/newticket?component=pyyaml'.
-
-PyYAML 3000 is written by Kirill Simonov <xi@resolvent.net>. It is released
-under the MIT license. See the file LICENSE for more details.
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py b/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py
deleted file mode 100644
index 2aec0fe3..00000000
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/parser.py
+++ /dev/null
@@ -1,484 +0,0 @@
-
-# YAML can be parsed by an LL(1) parser!
-#
-# We use the following production rules:
-# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
-# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END?
-# implicit_document ::= block_node DOCUMENT-END?
-# block_node ::= ALIAS | properties? block_content
-# flow_node ::= ALIAS | properties? flow_content
-# properties ::= TAG ANCHOR? | ANCHOR TAG?
-# block_content ::= block_collection | flow_collection | SCALAR
-# flow_content ::= flow_collection | SCALAR
-# block_collection ::= block_sequence | block_mapping
-# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
-# block_mapping ::= BLOCK-MAPPING_START ((KEY block_node_or_indentless_sequence?)? (VALUE block_node_or_indentless_sequence?)?)* BLOCK-END
-# block_node_or_indentless_sequence ::= ALIAS | properties? (block_content | indentless_block_sequence)
-# indentless_block_sequence ::= (BLOCK-ENTRY block_node?)+
-# flow_collection ::= flow_sequence | flow_mapping
-# flow_sequence ::= FLOW-SEQUENCE-START (flow_sequence_entry FLOW-ENTRY)* flow_sequence_entry? FLOW-SEQUENCE-END
-# flow_mapping ::= FLOW-MAPPING-START (flow_mapping_entry FLOW-ENTRY)* flow_mapping_entry? FLOW-MAPPING-END
-# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
-
-# TODO: support for BOM within a stream.
-# stream ::= (BOM? implicit_document)? (BOM? explicit_document)* STREAM-END
-
-# FIRST sets:
-# stream: { STREAM-START }
-# explicit_document: { DIRECTIVE DOCUMENT-START }
-# implicit_document: FIRST(block_node)
-# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
-# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# block_sequence: { BLOCK-SEQUENCE-START }
-# block_mapping: { BLOCK-MAPPING-START }
-# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
-# indentless_sequence: { ENTRY }
-# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
-# flow_sequence: { FLOW-SEQUENCE-START }
-# flow_mapping: { FLOW-MAPPING-START }
-# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
-
-__all__ = ['Parser', 'ParserError']
-
-from error import MarkedYAMLError
-from tokens import *
-from events import *
-from scanner import *
-
-class ParserError(MarkedYAMLError):
- pass
-
-class Parser:
- # Since writing a recursive-descendant parser is a straightforward task, we
- # do not give many comments here.
- # Note that we use Python generators. If you rewrite the parser in another
- # language, you may replace all 'yield'-s with event handler calls.
-
- DEFAULT_TAGS = {
- u'!': u'!',
- u'!!': u'tag:yaml.org,2002:',
- }
-
- def __init__(self):
- self.current_event = None
- self.yaml_version = None
- self.tag_handles = {}
- self.event_generator = self.parse_stream()
-
- def check_event(self, *choices):
- # Check the type of the next event.
- if self.current_event is None:
- try:
- self.current_event = self.event_generator.next()
- except StopIteration:
- pass
- if self.current_event is not None:
- if not choices:
- return True
- for choice in choices:
- if isinstance(self.current_event, choice):
- return True
- return False
-
- def peek_event(self):
- # Get the next event.
- if self.current_event is None:
- try:
- self.current_event = self.event_generator.next()
- except StopIteration:
- pass
- return self.current_event
-
- def get_event(self):
- # Get the next event.
- if self.current_event is None:
- try:
- self.current_event = self.event_generator.next()
- except StopIteration:
- pass
- value = self.current_event
- self.current_event = None
- return value
-
- def __iter__(self):
- # Iterator protocol.
- return self.event_generator
-
- def parse_stream(self):
- # STREAM-START implicit_document? explicit_document* STREAM-END
-
- # Parse start of stream.
- token = self.get_token()
- yield StreamStartEvent(token.start_mark, token.end_mark,
- encoding=token.encoding)
-
- # Parse implicit document.
- if not self.check_token(DirectiveToken, DocumentStartToken,
- StreamEndToken):
- self.tag_handles = self.DEFAULT_TAGS
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- yield DocumentStartEvent(start_mark, end_mark,
- explicit=False)
- for event in self.parse_block_node():
- yield event
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- while self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit = True
- yield DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Parse explicit documents.
- while not self.check_token(StreamEndToken):
- token = self.peek_token()
- start_mark = token.start_mark
- version, tags = self.process_directives()
- if not self.check_token(DocumentStartToken):
- raise ParserError(None, None,
- "expected '<document start>', but found %r"
- % self.peek_token().id,
- self.peek_token().start_mark)
- token = self.get_token()
- end_mark = token.end_mark
- yield DocumentStartEvent(start_mark, end_mark,
- explicit=True, version=version, tags=tags)
- if self.check_token(DirectiveToken,
- DocumentStartToken, DocumentEndToken, StreamEndToken):
- yield self.process_empty_scalar(token.end_mark)
- else:
- for event in self.parse_block_node():
- yield event
- token = self.peek_token()
- start_mark = end_mark = token.start_mark
- explicit = False
- while self.check_token(DocumentEndToken):
- token = self.get_token()
- end_mark = token.end_mark
- explicit=True
- yield DocumentEndEvent(start_mark, end_mark,
- explicit=explicit)
-
- # Parse end of stream.
- token = self.get_token()
- yield StreamEndEvent(token.start_mark, token.end_mark)
-
- def process_directives(self):
- # DIRECTIVE*
- self.yaml_version = None
- self.tag_handles = {}
- while self.check_token(DirectiveToken):
- token = self.get_token()
- if token.name == u'YAML':
- if self.yaml_version is not None:
- raise ParserError(None, None,
- "found duplicate YAML directive", token.start_mark)
- major, minor = token.value
- if major != 1:
- raise ParserError(None, None,
- "found incompatible YAML document (version 1.* is required)",
- token.start_mark)
- self.yaml_version = token.value
- elif token.name == u'TAG':
- handle, prefix = token.value
- if handle in self.tag_handles:
- raise ParserError(None, None,
- "duplicate tag handle %r" % handle.encode('utf-8'),
- token.start_mark)
- self.tag_handles[handle] = prefix
- if self.tag_handles:
- value = self.yaml_version, self.tag_handles.copy()
- else:
- value = self.yaml_version, None
- for key in self.DEFAULT_TAGS:
- if key not in self.tag_handles:
- self.tag_handles[key] = self.DEFAULT_TAGS[key]
- return value
-
- def parse_block_node(self):
- return self.parse_node(block=True)
-
- def parse_flow_node(self):
- return self.parse_node()
-
- def parse_block_node_or_indentless_sequence(self):
- return self.parse_node(block=True, indentless_sequence=True)
-
- def parse_node(self, block=False, indentless_sequence=False):
- # block_node ::= ALIAS | properties? block_content
- # flow_node ::= ALIAS | properties? flow_content
- # properties ::= TAG ANCHOR? | ANCHOR TAG?
- # block_content ::= block_collection | flow_collection | SCALAR
- # flow_content ::= flow_collection | SCALAR
- # block_collection ::= block_sequence | block_mapping
- # block_node_or_indentless_sequence ::= ALIAS | properties?
- # (block_content | indentless_block_sequence)
- if self.check_token(AliasToken):
- token = self.get_token()
- yield AliasEvent(token.value, token.start_mark, token.end_mark)
- else:
- anchor = None
- tag = None
- start_mark = end_mark = tag_mark = None
- if self.check_token(AnchorToken):
- token = self.get_token()
- start_mark = token.start_mark
- end_mark = token.end_mark
- anchor = token.value
- if self.check_token(TagToken):
- token = self.get_token()
- tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- elif self.check_token(TagToken):
- token = self.get_token()
- start_mark = tag_mark = token.start_mark
- end_mark = token.end_mark
- tag = token.value
- if self.check_token(AnchorToken):
- token = self.get_token()
- end_mark = token.end_mark
- anchor = token.value
- if tag is not None and tag != u'!':
- handle, suffix = tag
- if handle is not None:
- if handle not in self.tag_handles:
- raise ParserError("while parsing a node", start_mark,
- "found undefined tag handle %r" % handle.encode('utf-8'),
- tag_mark)
- tag = self.tag_handles[handle]+suffix
- else:
- tag = suffix
- #if tag == u'!':
- # raise ParserError("while parsing a node", start_mark,
- # "found non-specific tag '!'", tag_mark,
- # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
- if start_mark is None:
- start_mark = end_mark = self.peek_token().start_mark
- event = None
- collection_events = None
- implicit = (tag is None or tag == u'!')
- if indentless_sequence and self.check_token(BlockEntryToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark)
- collection_events = self.parse_indentless_sequence()
- else:
- if self.check_token(ScalarToken):
- token = self.get_token()
- end_mark = token.end_mark
- if (token.plain and tag is None) or tag == u'!':
- implicit = (True, False)
- elif tag is None:
- implicit = (False, True)
- else:
- implicit = (False, False)
- event = ScalarEvent(anchor, tag, implicit, token.value,
- start_mark, end_mark, style=token.style)
- elif self.check_token(FlowSequenceStartToken):
- end_mark = self.peek_token().end_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- collection_events = self.parse_flow_sequence()
- elif self.check_token(FlowMappingStartToken):
- end_mark = self.peek_token().end_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=True)
- collection_events = self.parse_flow_mapping()
- elif block and self.check_token(BlockSequenceStartToken):
- end_mark = self.peek_token().start_mark
- event = SequenceStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- collection_events = self.parse_block_sequence()
- elif block and self.check_token(BlockMappingStartToken):
- end_mark = self.peek_token().start_mark
- event = MappingStartEvent(anchor, tag, implicit,
- start_mark, end_mark, flow_style=False)
- collection_events = self.parse_block_mapping()
- elif anchor is not None or tag is not None:
- # Empty scalars are allowed even if a tag or an anchor is
- # specified.
- event = ScalarEvent(anchor, tag, (implicit, False), u'',
- start_mark, end_mark)
- else:
- if block:
- node = 'block'
- else:
- node = 'flow'
- token = self.peek_token()
- raise ParserError("while scanning a %s node" % node, start_mark,
- "expected the node content, but found %r" % token.id,
- token.start_mark)
- yield event
- if collection_events is not None:
- for event in collection_events:
- yield event
-
- def parse_block_sequence(self):
- # BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
- token = self.get_token()
- start_mark = token.start_mark
- while self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken, BlockEndToken):
- for event in self.parse_block_node():
- yield event
- else:
- yield self.process_empty_scalar(token.end_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while scanning a block collection", start_mark,
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- yield SequenceEndEvent(token.start_mark, token.end_mark)
-
- def parse_indentless_sequence(self):
- # (BLOCK-ENTRY block_node?)+
- while self.check_token(BlockEntryToken):
- token = self.get_token()
- if not self.check_token(BlockEntryToken,
- KeyToken, ValueToken, BlockEndToken):
- for event in self.parse_block_node():
- yield event
- else:
- yield self.process_empty_scalar(token.end_mark)
- token = self.peek_token()
- yield SequenceEndEvent(token.start_mark, token.start_mark)
-
- def parse_block_mapping(self):
- # BLOCK-MAPPING_START
- # ((KEY block_node_or_indentless_sequence?)?
- # (VALUE block_node_or_indentless_sequence?)?)*
- # BLOCK-END
- token = self.get_token()
- start_mark = token.start_mark
- while self.check_token(KeyToken, ValueToken):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- for event in self.parse_block_node_or_indentless_sequence():
- yield event
- else:
- yield self.process_empty_scalar(token.end_mark)
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(KeyToken, ValueToken, BlockEndToken):
- for event in self.parse_block_node_or_indentless_sequence():
- yield event
- else:
- yield self.process_empty_scalar(token.end_mark)
- else:
- token = self.peek_token()
- yield self.process_empty_scalar(token.start_mark)
- if not self.check_token(BlockEndToken):
- token = self.peek_token()
- raise ParserError("while scanning a block mapping", start_mark,
- "expected <block end>, but found %r" % token.id, token.start_mark)
- token = self.get_token()
- yield MappingEndEvent(token.start_mark, token.end_mark)
-
- def parse_flow_sequence(self):
- # flow_sequence ::= FLOW-SEQUENCE-START
- # (flow_sequence_entry FLOW-ENTRY)*
- # flow_sequence_entry?
- # FLOW-SEQUENCE-END
- # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- #
- # Note that while production rules for both flow_sequence_entry and
- # flow_mapping_entry are equal, their interpretations are different.
- # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
- # generate an inline mapping (set syntax).
- token = self.get_token()
- start_mark = token.start_mark
- while not self.check_token(FlowSequenceEndToken):
- if self.check_token(KeyToken):
- token = self.get_token()
- yield MappingStartEvent(None, None, True,
- token.start_mark, token.end_mark,
- flow_style=True)
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowSequenceEndToken):
- for event in self.parse_flow_node():
- yield event
- else:
- yield self.process_empty_scalar(token.end_mark)
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- for event in self.parse_flow_node():
- yield event
- else:
- yield self.process_empty_scalar(token.end_mark)
- else:
- token = self.peek_token()
- yield self.process_empty_scalar(token.start_mark)
- token = self.peek_token()
- yield MappingEndEvent(token.start_mark, token.start_mark)
- else:
- for event in self.parse_flow_node():
- yield event
- if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
- token = self.peek_token()
- raise ParserError("while scanning a flow sequence", start_mark,
- "expected ',' or ']', but got %r" % token.id, token.start_mark)
- if self.check_token(FlowEntryToken):
- self.get_token()
- token = self.get_token()
- yield SequenceEndEvent(token.start_mark, token.end_mark)
-
- def parse_flow_mapping(self):
- # flow_mapping ::= FLOW-MAPPING-START
- # (flow_mapping_entry FLOW-ENTRY)*
- # flow_mapping_entry?
- # FLOW-MAPPING-END
- # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
- token = self.get_token()
- start_mark = token.start_mark
- while not self.check_token(FlowMappingEndToken):
- if self.check_token(KeyToken):
- token = self.get_token()
- if not self.check_token(ValueToken,
- FlowEntryToken, FlowMappingEndToken):
- for event in self.parse_flow_node():
- yield event
- else:
- yield self.process_empty_scalar(token.end_mark)
- if self.check_token(ValueToken):
- token = self.get_token()
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- for event in self.parse_flow_node():
- yield event
- else:
- yield self.process_empty_scalar(token.end_mark)
- else:
- token = self.peek_token()
- yield self.process_empty_scalar(token.start_mark)
- else:
- for event in self.parse_flow_node():
- yield event
- yield self.process_empty_scalar(self.peek_token().start_mark)
- if not self.check_token(FlowEntryToken, FlowMappingEndToken):
- token = self.peek_token()
- raise ParserError("while scanning a flow mapping", start_mark,
- "expected ',' or '}', but got %r" % token.id, token.start_mark)
- if self.check_token(FlowEntryToken):
- self.get_token()
- if not self.check_token(FlowMappingEndToken):
- token = self.peek_token()
- raise ParserError("while scanning a flow mapping", start_mark,
- "expected '}', but found %r" % token.id, token.start_mark)
- token = self.get_token()
- yield MappingEndEvent(token.start_mark, token.end_mark)
-
- def process_empty_scalar(self, mark):
- return ScalarEvent(None, None, (True, False), u'', mark, mark)
-
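Note: the deleted 3.01 parser above is Python 2 only; it drives its generator
by hand via event_generator.next() and uses implicit relative imports
(from error import ...), which is part of why the vendored copy is bumped to
3.11. The same event-level interface survives in 3.11 as yaml.parse(), which
returns a generator of events; a minimal sketch:

    import yaml

    for event in yaml.parse("a: [1, 2]"):
        print(type(event).__name__)
    # StreamStartEvent, DocumentStartEvent, MappingStartEvent, ScalarEvent,
    # SequenceStartEvent, ScalarEvent, ScalarEvent, SequenceEndEvent,
    # MappingEndEvent, DocumentEndEvent, StreamEndEvent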
diff --git a/scripts/external_libs/PyYAML-3.01/setup.py b/scripts/external_libs/PyYAML-3.01/setup.py
deleted file mode 100644
index 23c1efac..00000000
--- a/scripts/external_libs/PyYAML-3.01/setup.py
+++ /dev/null
@@ -1,52 +0,0 @@
-
-NAME = 'PyYAML'
-VERSION = '3.01'
-DESCRIPTION = "YAML parser and emitter for Python"
-LONG_DESCRIPTION = """\
-YAML is a data serialization format designed for human readability and
-interaction with scripting languages. PyYAML is a YAML parser and
-emitter for Python.
-
-PyYAML features a complete YAML 1.1 parser, Unicode support, pickle
-support, capable extension API, and sensible error messages. PyYAML
-supports standard YAML tags and provides Python-specific tags that allow
-to represent an arbitrary Python object.
-
-PyYAML is applicable for a broad range of tasks from complex
-configuration files to object serialization and persistance."""
-AUTHOR = "Kirill Simonov"
-AUTHOR_EMAIL = 'xi@resolvent.net'
-LICENSE = "MIT"
-PLATFORMS = "Any"
-URL = "http://pyyaml.org/wiki/PyYAML"
-DOWNLOAD_URL = "http://pyyaml.org/download/pyyaml/%s-%s.tar.gz" % (NAME, VERSION)
-CLASSIFIERS = [
- "Development Status :: 4 - Beta",
- "Intended Audience :: Developers",
- "License :: OSI Approved :: MIT License",
- "Operating System :: OS Independent",
- "Programming Language :: Python",
- "Topic :: Software Development :: Libraries :: Python Modules",
- "Topic :: Text Processing :: Markup",
-]
-
-
-from distutils.core import setup
-
-setup(
- name=NAME,
- version=VERSION,
- description=DESCRIPTION,
- long_description=LONG_DESCRIPTION,
- author=AUTHOR,
- author_email=AUTHOR_EMAIL,
- license=LICENSE,
- platforms=PLATFORMS,
- url=URL,
- download_url=DOWNLOAD_URL,
- classifiers=CLASSIFIERS,
-
- package_dir={'': 'lib'},
- packages=['yaml'],
-)
-
diff --git a/scripts/external_libs/nose-1.3.4/AUTHORS b/scripts/external_libs/nose-1.3.4/AUTHORS
new file mode 100755
index 00000000..5414bcda
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/AUTHORS
@@ -0,0 +1,27 @@
+Jason Pellerin
+Kumar McMillan
+Mika Eloranta
+Jay Parlar
+Scot Doyle
+James Casbon
+Antoine Pitrou
+John J Lee
+Allen Bierbaum
+Pam Zerbinos
+Augie Fackler
+Peter Fein
+Kevin Mitchell
+Alex Stewart
+Timothee Peignier
+Thomas Kluyver
+Heng Liu
+Rosen Diankov
+Buck Golemon
+Bobby Impollonia
+Takafumi Arakaki
+Peter Bengtsson
+Gary Donovan
+Brendan McCollam
+Erik Rose
+Sascha Peilicke
+Andre Caron
diff --git a/scripts/external_libs/nose-1.3.4/PKG-INFO b/scripts/external_libs/nose-1.3.4/PKG-INFO
new file mode 100755
index 00000000..dea3d585
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/PKG-INFO
@@ -0,0 +1,38 @@
+Metadata-Version: 1.1
+Name: nose
+Version: 1.3.4
+Summary: nose extends unittest to make testing easier
+Home-page: http://readthedocs.org/docs/nose/
+Author: Jason Pellerin
+Author-email: jpellerin+nose@gmail.com
+License: GNU LGPL
+Description: nose extends the test loading and running features of unittest, making
+ it easier to write, find and run tests.
+
+ By default, nose will run tests in files or directories under the current
+ working directory whose names include "test" or "Test" at a word boundary
+ (like "test_this" or "functional_test" or "TestClass" but not
+ "libtest"). Test output is similar to that of unittest, but also includes
+ captured stdout output from failing tests, for easy print-style debugging.
+
+ These features, and many more, are customizable through the use of
+ plugins. Plugins included with nose provide support for doctest, code
+ coverage and profiling, flexible attribute-based test selection,
+ output capture and more. More information about writing plugins may be
+ found on in the nose API documentation, here:
+ http://readthedocs.org/docs/nose/
+
+ If you have recently reported a bug marked as fixed, or have a craving for
+ the very latest, you may want the development version instead:
+ https://github.com/nose-devs/nose/tarball/master#egg=nose-dev
+
+Keywords: test unittest doctest automatic discovery
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Intended Audience :: Developers
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: Natural Language :: English
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Programming Language :: Python :: 3
+Classifier: Topic :: Software Development :: Testing
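Note: the discovery convention described in the PKG-INFO above means a module
like the following is collected with no registration. A minimal sketch; the
file name test_math.py is an assumption chosen to match the "test" at a word
boundary rule:

    # test_math.py
    from nose.tools import assert_equal, raises

    def test_addition():
        assert_equal(2 + 2, 4)

    @raises(ZeroDivisionError)
    def test_divide_by_zero():
        1 / 0

Running nosetests in the containing directory finds and runs both functions.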
diff --git a/scripts/external_libs/nose-1.3.4/lgpl.txt b/scripts/external_libs/nose-1.3.4/lgpl.txt
new file mode 100755
index 00000000..8add30ad
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/lgpl.txt
@@ -0,0 +1,504 @@
+ GNU LESSER GENERAL PUBLIC LICENSE
+ Version 2.1, February 1999
+
+ Copyright (C) 1991, 1999 Free Software Foundation, Inc.
+ 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+ Everyone is permitted to copy and distribute verbatim copies
+ of this license document, but changing it is not allowed.
+
+[This is the first released version of the Lesser GPL. It also counts
+ as the successor of the GNU Library Public License, version 2, hence
+ the version number 2.1.]
+
+ Preamble
+
+ The licenses for most software are designed to take away your
+freedom to share and change it. By contrast, the GNU General Public
+Licenses are intended to guarantee your freedom to share and change
+free software--to make sure the software is free for all its users.
+
+ This license, the Lesser General Public License, applies to some
+specially designated software packages--typically libraries--of the
+Free Software Foundation and other authors who decide to use it. You
+can use it too, but we suggest you first think carefully about whether
+this license or the ordinary General Public License is the better
+strategy to use in any particular case, based on the explanations below.
+
+ When we speak of free software, we are referring to freedom of use,
+not price. Our General Public Licenses are designed to make sure that
+you have the freedom to distribute copies of free software (and charge
+for this service if you wish); that you receive source code or can get
+it if you want it; that you can change the software and use pieces of
+it in new free programs; and that you are informed that you can do
+these things.
+
+ To protect your rights, we need to make restrictions that forbid
+distributors to deny you these rights or to ask you to surrender these
+rights. These restrictions translate to certain responsibilities for
+you if you distribute copies of the library or if you modify it.
+
+ For example, if you distribute copies of the library, whether gratis
+or for a fee, you must give the recipients all the rights that we gave
+you. You must make sure that they, too, receive or can get the source
+code. If you link other code with the library, you must provide
+complete object files to the recipients, so that they can relink them
+with the library after making changes to the library and recompiling
+it. And you must show them these terms so they know their rights.
+
+ We protect your rights with a two-step method: (1) we copyright the
+library, and (2) we offer you this license, which gives you legal
+permission to copy, distribute and/or modify the library.
+
+ To protect each distributor, we want to make it very clear that
+there is no warranty for the free library. Also, if the library is
+modified by someone else and passed on, the recipients should know
+that what they have is not the original version, so that the original
+author's reputation will not be affected by problems that might be
+introduced by others.
+
+ Finally, software patents pose a constant threat to the existence of
+any free program. We wish to make sure that a company cannot
+effectively restrict the users of a free program by obtaining a
+restrictive license from a patent holder. Therefore, we insist that
+any patent license obtained for a version of the library must be
+consistent with the full freedom of use specified in this license.
+
+ Most GNU software, including some libraries, is covered by the
+ordinary GNU General Public License. This license, the GNU Lesser
+General Public License, applies to certain designated libraries, and
+is quite different from the ordinary General Public License. We use
+this license for certain libraries in order to permit linking those
+libraries into non-free programs.
+
+ When a program is linked with a library, whether statically or using
+a shared library, the combination of the two is legally speaking a
+combined work, a derivative of the original library. The ordinary
+General Public License therefore permits such linking only if the
+entire combination fits its criteria of freedom. The Lesser General
+Public License permits more lax criteria for linking other code with
+the library.
+
+ We call this license the "Lesser" General Public License because it
+does Less to protect the user's freedom than the ordinary General
+Public License. It also provides other free software developers Less
+of an advantage over competing non-free programs. These disadvantages
+are the reason we use the ordinary General Public License for many
+libraries. However, the Lesser license provides advantages in certain
+special circumstances.
+
+ For example, on rare occasions, there may be a special need to
+encourage the widest possible use of a certain library, so that it becomes
+a de-facto standard. To achieve this, non-free programs must be
+allowed to use the library. A more frequent case is that a free
+library does the same job as widely used non-free libraries. In this
+case, there is little to gain by limiting the free library to free
+software only, so we use the Lesser General Public License.
+
+ In other cases, permission to use a particular library in non-free
+programs enables a greater number of people to use a large body of
+free software. For example, permission to use the GNU C Library in
+non-free programs enables many more people to use the whole GNU
+operating system, as well as its variant, the GNU/Linux operating
+system.
+
+ Although the Lesser General Public License is Less protective of the
+users' freedom, it does ensure that the user of a program that is
+linked with the Library has the freedom and the wherewithal to run
+that program using a modified version of the Library.
+
+ The precise terms and conditions for copying, distribution and
+modification follow. Pay close attention to the difference between a
+"work based on the library" and a "work that uses the library". The
+former contains code derived from the library, whereas the latter must
+be combined with the library in order to run.
+
+ GNU LESSER GENERAL PUBLIC LICENSE
+ TERMS AND CONDITIONS FOR COPYING, DISTRIBUTION AND MODIFICATION
+
+ 0. This License Agreement applies to any software library or other
+program which contains a notice placed by the copyright holder or
+other authorized party saying it may be distributed under the terms of
+this Lesser General Public License (also called "this License").
+Each licensee is addressed as "you".
+
+ A "library" means a collection of software functions and/or data
+prepared so as to be conveniently linked with application programs
+(which use some of those functions and data) to form executables.
+
+ The "Library", below, refers to any such software library or work
+which has been distributed under these terms. A "work based on the
+Library" means either the Library or any derivative work under
+copyright law: that is to say, a work containing the Library or a
+portion of it, either verbatim or with modifications and/or translated
+straightforwardly into another language. (Hereinafter, translation is
+included without limitation in the term "modification".)
+
+ "Source code" for a work means the preferred form of the work for
+making modifications to it. For a library, complete source code means
+all the source code for all modules it contains, plus any associated
+interface definition files, plus the scripts used to control compilation
+and installation of the library.
+
+ Activities other than copying, distribution and modification are not
+covered by this License; they are outside its scope. The act of
+running a program using the Library is not restricted, and output from
+such a program is covered only if its contents constitute a work based
+on the Library (independent of the use of the Library in a tool for
+writing it). Whether that is true depends on what the Library does
+and what the program that uses the Library does.
+
+ 1. You may copy and distribute verbatim copies of the Library's
+complete source code as you receive it, in any medium, provided that
+you conspicuously and appropriately publish on each copy an
+appropriate copyright notice and disclaimer of warranty; keep intact
+all the notices that refer to this License and to the absence of any
+warranty; and distribute a copy of this License along with the
+Library.
+
+ You may charge a fee for the physical act of transferring a copy,
+and you may at your option offer warranty protection in exchange for a
+fee.
+
+ 2. You may modify your copy or copies of the Library or any portion
+of it, thus forming a work based on the Library, and copy and
+distribute such modifications or work under the terms of Section 1
+above, provided that you also meet all of these conditions:
+
+ a) The modified work must itself be a software library.
+
+ b) You must cause the files modified to carry prominent notices
+ stating that you changed the files and the date of any change.
+
+ c) You must cause the whole of the work to be licensed at no
+ charge to all third parties under the terms of this License.
+
+ d) If a facility in the modified Library refers to a function or a
+ table of data to be supplied by an application program that uses
+ the facility, other than as an argument passed when the facility
+ is invoked, then you must make a good faith effort to ensure that,
+ in the event an application does not supply such function or
+ table, the facility still operates, and performs whatever part of
+ its purpose remains meaningful.
+
+ (For example, a function in a library to compute square roots has
+ a purpose that is entirely well-defined independent of the
+ application. Therefore, Subsection 2d requires that any
+ application-supplied function or table used by this function must
+ be optional: if the application does not supply it, the square
+ root function must still compute square roots.)
+
+These requirements apply to the modified work as a whole. If
+identifiable sections of that work are not derived from the Library,
+and can be reasonably considered independent and separate works in
+themselves, then this License, and its terms, do not apply to those
+sections when you distribute them as separate works. But when you
+distribute the same sections as part of a whole which is a work based
+on the Library, the distribution of the whole must be on the terms of
+this License, whose permissions for other licensees extend to the
+entire whole, and thus to each and every part regardless of who wrote
+it.
+
+Thus, it is not the intent of this section to claim rights or contest
+your rights to work written entirely by you; rather, the intent is to
+exercise the right to control the distribution of derivative or
+collective works based on the Library.
+
+In addition, mere aggregation of another work not based on the Library
+with the Library (or with a work based on the Library) on a volume of
+a storage or distribution medium does not bring the other work under
+the scope of this License.
+
+ 3. You may opt to apply the terms of the ordinary GNU General Public
+License instead of this License to a given copy of the Library. To do
+this, you must alter all the notices that refer to this License, so
+that they refer to the ordinary GNU General Public License, version 2,
+instead of to this License. (If a newer version than version 2 of the
+ordinary GNU General Public License has appeared, then you can specify
+that version instead if you wish.) Do not make any other change in
+these notices.
+
+ Once this change is made in a given copy, it is irreversible for
+that copy, so the ordinary GNU General Public License applies to all
+subsequent copies and derivative works made from that copy.
+
+ This option is useful when you wish to copy part of the code of
+the Library into a program that is not a library.
+
+ 4. You may copy and distribute the Library (or a portion or
+derivative of it, under Section 2) in object code or executable form
+under the terms of Sections 1 and 2 above provided that you accompany
+it with the complete corresponding machine-readable source code, which
+must be distributed under the terms of Sections 1 and 2 above on a
+medium customarily used for software interchange.
+
+ If distribution of object code is made by offering access to copy
+from a designated place, then offering equivalent access to copy the
+source code from the same place satisfies the requirement to
+distribute the source code, even though third parties are not
+compelled to copy the source along with the object code.
+
+ 5. A program that contains no derivative of any portion of the
+Library, but is designed to work with the Library by being compiled or
+linked with it, is called a "work that uses the Library". Such a
+work, in isolation, is not a derivative work of the Library, and
+therefore falls outside the scope of this License.
+
+ However, linking a "work that uses the Library" with the Library
+creates an executable that is a derivative of the Library (because it
+contains portions of the Library), rather than a "work that uses the
+library". The executable is therefore covered by this License.
+Section 6 states terms for distribution of such executables.
+
+ When a "work that uses the Library" uses material from a header file
+that is part of the Library, the object code for the work may be a
+derivative work of the Library even though the source code is not.
+Whether this is true is especially significant if the work can be
+linked without the Library, or if the work is itself a library. The
+threshold for this to be true is not precisely defined by law.
+
+ If such an object file uses only numerical parameters, data
+structure layouts and accessors, and small macros and small inline
+functions (ten lines or less in length), then the use of the object
+file is unrestricted, regardless of whether it is legally a derivative
+work. (Executables containing this object code plus portions of the
+Library will still fall under Section 6.)
+
+ Otherwise, if the work is a derivative of the Library, you may
+distribute the object code for the work under the terms of Section 6.
+Any executables containing that work also fall under Section 6,
+whether or not they are linked directly with the Library itself.
+
+ 6. As an exception to the Sections above, you may also combine or
+link a "work that uses the Library" with the Library to produce a
+work containing portions of the Library, and distribute that work
+under terms of your choice, provided that the terms permit
+modification of the work for the customer's own use and reverse
+engineering for debugging such modifications.
+
+ You must give prominent notice with each copy of the work that the
+Library is used in it and that the Library and its use are covered by
+this License. You must supply a copy of this License. If the work
+during execution displays copyright notices, you must include the
+copyright notice for the Library among them, as well as a reference
+directing the user to the copy of this License. Also, you must do one
+of these things:
+
+ a) Accompany the work with the complete corresponding
+ machine-readable source code for the Library including whatever
+ changes were used in the work (which must be distributed under
+ Sections 1 and 2 above); and, if the work is an executable linked
+ with the Library, with the complete machine-readable "work that
+ uses the Library", as object code and/or source code, so that the
+ user can modify the Library and then relink to produce a modified
+ executable containing the modified Library. (It is understood
+ that the user who changes the contents of definitions files in the
+ Library will not necessarily be able to recompile the application
+ to use the modified definitions.)
+
+ b) Use a suitable shared library mechanism for linking with the
+ Library. A suitable mechanism is one that (1) uses at run time a
+ copy of the library already present on the user's computer system,
+ rather than copying library functions into the executable, and (2)
+ will operate properly with a modified version of the library, if
+ the user installs one, as long as the modified version is
+ interface-compatible with the version that the work was made with.
+
+ c) Accompany the work with a written offer, valid for at
+ least three years, to give the same user the materials
+ specified in Subsection 6a, above, for a charge no more
+ than the cost of performing this distribution.
+
+ d) If distribution of the work is made by offering access to copy
+ from a designated place, offer equivalent access to copy the above
+ specified materials from the same place.
+
+ e) Verify that the user has already received a copy of these
+ materials or that you have already sent this user a copy.
+
+ For an executable, the required form of the "work that uses the
+Library" must include any data and utility programs needed for
+reproducing the executable from it. However, as a special exception,
+the materials to be distributed need not include anything that is
+normally distributed (in either source or binary form) with the major
+components (compiler, kernel, and so on) of the operating system on
+which the executable runs, unless that component itself accompanies
+the executable.
+
+ It may happen that this requirement contradicts the license
+restrictions of other proprietary libraries that do not normally
+accompany the operating system. Such a contradiction means you cannot
+use both them and the Library together in an executable that you
+distribute.
+
+ 7. You may place library facilities that are a work based on the
+Library side-by-side in a single library together with other library
+facilities not covered by this License, and distribute such a combined
+library, provided that the separate distribution of the work based on
+the Library and of the other library facilities is otherwise
+permitted, and provided that you do these two things:
+
+ a) Accompany the combined library with a copy of the same work
+ based on the Library, uncombined with any other library
+ facilities. This must be distributed under the terms of the
+ Sections above.
+
+ b) Give prominent notice with the combined library of the fact
+ that part of it is a work based on the Library, and explaining
+ where to find the accompanying uncombined form of the same work.
+
+ 8. You may not copy, modify, sublicense, link with, or distribute
+the Library except as expressly provided under this License. Any
+attempt otherwise to copy, modify, sublicense, link with, or
+distribute the Library is void, and will automatically terminate your
+rights under this License. However, parties who have received copies,
+or rights, from you under this License will not have their licenses
+terminated so long as such parties remain in full compliance.
+
+ 9. You are not required to accept this License, since you have not
+signed it. However, nothing else grants you permission to modify or
+distribute the Library or its derivative works. These actions are
+prohibited by law if you do not accept this License. Therefore, by
+modifying or distributing the Library (or any work based on the
+Library), you indicate your acceptance of this License to do so, and
+all its terms and conditions for copying, distributing or modifying
+the Library or works based on it.
+
+ 10. Each time you redistribute the Library (or any work based on the
+Library), the recipient automatically receives a license from the
+original licensor to copy, distribute, link with or modify the Library
+subject to these terms and conditions. You may not impose any further
+restrictions on the recipients' exercise of the rights granted herein.
+You are not responsible for enforcing compliance by third parties with
+this License.
+
+ 11. If, as a consequence of a court judgment or allegation of patent
+infringement or for any other reason (not limited to patent issues),
+conditions are imposed on you (whether by court order, agreement or
+otherwise) that contradict the conditions of this License, they do not
+excuse you from the conditions of this License. If you cannot
+distribute so as to satisfy simultaneously your obligations under this
+License and any other pertinent obligations, then as a consequence you
+may not distribute the Library at all. For example, if a patent
+license would not permit royalty-free redistribution of the Library by
+all those who receive copies directly or indirectly through you, then
+the only way you could satisfy both it and this License would be to
+refrain entirely from distribution of the Library.
+
+If any portion of this section is held invalid or unenforceable under any
+particular circumstance, the balance of the section is intended to apply,
+and the section as a whole is intended to apply in other circumstances.
+
+It is not the purpose of this section to induce you to infringe any
+patents or other property right claims or to contest validity of any
+such claims; this section has the sole purpose of protecting the
+integrity of the free software distribution system which is
+implemented by public license practices. Many people have made
+generous contributions to the wide range of software distributed
+through that system in reliance on consistent application of that
+system; it is up to the author/donor to decide if he or she is willing
+to distribute software through any other system and a licensee cannot
+impose that choice.
+
+This section is intended to make thoroughly clear what is believed to
+be a consequence of the rest of this License.
+
+ 12. If the distribution and/or use of the Library is restricted in
+certain countries either by patents or by copyrighted interfaces, the
+original copyright holder who places the Library under this License may add
+an explicit geographical distribution limitation excluding those countries,
+so that distribution is permitted only in or among countries not thus
+excluded. In such case, this License incorporates the limitation as if
+written in the body of this License.
+
+ 13. The Free Software Foundation may publish revised and/or new
+versions of the Lesser General Public License from time to time.
+Such new versions will be similar in spirit to the present version,
+but may differ in detail to address new problems or concerns.
+
+Each version is given a distinguishing version number. If the Library
+specifies a version number of this License which applies to it and
+"any later version", you have the option of following the terms and
+conditions either of that version or of any later version published by
+the Free Software Foundation. If the Library does not specify a
+license version number, you may choose any version ever published by
+the Free Software Foundation.
+
+ 14. If you wish to incorporate parts of the Library into other free
+programs whose distribution conditions are incompatible with these,
+write to the author to ask for permission. For software which is
+copyrighted by the Free Software Foundation, write to the Free
+Software Foundation; we sometimes make exceptions for this. Our
+decision will be guided by the two goals of preserving the free status
+of all derivatives of our free software and of promoting the sharing
+and reuse of software generally.
+
+ NO WARRANTY
+
+ 15. BECAUSE THE LIBRARY IS LICENSED FREE OF CHARGE, THERE IS NO
+WARRANTY FOR THE LIBRARY, TO THE EXTENT PERMITTED BY APPLICABLE LAW.
+EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT HOLDERS AND/OR
+OTHER PARTIES PROVIDE THE LIBRARY "AS IS" WITHOUT WARRANTY OF ANY
+KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO, THE
+IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
+PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE
+LIBRARY IS WITH YOU. SHOULD THE LIBRARY PROVE DEFECTIVE, YOU ASSUME
+THE COST OF ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
+
+ 16. IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN
+WRITING WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MAY MODIFY
+AND/OR REDISTRIBUTE THE LIBRARY AS PERMITTED ABOVE, BE LIABLE TO YOU
+FOR DAMAGES, INCLUDING ANY GENERAL, SPECIAL, INCIDENTAL OR
+CONSEQUENTIAL DAMAGES ARISING OUT OF THE USE OR INABILITY TO USE THE
+LIBRARY (INCLUDING BUT NOT LIMITED TO LOSS OF DATA OR DATA BEING
+RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD PARTIES OR A
+FAILURE OF THE LIBRARY TO OPERATE WITH ANY OTHER SOFTWARE), EVEN IF
+SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF SUCH
+DAMAGES.
+
+ END OF TERMS AND CONDITIONS
+
+ How to Apply These Terms to Your New Libraries
+
+ If you develop a new library, and you want it to be of the greatest
+possible use to the public, we recommend making it free software that
+everyone can redistribute and change. You can do so by permitting
+redistribution under these terms (or, alternatively, under the terms of the
+ordinary General Public License).
+
+ To apply these terms, attach the following notices to the library. It is
+safest to attach them to the start of each source file to most effectively
+convey the exclusion of warranty; and each file should have at least the
+"copyright" line and a pointer to where the full notice is found.
+
+ <one line to give the library's name and a brief idea of what it does.>
+ Copyright (C) <year> <name of author>
+
+ This library is free software; you can redistribute it and/or
+ modify it under the terms of the GNU Lesser General Public
+ License as published by the Free Software Foundation; either
+ version 2.1 of the License, or (at your option) any later version.
+
+ This library is distributed in the hope that it will be useful,
+ but WITHOUT ANY WARRANTY; without even the implied warranty of
+ MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+ Lesser General Public License for more details.
+
+ You should have received a copy of the GNU Lesser General Public
+ License along with this library; if not, write to the Free Software
+ Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+Also add information on how to contact you by electronic and paper mail.
+
+You should also get your employer (if you work as a programmer) or your
+school, if any, to sign a "copyright disclaimer" for the library, if
+necessary. Here is a sample; alter the names:
+
+ Yoyodyne, Inc., hereby disclaims all copyright interest in the
+ library `Frob' (a library for tweaking knobs) written by James Random Hacker.
+
+ <signature of Ty Coon>, 1 April 1990
+ Ty Coon, President of Vice
+
+That's all there is to it!
+
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/__init__.py b/scripts/external_libs/nose-1.3.4/nose/__init__.py
new file mode 100755
index 00000000..8ab010bf
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/__init__.py
@@ -0,0 +1,15 @@
+from nose.core import collector, main, run, run_exit, runmodule
+# backwards compatibility
+from nose.exc import SkipTest, DeprecatedTest
+from nose.tools import with_setup
+
+__author__ = 'Jason Pellerin'
+__versioninfo__ = (1, 3, 4)
+__version__ = '.'.join(map(str, __versioninfo__))
+
+__all__ = [
+ 'main', 'run', 'run_exit', 'runmodule', 'with_setup',
+ 'SkipTest', 'DeprecatedTest', 'collector'
+ ]
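+
+# Illustrative usage of the re-exported entry points (comment only, not
+# upstream code):
+#   import nose
+#   nose.main()                              # parses sys.argv, runs, exits
+#   ok = nose.run(argv=['nosetests', '-v'])  # returns success as a bool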
+
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/__main__.py b/scripts/external_libs/nose-1.3.4/nose/__main__.py
new file mode 100755
index 00000000..b402d9df
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/__main__.py
@@ -0,0 +1,8 @@
+import sys
+
+from nose.core import run_exit
+
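+# When run as `python -m nose`, sys.argv[0] is the path to this file;
+# rewrite it so that usage output reads "<python> -m nose" instead.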
+if sys.argv[0].endswith('__main__.py'):
+ sys.argv[0] = '%s -m nose' % sys.executable
+
+run_exit()
diff --git a/scripts/external_libs/nose-1.3.4/nose/case.py b/scripts/external_libs/nose-1.3.4/nose/case.py
new file mode 100755
index 00000000..cffa4ab4
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/case.py
@@ -0,0 +1,397 @@
+"""nose unittest.TestCase subclasses. It is not necessary to subclass these
+classes when writing tests; they are used internally by nose.loader.TestLoader
+to create test cases from test functions and methods in test classes.
+"""
+import logging
+import sys
+import unittest
+from inspect import isfunction
+from nose.config import Config
+from nose.failure import Failure # for backwards compatibility
+from nose.util import resolve_name, test_address, try_run
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Test']
+
+
+class Test(unittest.TestCase):
+ """The universal test case wrapper.
+
+ When a plugin sees a test, it will always see an instance of this
+ class. To access the actual test case that will be run, access the
+ test property of the nose.case.Test instance.
+ """
+ __test__ = False # do not collect
+ def __init__(self, test, config=None, resultProxy=None):
+ # sanity check
+ if not callable(test):
+ raise TypeError("nose.case.Test called with argument %r that "
+ "is not callable. A callable is required."
+ % test)
+ self.test = test
+ if config is None:
+ config = Config()
+ self.config = config
+ self.tbinfo = None
+ self.capturedOutput = None
+ self.resultProxy = resultProxy
+ self.plugins = config.plugins
+ self.passed = None
+ unittest.TestCase.__init__(self)
+
+ def __call__(self, *arg, **kwarg):
+ return self.run(*arg, **kwarg)
+
+ def __str__(self):
+ name = self.plugins.testName(self)
+ if name is not None:
+ return name
+ return str(self.test)
+
+ def __repr__(self):
+ return "Test(%r)" % self.test
+
+ def afterTest(self, result):
+ """Called after test is complete (after result.stopTest)
+ """
+ try:
+ afterTest = result.afterTest
+ except AttributeError:
+ pass
+ else:
+ afterTest(self.test)
+
+ def beforeTest(self, result):
+ """Called before test is run (before result.startTest)
+ """
+ try:
+ beforeTest = result.beforeTest
+ except AttributeError:
+ pass
+ else:
+ beforeTest(self.test)
+
+ def exc_info(self):
+ """Extract exception info.
+ """
+ exc, exv, tb = sys.exc_info()
+ return (exc, exv, tb)
+
+ def id(self):
+ """Get a short(er) description of the test
+ """
+ return self.test.id()
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if hasattr(self.test, 'address'):
+ return self.test.address()
+ else:
+ # not a nose case
+ return test_address(self.test)
+
+ def _context(self):
+ try:
+ return self.test.context
+ except AttributeError:
+ pass
+ try:
+ return self.test.__class__
+ except AttributeError:
+ pass
+ try:
+ return resolve_name(self.test.__module__)
+ except AttributeError:
+ pass
+ return None
+ context = property(_context, None, None,
+ """Get the context object of this test (if any).""")
+
+ def run(self, result):
+ """Modified run for the test wrapper.
+
+ From here we don't call result.startTest or stopTest or
+ addSuccess. The wrapper calls addError/addFailure only if its
+ own setup or teardown fails, or running the wrapped test fails
+        (e.g., if the wrapped "test" is not callable).
+
+ Two additional methods are called, beforeTest and
+ afterTest. These give plugins a chance to modify the wrapped
+ test before it is called and do cleanup after it is
+ called. They are called unconditionally.
+ """
+ if self.resultProxy:
+ result = self.resultProxy(result, self)
+ try:
+ try:
+ self.beforeTest(result)
+ self.runTest(result)
+ except KeyboardInterrupt:
+ raise
+ except:
+ err = sys.exc_info()
+ result.addError(self, err)
+ finally:
+ self.afterTest(result)
+
+ def runTest(self, result):
+ """Run the test. Plugins may alter the test by returning a
+ value from prepareTestCase. The value must be callable and
+ must accept one argument, the result instance.
+ """
+ test = self.test
+ plug_test = self.config.plugins.prepareTestCase(self)
+ if plug_test is not None:
+ test = plug_test
+ test(result)
+
+ def shortDescription(self):
+ desc = self.plugins.describeTest(self)
+ if desc is not None:
+ return desc
+ # work around bug in unittest.TestCase.shortDescription
+ # with multiline docstrings.
+ test = self.test
+ try:
+            test._testMethodDoc = test._testMethodDoc.strip()  # 2.5
+ except AttributeError:
+ try:
+ # 2.4 and earlier
+ test._TestCase__testMethodDoc = \
+ test._TestCase__testMethodDoc.strip()
+ except AttributeError:
+ pass
+ # 2.7 compat: shortDescription() always returns something
+ # which is a change from 2.6 and below, and breaks the
+ # testName plugin call.
+ try:
+ desc = self.test.shortDescription()
+ except Exception:
+ # this is probably caused by a problem in test.__str__() and is
+ # only triggered by python 3.1's unittest!
+ pass
+ try:
+ if desc == str(self.test):
+ return
+ except Exception:
+ # If str() triggers an exception then ignore it.
+ # see issue 422
+ pass
+ return desc
+
+
+class TestBase(unittest.TestCase):
+ """Common functionality for FunctionTestCase and MethodTestCase.
+ """
+ __test__ = False # do not collect
+
+ def id(self):
+ return str(self)
+
+ def runTest(self):
+ self.test(*self.arg)
+
+ def shortDescription(self):
+ if hasattr(self.test, 'description'):
+ return self.test.description
+ func, arg = self._descriptors()
+ doc = getattr(func, '__doc__', None)
+ if not doc:
+ doc = str(self)
+ return doc.strip().split("\n")[0].strip()
+
+
+class FunctionTestCase(TestBase):
+ """TestCase wrapper for test functions.
+
+ Don't use this class directly; it is used internally in nose to
+ create test cases for test functions.
+ """
+ __test__ = False # do not collect
+
+ def __init__(self, test, setUp=None, tearDown=None, arg=tuple(),
+ descriptor=None):
+ """Initialize the MethodTestCase.
+
+ Required argument:
+
+ * test -- the test function to call.
+
+ Optional arguments:
+
+ * setUp -- function to run at setup.
+
+ * tearDown -- function to run at teardown.
+
+ * arg -- arguments to pass to the test function. This is to support
+ generator functions that yield arguments.
+
+ * descriptor -- the function, other than the test, that should be used
+ to construct the test name. This is to support generator functions.
+ """
+
+ self.test = test
+ self.setUpFunc = setUp
+ self.tearDownFunc = tearDown
+ self.arg = arg
+ self.descriptor = descriptor
+ TestBase.__init__(self)
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if self.descriptor is not None:
+ return test_address(self.descriptor)
+ else:
+ return test_address(self.test)
+
+ def _context(self):
+ return resolve_name(self.test.__module__)
+ context = property(_context, None, None,
+ """Get context (module) of this test""")
+
+ def setUp(self):
+ """Run any setup function attached to the test function
+ """
+ if self.setUpFunc:
+ self.setUpFunc()
+ else:
+ names = ('setup', 'setUp', 'setUpFunc')
+ try_run(self.test, names)
+
+ def tearDown(self):
+ """Run any teardown function attached to the test function
+ """
+ if self.tearDownFunc:
+ self.tearDownFunc()
+ else:
+ names = ('teardown', 'tearDown', 'tearDownFunc')
+ try_run(self.test, names)
+
+ def __str__(self):
+ func, arg = self._descriptors()
+ if hasattr(func, 'compat_func_name'):
+ name = func.compat_func_name
+ else:
+ name = func.__name__
+ name = "%s.%s" % (func.__module__, name)
+ if arg:
+ name = "%s%s" % (name, arg)
+ # FIXME need to include the full dir path to disambiguate
+ # in cases where test module of the same name was seen in
+ # another directory (old fromDirectory)
+ return name
+ __repr__ = __str__
+
+ def _descriptors(self):
+ """Get the descriptors of the test function: the function and
+ arguments that will be used to construct the test name. In
+ most cases, this is the function itself and no arguments. For
+ tests generated by generator functions, the original
+ (generator) function and args passed to the generated function
+ are returned.
+ """
+ if self.descriptor:
+ return self.descriptor, self.arg
+ else:
+ return self.test, self.arg
+
+
+class MethodTestCase(TestBase):
+ """Test case wrapper for test methods.
+
+ Don't use this class directly; it is used internally in nose to
+ create test cases for test methods.
+ """
+ __test__ = False # do not collect
+
+ def __init__(self, method, test=None, arg=tuple(), descriptor=None):
+ """Initialize the MethodTestCase.
+
+ Required argument:
+
+ * method -- the method to call, may be bound or unbound. In either
+ case, a new instance of the method's class will be instantiated to
+ make the call. Note: In Python 3.x, if using an unbound method, you
+ must wrap it using pyversion.unbound_method.
+
+ Optional arguments:
+
+ * test -- the test function to call. If this is passed, it will be
+ called instead of getting a new bound method of the same name as the
+ desired method from the test instance. This is to support generator
+ methods that yield inline functions.
+
+ * arg -- arguments to pass to the test function. This is to support
+ generator methods that yield arguments.
+
+ * descriptor -- the function, other than the test, that should be used
+ to construct the test name. This is to support generator methods.
+ """
+ self.method = method
+ self.test = test
+ self.arg = arg
+ self.descriptor = descriptor
+ if isfunction(method):
+ raise ValueError("Unbound methods must be wrapped using pyversion.unbound_method before passing to MethodTestCase")
+ self.cls = method.im_class
+ self.inst = self.cls()
+ if self.test is None:
+ method_name = self.method.__name__
+ self.test = getattr(self.inst, method_name)
+ TestBase.__init__(self)
+
+ def __str__(self):
+ func, arg = self._descriptors()
+ if hasattr(func, 'compat_func_name'):
+ name = func.compat_func_name
+ else:
+ name = func.__name__
+ name = "%s.%s.%s" % (self.cls.__module__,
+ self.cls.__name__,
+ name)
+ if arg:
+ name = "%s%s" % (name, arg)
+ return name
+ __repr__ = __str__
+
+ def address(self):
+ """Return a round-trip name for this test, a name that can be
+ fed back as input to loadTestByName and (assuming the same
+ plugin configuration) result in the loading of this test.
+ """
+ if self.descriptor is not None:
+ return test_address(self.descriptor)
+ else:
+ return test_address(self.method)
+
+ def _context(self):
+ return self.cls
+ context = property(_context, None, None,
+ """Get context (class) of this test""")
+
+ def setUp(self):
+ try_run(self.inst, ('setup', 'setUp'))
+
+ def tearDown(self):
+ try_run(self.inst, ('teardown', 'tearDown'))
+
+ def _descriptors(self):
+ """Get the descriptors of the test method: the method and
+ arguments that will be used to construct the test name. In
+ most cases, this is the method itself and no arguments. For
+ tests generated by generator methods, the original
+ (generator) method and args passed to the generated method
+ or function are returned.
+ """
+ if self.descriptor:
+ return self.descriptor, self.arg
+ else:
+ return self.method, self.arg
diff --git a/scripts/external_libs/nose-1.3.4/nose/commands.py b/scripts/external_libs/nose-1.3.4/nose/commands.py
new file mode 100755
index 00000000..ef0e9cae
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/commands.py
@@ -0,0 +1,172 @@
+"""
+nosetests setuptools command
+----------------------------
+
+The easiest way to run tests with nose is to use the `nosetests` setuptools
+command::
+
+ python setup.py nosetests
+
+This command has one *major* benefit over the standard `test` command: *all
+nose plugins are supported*.
+
+To configure the `nosetests` command, add a [nosetests] section to your
+setup.cfg. The [nosetests] section can contain any command line arguments that
+nosetests supports. The differences between issuing an option on the command
+line and adding it to setup.cfg are:
+
+* In setup.cfg, the -- prefix must be excluded
+* In setup.cfg, command line flags that take no arguments must be given an
+ argument flag (1, T or TRUE for active, 0, F or FALSE for inactive)
+
+Here's an example [nosetests] setup.cfg section::
+
+ [nosetests]
+ verbosity=1
+ detailed-errors=1
+ with-coverage=1
+ cover-package=nose
+ debug=nose.loader
+ pdb=1
+ pdb-failures=1
+
+If you commonly run nosetests with a large number of options, using
+the nosetests setuptools command and configuring with setup.cfg can
+make running your tests much less tedious. (Note that the same options
+and format supported in setup.cfg are supported in all other config
+files, and the nosetests script will also load config files.)
+
+Another reason to run tests with the command is that the command will
+install packages listed in your `tests_require`, as well as doing a
+complete build of your package before running tests. For packages with
+dependencies or that build C extensions, using the setuptools command
+can be more convenient than building by hand and running the nosetests
+script.
+
+Bootstrapping
+-------------
+
+If you are distributing your project and want users to be able to run tests
+without having to install nose themselves, add nose to the setup_requires
+section of your setup()::
+
+ setup(
+ # ...
+ setup_requires=['nose>=1.0']
+ )
+
+This will direct setuptools to download and activate nose during the setup
+process, making the ``nosetests`` command available.
+
+"""
+try:
+ from setuptools import Command
+except ImportError:
+ Command = nosetests = None
+else:
+ from nose.config import Config, option_blacklist, user_config_files, \
+ flag, _bool
+ from nose.core import TestProgram
+ from nose.plugins import DefaultPluginManager
+
+
+ def get_user_options(parser):
+ """convert a optparse option list into a distutils option tuple list"""
+ opt_list = []
+ for opt in parser.option_list:
+ if opt._long_opts[0][2:] in option_blacklist:
+ continue
+ long_name = opt._long_opts[0][2:]
+ if opt.action not in ('store_true', 'store_false'):
+ long_name = long_name + "="
+ short_name = None
+ if opt._short_opts:
+ short_name = opt._short_opts[0][1:]
+ opt_list.append((long_name, short_name, opt.help or ""))
+ return opt_list
+
+
+ class nosetests(Command):
+ description = "Run unit tests using nosetests"
+ __config = Config(files=user_config_files(),
+ plugins=DefaultPluginManager())
+ __parser = __config.getParser()
+ user_options = get_user_options(__parser)
+
+ def initialize_options(self):
+ """create the member variables, but change hyphens to
+ underscores
+ """
+
+ self.option_to_cmds = {}
+ for opt in self.__parser.option_list:
+ cmd_name = opt._long_opts[0][2:]
+ option_name = cmd_name.replace('-', '_')
+ self.option_to_cmds[option_name] = cmd_name
+ setattr(self, option_name, None)
+ self.attr = None
+
+ def finalize_options(self):
+ """nothing to do here"""
+ pass
+
+ def run(self):
+ """ensure tests are capable of being run, then
+ run nose.main with a reconstructed argument list"""
+ if getattr(self.distribution, 'use_2to3', False):
+ # If we run 2to3 we can not do this inplace:
+
+ # Ensure metadata is up-to-date
+ build_py = self.get_finalized_command('build_py')
+ build_py.inplace = 0
+ build_py.run()
+ bpy_cmd = self.get_finalized_command("build_py")
+ build_path = bpy_cmd.build_lib
+
+ # Build extensions
+ egg_info = self.get_finalized_command('egg_info')
+ egg_info.egg_base = build_path
+ egg_info.run()
+
+ build_ext = self.get_finalized_command('build_ext')
+ build_ext.inplace = 0
+ build_ext.run()
+ else:
+ self.run_command('egg_info')
+
+ # Build extensions in-place
+ build_ext = self.get_finalized_command('build_ext')
+ build_ext.inplace = 1
+ build_ext.run()
+
+ if self.distribution.install_requires:
+ self.distribution.fetch_build_eggs(
+ self.distribution.install_requires)
+ if self.distribution.tests_require:
+ self.distribution.fetch_build_eggs(
+ self.distribution.tests_require)
+
+ ei_cmd = self.get_finalized_command("egg_info")
+ argv = ['nosetests', '--where', ei_cmd.egg_base]
+ for (option_name, cmd_name) in self.option_to_cmds.items():
+ if option_name in option_blacklist:
+ continue
+ value = getattr(self, option_name)
+ if value is not None:
+ argv.extend(
+ self.cfgToArg(option_name.replace('_', '-'), value))
+ TestProgram(argv=argv, config=self.__config)
+
+ def cfgToArg(self, optname, value):
+ argv = []
+ long_optname = '--' + optname
+ opt = self.__parser.get_option(long_optname)
+ if opt.action in ('store_true', 'store_false'):
+ if not flag(value):
+ raise ValueError("Invalid value '%s' for '%s'" % (
+ value, optname))
+ if _bool(value):
+ argv.append(long_optname)
+ else:
+ argv.extend([long_optname, value])
+ return argv
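+
+# Illustrative behavior of cfgToArg above (comment only):
+#   cfgToArg('verbosity', '2')        -> ['--verbosity', '2']
+#   cfgToArg('detailed-errors', '1')  -> ['--detailed-errors']  (flag option)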
diff --git a/scripts/external_libs/nose-1.3.4/nose/config.py b/scripts/external_libs/nose-1.3.4/nose/config.py
new file mode 100755
index 00000000..4214c2d6
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/config.py
@@ -0,0 +1,661 @@
+import logging
+import optparse
+import os
+import re
+import sys
+import ConfigParser
+from optparse import OptionParser
+from nose.util import absdir, tolist
+from nose.plugins.manager import NoPlugins
+from warnings import warn, filterwarnings
+
+log = logging.getLogger(__name__)
+
+# not allowed in config files
+option_blacklist = ['help', 'verbose']
+
+config_files = [
+ # Linux users will prefer this
+ "~/.noserc",
+ # Windows users will prefer this
+ "~/nose.cfg"
+ ]
+
+# platforms on which the exe check defaults to off
+# Windows and IronPython
+exe_allowed_platforms = ('win32', 'cli')
+
+filterwarnings("always", category=DeprecationWarning,
+ module=r'(.*\.)?nose\.config')
+
+class NoSuchOptionError(Exception):
+ def __init__(self, name):
+ Exception.__init__(self, name)
+ self.name = name
+
+
+class ConfigError(Exception):
+ pass
+
+
+class ConfiguredDefaultsOptionParser(object):
+ """
+    Handler for options from the command line and config files.
+ """
+ def __init__(self, parser, config_section, error=None, file_error=None):
+ self._parser = parser
+ self._config_section = config_section
+ if error is None:
+ error = self._parser.error
+ self._error = error
+ if file_error is None:
+ file_error = lambda msg, **kw: error(msg)
+ self._file_error = file_error
+
+ def _configTuples(self, cfg, filename):
+ config = []
+ if self._config_section in cfg.sections():
+ for name, value in cfg.items(self._config_section):
+ config.append((name, value, filename))
+ return config
+
+ def _readFromFilenames(self, filenames):
+ config = []
+ for filename in filenames:
+ cfg = ConfigParser.RawConfigParser()
+ try:
+ cfg.read(filename)
+ except ConfigParser.Error, exc:
+ raise ConfigError("Error reading config file %r: %s" %
+ (filename, str(exc)))
+ config.extend(self._configTuples(cfg, filename))
+ return config
+
+ def _readFromFileObject(self, fh):
+ cfg = ConfigParser.RawConfigParser()
+ try:
+ filename = fh.name
+ except AttributeError:
+ filename = '<???>'
+ try:
+ cfg.readfp(fh)
+ except ConfigParser.Error, exc:
+ raise ConfigError("Error reading config file %r: %s" %
+ (filename, str(exc)))
+ return self._configTuples(cfg, filename)
+
+ def _readConfiguration(self, config_files):
+ try:
+ config_files.readline
+ except AttributeError:
+ filename_or_filenames = config_files
+ if isinstance(filename_or_filenames, basestring):
+ filenames = [filename_or_filenames]
+ else:
+ filenames = filename_or_filenames
+ config = self._readFromFilenames(filenames)
+ else:
+ fh = config_files
+ config = self._readFromFileObject(fh)
+ return config
+
+ def _processConfigValue(self, name, value, values, parser):
+ opt_str = '--' + name
+ option = parser.get_option(opt_str)
+ if option is None:
+ raise NoSuchOptionError(name)
+ else:
+ option.process(opt_str, value, values, parser)
+
+ def _applyConfigurationToValues(self, parser, config, values):
+ for name, value, filename in config:
+ if name in option_blacklist:
+ continue
+ try:
+ self._processConfigValue(name, value, values, parser)
+ except NoSuchOptionError, exc:
+ self._file_error(
+ "Error reading config file %r: "
+ "no such option %r" % (filename, exc.name),
+ name=name, filename=filename)
+ except optparse.OptionValueError, exc:
+ msg = str(exc).replace('--' + name, repr(name), 1)
+ self._file_error("Error reading config file %r: "
+ "%s" % (filename, msg),
+ name=name, filename=filename)
+
+ def parseArgsAndConfigFiles(self, args, config_files):
+ values = self._parser.get_default_values()
+ try:
+ config = self._readConfiguration(config_files)
+ except ConfigError, exc:
+ self._error(str(exc))
+ else:
+ try:
+ self._applyConfigurationToValues(self._parser, config, values)
+ except ConfigError, exc:
+ self._error(str(exc))
+ return self._parser.parse_args(args, values)
+
+
+class Config(object):
+ """nose configuration.
+
+ Instances of Config are used throughout nose to configure
+ behavior, including plugin lists. Here are the default values for
+ all config keys::
+
+ self.env = env = kw.pop('env', {})
+ self.args = ()
+ self.testMatch = re.compile(r'(?:^|[\\b_\\.%s-])[Tt]est' % os.sep)
+ self.addPaths = not env.get('NOSE_NOPATH', False)
+ self.configSection = 'nosetests'
+ self.debug = env.get('NOSE_DEBUG')
+ self.debugLog = env.get('NOSE_DEBUG_LOG')
+ self.exclude = None
+ self.getTestCaseNamesCompat = False
+ self.includeExe = env.get('NOSE_INCLUDE_EXE',
+ sys.platform in exe_allowed_platforms)
+ self.ignoreFiles = (re.compile(r'^\.'),
+ re.compile(r'^_'),
+ re.compile(r'^setup\.py$')
+ )
+ self.include = None
+ self.loggingConfig = None
+ self.logStream = sys.stderr
+ self.options = NoOptions()
+ self.parser = None
+ self.plugins = NoPlugins()
+ self.srcDirs = ('lib', 'src')
+ self.runOnInit = True
+ self.stopOnError = env.get('NOSE_STOP', False)
+ self.stream = sys.stderr
+ self.testNames = ()
+ self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+ self.where = ()
+ self.py3where = ()
+ self.workingDir = None
+ """
+
+ def __init__(self, **kw):
+ self.env = env = kw.pop('env', {})
+ self.args = ()
+ self.testMatchPat = env.get('NOSE_TESTMATCH',
+ r'(?:^|[\b_\.%s-])[Tt]est' % os.sep)
+ self.testMatch = re.compile(self.testMatchPat)
+ self.addPaths = not env.get('NOSE_NOPATH', False)
+ self.configSection = 'nosetests'
+ self.debug = env.get('NOSE_DEBUG')
+ self.debugLog = env.get('NOSE_DEBUG_LOG')
+ self.exclude = None
+ self.getTestCaseNamesCompat = False
+ self.includeExe = env.get('NOSE_INCLUDE_EXE',
+ sys.platform in exe_allowed_platforms)
+ self.ignoreFilesDefaultStrings = [r'^\.',
+ r'^_',
+ r'^setup\.py$',
+ ]
+ self.ignoreFiles = map(re.compile, self.ignoreFilesDefaultStrings)
+ self.include = None
+ self.loggingConfig = None
+ self.logStream = sys.stderr
+ self.options = NoOptions()
+ self.parser = None
+ self.plugins = NoPlugins()
+ self.srcDirs = ('lib', 'src')
+ self.runOnInit = True
+ self.stopOnError = env.get('NOSE_STOP', False)
+ self.stream = sys.stderr
+ self.testNames = []
+ self.verbosity = int(env.get('NOSE_VERBOSE', 1))
+ self.where = ()
+ self.py3where = ()
+ self.workingDir = os.getcwd()
+ self.traverseNamespace = False
+ self.firstPackageWins = False
+ self.parserClass = OptionParser
+ self.worker = False
+
+ self._default = self.__dict__.copy()
+ self.update(kw)
+ self._orig = self.__dict__.copy()
+
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ del state['stream']
+ del state['_orig']
+ del state['_default']
+ del state['env']
+ del state['logStream']
+ # FIXME remove plugins, have only plugin manager class
+ state['plugins'] = self.plugins.__class__
+ return state
+
+ def __setstate__(self, state):
+ plugincls = state.pop('plugins')
+ self.update(state)
+ self.worker = True
+ # FIXME won't work for static plugin lists
+ self.plugins = plugincls()
+ self.plugins.loadPlugins()
+ # needed so .can_configure gets set appropriately
+ dummy_parser = self.parserClass()
+ self.plugins.addOptions(dummy_parser, {})
+ self.plugins.configure(self.options, self)
+
+ def __repr__(self):
+ d = self.__dict__.copy()
+ # don't expose env, could include sensitive info
+ d['env'] = {}
+ keys = [ k for k in d.keys()
+ if not k.startswith('_') ]
+ keys.sort()
+ return "Config(%s)" % ', '.join([ '%s=%r' % (k, d[k])
+ for k in keys ])
+ __str__ = __repr__
+
+ def _parseArgs(self, argv, cfg_files):
+ def warn_sometimes(msg, name=None, filename=None):
+ if (hasattr(self.plugins, 'excludedOption') and
+ self.plugins.excludedOption(name)):
+ msg = ("Option %r in config file %r ignored: "
+ "excluded by runtime environment" %
+ (name, filename))
+ warn(msg, RuntimeWarning)
+ else:
+ raise ConfigError(msg)
+ parser = ConfiguredDefaultsOptionParser(
+ self.getParser(), self.configSection, file_error=warn_sometimes)
+ return parser.parseArgsAndConfigFiles(argv[1:], cfg_files)
+
+ def configure(self, argv=None, doc=None):
+ """Configure the nose running environment. Execute configure before
+ collecting tests with nose.TestCollector to enable output capture and
+ other features.
+ """
+ env = self.env
+ if argv is None:
+ argv = sys.argv
+
+ cfg_files = getattr(self, 'files', [])
+ options, args = self._parseArgs(argv, cfg_files)
+ # If -c --config has been specified on command line,
+ # load those config files and reparse
+ if getattr(options, 'files', []):
+ options, args = self._parseArgs(argv, options.files)
+
+ self.options = options
+ if args:
+ self.testNames = args
+ if options.testNames is not None:
+ self.testNames.extend(tolist(options.testNames))
+
+ if options.py3where is not None:
+ if sys.version_info >= (3,):
+ options.where = options.py3where
+
+ # `where` is an append action, so it can't have a default value
+ # in the parser, or that default will always be in the list
+ if not options.where:
+ options.where = env.get('NOSE_WHERE', None)
+
+ # include and exclude also
+ if not options.ignoreFiles:
+ options.ignoreFiles = env.get('NOSE_IGNORE_FILES', [])
+ if not options.include:
+ options.include = env.get('NOSE_INCLUDE', [])
+ if not options.exclude:
+ options.exclude = env.get('NOSE_EXCLUDE', [])
+
+ self.addPaths = options.addPaths
+ self.stopOnError = options.stopOnError
+ self.verbosity = options.verbosity
+ self.includeExe = options.includeExe
+ self.traverseNamespace = options.traverseNamespace
+ self.debug = options.debug
+ self.debugLog = options.debugLog
+ self.loggingConfig = options.loggingConfig
+ self.firstPackageWins = options.firstPackageWins
+ self.configureLogging()
+
+ if not options.byteCompile:
+ sys.dont_write_bytecode = True
+
+ if options.where is not None:
+ self.configureWhere(options.where)
+
+ if options.testMatch:
+ self.testMatch = re.compile(options.testMatch)
+
+ if options.ignoreFiles:
+ self.ignoreFiles = map(re.compile, tolist(options.ignoreFiles))
+ log.info("Ignoring files matching %s", options.ignoreFiles)
+ else:
+ log.info("Ignoring files matching %s", self.ignoreFilesDefaultStrings)
+
+ if options.include:
+ self.include = map(re.compile, tolist(options.include))
+ log.info("Including tests matching %s", options.include)
+
+ if options.exclude:
+ self.exclude = map(re.compile, tolist(options.exclude))
+ log.info("Excluding tests matching %s", options.exclude)
+
+ # When listing plugins we don't want to run them
+ if not options.showPlugins:
+ self.plugins.configure(options, self)
+ self.plugins.begin()
+
+ def configureLogging(self):
+ """Configure logging for nose, or optionally other packages. Any logger
+ name may be set with the debug option, and that logger will be set to
+ debug level and be assigned the same handler as the nose loggers, unless
+ it already has a handler.
+ """
+ if self.loggingConfig:
+ from logging.config import fileConfig
+ fileConfig(self.loggingConfig)
+ return
+
+ format = logging.Formatter('%(name)s: %(levelname)s: %(message)s')
+ if self.debugLog:
+ handler = logging.FileHandler(self.debugLog)
+ else:
+ handler = logging.StreamHandler(self.logStream)
+ handler.setFormatter(format)
+
+ logger = logging.getLogger('nose')
+ logger.propagate = 0
+
+ # only add our default handler if there isn't already one there
+ # this avoids annoying duplicate log messages.
+ found = False
+ if self.debugLog:
+ debugLogAbsPath = os.path.abspath(self.debugLog)
+ for h in logger.handlers:
+ if type(h) == logging.FileHandler and \
+ h.baseFilename == debugLogAbsPath:
+ found = True
+ else:
+ for h in logger.handlers:
+ if type(h) == logging.StreamHandler and \
+ h.stream == self.logStream:
+ found = True
+ if not found:
+ logger.addHandler(handler)
+
+ # default level
+ lvl = logging.WARNING
+ if self.verbosity >= 5:
+ lvl = 0
+ elif self.verbosity >= 4:
+ lvl = logging.DEBUG
+ elif self.verbosity >= 3:
+ lvl = logging.INFO
+ logger.setLevel(lvl)
+
+ # individual overrides
+ if self.debug:
+ # no blanks
+ debug_loggers = [ name for name in self.debug.split(',')
+ if name ]
+ for logger_name in debug_loggers:
+ l = logging.getLogger(logger_name)
+ l.setLevel(logging.DEBUG)
+ if not l.handlers and not logger_name.startswith('nose'):
+ l.addHandler(handler)
+
+ def configureWhere(self, where):
+ """Configure the working directory or directories for the test run.
+ """
+ from nose.importer import add_path
+ self.workingDir = None
+ where = tolist(where)
+ warned = False
+ for path in where:
+ if not self.workingDir:
+ abs_path = absdir(path)
+ if abs_path is None:
+ raise ValueError("Working directory %s not found, or "
+ "not a directory" % path)
+ log.info("Set working dir to %s", abs_path)
+ self.workingDir = abs_path
+ if self.addPaths and \
+ os.path.exists(os.path.join(abs_path, '__init__.py')):
+ log.info("Working directory %s is a package; "
+ "adding to sys.path" % abs_path)
+ add_path(abs_path)
+ continue
+ if not warned:
+ warn("Use of multiple -w arguments is deprecated and "
+ "support may be removed in a future release. You can "
+ "get the same behavior by passing directories without "
+ "the -w argument on the command line, or by using the "
+ "--tests argument in a configuration file.",
+ DeprecationWarning)
+ warned = True
+ self.testNames.append(path)
+
+ def default(self):
+ """Reset all config values to defaults.
+ """
+ self.__dict__.update(self._default)
+
+ def getParser(self, doc=None):
+ """Get the command line option parser.
+ """
+ if self.parser:
+ return self.parser
+ env = self.env
+ parser = self.parserClass(doc)
+ parser.add_option(
+ "-V","--version", action="store_true",
+ dest="version", default=False,
+ help="Output nose version and exit")
+ parser.add_option(
+ "-p", "--plugins", action="store_true",
+ dest="showPlugins", default=False,
+ help="Output list of available plugins and exit. Combine with "
+ "higher verbosity for greater detail")
+ parser.add_option(
+ "-v", "--verbose",
+ action="count", dest="verbosity",
+ default=self.verbosity,
+ help="Be more verbose. [NOSE_VERBOSE]")
+ parser.add_option(
+ "--verbosity", action="store", dest="verbosity",
+ metavar='VERBOSITY',
+ type="int", help="Set verbosity; --verbosity=2 is "
+ "the same as -v")
+ parser.add_option(
+ "-q", "--quiet", action="store_const", const=0, dest="verbosity",
+ help="Be less verbose")
+ parser.add_option(
+ "-c", "--config", action="append", dest="files",
+ metavar="FILES",
+ help="Load configuration from config file(s). May be specified "
+ "multiple times; in that case, all config files will be "
+ "loaded and combined")
+ parser.add_option(
+ "-w", "--where", action="append", dest="where",
+ metavar="WHERE",
+ help="Look for tests in this directory. "
+ "May be specified multiple times. The first directory passed "
+ "will be used as the working directory, in place of the current "
+ "working directory, which is the default. Others will be added "
+ "to the list of tests to execute. [NOSE_WHERE]"
+ )
+ parser.add_option(
+ "--py3where", action="append", dest="py3where",
+ metavar="PY3WHERE",
+ help="Look for tests in this directory under Python 3.x. "
+ "Functions the same as 'where', but only applies if running under "
+ "Python 3.x or above. Note that, if present under 3.x, this "
+ "option completely replaces any directories specified with "
+ "'where', so the 'where' option becomes ineffective. "
+ "[NOSE_PY3WHERE]"
+ )
+ parser.add_option(
+ "-m", "--match", "--testmatch", action="store",
+ dest="testMatch", metavar="REGEX",
+ help="Files, directories, function names, and class names "
+ "that match this regular expression are considered tests. "
+ "Default: %s [NOSE_TESTMATCH]" % self.testMatchPat,
+ default=self.testMatchPat)
+ parser.add_option(
+ "--tests", action="store", dest="testNames", default=None,
+ metavar='NAMES',
+ help="Run these tests (comma-separated list). This argument is "
+ "useful mainly from configuration files; on the command line, "
+ "just pass the tests to run as additional arguments with no "
+ "switch.")
+ parser.add_option(
+ "-l", "--debug", action="store",
+ dest="debug", default=self.debug,
+ help="Activate debug logging for one or more systems. "
+ "Available debug loggers: nose, nose.importer, "
+ "nose.inspector, nose.plugins, nose.result and "
+ "nose.selector. Separate multiple names with a comma.")
+ parser.add_option(
+ "--debug-log", dest="debugLog", action="store",
+ default=self.debugLog, metavar="FILE",
+ help="Log debug messages to this file "
+ "(default: sys.stderr)")
+ parser.add_option(
+ "--logging-config", "--log-config",
+ dest="loggingConfig", action="store",
+ default=self.loggingConfig, metavar="FILE",
+ help="Load logging config from this file -- bypasses all other"
+ " logging config settings.")
+ parser.add_option(
+ "-I", "--ignore-files", action="append", dest="ignoreFiles",
+ metavar="REGEX",
+ help="Completely ignore any file that matches this regular "
+ "expression. Takes precedence over any other settings or "
+ "plugins. "
+ "Specifying this option will replace the default setting. "
+ "Specify this option multiple times "
+ "to add more regular expressions [NOSE_IGNORE_FILES]")
+ parser.add_option(
+ "-e", "--exclude", action="append", dest="exclude",
+ metavar="REGEX",
+ help="Don't run tests that match regular "
+ "expression [NOSE_EXCLUDE]")
+ parser.add_option(
+ "-i", "--include", action="append", dest="include",
+ metavar="REGEX",
+ help="This regular expression will be applied to files, "
+ "directories, function names, and class names for a chance "
+ "to include additional tests that do not match TESTMATCH. "
+ "Specify this option multiple times "
+ "to add more regular expressions [NOSE_INCLUDE]")
+ parser.add_option(
+ "-x", "--stop", action="store_true", dest="stopOnError",
+ default=self.stopOnError,
+ help="Stop running tests after the first error or failure")
+ parser.add_option(
+ "-P", "--no-path-adjustment", action="store_false",
+ dest="addPaths",
+ default=self.addPaths,
+ help="Don't make any changes to sys.path when "
+ "loading tests [NOSE_NOPATH]")
+ parser.add_option(
+ "--exe", action="store_true", dest="includeExe",
+ default=self.includeExe,
+ help="Look for tests in python modules that are "
+ "executable. Normal behavior is to exclude executable "
+ "modules, since they may not be import-safe "
+ "[NOSE_INCLUDE_EXE]")
+ parser.add_option(
+ "--noexe", action="store_false", dest="includeExe",
+ help="DO NOT look for tests in python modules that are "
+ "executable. (The default on the windows platform is to "
+ "do so.)")
+ parser.add_option(
+ "--traverse-namespace", action="store_true",
+ default=self.traverseNamespace, dest="traverseNamespace",
+ help="Traverse through all path entries of a namespace package")
+ parser.add_option(
+ "--first-package-wins", "--first-pkg-wins", "--1st-pkg-wins",
+ action="store_true", default=False, dest="firstPackageWins",
+ help="nose's importer will normally evict a package from sys."
+ "modules if it sees a package with the same name in a different "
+ "location. Set this option to disable that behavior.")
+ parser.add_option(
+ "--no-byte-compile",
+ action="store_false", default=True, dest="byteCompile",
+ help="Prevent nose from byte-compiling the source into .pyc files "
+ "while nose is scanning for and running tests.")
+
+ self.plugins.loadPlugins()
+ self.pluginOpts(parser)
+
+ self.parser = parser
+ return parser
+
+ def help(self, doc=None):
+ """Return the generated help message
+ """
+ return self.getParser(doc).format_help()
+
+ def pluginOpts(self, parser):
+ self.plugins.addOptions(parser, self.env)
+
+ def reset(self):
+ self.__dict__.update(self._orig)
+
+ def todict(self):
+ return self.__dict__.copy()
+
+ def update(self, d):
+ self.__dict__.update(d)
+
+
+class NoOptions(object):
+ """Options container that returns None for all options.
+ """
+ def __getstate__(self):
+ return {}
+
+ def __setstate__(self, state):
+ pass
+
+ def __getnewargs__(self):
+ return ()
+
+ def __nonzero__(self):
+ return False
+
+
+def user_config_files():
+ """Return path to any existing user config files
+ """
+ return filter(os.path.exists,
+ map(os.path.expanduser, config_files))
+
+
+def all_config_files():
+ """Return path to any existing user config files, plus any setup.cfg
+ in the current working directory.
+ """
+ user = user_config_files()
+ if os.path.exists('setup.cfg'):
+ return user + ['setup.cfg']
+ return user
+
+
+# used when parsing config files
+def flag(val):
+ """Does the value look like an on/off flag?"""
+ if val == 1:
+ return True
+ elif val == 0:
+ return False
+ val = str(val)
+ if len(val) > 5:
+ return False
+ return val.upper() in ('1', '0', 'F', 'T', 'TRUE', 'FALSE', 'ON', 'OFF')
+
+
+def _bool(val):
+ return str(val).upper() in ('1', 'T', 'TRUE', 'ON')
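+
+# Illustrative values (comment only): flag('T') and flag('off') are True
+# (they look like on/off flags), flag('yes') is False; _bool('ON') is
+# True and _bool('F') is False.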
diff --git a/scripts/external_libs/nose-1.3.4/nose/core.py b/scripts/external_libs/nose-1.3.4/nose/core.py
new file mode 100755
index 00000000..49e7939b
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/core.py
@@ -0,0 +1,341 @@
+"""Implements nose test program and collector.
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import time
+import unittest
+
+from nose.config import Config, all_config_files
+from nose.loader import defaultTestLoader
+from nose.plugins.manager import PluginManager, DefaultPluginManager, \
+ RestrictedPluginManager
+from nose.result import TextTestResult
+from nose.suite import FinalizingSuiteWrapper
+from nose.util import isclass, tolist
+
+
+log = logging.getLogger('nose.core')
+compat_24 = sys.version_info >= (2, 4)
+
+__all__ = ['TestProgram', 'main', 'run', 'run_exit', 'runmodule', 'collector',
+ 'TextTestRunner']
+
+
+class TextTestRunner(unittest.TextTestRunner):
+ """Test runner that uses nose's TextTestResult to enable errorClasses,
+ as well as providing hooks for plugins to override or replace the test
+ output stream, results, and the test case itself.
+ """
+ def __init__(self, stream=sys.stderr, descriptions=1, verbosity=1,
+ config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+ unittest.TextTestRunner.__init__(self, stream, descriptions, verbosity)
+
+
+ def _makeResult(self):
+ return TextTestResult(self.stream,
+ self.descriptions,
+ self.verbosity,
+ self.config)
+
+ def run(self, test):
+ """Overrides to provide plugin hooks and defer all output to
+ the test result class.
+ """
+ wrapper = self.config.plugins.prepareTest(test)
+ if wrapper is not None:
+ test = wrapper
+
+ # plugins can decorate or capture the output stream
+ wrapped = self.config.plugins.setOutputStream(self.stream)
+ if wrapped is not None:
+ self.stream = wrapped
+
+ result = self._makeResult()
+ start = time.time()
+ try:
+ test(result)
+ except KeyboardInterrupt:
+ pass
+ stop = time.time()
+ result.printErrors()
+ result.printSummary(start, stop)
+ self.config.plugins.finalize(result)
+ return result
+
+
+class TestProgram(unittest.TestProgram):
+ """Collect and run tests, returning success or failure.
+
+ The arguments to TestProgram() are the same as to
+ :func:`main()` and :func:`run()`:
+
+ * module: All tests are in this module (default: None)
+ * defaultTest: Tests to load (default: '.')
+ * argv: Command line arguments (default: None; sys.argv is read)
+ * testRunner: Test runner instance (default: None)
+ * testLoader: Test loader instance (default: None)
+ * env: Environment; ignored if config is provided (default: None;
+ os.environ is read)
+ * config: :class:`nose.config.Config` instance (default: None)
+ * suite: Suite or list of tests to run (default: None). Passing a
+ suite or lists of tests will bypass all test discovery and
+ loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+ instance as the suite, context fixtures at the class, module and
+ package level will not be used, and many plugin hooks will not
+ be called. If you want normal nose behavior, either pass a list
+ of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+ * exit: Exit after running tests and printing report (default: True)
+ * plugins: List of plugins to use; ignored if config is provided
+ (default: load plugins with DefaultPluginManager)
+ * addplugins: List of **extra** plugins to use. Pass a list of plugin
+ instances in this argument to make custom plugins available while
+ still using the DefaultPluginManager.
+ """
+ verbosity = 1
+
+ def __init__(self, module=None, defaultTest='.', argv=None,
+ testRunner=None, testLoader=None, env=None, config=None,
+ suite=None, exit=True, plugins=None, addplugins=None):
+ if env is None:
+ env = os.environ
+ if config is None:
+ config = self.makeConfig(env, plugins)
+ if addplugins:
+ config.plugins.addPlugins(extraplugins=addplugins)
+ self.config = config
+ self.suite = suite
+ self.exit = exit
+ extra_args = {}
+ version = sys.version_info[0:2]
+ if version >= (2,7) and version != (3,0):
+ extra_args['exit'] = exit
+ unittest.TestProgram.__init__(
+ self, module=module, defaultTest=defaultTest,
+ argv=argv, testRunner=testRunner, testLoader=testLoader,
+ **extra_args)
+
+ def getAllConfigFiles(self, env=None):
+ env = env or {}
+ if env.get('NOSE_IGNORE_CONFIG_FILES', False):
+ return []
+ else:
+ return all_config_files()
+
+ def makeConfig(self, env, plugins=None):
+ """Load a Config, pre-filled with user config files if any are
+ found.
+ """
+ cfg_files = self.getAllConfigFiles(env)
+ if plugins:
+ manager = PluginManager(plugins=plugins)
+ else:
+ manager = DefaultPluginManager()
+ return Config(
+ env=env, files=cfg_files, plugins=manager)
+
+ def parseArgs(self, argv):
+ """Parse argv and env and configure running environment.
+ """
+ self.config.configure(argv, doc=self.usage())
+ log.debug("configured %s", self.config)
+
+ # quick outs: version, plugins (optparse would have already
+ # caught and exited on help)
+ if self.config.options.version:
+ from nose import __version__
+ sys.stdout = sys.__stdout__
+ print "%s version %s" % (os.path.basename(sys.argv[0]), __version__)
+ sys.exit(0)
+
+ if self.config.options.showPlugins:
+ self.showPlugins()
+ sys.exit(0)
+
+ if self.testLoader is None:
+ self.testLoader = defaultTestLoader(config=self.config)
+ elif isclass(self.testLoader):
+ self.testLoader = self.testLoader(config=self.config)
+ plug_loader = self.config.plugins.prepareTestLoader(self.testLoader)
+ if plug_loader is not None:
+ self.testLoader = plug_loader
+ log.debug("test loader is %s", self.testLoader)
+
+ # FIXME if self.module is a string, add it to self.testNames? not sure
+
+ if self.config.testNames:
+ self.testNames = self.config.testNames
+ else:
+ self.testNames = tolist(self.defaultTest)
+ log.debug('defaultTest %s', self.defaultTest)
+ log.debug('Test names are %s', self.testNames)
+ if self.config.workingDir is not None:
+ os.chdir(self.config.workingDir)
+ self.createTests()
+
+ def createTests(self):
+ """Create the tests to run. If a self.suite
+ is set, then that suite will be used. Otherwise, tests will be
+ loaded from the given test names (self.testNames) using the
+ test loader.
+ """
+ log.debug("createTests called with %s", self.suite)
+ if self.suite is not None:
+ # We were given an explicit suite to run. Make sure it's
+ # loaded and wrapped correctly.
+ self.test = self.testLoader.suiteClass(self.suite)
+ else:
+ self.test = self.testLoader.loadTestsFromNames(self.testNames)
+
+ def runTests(self):
+ """Run Tests. Returns true on success, false on failure, and sets
+ self.success to the same value.
+ """
+ log.debug("runTests called")
+ if self.testRunner is None:
+ self.testRunner = TextTestRunner(stream=self.config.stream,
+ verbosity=self.config.verbosity,
+ config=self.config)
+ plug_runner = self.config.plugins.prepareTestRunner(self.testRunner)
+ if plug_runner is not None:
+ self.testRunner = plug_runner
+ result = self.testRunner.run(self.test)
+ self.success = result.wasSuccessful()
+ if self.exit:
+ sys.exit(not self.success)
+ return self.success
+
+ def showPlugins(self):
+ """Print list of available plugins.
+ """
+ import textwrap
+
+ class DummyParser:
+ def __init__(self):
+ self.options = []
+ def add_option(self, *arg, **kw):
+ self.options.append((arg, kw.pop('help', '')))
+
+ v = self.config.verbosity
+ self.config.plugins.sort()
+ for p in self.config.plugins:
+ print "Plugin %s" % p.name
+ if v >= 2:
+ print " score: %s" % p.score
+ print '\n'.join(textwrap.wrap(p.help().strip(),
+ initial_indent=' ',
+ subsequent_indent=' '))
+ if v >= 3:
+ parser = DummyParser()
+ p.addOptions(parser)
+ if len(parser.options):
+ print
+ print " Options:"
+ for opts, help in parser.options:
+ print ' %s' % (', '.join(opts))
+ if help:
+ print '\n'.join(
+ textwrap.wrap(help.strip(),
+ initial_indent=' ',
+ subsequent_indent=' '))
+ print
+
+ def usage(cls):
+ import nose
+ try:
+ ld = nose.__loader__
+ text = ld.get_data(os.path.join(
+ os.path.dirname(__file__), 'usage.txt'))
+ except AttributeError:
+ f = open(os.path.join(
+ os.path.dirname(__file__), 'usage.txt'), 'r')
+ try:
+ text = f.read()
+ finally:
+ f.close()
+ # Ensure that we return str, not bytes.
+ if not isinstance(text, str):
+ text = text.decode('utf-8')
+ return text
+ usage = classmethod(usage)
+
+# backwards compatibility
+run_exit = main = TestProgram
+
+
+def run(*arg, **kw):
+ """Collect and run tests, returning success or failure.
+
+ The arguments to `run()` are the same as to `main()`:
+
+ * module: All tests are in this module (default: None)
+ * defaultTest: Tests to load (default: '.')
+ * argv: Command line arguments (default: None; sys.argv is read)
+ * testRunner: Test runner instance (default: None)
+ * testLoader: Test loader instance (default: None)
+ * env: Environment; ignored if config is provided (default: None;
+ os.environ is read)
+ * config: :class:`nose.config.Config` instance (default: None)
+ * suite: Suite or list of tests to run (default: None). Passing a
+ suite or lists of tests will bypass all test discovery and
+ loading. *ALSO NOTE* that if you pass a unittest.TestSuite
+ instance as the suite, context fixtures at the class, module and
+ package level will not be used, and many plugin hooks will not
+ be called. If you want normal nose behavior, either pass a list
+ of tests, or a fully-configured :class:`nose.suite.ContextSuite`.
+ * plugins: List of plugins to use; ignored if config is provided
+ (default: load plugins with DefaultPluginManager)
+ * addplugins: List of **extra** plugins to use. Pass a list of plugin
+ instances in this argument to make custom plugins available while
+ still using the DefaultPluginManager.
+
+ With the exception that the ``exit`` argument is always set
+ to False.
+ """
+ kw['exit'] = False
+ return TestProgram(*arg, **kw).success
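+# Illustrative usage (comment only):
+#   ok = run(argv=['nosetests', '-v', 'tests'])
+# runs the named tests and returns success as a bool, without exiting.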
+
+
+def runmodule(name='__main__', **kw):
+ """Collect and run tests in a single module only. Defaults to running
+ tests in __main__. Additional arguments to TestProgram may be passed
+ as keyword arguments.
+ """
+ main(defaultTest=name, **kw)
+
+
+def collector():
+ """TestSuite replacement entry point. Use anywhere you might use a
+ unittest.TestSuite. The collector will, by default, load options from
+ all config files and execute loader.loadTestsFromNames() on the
+ configured testNames, or '.' if no testNames are configured.
+ """
+    # plugins that implement any of these methods are disabled, since
+    # we don't control the test runner and won't be able to run them.
+    # finalize() is also not called, but plugins that use it aren't
+    # disabled, because capture needs it.
+ setuptools_incompat = ('report', 'prepareTest',
+ 'prepareTestLoader', 'prepareTestRunner',
+ 'setOutputStream')
+
+ plugins = RestrictedPluginManager(exclude=setuptools_incompat)
+ conf = Config(files=all_config_files(),
+ plugins=plugins)
+ conf.configure(argv=['collector'])
+ loader = defaultTestLoader(conf)
+
+ if conf.testNames:
+ suite = loader.loadTestsFromNames(conf.testNames)
+ else:
+ suite = loader.loadTestsFromNames(('.',))
+ return FinalizingSuiteWrapper(suite, plugins.finalize)
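+# Illustrative usage (comment only): setuptools can be pointed at this
+# entry point with setup(test_suite='nose.collector'), so that
+# `python setup.py test` collects and runs tests through nose.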
+
+
+
+if __name__ == '__main__':
+ main()
diff --git a/scripts/external_libs/nose-1.3.4/nose/exc.py b/scripts/external_libs/nose-1.3.4/nose/exc.py
new file mode 100755
index 00000000..8b780db0
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/exc.py
@@ -0,0 +1,9 @@
+"""Exceptions for marking tests as skipped or deprecated.
+
+This module exists to provide backwards compatibility with previous
+versions of nose where skipped and deprecated tests were core
+functionality, rather than being provided by plugins. It may be
+removed in a future release.
+"""
+from nose.plugins.skip import SkipTest
+from nose.plugins.deprecated import DeprecatedTest
diff --git a/scripts/external_libs/nose-1.3.4/nose/ext/__init__.py b/scripts/external_libs/nose-1.3.4/nose/ext/__init__.py
new file mode 100755
index 00000000..5fd1516a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/ext/__init__.py
@@ -0,0 +1,3 @@
+"""
+External or vendor files
+"""
diff --git a/scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py b/scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py
new file mode 100755
index 00000000..332cf08c
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/ext/dtcompat.py
@@ -0,0 +1,2272 @@
+# Module doctest.
+# Released to the public domain 16-Jan-2001, by Tim Peters (tim@python.org).
+# Major enhancements and refactoring by:
+# Jim Fulton
+# Edward Loper
+
+# Provided as-is; use at your own risk; no warranty; no promises; enjoy!
+#
+# Modified for inclusion in nose to provide support for DocFileTest in
+# python 2.3:
+#
+# - all doctests removed from module (they fail under 2.3 and 2.5)
+# - now handles the $py.class extension when run under Jython
+
+r"""Module doctest -- a framework for running examples in docstrings.
+
+In simplest use, end each module M to be tested with:
+
+def _test():
+ import doctest
+ doctest.testmod()
+
+if __name__ == "__main__":
+ _test()
+
+Then running the module as a script will cause the examples in the
+docstrings to get executed and verified:
+
+python M.py
+
+This won't display anything unless an example fails, in which case the
+failing example(s) and the cause(s) of the failure(s) are printed to stdout
+(why not stderr? because stderr is a lame hack <0.2 wink>), and the final
+line of output is "Test failed.".
+
+Run it with the -v switch instead:
+
+python M.py -v
+
+and a detailed report of all examples tried is printed to stdout, along
+with assorted summaries at the end.
+
+You can force verbose mode by passing "verbose=True" to testmod, or prohibit
+it by passing "verbose=False". In either of those cases, sys.argv is not
+examined by testmod.
+
+There are a variety of other ways to run doctests, including integration
+with the unittest framework, and support for running non-Python text
+files containing doctests. There are also many ways to override parts
+of doctest's default behaviors. See the Library Reference Manual for
+details.
+"""
+
+__docformat__ = 'reStructuredText en'
+
+__all__ = [
+ # 0, Option Flags
+ 'register_optionflag',
+ 'DONT_ACCEPT_TRUE_FOR_1',
+ 'DONT_ACCEPT_BLANKLINE',
+ 'NORMALIZE_WHITESPACE',
+ 'ELLIPSIS',
+ 'IGNORE_EXCEPTION_DETAIL',
+ 'COMPARISON_FLAGS',
+ 'REPORT_UDIFF',
+ 'REPORT_CDIFF',
+ 'REPORT_NDIFF',
+ 'REPORT_ONLY_FIRST_FAILURE',
+ 'REPORTING_FLAGS',
+ # 1. Utility Functions
+ 'is_private',
+ # 2. Example & DocTest
+ 'Example',
+ 'DocTest',
+ # 3. Doctest Parser
+ 'DocTestParser',
+ # 4. Doctest Finder
+ 'DocTestFinder',
+ # 5. Doctest Runner
+ 'DocTestRunner',
+ 'OutputChecker',
+ 'DocTestFailure',
+ 'UnexpectedException',
+ 'DebugRunner',
+ # 6. Test Functions
+ 'testmod',
+ 'testfile',
+ 'run_docstring_examples',
+ # 7. Tester
+ 'Tester',
+ # 8. Unittest Support
+ 'DocTestSuite',
+ 'DocFileSuite',
+ 'set_unittest_reportflags',
+ # 9. Debugging Support
+ 'script_from_examples',
+ 'testsource',
+ 'debug_src',
+ 'debug',
+]
+
+import __future__
+
+import sys, traceback, inspect, linecache, os, re
+import unittest, difflib, pdb, tempfile
+import warnings
+from StringIO import StringIO
+
+# Don't whine about the deprecated is_private function in this
+# module's tests.
+warnings.filterwarnings("ignore", "is_private", DeprecationWarning,
+ __name__, 0)
+
+# There are 4 basic classes:
+# - Example: a <source, want> pair, plus an intra-docstring line number.
+# - DocTest: a collection of examples, parsed from a docstring, plus
+# info about where the docstring came from (name, filename, lineno).
+# - DocTestFinder: extracts DocTests from a given object's docstring and
+# its contained objects' docstrings.
+# - DocTestRunner: runs DocTest cases, and accumulates statistics.
+#
+# So the basic picture is:
+#
+# list of:
+# +------+ +---------+ +-------+
+# |object| --DocTestFinder-> | DocTest | --DocTestRunner-> |results|
+# +------+ +---------+ +-------+
+# | Example |
+# | ... |
+# | Example |
+# +---------+
+
+# Option constants.
+
+OPTIONFLAGS_BY_NAME = {}
+def register_optionflag(name):
+ # Create a new flag unless `name` is already known.
+ return OPTIONFLAGS_BY_NAME.setdefault(name, 1 << len(OPTIONFLAGS_BY_NAME))
+
+DONT_ACCEPT_TRUE_FOR_1 = register_optionflag('DONT_ACCEPT_TRUE_FOR_1')
+DONT_ACCEPT_BLANKLINE = register_optionflag('DONT_ACCEPT_BLANKLINE')
+NORMALIZE_WHITESPACE = register_optionflag('NORMALIZE_WHITESPACE')
+ELLIPSIS = register_optionflag('ELLIPSIS')
+IGNORE_EXCEPTION_DETAIL = register_optionflag('IGNORE_EXCEPTION_DETAIL')
+
+COMPARISON_FLAGS = (DONT_ACCEPT_TRUE_FOR_1 |
+ DONT_ACCEPT_BLANKLINE |
+ NORMALIZE_WHITESPACE |
+ ELLIPSIS |
+ IGNORE_EXCEPTION_DETAIL)
+
+REPORT_UDIFF = register_optionflag('REPORT_UDIFF')
+REPORT_CDIFF = register_optionflag('REPORT_CDIFF')
+REPORT_NDIFF = register_optionflag('REPORT_NDIFF')
+REPORT_ONLY_FIRST_FAILURE = register_optionflag('REPORT_ONLY_FIRST_FAILURE')
+
+REPORTING_FLAGS = (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF |
+ REPORT_ONLY_FIRST_FAILURE)
+
+# Special string markers for use in `want` strings:
+BLANKLINE_MARKER = '<BLANKLINE>'
+ELLIPSIS_MARKER = '...'
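+
+# Added commentary (not in upstream doctest): each registered flag is a
+# distinct bit, so flags combine and test with bitwise operators, e.g.:
+#
+#     flags = ELLIPSIS | NORMALIZE_WHITESPACE
+#     assert flags & ELLIPSIS            # set
+#     assert not (flags & REPORT_UDIFF)  # not set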
+
+######################################################################
+## Table of Contents
+######################################################################
+# 1. Utility Functions
+# 2. Example & DocTest -- store test cases
+# 3. DocTest Parser -- extracts examples from strings
+# 4. DocTest Finder -- extracts test cases from objects
+# 5. DocTest Runner -- runs test cases
+# 6. Test Functions -- convenient wrappers for testing
+# 7. Tester Class -- for backwards compatibility
+# 8. Unittest Support
+# 9. Debugging Support
+# 10. Example Usage
+
+######################################################################
+## 1. Utility Functions
+######################################################################
+
+def is_private(prefix, base):
+ """prefix, base -> true iff name prefix + "." + base is "private".
+
+ Prefix may be an empty string, and base does not contain a period.
+ Prefix is ignored (although functions you write conforming to this
+ protocol may make use of it).
+    Return true iff base begins with at least one underscore, but
+ does not both begin and end with (at least) two underscores.
+ """
+ warnings.warn("is_private is deprecated; it wasn't useful; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning, stacklevel=2)
+ return base[:1] == "_" and not base[:2] == "__" == base[-2:]
+
+def _extract_future_flags(globs):
+ """
+ Return the compiler-flags associated with the future features that
+ have been imported into the given namespace (globs).
+ """
+ flags = 0
+ for fname in __future__.all_feature_names:
+ feature = globs.get(fname, None)
+ if feature is getattr(__future__, fname):
+ flags |= feature.compiler_flag
+ return flags
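+
+# Illustrative example (added commentary): if `globs` came from a module
+# that did "from __future__ import division", the returned value includes
+# __future__.division.compiler_flag, suitable for passing to compile().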
+
+def _normalize_module(module, depth=2):
+ """
+ Return the module specified by `module`. In particular:
+ - If `module` is a module, then return module.
+ - If `module` is a string, then import and return the
+ module with that name.
+ - If `module` is None, then return the calling module.
+ The calling module is assumed to be the module of
+ the stack frame at the given depth in the call stack.
+ """
+ if inspect.ismodule(module):
+ return module
+ elif isinstance(module, (str, unicode)):
+ return __import__(module, globals(), locals(), ["*"])
+ elif module is None:
+ return sys.modules[sys._getframe(depth).f_globals['__name__']]
+ else:
+ raise TypeError("Expected a module, string, or None")
+
+def _indent(s, indent=4):
+ """
+ Add the given number of space characters to the beginning every
+ non-blank line in `s`, and return the result.
+ """
+ # This regexp matches the start of non-blank lines:
+ return re.sub('(?m)^(?!$)', indent*' ', s)
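+
+# e.g. (added commentary): _indent('a\nb\n') returns '    a\n    b\n';
+# blank lines are left unindented.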
+
+def _exception_traceback(exc_info):
+ """
+ Return a string containing a traceback message for the given
+ exc_info tuple (as returned by sys.exc_info()).
+ """
+ # Get a traceback message.
+ excout = StringIO()
+ exc_type, exc_val, exc_tb = exc_info
+ traceback.print_exception(exc_type, exc_val, exc_tb, file=excout)
+ return excout.getvalue()
+
+# Override some StringIO methods.
+class _SpoofOut(StringIO):
+ def getvalue(self):
+ result = StringIO.getvalue(self)
+ # If anything at all was written, make sure there's a trailing
+ # newline. There's no way for the expected output to indicate
+ # that a trailing newline is missing.
+ if result and not result.endswith("\n"):
+ result += "\n"
+ # Prevent softspace from screwing up the next test case, in
+ # case they used print with a trailing comma in an example.
+ if hasattr(self, "softspace"):
+ del self.softspace
+ return result
+
+ def truncate(self, size=None):
+ StringIO.truncate(self, size)
+ if hasattr(self, "softspace"):
+ del self.softspace
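+
+# Added commentary: the net effect is that _SpoofOut().getvalue() always
+# ends with '\n' when anything was written -- e.g. writing "abc" yields
+# "abc\n" -- so expected doctest output can be compared line-wise.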
+
+# Worst-case linear-time ellipsis matching.
+def _ellipsis_match(want, got):
+ if ELLIPSIS_MARKER not in want:
+ return want == got
+
+ # Find "the real" strings.
+ ws = want.split(ELLIPSIS_MARKER)
+ assert len(ws) >= 2
+
+ # Deal with exact matches possibly needed at one or both ends.
+ startpos, endpos = 0, len(got)
+ w = ws[0]
+ if w: # starts with exact match
+ if got.startswith(w):
+ startpos = len(w)
+ del ws[0]
+ else:
+ return False
+ w = ws[-1]
+ if w: # ends with exact match
+ if got.endswith(w):
+ endpos -= len(w)
+ del ws[-1]
+ else:
+ return False
+
+ if startpos > endpos:
+ # Exact end matches required more characters than we have, as in
+ # _ellipsis_match('aa...aa', 'aaa')
+ return False
+
+ # For the rest, we only need to find the leftmost non-overlapping
+ # match for each piece. If there's no overall match that way alone,
+ # there's no overall match period.
+ for w in ws:
+ # w may be '' at times, if there are consecutive ellipses, or
+ # due to an ellipsis at the start or end of `want`. That's OK.
+ # Search for an empty string succeeds, and doesn't change startpos.
+ startpos = got.find(w, startpos, endpos)
+ if startpos < 0:
+ return False
+ startpos += len(w)
+
+ return True
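+
+# Illustrative behavior (added commentary, not upstream doctest tests):
+#
+#     _ellipsis_match('a...c', 'abbbc')   # -> True
+#     _ellipsis_match('aa...aa', 'aaa')   # -> False (end overlaps start)
+#     _ellipsis_match('...', 'anything')  # -> True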
+
+def _comment_line(line):
+ "Return a commented form of the given line"
+ line = line.rstrip()
+ if line:
+ return '# '+line
+ else:
+ return '#'
+
+class _OutputRedirectingPdb(pdb.Pdb):
+ """
+ A specialized version of the python debugger that redirects stdout
+ to a given stream when interacting with the user. Stdout is *not*
+ redirected when traced code is executed.
+ """
+ def __init__(self, out):
+ self.__out = out
+ pdb.Pdb.__init__(self)
+
+ def trace_dispatch(self, *args):
+ # Redirect stdout to the given stream.
+ save_stdout = sys.stdout
+ sys.stdout = self.__out
+ # Call Pdb's trace dispatch method.
+ try:
+ return pdb.Pdb.trace_dispatch(self, *args)
+ finally:
+ sys.stdout = save_stdout
+
+# [XX] Normalize with respect to os.path.pardir?
+def _module_relative_path(module, path):
+ if not inspect.ismodule(module):
+ raise TypeError, 'Expected a module: %r' % module
+ if path.startswith('/'):
+ raise ValueError, 'Module-relative files may not have absolute paths'
+
+ # Find the base directory for the path.
+ if hasattr(module, '__file__'):
+ # A normal module/package
+ basedir = os.path.split(module.__file__)[0]
+ elif module.__name__ == '__main__':
+ # An interactive session.
+ if len(sys.argv)>0 and sys.argv[0] != '':
+ basedir = os.path.split(sys.argv[0])[0]
+ else:
+ basedir = os.curdir
+ else:
+ # A module w/o __file__ (this includes builtins)
+ raise ValueError("Can't resolve paths relative to the module " +
+ module + " (it has no __file__)")
+
+ # Combine the base directory and the path.
+ return os.path.join(basedir, *(path.split('/')))
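+
+# e.g. (added commentary; names are hypothetical): for a module whose
+# __file__ is 'pkg/mod.py', _module_relative_path(mod, 'data/in.txt')
+# returns os.path.join('pkg', 'data', 'in.txt').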
+
+######################################################################
+## 2. Example & DocTest
+######################################################################
+## - An "example" is a <source, want> pair, where "source" is a
+## fragment of source code, and "want" is the expected output for
+## "source." The Example class also includes information about
+## where the example was extracted from.
+##
+## - A "doctest" is a collection of examples, typically extracted from
+## a string (such as an object's docstring). The DocTest class also
+## includes information about where the string was extracted from.
+
+class Example:
+ """
+ A single doctest example, consisting of source code and expected
+ output. `Example` defines the following attributes:
+
+ - source: A single Python statement, always ending with a newline.
+ The constructor adds a newline if needed.
+
+ - want: The expected output from running the source code (either
+ from stdout, or a traceback in case of exception). `want` ends
+ with a newline unless it's empty, in which case it's an empty
+ string. The constructor adds a newline if needed.
+
+ - exc_msg: The exception message generated by the example, if
+ the example is expected to generate an exception; or `None` if
+ it is not expected to generate an exception. This exception
+ message is compared against the return value of
+ `traceback.format_exception_only()`. `exc_msg` ends with a
+ newline unless it's `None`. The constructor adds a newline
+ if needed.
+
+ - lineno: The line number within the DocTest string containing
+ this Example where the Example begins. This line number is
+ zero-based, with respect to the beginning of the DocTest.
+
+ - indent: The example's indentation in the DocTest string.
+      I.e., the number of space characters that precede the
+ example's first prompt.
+
+ - options: A dictionary mapping from option flags to True or
+ False, which is used to override default options for this
+ example. Any option flags not contained in this dictionary
+ are left at their default value (as specified by the
+ DocTestRunner's optionflags). By default, no options are set.
+ """
+ def __init__(self, source, want, exc_msg=None, lineno=0, indent=0,
+ options=None):
+ # Normalize inputs.
+ if not source.endswith('\n'):
+ source += '\n'
+ if want and not want.endswith('\n'):
+ want += '\n'
+ if exc_msg is not None and not exc_msg.endswith('\n'):
+ exc_msg += '\n'
+ # Store properties.
+ self.source = source
+ self.want = want
+ self.lineno = lineno
+ self.indent = indent
+ if options is None: options = {}
+ self.options = options
+ self.exc_msg = exc_msg
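+
+# e.g. (added commentary): Example('1+1', '2') stores source='1+1\n' and
+# want='2\n' -- the constructor appends the missing newlines.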
+
+class DocTest:
+ """
+ A collection of doctest examples that should be run in a single
+ namespace. Each `DocTest` defines the following attributes:
+
+ - examples: the list of examples.
+
+ - globs: The namespace (aka globals) that the examples should
+ be run in.
+
+ - name: A name identifying the DocTest (typically, the name of
+ the object whose docstring this DocTest was extracted from).
+
+ - filename: The name of the file that this DocTest was extracted
+ from, or `None` if the filename is unknown.
+
+ - lineno: The line number within filename where this DocTest
+ begins, or `None` if the line number is unavailable. This
+ line number is zero-based, with respect to the beginning of
+ the file.
+
+ - docstring: The string that the examples were extracted from,
+ or `None` if the string is unavailable.
+ """
+ def __init__(self, examples, globs, name, filename, lineno, docstring):
+ """
+ Create a new DocTest containing the given examples. The
+ DocTest's globals are initialized with a copy of `globs`.
+ """
+ assert not isinstance(examples, basestring), \
+ "DocTest no longer accepts str; use DocTestParser instead"
+ self.examples = examples
+ self.docstring = docstring
+ self.globs = globs.copy()
+ self.name = name
+ self.filename = filename
+ self.lineno = lineno
+
+ def __repr__(self):
+ if len(self.examples) == 0:
+ examples = 'no examples'
+ elif len(self.examples) == 1:
+ examples = '1 example'
+ else:
+ examples = '%d examples' % len(self.examples)
+ return ('<DocTest %s from %s:%s (%s)>' %
+ (self.name, self.filename, self.lineno, examples))
+
+
+ # This lets us sort tests by name:
+ def __cmp__(self, other):
+ if not isinstance(other, DocTest):
+ return -1
+ return cmp((self.name, self.filename, self.lineno, id(self)),
+ (other.name, other.filename, other.lineno, id(other)))
+
+######################################################################
+## 3. DocTestParser
+######################################################################
+
+class DocTestParser:
+ """
+ A class used to parse strings containing doctest examples.
+ """
+ # This regular expression is used to find doctest examples in a
+ # string. It defines three groups: `source` is the source code
+ # (including leading indentation and prompts); `indent` is the
+ # indentation of the first (PS1) line of the source code; and
+ # `want` is the expected output (including leading indentation).
+ _EXAMPLE_RE = re.compile(r'''
+ # Source consists of a PS1 line followed by zero or more PS2 lines.
+ (?P<source>
+ (?:^(?P<indent> [ ]*) >>> .*) # PS1 line
+ (?:\n [ ]* \.\.\. .*)*) # PS2 lines
+ \n?
+ # Want consists of any non-blank lines that do not start with PS1.
+ (?P<want> (?:(?![ ]*$) # Not a blank line
+ (?![ ]*>>>) # Not a line starting with PS1
+ .*$\n? # But any other line
+ )*)
+ ''', re.MULTILINE | re.VERBOSE)
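+
+    # Illustrative match (added commentary): in the string
+    # '>>> 1+1\n2\n', group 'source' is '>>> 1+1', group 'indent'
+    # is '', and group 'want' is '2\n'.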
+
+ # A regular expression for handling `want` strings that contain
+ # expected exceptions. It divides `want` into three pieces:
+ # - the traceback header line (`hdr`)
+ # - the traceback stack (`stack`)
+ # - the exception message (`msg`), as generated by
+ # traceback.format_exception_only()
+ # `msg` may have multiple lines. We assume/require that the
+ # exception message is the first non-indented line starting with a word
+ # character following the traceback header line.
+ _EXCEPTION_RE = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^ (?P<msg> \w+ .*) # a line *starts* with alphanum.
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+
+ # A callable returning a true value iff its argument is a blank line
+ # or contains a single comment.
+ _IS_BLANK_OR_COMMENT = re.compile(r'^[ ]*(#.*)?$').match
+
+ def parse(self, string, name='<string>'):
+ """
+ Divide the given string into examples and intervening text,
+ and return them as a list of alternating Examples and strings.
+ Line numbers for the Examples are 0-based. The optional
+ argument `name` is a name identifying this string, and is only
+ used for error messages.
+ """
+ string = string.expandtabs()
+ # If all lines begin with the same indentation, then strip it.
+ min_indent = self._min_indent(string)
+ if min_indent > 0:
+ string = '\n'.join([l[min_indent:] for l in string.split('\n')])
+
+ output = []
+ charno, lineno = 0, 0
+ # Find all doctest examples in the string:
+ for m in self._EXAMPLE_RE.finditer(string):
+ # Add the pre-example text to `output`.
+ output.append(string[charno:m.start()])
+ # Update lineno (lines before this example)
+ lineno += string.count('\n', charno, m.start())
+ # Extract info from the regexp match.
+ (source, options, want, exc_msg) = \
+ self._parse_example(m, name, lineno)
+ # Create an Example, and add it to the list.
+ if not self._IS_BLANK_OR_COMMENT(source):
+ output.append( Example(source, want, exc_msg,
+ lineno=lineno,
+ indent=min_indent+len(m.group('indent')),
+ options=options) )
+ # Update lineno (lines inside this example)
+ lineno += string.count('\n', m.start(), m.end())
+ # Update charno.
+ charno = m.end()
+ # Add any remaining post-example text to `output`.
+ output.append(string[charno:])
+ return output
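+
+    # Illustrative (added commentary): parse('>>> 1+1\n2\n') returns
+    # ['', <Example>, ''] -- leading text, one Example with source
+    # '1+1\n' and want '2\n', and trailing text.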
+
+ def get_doctest(self, string, globs, name, filename, lineno):
+ """
+ Extract all doctest examples from the given string, and
+ collect them into a `DocTest` object.
+
+ `globs`, `name`, `filename`, and `lineno` are attributes for
+ the new `DocTest` object. See the documentation for `DocTest`
+ for more information.
+ """
+ return DocTest(self.get_examples(string, name), globs,
+ name, filename, lineno, string)
+
+ def get_examples(self, string, name='<string>'):
+ """
+ Extract all doctest examples from the given string, and return
+ them as a list of `Example` objects. Line numbers are
+ 0-based, because it's most common in doctests that nothing
+        interesting appears on the same line as the opening triple-quote,
+        so the first interesting line is then called \"line 1\".
+
+ The optional argument `name` is a name identifying this
+ string, and is only used for error messages.
+ """
+ return [x for x in self.parse(string, name)
+ if isinstance(x, Example)]
+
+ def _parse_example(self, m, name, lineno):
+ """
+ Given a regular expression match from `_EXAMPLE_RE` (`m`),
+ return a pair `(source, want)`, where `source` is the matched
+ example's source code (with prompts and indentation stripped);
+ and `want` is the example's expected output (with indentation
+ stripped).
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ # Get the example's indentation level.
+ indent = len(m.group('indent'))
+
+ # Divide source into lines; check that they're properly
+ # indented; and then strip their indentation & prompts.
+ source_lines = m.group('source').split('\n')
+ self._check_prompt_blank(source_lines, indent, name, lineno)
+ self._check_prefix(source_lines[1:], ' '*indent + '.', name, lineno)
+ source = '\n'.join([sl[indent+4:] for sl in source_lines])
+
+ # Divide want into lines; check that it's properly indented; and
+ # then strip the indentation. Spaces before the last newline should
+ # be preserved, so plain rstrip() isn't good enough.
+ want = m.group('want')
+ want_lines = want.split('\n')
+ if len(want_lines) > 1 and re.match(r' *$', want_lines[-1]):
+ del want_lines[-1] # forget final newline & spaces after it
+ self._check_prefix(want_lines, ' '*indent, name,
+ lineno + len(source_lines))
+ want = '\n'.join([wl[indent:] for wl in want_lines])
+
+ # If `want` contains a traceback message, then extract it.
+ m = self._EXCEPTION_RE.match(want)
+ if m:
+ exc_msg = m.group('msg')
+ else:
+ exc_msg = None
+
+ # Extract options from the source.
+ options = self._find_options(source, name, lineno)
+
+ return source, options, want, exc_msg
+
+ # This regular expression looks for option directives in the
+ # source code of an example. Option directives are comments
+ # starting with "doctest:". Warning: this may give false
+ # positives for string-literals that contain the string
+ # "#doctest:". Eliminating these false positives would require
+ # actually parsing the string; but we limit them by ignoring any
+ # line containing "#doctest:" that is *followed* by a quote mark.
+ _OPTION_DIRECTIVE_RE = re.compile(r'#\s*doctest:\s*([^\n\'"]*)$',
+ re.MULTILINE)
+
+ def _find_options(self, source, name, lineno):
+ """
+ Return a dictionary containing option overrides extracted from
+ option directives in the given source string.
+
+ `name` is the string's name, and `lineno` is the line number
+ where the example starts; both are used for error messages.
+ """
+ options = {}
+ # (note: with the current regexp, this will match at most once:)
+ for m in self._OPTION_DIRECTIVE_RE.finditer(source):
+ option_strings = m.group(1).replace(',', ' ').split()
+ for option in option_strings:
+ if (option[0] not in '+-' or
+ option[1:] not in OPTIONFLAGS_BY_NAME):
+ raise ValueError('line %r of the doctest for %s '
+ 'has an invalid option: %r' %
+ (lineno+1, name, option))
+ flag = OPTIONFLAGS_BY_NAME[option[1:]]
+ options[flag] = (option[0] == '+')
+ if options and self._IS_BLANK_OR_COMMENT(source):
+ raise ValueError('line %r of the doctest for %s has an option '
+ 'directive on a line with no example: %r' %
+ (lineno, name, source))
+ return options
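+
+    # e.g. (added commentary): for the source line
+    # 'x = 2  # doctest: +ELLIPSIS', this returns {ELLIPSIS: True};
+    # '-ELLIPSIS' would map the flag to False instead.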
+
+ # This regular expression finds the indentation of every non-blank
+ # line in a string.
+ _INDENT_RE = re.compile('^([ ]*)(?=\S)', re.MULTILINE)
+
+ def _min_indent(self, s):
+ "Return the minimum indentation of any non-blank line in `s`"
+ indents = [len(indent) for indent in self._INDENT_RE.findall(s)]
+ if len(indents) > 0:
+ return min(indents)
+ else:
+ return 0
+
+ def _check_prompt_blank(self, lines, indent, name, lineno):
+ """
+ Given the lines of a source string (including prompts and
+ leading indentation), check to make sure that every prompt is
+ followed by a space character. If any line is not followed by
+ a space character, then raise ValueError.
+ """
+ for i, line in enumerate(lines):
+ if len(line) >= indent+4 and line[indent+3] != ' ':
+ raise ValueError('line %r of the docstring for %s '
+ 'lacks blank after %s: %r' %
+ (lineno+i+1, name,
+ line[indent:indent+3], line))
+
+ def _check_prefix(self, lines, prefix, name, lineno):
+ """
+ Check that every line in the given list starts with the given
+ prefix; if any line does not, then raise a ValueError.
+ """
+ for i, line in enumerate(lines):
+ if line and not line.startswith(prefix):
+ raise ValueError('line %r of the docstring for %s has '
+ 'inconsistent leading whitespace: %r' %
+ (lineno+i+1, name, line))
+
+
+######################################################################
+## 4. DocTest Finder
+######################################################################
+
+class DocTestFinder:
+ """
+ A class used to extract the DocTests that are relevant to a given
+ object, from its docstring and the docstrings of its contained
+ objects. Doctests can currently be extracted from the following
+ object types: modules, functions, classes, methods, staticmethods,
+ classmethods, and properties.
+ """
+
+ def __init__(self, verbose=False, parser=DocTestParser(),
+ recurse=True, _namefilter=None, exclude_empty=True):
+ """
+ Create a new doctest finder.
+
+ The optional argument `parser` specifies a class or
+ function that should be used to create new DocTest objects (or
+ objects that implement the same interface as DocTest). The
+ signature for this factory function should match the signature
+ of the DocTest constructor.
+
+ If the optional argument `recurse` is false, then `find` will
+ only examine the given object, and not any contained objects.
+
+ If the optional argument `exclude_empty` is false, then `find`
+ will include tests for objects with empty docstrings.
+ """
+ self._parser = parser
+ self._verbose = verbose
+ self._recurse = recurse
+ self._exclude_empty = exclude_empty
+ # _namefilter is undocumented, and exists only for temporary backward-
+ # compatibility support of testmod's deprecated isprivate mess.
+ self._namefilter = _namefilter
+
+ def find(self, obj, name=None, module=None, globs=None,
+ extraglobs=None):
+ """
+ Return a list of the DocTests that are defined by the given
+ object's docstring, or by any of its contained objects'
+ docstrings.
+
+ The optional parameter `module` is the module that contains
+ the given object. If the module is not specified or is None, then
+ the test finder will attempt to automatically determine the
+ correct module. The object's module is used:
+
+ - As a default namespace, if `globs` is not specified.
+ - To prevent the DocTestFinder from extracting DocTests
+ from objects that are imported from other modules.
+ - To find the name of the file containing the object.
+ - To help find the line number of the object within its
+ file.
+
+ Contained objects whose module does not match `module` are ignored.
+
+ If `module` is False, no attempt to find the module will be made.
+ This is obscure, of use mostly in tests: if `module` is False, or
+ is None but cannot be found automatically, then all objects are
+ considered to belong to the (non-existent) module, so all contained
+ objects will (recursively) be searched for doctests.
+
+ The globals for each DocTest is formed by combining `globs`
+ and `extraglobs` (bindings in `extraglobs` override bindings
+ in `globs`). A new copy of the globals dictionary is created
+ for each DocTest. If `globs` is not specified, then it
+ defaults to the module's `__dict__`, if specified, or {}
+ otherwise. If `extraglobs` is not specified, then it defaults
+ to {}.
+
+ """
+ # If name was not specified, then extract it from the object.
+ if name is None:
+ name = getattr(obj, '__name__', None)
+ if name is None:
+ raise ValueError("DocTestFinder.find: name must be given "
+ "when obj.__name__ doesn't exist: %r" %
+ (type(obj),))
+
+ # Find the module that contains the given object (if obj is
+        # a module, then module=obj). Note: this may fail, in which
+ # case module will be None.
+ if module is False:
+ module = None
+ elif module is None:
+ module = inspect.getmodule(obj)
+
+ # Read the module's source code. This is used by
+ # DocTestFinder._find_lineno to find the line number for a
+ # given object's docstring.
+ try:
+ file = inspect.getsourcefile(obj) or inspect.getfile(obj)
+ source_lines = linecache.getlines(file)
+ if not source_lines:
+ source_lines = None
+ except TypeError:
+ source_lines = None
+
+ # Initialize globals, and merge in extraglobs.
+ if globs is None:
+ if module is None:
+ globs = {}
+ else:
+ globs = module.__dict__.copy()
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+        # Recursively explore `obj`, extracting DocTests.
+ tests = []
+ self._find(tests, obj, name, module, source_lines, globs, {})
+ # Sort the tests by alpha order of names, for consistency in
+ # verbose-mode output. This was a feature of doctest in Pythons
+ # <= 2.3 that got lost by accident in 2.4. It was repaired in
+ # 2.4.4 and 2.5.
+ tests.sort()
+ return tests
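+
+    # Typical use (illustrative, added commentary):
+    #
+    #     finder = DocTestFinder()
+    #     for test in finder.find(some_module):  # some_module: any module
+    #         DocTestRunner().run(test)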
+
+ def _filter(self, obj, prefix, base):
+ """
+ Return true if the given object should not be examined.
+ """
+ return (self._namefilter is not None and
+ self._namefilter(prefix, base))
+
+ def _from_module(self, module, object):
+ """
+ Return true if the given object is defined in the given
+ module.
+ """
+ if module is None:
+ return True
+ elif inspect.isfunction(object):
+ return module.__dict__ is object.func_globals
+ elif inspect.isclass(object):
+ # Some jython classes don't set __module__
+ return module.__name__ == getattr(object, '__module__', None)
+ elif inspect.getmodule(object) is not None:
+ return module is inspect.getmodule(object)
+ elif hasattr(object, '__module__'):
+ return module.__name__ == object.__module__
+ elif isinstance(object, property):
+            return True # [XX] no way to be sure.
+ else:
+ raise ValueError("object must be a class or function")
+
+ def _find(self, tests, obj, name, module, source_lines, globs, seen):
+ """
+ Find tests for the given object and any contained objects, and
+ add them to `tests`.
+ """
+ if self._verbose:
+ print 'Finding tests in %s' % name
+
+ # If we've already processed this object, then ignore it.
+ if id(obj) in seen:
+ return
+ seen[id(obj)] = 1
+
+ # Find a test for this object, and add it to the list of tests.
+ test = self._get_test(obj, name, module, globs, source_lines)
+ if test is not None:
+ tests.append(test)
+
+ # Look for tests in a module's contained objects.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Check if this contained object should be ignored.
+ if self._filter(val, name, valname):
+ continue
+ valname = '%s.%s' % (name, valname)
+ # Recurse to functions & classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val)) and
+ self._from_module(module, val)):
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a module's __test__ dictionary.
+ if inspect.ismodule(obj) and self._recurse:
+ for valname, val in getattr(obj, '__test__', {}).items():
+ if not isinstance(valname, basestring):
+ raise ValueError("DocTestFinder.find: __test__ keys "
+ "must be strings: %r" %
+ (type(valname),))
+ if not (inspect.isfunction(val) or inspect.isclass(val) or
+ inspect.ismethod(val) or inspect.ismodule(val) or
+ isinstance(val, basestring)):
+ raise ValueError("DocTestFinder.find: __test__ values "
+ "must be strings, functions, methods, "
+ "classes, or modules: %r" %
+ (type(val),))
+ valname = '%s.__test__.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ # Look for tests in a class's contained objects.
+ if inspect.isclass(obj) and self._recurse:
+ for valname, val in obj.__dict__.items():
+ # Check if this contained object should be ignored.
+ if self._filter(val, name, valname):
+ continue
+ # Special handling for staticmethod/classmethod.
+ if isinstance(val, staticmethod):
+ val = getattr(obj, valname)
+ if isinstance(val, classmethod):
+ val = getattr(obj, valname).im_func
+
+ # Recurse to methods, properties, and nested classes.
+ if ((inspect.isfunction(val) or inspect.isclass(val) or
+ isinstance(val, property)) and
+ self._from_module(module, val)):
+ valname = '%s.%s' % (name, valname)
+ self._find(tests, val, valname, module, source_lines,
+ globs, seen)
+
+ def _get_test(self, obj, name, module, globs, source_lines):
+ """
+ Return a DocTest for the given object, if it defines a docstring;
+ otherwise, return None.
+ """
+ # Extract the object's docstring. If it doesn't have one,
+ # then return None (no test for this object).
+ if isinstance(obj, basestring):
+ docstring = obj
+ else:
+ try:
+ if obj.__doc__ is None:
+ docstring = ''
+ else:
+ docstring = obj.__doc__
+ if not isinstance(docstring, basestring):
+ docstring = str(docstring)
+ except (TypeError, AttributeError):
+ docstring = ''
+
+ # Find the docstring's location in the file.
+ lineno = self._find_lineno(obj, source_lines)
+
+ # Don't bother if the docstring is empty.
+ if self._exclude_empty and not docstring:
+ return None
+
+ # Return a DocTest for this object.
+ if module is None:
+ filename = None
+ else:
+ filename = getattr(module, '__file__', module.__name__)
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ elif sys.platform.startswith('java') and \
+ filename.endswith('$py.class'):
+ filename = '%s.py' % filename[:-9]
+ return self._parser.get_doctest(docstring, globs, name,
+ filename, lineno)
+
+ def _find_lineno(self, obj, source_lines):
+ """
+ Return a line number of the given object's docstring. Note:
+ this method assumes that the object has a docstring.
+ """
+ lineno = None
+
+ # Find the line number for modules.
+ if inspect.ismodule(obj):
+ lineno = 0
+
+ # Find the line number for classes.
+ # Note: this could be fooled if a class is defined multiple
+ # times in a single file.
+ if inspect.isclass(obj):
+ if source_lines is None:
+ return None
+ pat = re.compile(r'^\s*class\s*%s\b' %
+ getattr(obj, '__name__', '-'))
+ for i, line in enumerate(source_lines):
+ if pat.match(line):
+ lineno = i
+ break
+
+ # Find the line number for functions & methods.
+ if inspect.ismethod(obj): obj = obj.im_func
+ if inspect.isfunction(obj): obj = obj.func_code
+ if inspect.istraceback(obj): obj = obj.tb_frame
+ if inspect.isframe(obj): obj = obj.f_code
+ if inspect.iscode(obj):
+ lineno = getattr(obj, 'co_firstlineno', None)-1
+
+ # Find the line number where the docstring starts. Assume
+ # that it's the first line that begins with a quote mark.
+ # Note: this could be fooled by a multiline function
+ # signature, where a continuation line begins with a quote
+ # mark.
+ if lineno is not None:
+ if source_lines is None:
+ return lineno+1
+ pat = re.compile('(^|.*:)\s*\w*("|\')')
+ for lineno in range(lineno, len(source_lines)):
+ if pat.match(source_lines[lineno]):
+ return lineno
+
+ # We couldn't find the line number.
+ return None
+
+######################################################################
+## 5. DocTest Runner
+######################################################################
+
+class DocTestRunner:
+ # This divider string is used to separate failure messages, and to
+ # separate sections of the summary.
+ DIVIDER = "*" * 70
+
+ def __init__(self, checker=None, verbose=None, optionflags=0):
+ """
+ Create a new test runner.
+
+ Optional keyword arg `checker` is the `OutputChecker` that
+ should be used to compare the expected outputs and actual
+ outputs of doctest examples.
+
+ Optional keyword arg 'verbose' prints lots of stuff if true,
+ only failures if false; by default, it's true iff '-v' is in
+ sys.argv.
+
+ Optional argument `optionflags` can be used to control how the
+ test runner compares expected output to actual output, and how
+ it displays failures. See the documentation for `testmod` for
+ more information.
+ """
+ self._checker = checker or OutputChecker()
+ if verbose is None:
+ verbose = '-v' in sys.argv
+ self._verbose = verbose
+ self.optionflags = optionflags
+ self.original_optionflags = optionflags
+
+ # Keep track of the examples we've run.
+ self.tries = 0
+ self.failures = 0
+ self._name2ft = {}
+
+ # Create a fake output target for capturing doctest output.
+ self._fakeout = _SpoofOut()
+
+ #/////////////////////////////////////////////////////////////////
+ # Reporting methods
+ #/////////////////////////////////////////////////////////////////
+
+ def report_start(self, out, test, example):
+ """
+ Report that the test runner is about to process the given
+ example. (Only displays a message if verbose=True)
+ """
+ if self._verbose:
+ if example.want:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting:\n' + _indent(example.want))
+ else:
+ out('Trying:\n' + _indent(example.source) +
+ 'Expecting nothing\n')
+
+ def report_success(self, out, test, example, got):
+ """
+ Report that the given example ran successfully. (Only
+ displays a message if verbose=True)
+ """
+ if self._verbose:
+ out("ok\n")
+
+ def report_failure(self, out, test, example, got):
+ """
+ Report that the given example failed.
+ """
+ out(self._failure_header(test, example) +
+ self._checker.output_difference(example, got, self.optionflags))
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ """
+ Report that the given example raised an unexpected exception.
+ """
+ out(self._failure_header(test, example) +
+ 'Exception raised:\n' + _indent(_exception_traceback(exc_info)))
+
+ def _failure_header(self, test, example):
+ out = [self.DIVIDER]
+ if test.filename:
+ if test.lineno is not None and example.lineno is not None:
+ lineno = test.lineno + example.lineno + 1
+ else:
+ lineno = '?'
+ out.append('File "%s", line %s, in %s' %
+ (test.filename, lineno, test.name))
+ else:
+ out.append('Line %s, in %s' % (example.lineno+1, test.name))
+ out.append('Failed example:')
+ source = example.source
+ out.append(_indent(source))
+ return '\n'.join(out)
+
+ #/////////////////////////////////////////////////////////////////
+ # DocTest Running
+ #/////////////////////////////////////////////////////////////////
+
+ def __run(self, test, compileflags, out):
+ """
+ Run the examples in `test`. Write the outcome of each example
+ with one of the `DocTestRunner.report_*` methods, using the
+ writer function `out`. `compileflags` is the set of compiler
+ flags that should be used to execute examples. Return a tuple
+ `(f, t)`, where `t` is the number of examples tried, and `f`
+ is the number of examples that failed. The examples are run
+ in the namespace `test.globs`.
+ """
+ # Keep track of the number of failures and tries.
+ failures = tries = 0
+
+ # Save the option flags (since option directives can be used
+ # to modify them).
+ original_optionflags = self.optionflags
+
+ SUCCESS, FAILURE, BOOM = range(3) # `outcome` state
+
+ check = self._checker.check_output
+
+ # Process each example.
+ for examplenum, example in enumerate(test.examples):
+
+            # If REPORT_ONLY_FIRST_FAILURE is set, then suppress
+ # reporting after the first failure.
+ quiet = (self.optionflags & REPORT_ONLY_FIRST_FAILURE and
+ failures > 0)
+
+ # Merge in the example's options.
+ self.optionflags = original_optionflags
+ if example.options:
+ for (optionflag, val) in example.options.items():
+ if val:
+ self.optionflags |= optionflag
+ else:
+ self.optionflags &= ~optionflag
+
+ # Record that we started this example.
+ tries += 1
+ if not quiet:
+ self.report_start(out, test, example)
+
+ # Use a special filename for compile(), so we can retrieve
+ # the source code during interactive debugging (see
+ # __patched_linecache_getlines).
+ filename = '<doctest %s[%d]>' % (test.name, examplenum)
+
+ # Run the example in the given context (globs), and record
+ # any exception that gets raised. (But don't intercept
+ # keyboard interrupts.)
+ try:
+ # Don't blink! This is where the user's code gets run.
+ exec compile(example.source, filename, "single",
+ compileflags, 1) in test.globs
+ self.debugger.set_continue() # ==== Example Finished ====
+ exception = None
+ except KeyboardInterrupt:
+ raise
+ except:
+ exception = sys.exc_info()
+ self.debugger.set_continue() # ==== Example Finished ====
+
+ got = self._fakeout.getvalue() # the actual output
+ self._fakeout.truncate(0)
+ outcome = FAILURE # guilty until proved innocent or insane
+
+ # If the example executed without raising any exceptions,
+ # verify its output.
+ if exception is None:
+ if check(example.want, got, self.optionflags):
+ outcome = SUCCESS
+
+ # The example raised an exception: check if it was expected.
+ else:
+ exc_info = sys.exc_info()
+ exc_msg = traceback.format_exception_only(*exc_info[:2])[-1]
+ if not quiet:
+ got += _exception_traceback(exc_info)
+
+ # If `example.exc_msg` is None, then we weren't expecting
+ # an exception.
+ if example.exc_msg is None:
+ outcome = BOOM
+
+ # We expected an exception: see whether it matches.
+ elif check(example.exc_msg, exc_msg, self.optionflags):
+ outcome = SUCCESS
+
+ # Another chance if they didn't care about the detail.
+ elif self.optionflags & IGNORE_EXCEPTION_DETAIL:
+ m1 = re.match(r'[^:]*:', example.exc_msg)
+ m2 = re.match(r'[^:]*:', exc_msg)
+ if m1 and m2 and check(m1.group(0), m2.group(0),
+ self.optionflags):
+ outcome = SUCCESS
+
+ # Report the outcome.
+ if outcome is SUCCESS:
+ if not quiet:
+ self.report_success(out, test, example, got)
+ elif outcome is FAILURE:
+ if not quiet:
+ self.report_failure(out, test, example, got)
+ failures += 1
+ elif outcome is BOOM:
+ if not quiet:
+ self.report_unexpected_exception(out, test, example,
+ exc_info)
+ failures += 1
+ else:
+ assert False, ("unknown outcome", outcome)
+
+ # Restore the option flags (in case they were modified)
+ self.optionflags = original_optionflags
+
+ # Record and return the number of failures and tries.
+ self.__record_outcome(test, failures, tries)
+ return failures, tries
+
+ def __record_outcome(self, test, f, t):
+ """
+ Record the fact that the given DocTest (`test`) generated `f`
+ failures out of `t` tried examples.
+ """
+ f2, t2 = self._name2ft.get(test.name, (0,0))
+ self._name2ft[test.name] = (f+f2, t+t2)
+ self.failures += f
+ self.tries += t
+
+ __LINECACHE_FILENAME_RE = re.compile(r'<doctest '
+ r'(?P<name>[\w\.]+)'
+ r'\[(?P<examplenum>\d+)\]>$')
+ def __patched_linecache_getlines(self, filename):
+ m = self.__LINECACHE_FILENAME_RE.match(filename)
+ if m and m.group('name') == self.test.name:
+ example = self.test.examples[int(m.group('examplenum'))]
+ return example.source.splitlines(True)
+ else:
+ return self.save_linecache_getlines(filename)
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ """
+ Run the examples in `test`, and display the results using the
+ writer function `out`.
+
+ The examples are run in the namespace `test.globs`. If
+ `clear_globs` is true (the default), then this namespace will
+ be cleared after the test runs, to help with garbage
+ collection. If you would like to examine the namespace after
+ the test completes, then use `clear_globs=False`.
+
+ `compileflags` gives the set of flags that should be used by
+ the Python compiler when running the examples. If not
+ specified, then it will default to the set of future-import
+ flags that apply to `globs`.
+
+ The output of each example is checked using
+ `DocTestRunner.check_output`, and the results are formatted by
+ the `DocTestRunner.report_*` methods.
+ """
+ self.test = test
+
+ if compileflags is None:
+ compileflags = _extract_future_flags(test.globs)
+
+ save_stdout = sys.stdout
+ if out is None:
+ out = save_stdout.write
+ sys.stdout = self._fakeout
+
+ # Patch pdb.set_trace to restore sys.stdout during interactive
+ # debugging (so it's not still redirected to self._fakeout).
+ # Note that the interactive output will go to *our*
+ # save_stdout, even if that's not the real sys.stdout; this
+ # allows us to write test cases for the set_trace behavior.
+ save_set_trace = pdb.set_trace
+ self.debugger = _OutputRedirectingPdb(save_stdout)
+ self.debugger.reset()
+ pdb.set_trace = self.debugger.set_trace
+
+ # Patch linecache.getlines, so we can see the example's source
+ # when we're inside the debugger.
+ self.save_linecache_getlines = linecache.getlines
+ linecache.getlines = self.__patched_linecache_getlines
+
+ try:
+ return self.__run(test, compileflags, out)
+ finally:
+ sys.stdout = save_stdout
+ pdb.set_trace = save_set_trace
+ linecache.getlines = self.save_linecache_getlines
+ if clear_globs:
+ test.globs.clear()
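+
+    # Typical use (illustrative, added commentary):
+    #
+    #     t = DocTestParser().get_doctest('>>> 2*2\n4\n', {}, 'demo',
+    #                                     None, 0)
+    #     DocTestRunner(verbose=False).run(t)   # returns (0, 1)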
+
+ #/////////////////////////////////////////////////////////////////
+ # Summarization
+ #/////////////////////////////////////////////////////////////////
+ def summarize(self, verbose=None):
+ """
+ Print a summary of all the test cases that have been run by
+ this DocTestRunner, and return a tuple `(f, t)`, where `f` is
+ the total number of failed examples, and `t` is the total
+ number of tried examples.
+
+ The optional `verbose` argument controls how detailed the
+ summary is. If the verbosity is not specified, then the
+ DocTestRunner's verbosity is used.
+ """
+ if verbose is None:
+ verbose = self._verbose
+ notests = []
+ passed = []
+ failed = []
+ totalt = totalf = 0
+ for x in self._name2ft.items():
+ name, (f, t) = x
+ assert f <= t
+ totalt += t
+ totalf += f
+ if t == 0:
+ notests.append(name)
+ elif f == 0:
+ passed.append( (name, t) )
+ else:
+ failed.append(x)
+ if verbose:
+ if notests:
+ print len(notests), "items had no tests:"
+ notests.sort()
+ for thing in notests:
+ print " ", thing
+ if passed:
+ print len(passed), "items passed all tests:"
+ passed.sort()
+ for thing, count in passed:
+ print " %3d tests in %s" % (count, thing)
+ if failed:
+ print self.DIVIDER
+ print len(failed), "items had failures:"
+ failed.sort()
+ for thing, (f, t) in failed:
+ print " %3d of %3d in %s" % (f, t, thing)
+ if verbose:
+ print totalt, "tests in", len(self._name2ft), "items."
+ print totalt - totalf, "passed and", totalf, "failed."
+ if totalf:
+ print "***Test Failed***", totalf, "failures."
+ elif verbose:
+ print "Test passed."
+ return totalf, totalt
+
+ #/////////////////////////////////////////////////////////////////
+ # Backward compatibility cruft to maintain doctest.master.
+ #/////////////////////////////////////////////////////////////////
+ def merge(self, other):
+ d = self._name2ft
+ for name, (f, t) in other._name2ft.items():
+ if name in d:
+ print "*** DocTestRunner.merge: '" + name + "' in both" \
+ " testers; summing outcomes."
+ f2, t2 = d[name]
+ f = f + f2
+ t = t + t2
+ d[name] = f, t
+
+class OutputChecker:
+ """
+    A class used to check whether the actual output from a doctest
+ example matches the expected output. `OutputChecker` defines two
+ methods: `check_output`, which compares a given pair of outputs,
+ and returns true if they match; and `output_difference`, which
+ returns a string describing the differences between two outputs.
+ """
+ def check_output(self, want, got, optionflags):
+ """
+ Return True iff the actual output from an example (`got`)
+ matches the expected output (`want`). These strings are
+ always considered to match if they are identical; but
+ depending on what option flags the test runner is using,
+ several non-exact match types are also possible. See the
+ documentation for `TestRunner` for more information about
+ option flags.
+ """
+ # Handle the common case first, for efficiency:
+ # if they're string-identical, always return true.
+ if got == want:
+ return True
+
+ # The values True and False replaced 1 and 0 as the return
+ # value for boolean comparisons in Python 2.3.
+ if not (optionflags & DONT_ACCEPT_TRUE_FOR_1):
+ if (got,want) == ("True\n", "1\n"):
+ return True
+ if (got,want) == ("False\n", "0\n"):
+ return True
+
+ # <BLANKLINE> can be used as a special sequence to signify a
+ # blank line, unless the DONT_ACCEPT_BLANKLINE flag is used.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ # Replace <BLANKLINE> in want with a blank line.
+ want = re.sub('(?m)^%s\s*?$' % re.escape(BLANKLINE_MARKER),
+ '', want)
+ # If a line in got contains only spaces, then remove the
+ # spaces.
+ got = re.sub('(?m)^\s*?$', '', got)
+ if got == want:
+ return True
+
+ # This flag causes doctest to ignore any differences in the
+ # contents of whitespace strings. Note that this can be used
+ # in conjunction with the ELLIPSIS flag.
+ if optionflags & NORMALIZE_WHITESPACE:
+ got = ' '.join(got.split())
+ want = ' '.join(want.split())
+ if got == want:
+ return True
+
+ # The ELLIPSIS flag says to let the sequence "..." in `want`
+ # match any substring in `got`.
+ if optionflags & ELLIPSIS:
+ if _ellipsis_match(want, got):
+ return True
+
+ # We didn't find any match; return false.
+ return False
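+
+    # Illustrative behavior (added commentary):
+    #
+    #     c = OutputChecker()
+    #     c.check_output('1\n', 'True\n', 0)                       # True
+    #     c.check_output('1\n', 'True\n', DONT_ACCEPT_TRUE_FOR_1)  # False
+    #     c.check_output('a ... z\n', 'a b c z\n', ELLIPSIS)       # True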
+
+ # Should we do a fancy diff?
+ def _do_a_fancy_diff(self, want, got, optionflags):
+ # Not unless they asked for a fancy diff.
+ if not optionflags & (REPORT_UDIFF |
+ REPORT_CDIFF |
+ REPORT_NDIFF):
+ return False
+
+ # If expected output uses ellipsis, a meaningful fancy diff is
+ # too hard ... or maybe not. In two real-life failures Tim saw,
+ # a diff was a major help anyway, so this is commented out.
+ # [todo] _ellipsis_match() knows which pieces do and don't match,
+ # and could be the basis for a kick-ass diff in this case.
+ ##if optionflags & ELLIPSIS and ELLIPSIS_MARKER in want:
+ ## return False
+
+ # ndiff does intraline difference marking, so can be useful even
+ # for 1-line differences.
+ if optionflags & REPORT_NDIFF:
+ return True
+
+ # The other diff types need at least a few lines to be helpful.
+ return want.count('\n') > 2 and got.count('\n') > 2
+
+ def output_difference(self, example, got, optionflags):
+ """
+ Return a string describing the differences between the
+ expected output for a given example (`example`) and the actual
+ output (`got`). `optionflags` is the set of option flags used
+ to compare `want` and `got`.
+ """
+ want = example.want
+ # If <BLANKLINE>s are being used, then replace blank lines
+ # with <BLANKLINE> in the actual output string.
+ if not (optionflags & DONT_ACCEPT_BLANKLINE):
+ got = re.sub('(?m)^[ ]*(?=\n)', BLANKLINE_MARKER, got)
+
+ # Check if we should use diff.
+ if self._do_a_fancy_diff(want, got, optionflags):
+ # Split want & got into lines.
+ want_lines = want.splitlines(True) # True == keep line ends
+ got_lines = got.splitlines(True)
+ # Use difflib to find their differences.
+ if optionflags & REPORT_UDIFF:
+ diff = difflib.unified_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'unified diff with -expected +actual'
+ elif optionflags & REPORT_CDIFF:
+ diff = difflib.context_diff(want_lines, got_lines, n=2)
+ diff = list(diff)[2:] # strip the diff header
+ kind = 'context diff with expected followed by actual'
+ elif optionflags & REPORT_NDIFF:
+ engine = difflib.Differ(charjunk=difflib.IS_CHARACTER_JUNK)
+ diff = list(engine.compare(want_lines, got_lines))
+ kind = 'ndiff with -expected +actual'
+ else:
+ assert 0, 'Bad diff option'
+ # Remove trailing whitespace on diff output.
+ diff = [line.rstrip() + '\n' for line in diff]
+ return 'Differences (%s):\n' % kind + _indent(''.join(diff))
+
+ # If we're not using diff, then simply list the expected
+ # output followed by the actual output.
+ if want and got:
+ return 'Expected:\n%sGot:\n%s' % (_indent(want), _indent(got))
+ elif want:
+ return 'Expected:\n%sGot nothing\n' % _indent(want)
+ elif got:
+ return 'Expected nothing\nGot:\n%s' % _indent(got)
+ else:
+ return 'Expected nothing\nGot nothing\n'
+
+class DocTestFailure(Exception):
+ """A DocTest example has failed in debugging mode.
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+ - got: the actual output
+ """
+ def __init__(self, test, example, got):
+ self.test = test
+ self.example = example
+ self.got = got
+
+ def __str__(self):
+ return str(self.test)
+
+class UnexpectedException(Exception):
+ """A DocTest example has encountered an unexpected exception
+
+ The exception instance has variables:
+
+ - test: the DocTest object being run
+
+    - example: the Example object that failed
+
+ - exc_info: the exception info
+ """
+ def __init__(self, test, example, exc_info):
+ self.test = test
+ self.example = example
+ self.exc_info = exc_info
+
+ def __str__(self):
+ return str(self.test)
+
+class DebugRunner(DocTestRunner):
+
+ def run(self, test, compileflags=None, out=None, clear_globs=True):
+ r = DocTestRunner.run(self, test, compileflags, out, False)
+ if clear_globs:
+ test.globs.clear()
+ return r
+
+ def report_unexpected_exception(self, out, test, example, exc_info):
+ raise UnexpectedException(test, example, exc_info)
+
+ def report_failure(self, out, test, example, got):
+ raise DocTestFailure(test, example, got)
+
+######################################################################
+## 6. Test Functions
+######################################################################
+# These should be backwards compatible.
+
+# For backward compatibility, a global instance of a DocTestRunner
+# class, updated by testmod.
+master = None
+
+def testmod(m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None,
+ raise_on_error=False, exclude_empty=False):
+ """m=None, name=None, globs=None, verbose=None, isprivate=None,
+ report=True, optionflags=0, extraglobs=None, raise_on_error=False,
+ exclude_empty=False
+
+ Test examples in docstrings in functions and classes reachable
+ from module m (or the current module if m is not supplied), starting
+ with m.__doc__. Unless isprivate is specified, private names
+ are not skipped.
+
+ Also test examples reachable from dict m.__test__ if it exists and is
+ not None. m.__test__ maps names to functions, classes and strings;
+ function and class docstrings are tested even if the name is private;
+ strings are tested directly, as if they were docstrings.
+
+ Return (#failures, #tests).
+
+ See doctest.__doc__ for an overview.
+
+ Optional keyword arg "name" gives the name of the module; by default
+ use m.__name__.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use m.__dict__. A copy of this
+ dict is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used. This is new in 2.4.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. This is new in 2.3. Possible values (see the
+ docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Deprecated in Python 2.4:
+ Optional keyword arg "isprivate" specifies a function used to
+    determine whether a name is private. The default function is to
+ treat all functions as public. Optionally, "isprivate" can be
+ set to doctest.is_private to skip over functions marked as private
+ using the underscore naming convention; see its docs for details.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if isprivate is not None:
+ warnings.warn("the isprivate argument is deprecated; "
+ "examine DocTestFinder.find() lists instead",
+ DeprecationWarning)
+
+ # If no module was given, then use __main__.
+ if m is None:
+ # DWA - m will still be None if this wasn't invoked from the command
+ # line, in which case the following TypeError is about as good an error
+ # as we should expect
+ m = sys.modules.get('__main__')
+
+ # Check that we were actually given a module.
+ if not inspect.ismodule(m):
+ raise TypeError("testmod: module required; %r" % (m,))
+
+ # If no name was given, then use the module's name.
+ if name is None:
+ name = m.__name__
+
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(_namefilter=isprivate, exclude_empty=exclude_empty)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ for test in finder.find(m, name, globs=globs, extraglobs=extraglobs):
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
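+
+# e.g. (illustrative; `mymodule` is hypothetical):
+#
+#     import mymodule
+#     failures, tried = testmod(mymodule, verbose=False)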
+
+def testfile(filename, module_relative=True, name=None, package=None,
+ globs=None, verbose=None, report=True, optionflags=0,
+ extraglobs=None, raise_on_error=False, parser=DocTestParser()):
+ """
+ Test examples in the given file. Return (#failures, #tests).
+
+ Optional keyword arg "module_relative" specifies how filenames
+ should be interpreted:
+
+ - If "module_relative" is True (the default), then "filename"
+ specifies a module-relative path. By default, this path is
+ relative to the calling module's directory; but if the
+ "package" argument is specified, then it is relative to that
+ package. To ensure os-independence, "filename" should use
+ "/" characters to separate path segments, and should not
+ be an absolute path (i.e., it may not begin with "/").
+
+ - If "module_relative" is False, then "filename" specifies an
+ os-specific path. The path may be absolute or relative (to
+ the current working directory).
+
+ Optional keyword arg "name" gives the name of the test; by default
+ use the file's basename.
+
+ Optional keyword argument "package" is a Python package or the
+ name of a Python package whose directory should be used as the
+ base directory for a module relative filename. If no package is
+ specified, then the calling module's directory is used as the base
+ directory for module relative filenames. It is an error to
+ specify "package" if "module_relative" is False.
+
+ Optional keyword arg "globs" gives a dict to be used as the globals
+ when executing examples; by default, use {}. A copy of this dict
+ is actually used for each docstring, so that each docstring's
+ examples start with a clean slate.
+
+ Optional keyword arg "extraglobs" gives a dictionary that should be
+ merged into the globals that are used to execute examples. By
+ default, no extra globals are used.
+
+ Optional keyword arg "verbose" prints lots of stuff if true, prints
+ only failures if false; by default, it's true iff "-v" is in sys.argv.
+
+ Optional keyword arg "report" prints a summary at the end when true,
+ else prints nothing at the end. In verbose mode, the summary is
+ detailed, else very brief (in fact, empty if all tests passed).
+
+ Optional keyword arg "optionflags" or's together module constants,
+ and defaults to 0. Possible values (see the docs for details):
+
+ DONT_ACCEPT_TRUE_FOR_1
+ DONT_ACCEPT_BLANKLINE
+ NORMALIZE_WHITESPACE
+ ELLIPSIS
+ IGNORE_EXCEPTION_DETAIL
+ REPORT_UDIFF
+ REPORT_CDIFF
+ REPORT_NDIFF
+ REPORT_ONLY_FIRST_FAILURE
+
+ Optional keyword arg "raise_on_error" raises an exception on the
+ first unexpected exception or failure. This allows failures to be
+ post-mortem debugged.
+
+ Optional keyword arg "parser" specifies a DocTestParser (or
+ subclass) that should be used to extract tests from the files.
+
+ Advanced tomfoolery: testmod runs methods of a local instance of
+ class doctest.Tester, then merges the results into (or creates)
+ global Tester instance doctest.master. Methods of doctest.master
+ can be called directly too, if you want to do something unusual.
+ Passing report=0 to testmod is especially useful then, to delay
+ displaying a summary. Invoke doctest.master.summarize(verbose)
+ when you're done fiddling.
+ """
+ global master
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path
+ if module_relative:
+ package = _normalize_module(package)
+ filename = _module_relative_path(package, filename)
+
+ # If no name was given, then use the file's name.
+ if name is None:
+ name = os.path.basename(filename)
+
+ # Assemble the globals.
+ if globs is None:
+ globs = {}
+ else:
+ globs = globs.copy()
+ if extraglobs is not None:
+ globs.update(extraglobs)
+
+ if raise_on_error:
+ runner = DebugRunner(verbose=verbose, optionflags=optionflags)
+ else:
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+
+ # Read the file, convert it to a test, and run it.
+ s = open(filename).read()
+ test = parser.get_doctest(s, globs, name, filename, 0)
+ runner.run(test)
+
+ if report:
+ runner.summarize()
+
+ if master is None:
+ master = runner
+ else:
+ master.merge(runner)
+
+ return runner.failures, runner.tries
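+
+# Editor's sketch (not upstream code): checking a module-relative file of
+# examples with two common option flags. The file path is hypothetical.
+def _example_testfile_usage():
+    return testfile("docs/examples.txt",
+                    optionflags=NORMALIZE_WHITESPACE | ELLIPSIS)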
+
+def run_docstring_examples(f, globs, verbose=False, name="NoName",
+ compileflags=None, optionflags=0):
+ """
+ Test examples in the given object's docstring (`f`), using `globs`
+ as globals. Optional argument `name` is used in failure messages.
+ If the optional argument `verbose` is true, then generate output
+ even if there are no failures.
+
+ `compileflags` gives the set of flags that should be used by the
+ Python compiler when running the examples. If not specified, then
+ it will default to the set of future-import flags that apply to
+ `globs`.
+
+ Optional keyword arg `optionflags` specifies options for the
+ testing and output. See the documentation for `testmod` for more
+ information.
+ """
+ # Find, parse, and run all tests in the given module.
+ finder = DocTestFinder(verbose=verbose, recurse=False)
+ runner = DocTestRunner(verbose=verbose, optionflags=optionflags)
+ for test in finder.find(f, name, globs=globs):
+ runner.run(test, compileflags=compileflags)
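+
+# Editor's sketch (not upstream code): exercising one function's docstring
+# examples in isolation with run_docstring_examples().
+def _example_run_docstring_examples():
+    def square(x):
+        """
+        >>> square(3)
+        9
+        """
+        return x * x
+    run_docstring_examples(square, {'square': square}, name='square')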
+
+######################################################################
+## 7. Tester
+######################################################################
+# This is provided only for backwards compatibility. It's not
+# actually used in any way.
+
+class Tester:
+ def __init__(self, mod=None, globs=None, verbose=None,
+ isprivate=None, optionflags=0):
+
+ warnings.warn("class Tester is deprecated; "
+ "use class doctest.DocTestRunner instead",
+ DeprecationWarning, stacklevel=2)
+ if mod is None and globs is None:
+ raise TypeError("Tester.__init__: must specify mod or globs")
+ if mod is not None and not inspect.ismodule(mod):
+ raise TypeError("Tester.__init__: mod must be a module; %r" %
+ (mod,))
+ if globs is None:
+ globs = mod.__dict__
+ self.globs = globs
+
+ self.verbose = verbose
+ self.isprivate = isprivate
+ self.optionflags = optionflags
+ self.testfinder = DocTestFinder(_namefilter=isprivate)
+ self.testrunner = DocTestRunner(verbose=verbose,
+ optionflags=optionflags)
+
+ def runstring(self, s, name):
+ test = DocTestParser().get_doctest(s, self.globs, name, None, None)
+ if self.verbose:
+ print "Running string", name
+ (f,t) = self.testrunner.run(test)
+ if self.verbose:
+ print f, "of", t, "examples failed in string", name
+ return (f,t)
+
+ def rundoc(self, object, name=None, module=None):
+ f = t = 0
+ tests = self.testfinder.find(object, name, module=module,
+ globs=self.globs)
+ for test in tests:
+ (f2, t2) = self.testrunner.run(test)
+ (f,t) = (f+f2, t+t2)
+ return (f,t)
+
+ def rundict(self, d, name, module=None):
+ import new
+ m = new.module(name)
+ m.__dict__.update(d)
+ if module is None:
+ module = False
+ return self.rundoc(m, name, module)
+
+ def run__test__(self, d, name):
+ import new
+ m = new.module(name)
+ m.__test__ = d
+ return self.rundoc(m, name)
+
+ def summarize(self, verbose=None):
+ return self.testrunner.summarize(verbose)
+
+ def merge(self, other):
+ self.testrunner.merge(other.testrunner)
+
+######################################################################
+## 8. Unittest Support
+######################################################################
+
+_unittest_reportflags = 0
+
+def set_unittest_reportflags(flags):
+ global _unittest_reportflags
+
+ if (flags & REPORTING_FLAGS) != flags:
+ raise ValueError("Only reporting flags allowed", flags)
+ old = _unittest_reportflags
+ _unittest_reportflags = flags
+ return old
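+
+# Editor's sketch: selecting unified-diff failure reports for doctests run
+# under unittest, restoring the previous flags afterwards (the function
+# returns the old flags for exactly this purpose).
+def _example_set_unittest_reportflags():
+    old = set_unittest_reportflags(REPORT_UDIFF)
+    try:
+        pass  # run unittest-integrated doctest suites here
+    finally:
+        set_unittest_reportflags(old)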
+
+
+class DocTestCase(unittest.TestCase):
+
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None):
+
+ unittest.TestCase.__init__(self)
+ self._dt_optionflags = optionflags
+ self._dt_checker = checker
+ self._dt_test = test
+ self._dt_setUp = setUp
+ self._dt_tearDown = tearDown
+
+ def setUp(self):
+ test = self._dt_test
+
+ if self._dt_setUp is not None:
+ self._dt_setUp(test)
+
+ def tearDown(self):
+ test = self._dt_test
+
+ if self._dt_tearDown is not None:
+ self._dt_tearDown(test)
+
+ test.globs.clear()
+
+ def runTest(self):
+ test = self._dt_test
+ old = sys.stdout
+ new = StringIO()
+ optionflags = self._dt_optionflags
+
+ if not (optionflags & REPORTING_FLAGS):
+ # The option flags don't include any reporting flags,
+ # so add the default reporting flags
+ optionflags |= _unittest_reportflags
+
+ runner = DocTestRunner(optionflags=optionflags,
+ checker=self._dt_checker, verbose=False)
+
+ try:
+ runner.DIVIDER = "-"*70
+ failures, tries = runner.run(
+ test, out=new.write, clear_globs=False)
+ finally:
+ sys.stdout = old
+
+ if failures:
+ raise self.failureException(self.format_failure(new.getvalue()))
+
+ def format_failure(self, err):
+ test = self._dt_test
+ if test.lineno is None:
+ lineno = 'unknown line number'
+ else:
+ lineno = '%s' % test.lineno
+ lname = '.'.join(test.name.split('.')[-1:])
+ return ('Failed doctest test for %s\n'
+ ' File "%s", line %s, in %s\n\n%s'
+ % (test.name, test.filename, lineno, lname, err)
+ )
+
+ def debug(self):
+ self.setUp()
+ runner = DebugRunner(optionflags=self._dt_optionflags,
+ checker=self._dt_checker, verbose=False)
+ runner.run(self._dt_test)
+ self.tearDown()
+
+ def id(self):
+ return self._dt_test.name
+
+ def __repr__(self):
+ name = self._dt_test.name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return "Doctest: " + self._dt_test.name
+
+def DocTestSuite(module=None, globs=None, extraglobs=None, test_finder=None,
+ **options):
+ """
+ Convert doctest tests for a module to a unittest test suite.
+
+ This converts each documentation string in a module that
+ contains doctest tests to a unittest test case. If any of the
+ tests in a doc string fail, then the test case fails. An exception
+ is raised showing the name of the file containing the test and a
+ (sometimes approximate) line number.
+
+ The `module` argument provides the module to be tested. The argument
+ can be either a module or a module name.
+
+ If no argument is given, the calling module is used.
+
+ A number of options may be provided as keyword arguments:
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+ """
+
+ if test_finder is None:
+ test_finder = DocTestFinder()
+
+ module = _normalize_module(module)
+ tests = test_finder.find(module, globs=globs, extraglobs=extraglobs)
+ if globs is None:
+ globs = module.__dict__
+ if not tests:
+ # Why do we want to do this? Because it reveals a bug that might
+ # otherwise be hidden.
+ raise ValueError(module, "has no tests")
+
+ tests.sort()
+ suite = unittest.TestSuite()
+ for test in tests:
+ if len(test.examples) == 0:
+ continue
+ if not test.filename:
+ filename = module.__file__
+ if filename[-4:] in (".pyc", ".pyo"):
+ filename = filename[:-1]
+ elif sys.platform.startswith('java') and \
+ filename.endswith('$py.class'):
+ filename = '%s.py' % filename[:-9]
+ test.filename = filename
+ suite.addTest(DocTestCase(test, **options))
+
+ return suite
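+
+# Editor's sketch (hypothetical module name): collecting a module's
+# doctests into a unittest suite and running it with a text runner.
+def _example_doctestsuite():
+    suite = DocTestSuite('mypackage.mymodule', optionflags=ELLIPSIS)
+    unittest.TextTestRunner(verbosity=2).run(suite)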
+
+class DocFileCase(DocTestCase):
+
+ def id(self):
+ return '_'.join(self._dt_test.name.split('.'))
+
+ def __repr__(self):
+ return self._dt_test.filename
+ __str__ = __repr__
+
+ def format_failure(self, err):
+ return ('Failed doctest test for %s\n File "%s", line 0\n\n%s'
+ % (self._dt_test.name, self._dt_test.filename, err)
+ )
+
+def DocFileTest(path, module_relative=True, package=None,
+ globs=None, parser=DocTestParser(), **options):
+ if globs is None:
+ globs = {}
+
+ if package and not module_relative:
+ raise ValueError("Package may only be specified for module-"
+ "relative paths.")
+
+ # Relativize the path.
+ if module_relative:
+ package = _normalize_module(package)
+ path = _module_relative_path(package, path)
+
+ # Find the file and read it.
+ name = os.path.basename(path)
+ doc = open(path).read()
+
+ # Convert it to a test, and wrap it in a DocFileCase.
+ test = parser.get_doctest(doc, globs, name, path, 0)
+ return DocFileCase(test, **options)
+
+def DocFileSuite(*paths, **kw):
+ """A unittest suite for one or more doctest files.
+
+ The path to each doctest file is given as a string; the
+ interpretation of that string depends on the keyword argument
+ "module_relative".
+
+ A number of options may be provided as keyword arguments:
+
+ module_relative
+ If "module_relative" is True, then the given file paths are
+ interpreted as os-independent module-relative paths. By
+ default, these paths are relative to the calling module's
+ directory; but if the "package" argument is specified, then
+ they are relative to that package. To ensure os-independence,
+ "filename" should use "/" characters to separate path
+ segments, and may not be an absolute path (i.e., it may not
+ begin with "/").
+
+ If "module_relative" is False, then the given file paths are
+ interpreted as os-specific paths. These paths may be absolute
+ or relative (to the current working directory).
+
+ package
+ A Python package or the name of a Python package whose directory
+ should be used as the base directory for module relative paths.
+ If "package" is not specified, then the calling module's
+ directory is used as the base directory for module relative
+ filenames. It is an error to specify "package" if
+ "module_relative" is False.
+
+ setUp
+ A set-up function. This is called before running the
+ tests in each file. The setUp function will be passed a DocTest
+ object. The setUp function can access the test globals as the
+ globs attribute of the test passed.
+
+ tearDown
+ A tear-down function. This is called after running the
+ tests in each file. The tearDown function will be passed a DocTest
+ object. The tearDown function can access the test globals as the
+ globs attribute of the test passed.
+
+ globs
+ A dictionary containing initial global variables for the tests.
+
+ optionflags
+ A set of doctest option flags expressed as an integer.
+
+ parser
+ A DocTestParser (or subclass) that should be used to extract
+ tests from the files.
+ """
+ suite = unittest.TestSuite()
+
+ # We do this here so that _normalize_module is called at the right
+ # level. If it were called in DocFileTest, then this function
+ # would be the caller and we might guess the package incorrectly.
+ if kw.get('module_relative', True):
+ kw['package'] = _normalize_module(kw.get('package'))
+
+ for path in paths:
+ suite.addTest(DocFileTest(path, **kw))
+
+ return suite
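+
+# Editor's sketch: combining several doctest files (hypothetical names)
+# into one suite, with a setUp hook seeding each test's globals.
+def _example_docfilesuite():
+    def seed(test):
+        test.globs['answer'] = 42
+    return DocFileSuite('intro.txt', 'advanced.txt',
+                        optionflags=ELLIPSIS, setUp=seed)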
+
+######################################################################
+## 9. Debugging Support
+######################################################################
+
+def script_from_examples(s):
+ output = []
+ for piece in DocTestParser().parse(s):
+ if isinstance(piece, Example):
+ # Add the example's source code (strip trailing NL)
+ output.append(piece.source[:-1])
+ # Add the expected output:
+ want = piece.want
+ if want:
+ output.append('# Expected:')
+ output += ['## '+l for l in want.split('\n')[:-1]]
+ else:
+ # Add non-example text.
+ output += [_comment_line(l)
+ for l in piece.split('\n')[:-1]]
+
+ # Trim junk on both ends.
+ while output and output[-1] == '#':
+ output.pop()
+ while output and output[0] == '#':
+ output.pop(0)
+ # Combine the output, and return it.
+ # Add a courtesy newline to prevent exec from choking (see bug #1172785)
+ return '\n'.join(output) + '\n'
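+
+# Editor's sketch: what script_from_examples() produces for a tiny
+# docstring -- prose becomes '#' comments, expected output becomes
+# '# Expected:' / '##' lines (output shown approximately).
+def _example_script_from_examples():
+    s = """Text before the example.
+    >>> 1 + 1
+    2
+    """
+    print script_from_examples(s)
+    # Roughly:
+    #   # Text before the example.
+    #   1 + 1
+    #   # Expected:
+    #   ## 2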
+
+def testsource(module, name):
+ """Extract the test sources from a doctest docstring as a script.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the doc string with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ tests = DocTestFinder().find(module)
+ test = [t for t in tests if t.name == name]
+ if not test:
+ raise ValueError(name, "not found in tests")
+ test = test[0]
+ testsrc = script_from_examples(test.docstring)
+ return testsrc
+
+def debug_src(src, pm=False, globs=None):
+ """Debug a single doctest docstring, in argument `src`'"""
+ testsrc = script_from_examples(src)
+ debug_script(testsrc, pm, globs)
+
+def debug_script(src, pm=False, globs=None):
+ "Debug a test script. `src` is the script, as a string."
+ import pdb
+
+ # Note that tempfile.NamedTemporaryFile() cannot be used. As the
+ # docs say, a file so created cannot be opened by name a second time
+ # on modern Windows boxes, and execfile() needs to open it.
+ srcfilename = tempfile.mktemp(".py", "doctestdebug")
+ f = open(srcfilename, 'w')
+ f.write(src)
+ f.close()
+
+ try:
+ if globs:
+ globs = globs.copy()
+ else:
+ globs = {}
+
+ if pm:
+ try:
+ execfile(srcfilename, globs, globs)
+ except:
+ print sys.exc_info()[1]
+ pdb.post_mortem(sys.exc_info()[2])
+ else:
+ # Note that %r is vital here. '%s' instead can, e.g., cause
+ # backslashes to get treated as metacharacters on Windows.
+ pdb.run("execfile(%r)" % srcfilename, globs, globs)
+
+ finally:
+ os.remove(srcfilename)
+
+def debug(module, name, pm=False):
+ """Debug a single doctest docstring.
+
+ Provide the module (or dotted name of the module) containing the
+ test to be debugged and the name (within the module) of the object
+ with the docstring with tests to be debugged.
+ """
+ module = _normalize_module(module)
+ testsrc = testsource(module, name)
+ debug_script(testsrc, pm, module.__dict__)
+
+
+__test__ = {}
diff --git a/scripts/external_libs/nose-1.3.4/nose/failure.py b/scripts/external_libs/nose-1.3.4/nose/failure.py
new file mode 100755
index 00000000..c5fabfda
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/failure.py
@@ -0,0 +1,42 @@
+import logging
+import unittest
+from traceback import format_tb
+from nose.pyversion import is_base_exception
+
+log = logging.getLogger(__name__)
+
+
+__all__ = ['Failure']
+
+
+class Failure(unittest.TestCase):
+ """Unloadable or unexecutable test.
+
+ A Failure case is placed in a test suite to indicate the presence of a
+ test that could not be loaded or executed. A common example is a test
+ module that fails to import.
+
+ """
+ __test__ = False # do not collect
+ def __init__(self, exc_class, exc_val, tb=None, address=None):
+ log.debug("A failure! %s %s %s", exc_class, exc_val, format_tb(tb))
+ self.exc_class = exc_class
+ self.exc_val = exc_val
+ self.tb = tb
+ self._address = address
+ unittest.TestCase.__init__(self)
+
+ def __str__(self):
+ return "Failure: %s (%s)" % (
+ getattr(self.exc_class, '__name__', self.exc_class), self.exc_val)
+
+ def address(self):
+ return self._address
+
+ def runTest(self):
+ if self.tb is not None:
+ if is_base_exception(self.exc_val):
+ raise self.exc_val, None, self.tb
+ raise self.exc_class, self.exc_val, self.tb
+ else:
+ raise self.exc_class(self.exc_val)
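+
+# Editor's sketch (not upstream code): how a loader can surface an import
+# error as a runnable test case by wrapping the exception in a Failure.
+def _example_failure_usage():
+    import sys
+    try:
+        import missing_module  # hypothetical module that fails to import
+    except ImportError:
+        exc_class, exc_val, tb = sys.exc_info()
+        return unittest.TestSuite([Failure(exc_class, exc_val, tb)])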
diff --git a/scripts/external_libs/nose-1.3.4/nose/importer.py b/scripts/external_libs/nose-1.3.4/nose/importer.py
new file mode 100755
index 00000000..e677658c
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/importer.py
@@ -0,0 +1,167 @@
+"""Implements an importer that looks only in specific path (ignoring
+sys.path), and uses a per-path cache in addition to sys.modules. This is
+necessary because test modules in different directories frequently have the
+same names, which means that the first loaded would mask the rest when using
+the builtin importer.
+"""
+import logging
+import os
+import sys
+from nose.config import Config
+
+from imp import find_module, load_module, acquire_lock, release_lock
+
+log = logging.getLogger(__name__)
+
+try:
+ _samefile = os.path.samefile
+except AttributeError:
+ def _samefile(src, dst):
+ return (os.path.normcase(os.path.realpath(src)) ==
+ os.path.normcase(os.path.realpath(dst)))
+
+
+class Importer(object):
+ """An importer class that does only path-specific imports. That
+ is, the given module is not searched for on sys.path, but only at
+ the path or in the directory specified.
+ """
+ def __init__(self, config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+
+ def importFromPath(self, path, fqname):
+ """Import a dotted-name package whose tail is at path. In other words,
+ given foo.bar and path/to/foo/bar.py, import foo from path/to/foo then
+ bar from path/to/foo/bar, returning bar.
+ """
+ # find the base dir of the package
+ path_parts = os.path.normpath(os.path.abspath(path)).split(os.sep)
+ name_parts = fqname.split('.')
+ if path_parts[-1] == '__init__.py':
+ path_parts.pop()
+ path_parts = path_parts[:-(len(name_parts))]
+ dir_path = os.sep.join(path_parts)
+ # then import fqname starting from that dir
+ return self.importFromDir(dir_path, fqname)
+
+ def importFromDir(self, dir, fqname):
+ """Import a module *only* from path, ignoring sys.path and
+ reloading if the version in sys.modules is not the one we want.
+ """
+ dir = os.path.normpath(os.path.abspath(dir))
+ log.debug("Import %s from %s", fqname, dir)
+
+ # FIXME reimplement local per-dir cache?
+
+ # special case for __main__
+ if fqname == '__main__':
+ return sys.modules[fqname]
+
+ if self.config.addPaths:
+ add_path(dir, self.config)
+
+ path = [dir]
+ parts = fqname.split('.')
+ part_fqname = ''
+ mod = parent = fh = None
+
+ for part in parts:
+ if part_fqname == '':
+ part_fqname = part
+ else:
+ part_fqname = "%s.%s" % (part_fqname, part)
+ try:
+ acquire_lock()
+ log.debug("find module part %s (%s) in %s",
+ part, part_fqname, path)
+ fh, filename, desc = find_module(part, path)
+ old = sys.modules.get(part_fqname)
+ if old is not None:
+ # test modules frequently have name overlap; make sure
+ # we get a fresh copy of anything we are trying to load
+ # from a new path
+ log.debug("sys.modules has %s as %s", part_fqname, old)
+ if (self.sameModule(old, filename)
+ or (self.config.firstPackageWins and
+ getattr(old, '__path__', None))):
+ mod = old
+ else:
+ del sys.modules[part_fqname]
+ mod = load_module(part_fqname, fh, filename, desc)
+ else:
+ mod = load_module(part_fqname, fh, filename, desc)
+ finally:
+ if fh:
+ fh.close()
+ release_lock()
+ if parent:
+ setattr(parent, part, mod)
+ if hasattr(mod, '__path__'):
+ path = mod.__path__
+ parent = mod
+ return mod
+
+ def _dirname_if_file(self, filename):
+ # We only take the dirname if we have a path to a non-dir,
+ # because taking the dirname of a symlink to a directory does not
+ # give the actual directory parent.
+ if os.path.isdir(filename):
+ return filename
+ else:
+ return os.path.dirname(filename)
+
+ def sameModule(self, mod, filename):
+ mod_paths = []
+ if hasattr(mod, '__path__'):
+ for path in mod.__path__:
+ mod_paths.append(self._dirname_if_file(path))
+ elif hasattr(mod, '__file__'):
+ mod_paths.append(self._dirname_if_file(mod.__file__))
+ else:
+ # builtin or other module-like object that
+ # doesn't have __file__; must be new
+ return False
+ new_path = self._dirname_if_file(filename)
+ for mod_path in mod_paths:
+ log.debug(
+ "module already loaded? mod: %s new: %s",
+ mod_path, new_path)
+ if _samefile(mod_path, new_path):
+ return True
+ return False
+
+
+def add_path(path, config=None):
+ """Ensure that the path, or the root of the current package (if
+ path is in a package), is in sys.path.
+ """
+
+ # FIXME add any src-looking dirs seen too... need to get config for that
+
+ log.debug('Add path %s' % path)
+ if not path:
+ return []
+ added = []
+ parent = os.path.dirname(path)
+ if (parent
+ and os.path.exists(os.path.join(path, '__init__.py'))):
+ added.extend(add_path(parent, config))
+ elif not path in sys.path:
+ log.debug("insert %s into sys.path", path)
+ sys.path.insert(0, path)
+ added.append(path)
+ if config and config.srcDirs:
+ for dirname in config.srcDirs:
+ dirpath = os.path.join(path, dirname)
+ if os.path.isdir(dirpath):
+ sys.path.insert(0, dirpath)
+ added.append(dirpath)
+ return added
+
+
+def remove_path(path):
+ log.debug('Remove path %s' % path)
+ if path in sys.path:
+ sys.path.remove(path)
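+
+# Editor's sketch (hypothetical paths): importing a test module strictly
+# from a given location, bypassing sys.path lookup.
+def _example_importer_usage():
+    imp = Importer()
+    # imports 'tests' from /projects/app, then tests.test_api from
+    # /projects/app/tests, returning the test_api module
+    return imp.importFromPath('/projects/app/tests/test_api.py',
+                              'tests.test_api')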
diff --git a/scripts/external_libs/nose-1.3.4/nose/inspector.py b/scripts/external_libs/nose-1.3.4/nose/inspector.py
new file mode 100755
index 00000000..a6c4a3e3
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/inspector.py
@@ -0,0 +1,207 @@
+"""Simple traceback introspection. Used to add additional information to
+AssertionErrors in tests, so that failure messages may be more informative.
+"""
+import inspect
+import logging
+import re
+import sys
+import textwrap
+import tokenize
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
+def inspect_traceback(tb):
+ """Inspect a traceback and its frame, returning source for the expression
+ where the exception was raised, with simple variable replacement performed
+ and the line on which the exception was raised marked with '>>'
+ """
+ log.debug('inspect traceback %s', tb)
+
+ # we only want the innermost frame, where the exception was raised
+ while tb.tb_next:
+ tb = tb.tb_next
+
+ frame = tb.tb_frame
+ lines, exc_line = tbsource(tb)
+
+ # figure out the set of lines to grab.
+ inspect_lines, mark_line = find_inspectable_lines(lines, exc_line)
+ src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+ exp = Expander(frame.f_locals, frame.f_globals)
+
+ while inspect_lines:
+ try:
+ for tok in tokenize.generate_tokens(src.readline):
+ exp(*tok)
+ except tokenize.TokenError, e:
+ # this can happen if our inspectable region happens to butt up
+ # against the end of a construct like a docstring with the closing
+ # """ on separate line
+ log.debug("Tokenizer error: %s", e)
+ inspect_lines.pop(0)
+ mark_line -= 1
+ src = StringIO(textwrap.dedent(''.join(inspect_lines)))
+ exp = Expander(frame.f_locals, frame.f_globals)
+ continue
+ break
+ padded = []
+ if exp.expanded_source:
+ exp_lines = exp.expanded_source.split('\n')
+ ep = 0
+ for line in exp_lines:
+ if ep == mark_line:
+ padded.append('>> ' + line)
+ else:
+ padded.append(' ' + line)
+ ep += 1
+ return '\n'.join(padded)
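+
+# Editor's sketch: feeding a failed assertion's traceback to
+# inspect_traceback() to print the annotated, variable-expanded source.
+def _example_inspect_traceback():
+    expected = 3
+    try:
+        assert 1 + 1 == expected
+    except AssertionError:
+        print inspect_traceback(sys.exc_info()[2])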
+
+
+def tbsource(tb, context=6):
+ """Get source from a traceback object.
+
+ A tuple of two things is returned: a list of lines of context from
+ the source code, and the index of the current line within that list.
+ The optional second argument specifies the number of lines of context
+ to return, which are centered around the current line.
+
+ .. Note ::
+ This is adapted from inspect.py in the python 2.4 standard library,
+ since a bug in the 2.3 version of inspect prevents it from correctly
+ locating source lines in a traceback frame.
+ """
+
+ lineno = tb.tb_lineno
+ frame = tb.tb_frame
+
+ if context > 0:
+ start = lineno - 1 - context//2
+ log.debug("lineno: %s start: %s", lineno, start)
+
+ try:
+ lines, dummy = inspect.findsource(frame)
+ except IOError:
+ lines, index = [''], 0
+ else:
+ all_lines = lines
+ start = max(start, 1)
+ start = max(0, min(start, len(lines) - context))
+ lines = lines[start:start+context]
+ index = lineno - 1 - start
+
+ # python 2.5 compat: if previous line ends in a continuation,
+ # decrement start by 1 to match 2.4 behavior
+ if sys.version_info >= (2, 5) and index > 0:
+ while lines[index-1].strip().endswith('\\'):
+ start -= 1
+ lines = all_lines[start:start+context]
+ else:
+ lines, index = [''], 0
+ log.debug("tbsource lines '''%s''' around index %s", lines, index)
+ return (lines, index)
+
+
+def find_inspectable_lines(lines, pos):
+ """Find lines in home that are inspectable.
+
+ Walk back from the err line up to 3 lines, but don't walk back over
+ changes in indent level.
+
+ Walk forward up to 3 lines, counting backslash-continued lines as 1.
+ Don't walk over changes in indent level (unless part of an extended line).
+ """
+ cnt = re.compile(r'\\[\s\n]*$')
+ df = re.compile(r':[\s\n]*$')
+ ind = re.compile(r'^(\s*)')
+ toinspect = []
+ home = lines[pos]
+ home_indent = ind.match(home).groups()[0]
+
+ before = lines[max(pos-3, 0):pos]
+ before.reverse()
+ after = lines[pos+1:min(pos+4, len(lines))]
+
+ for line in before:
+ if ind.match(line).groups()[0] == home_indent:
+ toinspect.append(line)
+ else:
+ break
+ toinspect.reverse()
+ toinspect.append(home)
+ home_pos = len(toinspect)-1
+ continued = cnt.search(home)
+ for line in after:
+ if ((continued or ind.match(line).groups()[0] == home_indent)
+ and not df.search(line)):
+ toinspect.append(line)
+ continued = cnt.search(line)
+ else:
+ break
+ log.debug("Inspecting lines '''%s''' around %s", toinspect, home_pos)
+ return toinspect, home_pos
+
+
+class Expander:
+ """Simple expression expander. Uses tokenize to find the names and
+ expands any that can be looked up in the frame.
+ """
+ def __init__(self, locals, globals):
+ self.locals = locals
+ self.globals = globals
+ self.lpos = None
+ self.expanded_source = ''
+
+ def __call__(self, ttype, tok, start, end, line):
+ # TODO
+ # deal with unicode properly
+
+ # TODO
+ # Dealing with instance members
+ # always keep the last thing seen
+ # if the current token is a dot,
+ # get ready to getattr(lastthing, this thing) on the
+ # next call.
+
+ if self.lpos is not None:
+ if start[1] >= self.lpos:
+ self.expanded_source += ' ' * (start[1]-self.lpos)
+ elif start[1] < self.lpos:
+ # newline, indent correctly
+ self.expanded_source += ' ' * start[1]
+ self.lpos = end[1]
+
+ if ttype == tokenize.INDENT:
+ pass
+ elif ttype == tokenize.NAME:
+ # Clean this junk up
+ try:
+ val = self.locals[tok]
+ if callable(val):
+ val = tok
+ else:
+ val = repr(val)
+ except KeyError:
+ try:
+ val = self.globals[tok]
+ if callable(val):
+ val = tok
+ else:
+ val = repr(val)
+
+ except KeyError:
+ val = tok
+ # FIXME... not sure how to handle things like funcs, classes
+ # FIXME this is broken for some unicode strings
+ self.expanded_source += val
+ else:
+ self.expanded_source += tok
+ # if this is the end of the line and the line ends with
+ # \, then tack a \ and newline onto the output
+ # print line[end[1]:]
+ if re.match(r'\s+\\\n', line[end[1]:]):
+ self.expanded_source += ' \\\n'
diff --git a/scripts/external_libs/nose-1.3.4/nose/loader.py b/scripts/external_libs/nose-1.3.4/nose/loader.py
new file mode 100755
index 00000000..966b6dc7
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/loader.py
@@ -0,0 +1,619 @@
+"""
+Test Loader
+-----------
+
+nose's test loader implements the same basic functionality as its
+superclass, unittest.TestLoader, but extends it by more liberal
+interpretations of what may be a test and how a test may be named.
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+import types
+from inspect import isfunction
+from nose.pyversion import unbound_method, ismethod
+from nose.case import FunctionTestCase, MethodTestCase
+from nose.failure import Failure
+from nose.config import Config
+from nose.importer import Importer, add_path, remove_path
+from nose.selector import defaultSelector, TestAddress
+from nose.util import func_lineno, getpackage, isclass, isgenerator, \
+ ispackage, regex_last_key, resolve_name, transplant_func, \
+ transplant_class, test_address
+from nose.suite import ContextSuiteFactory, ContextList, LazySuite
+from nose.pyversion import sort_list, cmp_to_key
+
+
+log = logging.getLogger(__name__)
+#log.setLevel(logging.DEBUG)
+
+# for efficiency and easier mocking
+op_normpath = os.path.normpath
+op_abspath = os.path.abspath
+op_join = os.path.join
+op_isdir = os.path.isdir
+op_isfile = os.path.isfile
+
+
+__all__ = ['TestLoader', 'defaultTestLoader']
+
+
+class TestLoader(unittest.TestLoader):
+ """Test loader that extends unittest.TestLoader to:
+
+ * Load tests from test-like functions and classes that are not
+ unittest.TestCase subclasses
+ * Find and load test modules in a directory
+ * Support tests that are generators
+ * Support easy extensions of or changes to that behavior through plugins
+ """
+ config = None
+ importer = None
+ workingDir = None
+ selector = None
+ suiteClass = None
+
+ def __init__(self, config=None, importer=None, workingDir=None,
+ selector=None):
+ """Initialize a test loader.
+
+ Parameters (all optional):
+
+ * config: provide a `nose.config.Config`_ or other config class
+ instance; if not provided a `nose.config.Config`_ with
+ default values is used.
+ * importer: provide an importer instance that implements
+ `importFromPath`. If not provided, a
+ `nose.importer.Importer`_ is used.
+ * workingDir: the directory to which file and module names are
+ relative. If not provided, assumed to be the current working
+ directory.
+ * selector: a selector class or instance. If a class is
+ provided, it will be instantiated with one argument, the
+ current config. If not provided, a `nose.selector.Selector`_
+ is used.
+ """
+ if config is None:
+ config = Config()
+ if importer is None:
+ importer = Importer(config=config)
+ if workingDir is None:
+ workingDir = config.workingDir
+ if selector is None:
+ selector = defaultSelector(config)
+ elif isclass(selector):
+ selector = selector(config)
+ self.config = config
+ self.importer = importer
+ self.workingDir = op_normpath(op_abspath(workingDir))
+ self.selector = selector
+ if config.addPaths:
+ add_path(workingDir, config)
+ self.suiteClass = ContextSuiteFactory(config=config)
+
+ self._visitedPaths = set([])
+
+ unittest.TestLoader.__init__(self)
+
+ def getTestCaseNames(self, testCaseClass):
+ """Override to select with selector, unless
+ config.getTestCaseNamesCompat is True
+ """
+ if self.config.getTestCaseNamesCompat:
+ return unittest.TestLoader.getTestCaseNames(self, testCaseClass)
+
+ def wanted(attr, cls=testCaseClass, sel=self.selector):
+ item = getattr(cls, attr, None)
+ if isfunction(item):
+ item = unbound_method(cls, item)
+ elif not ismethod(item):
+ return False
+ return sel.wantMethod(item)
+
+ cases = filter(wanted, dir(testCaseClass))
+
+ # add runTest if nothing else picked
+ if not cases and hasattr(testCaseClass, 'runTest'):
+ cases = ['runTest']
+ if self.sortTestMethodsUsing:
+ sort_list(cases, cmp_to_key(self.sortTestMethodsUsing))
+ return cases
+
+ def _haveVisited(self, path):
+ # For cases where path is None, we always pretend we haven't visited
+ # them.
+ if path is None:
+ return False
+
+ return path in self._visitedPaths
+
+ def _addVisitedPath(self, path):
+ if path is not None:
+ self._visitedPaths.add(path)
+
+ def loadTestsFromDir(self, path):
+ """Load tests from the directory at path. This is a generator
+ -- each suite of tests from a module or other file is yielded
+ and is expected to be executed before the next file is
+ examined.
+ """
+ log.debug("load from dir %s", path)
+ plugins = self.config.plugins
+ plugins.beforeDirectory(path)
+ if self.config.addPaths:
+ paths_added = add_path(path, self.config)
+
+ entries = os.listdir(path)
+ sort_list(entries, regex_last_key(self.config.testMatch))
+ for entry in entries:
+ # this hard-coded initial-dot test will be removed:
+ # http://code.google.com/p/python-nose/issues/detail?id=82
+ if entry.startswith('.'):
+ continue
+ entry_path = op_abspath(op_join(path, entry))
+ is_file = op_isfile(entry_path)
+ wanted = False
+ if is_file:
+ is_dir = False
+ wanted = self.selector.wantFile(entry_path)
+ else:
+ is_dir = op_isdir(entry_path)
+ if is_dir:
+ # this hard-coded initial-underscore test will be removed:
+ # http://code.google.com/p/python-nose/issues/detail?id=82
+ if entry.startswith('_'):
+ continue
+ wanted = self.selector.wantDirectory(entry_path)
+ is_package = ispackage(entry_path)
+
+ # Python 3.3 now implements PEP 420: Implicit Namespace Packages.
+ # As a result, it's now possible for a parent path with a
+ # segment whose basename matches our package to end up in
+ # module.__path__. So we have to keep track of what we've
+ # visited, and not revisit it again.
+ if wanted and not self._haveVisited(entry_path):
+ self._addVisitedPath(entry_path)
+ if is_file:
+ plugins.beforeContext()
+ if entry.endswith('.py'):
+ yield self.loadTestsFromName(
+ entry_path, discovered=True)
+ else:
+ yield self.loadTestsFromFile(entry_path)
+ plugins.afterContext()
+ elif is_package:
+ # Load the entry as a package: given the full path,
+ # loadTestsFromName() will figure it out
+ yield self.loadTestsFromName(
+ entry_path, discovered=True)
+ else:
+ # Another test dir in this one: recurse lazily
+ yield self.suiteClass(
+ lambda: self.loadTestsFromDir(entry_path))
+ tests = []
+ for test in plugins.loadTestsFromDir(path):
+ tests.append(test)
+ # TODO: is this try/except needed?
+ try:
+ if tests:
+ yield self.suiteClass(tests)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ yield self.suiteClass([Failure(*sys.exc_info())])
+
+ # pop paths
+ if self.config.addPaths:
+ for p in paths_added:
+ remove_path(p)
+ plugins.afterDirectory(path)
+
+ def loadTestsFromFile(self, filename):
+ """Load tests from a non-module file. Default is to raise a
+ ValueError; plugins may implement `loadTestsFromFile` to
+ provide a list of tests loaded from the file.
+ """
+ log.debug("Load from non-module file %s", filename)
+ try:
+ tests = [test for test in
+ self.config.plugins.loadTestsFromFile(filename)]
+ if tests:
+ # Plugins can yield False to indicate that they were
+ # unable to load tests from a file, but it was not an
+ # error -- the file just had no tests to load.
+ tests = filter(None, tests)
+ return self.suiteClass(tests)
+ else:
+ # Nothing was able to even try to load from this file
+ open(filename, 'r').close() # trigger os error
+ raise ValueError("Unable to load tests from file %s"
+ % filename)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ return self.suiteClass(
+ [Failure(exc[0], exc[1], exc[2],
+ address=(filename, None, None))])
+
+ def loadTestsFromGenerator(self, generator, module):
+ """Lazy-load tests from a generator function. The generator function
+ may yield either:
+
+ * a callable, or
+ * a function name resolvable within the same module
+ """
+ def generate(g=generator, m=module):
+ try:
+ for test in g():
+ test_func, arg = self.parseGeneratedTest(test)
+ if not callable(test_func):
+ test_func = getattr(m, test_func)
+ yield FunctionTestCase(test_func, arg=arg, descriptor=g)
+ except KeyboardInterrupt:
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(exc[0], exc[1], exc[2],
+ address=test_address(generator))
+ return self.suiteClass(generate, context=generator, can_split=False)
+
+ def loadTestsFromGeneratorMethod(self, generator, cls):
+ """Lazy-load tests from a generator method.
+
+ This is more complicated than loading from a generator function,
+ since a generator method may yield:
+
+ * a function
+ * a bound or unbound method, or
+ * a method name
+ """
+ # convert the unbound generator method
+ # into a bound method so it can be called below
+ if hasattr(generator, 'im_class'):
+ cls = generator.im_class
+ inst = cls()
+ method = generator.__name__
+ generator = getattr(inst, method)
+
+ def generate(g=generator, c=cls):
+ try:
+ for test in g():
+ test_func, arg = self.parseGeneratedTest(test)
+ if not callable(test_func):
+ test_func = unbound_method(c, getattr(c, test_func))
+ if ismethod(test_func):
+ yield MethodTestCase(test_func, arg=arg, descriptor=g)
+ elif callable(test_func):
+ # In this case we're forcing the 'MethodTestCase'
+ # to run the inline function as its test call,
+ # but using the generator method as the 'method of
+ # record' (so no need to pass it as the descriptor)
+ yield MethodTestCase(g, test=test_func, arg=arg)
+ else:
+ yield Failure(
+ TypeError,
+ "%s is not a callable or method" % test_func)
+ except KeyboardInterrupt:
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(exc[0], exc[1], exc[2],
+ address=test_address(generator))
+ return self.suiteClass(generate, context=generator, can_split=False)
+
+ def loadTestsFromModule(self, module, path=None, discovered=False):
+ """Load all tests from module and return a suite containing
+ them. If the module has been discovered and is not test-like,
+ the suite will be empty by default, though plugins may add
+ their own tests.
+ """
+ log.debug("Load from module %s", module)
+ tests = []
+ test_classes = []
+ test_funcs = []
+ # For *discovered* modules, we only load tests when the module looks
+ # testlike. For modules we've been directed to load, we always
+ # look for tests. (discovered is set to True by loadTestsFromDir)
+ if not discovered or self.selector.wantModule(module):
+ for item in dir(module):
+ test = getattr(module, item, None)
+ # print "Check %s (%s) in %s" % (item, test, module.__name__)
+ if isclass(test):
+ if self.selector.wantClass(test):
+ test_classes.append(test)
+ elif isfunction(test) and self.selector.wantFunction(test):
+ test_funcs.append(test)
+ sort_list(test_classes, lambda x: x.__name__)
+ sort_list(test_funcs, func_lineno)
+ tests = map(lambda t: self.makeTest(t, parent=module),
+ test_classes + test_funcs)
+
+ # Now, descend into packages
+ # FIXME can or should this be lazy?
+ # is this syntax 2.2 compatible?
+ module_paths = getattr(module, '__path__', [])
+ if path:
+ path = os.path.realpath(path)
+ for module_path in module_paths:
+ log.debug("Load tests from module path %s?", module_path)
+ log.debug("path: %s os.path.realpath(%s): %s",
+ path, module_path, os.path.realpath(module_path))
+ if (self.config.traverseNamespace or not path) or \
+ os.path.realpath(module_path).startswith(path):
+ # Egg files can be on sys.path, so make sure the path is a
+ # directory before trying to load from it.
+ if os.path.isdir(module_path):
+ tests.extend(self.loadTestsFromDir(module_path))
+
+ for test in self.config.plugins.loadTestsFromModule(module, path):
+ tests.append(test)
+
+ return self.suiteClass(ContextList(tests, context=module))
+
+ def loadTestsFromName(self, name, module=None, discovered=False):
+ """Load tests from the entity with the given name.
+
+ The name may indicate a file, directory, module, or any object
+ within a module. See `nose.util.split_test_name` for details on
+ test name parsing.
+ """
+ # FIXME refactor this method into little bites?
+ log.debug("load from %s (%s)", name, module)
+
+ suite = self.suiteClass
+
+ # give plugins first crack
+ plug_tests = self.config.plugins.loadTestsFromName(name, module)
+ if plug_tests:
+ return suite(plug_tests)
+
+ addr = TestAddress(name, workingDir=self.workingDir)
+ if module:
+ # Two cases:
+ # name is class.foo
+ # The addr will be incorrect, since it thinks class.foo is
+ # a dotted module name. It's actually a dotted attribute
+ # name. In this case we want to use the full submitted
+ # name as the name to load from the module.
+ # name is module:class.foo
+ # The addr will be correct. The part we want is the part after
+ # the :, which is in addr.call.
+ if addr.call:
+ name = addr.call
+ parent, obj = self.resolve(name, module)
+ if (isclass(parent)
+ and getattr(parent, '__module__', None) != module.__name__
+ and not isinstance(obj, Failure)):
+ parent = transplant_class(parent, module.__name__)
+ obj = getattr(parent, obj.__name__)
+ log.debug("parent %s obj %s module %s", parent, obj, module)
+ if isinstance(obj, Failure):
+ return suite([obj])
+ else:
+ return suite(ContextList([self.makeTest(obj, parent)],
+ context=parent))
+ else:
+ if addr.module:
+ try:
+ if addr.filename is None:
+ module = resolve_name(addr.module)
+ else:
+ self.config.plugins.beforeImport(
+ addr.filename, addr.module)
+ # FIXME: to support module.name names,
+ # do what resolve-name does and keep trying to
+ # import, popping tail of module into addr.call,
+ # until we either get an import or run out of
+ # module parts
+ try:
+ module = self.importer.importFromPath(
+ addr.filename, addr.module)
+ finally:
+ self.config.plugins.afterImport(
+ addr.filename, addr.module)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ return suite([Failure(exc[0], exc[1], exc[2],
+ address=addr.totuple())])
+ if addr.call:
+ return self.loadTestsFromName(addr.call, module)
+ else:
+ return self.loadTestsFromModule(
+ module, addr.filename,
+ discovered=discovered)
+ elif addr.filename:
+ path = addr.filename
+ if addr.call:
+ package = getpackage(path)
+ if package is None:
+ return suite([
+ Failure(ValueError,
+ "Can't find callable %s in file %s: "
+ "file is not a python module" %
+ (addr.call, path),
+ address=addr.totuple())])
+ return self.loadTestsFromName(addr.call, module=package)
+ else:
+ if op_isdir(path):
+ # In this case we *can* be lazy since we know
+ # that each module in the dir will be fully
+ # loaded before its tests are executed; we
+ # also know that we're not going to be asked
+ # to load from . and ./some_module.py *as part
+ # of this named test load*
+ return LazySuite(
+ lambda: self.loadTestsFromDir(path))
+ elif op_isfile(path):
+ return self.loadTestsFromFile(path)
+ else:
+ return suite([
+ Failure(OSError, "No such file %s" % path,
+ address=addr.totuple())])
+ else:
+ # just a function? what to do? I think it can only be
+ # handled when module is not None
+ return suite([
+ Failure(ValueError, "Unresolvable test name %s" % name,
+ address=addr.totuple())])
+
+ def loadTestsFromNames(self, names, module=None):
+ """Load tests from all names, returning a suite containing all
+ tests.
+ """
+ plug_res = self.config.plugins.loadTestsFromNames(names, module)
+ if plug_res:
+ suite, names = plug_res
+ if suite:
+ return self.suiteClass([
+ self.suiteClass(suite),
+ unittest.TestLoader.loadTestsFromNames(self, names, module)
+ ])
+ return unittest.TestLoader.loadTestsFromNames(self, names, module)
+
+ def loadTestsFromTestCase(self, testCaseClass):
+ """Load tests from a unittest.TestCase subclass.
+ """
+ cases = []
+ plugins = self.config.plugins
+ for case in plugins.loadTestsFromTestCase(testCaseClass):
+ cases.append(case)
+ # For efficiency in the most common case, just call and return from
+ # super. This avoids having to extract cases and rebuild a context
+ # suite when there are no plugin-contributed cases.
+ if not cases:
+ return super(TestLoader, self).loadTestsFromTestCase(testCaseClass)
+ cases.extend(
+ [case for case in
+ super(TestLoader, self).loadTestsFromTestCase(testCaseClass)])
+ return self.suiteClass(cases)
+
+ def loadTestsFromTestClass(self, cls):
+ """Load tests from a test class that is *not* a unittest.TestCase
+ subclass.
+
+ In this case, we can't depend on the class's `__init__` taking method
+ name arguments, so we have to compose a MethodTestCase for each
+ method in the class that looks testlike.
+ """
+ def wanted(attr, cls=cls, sel=self.selector):
+ item = getattr(cls, attr, None)
+ if isfunction(item):
+ item = unbound_method(cls, item)
+ elif not ismethod(item):
+ return False
+ return sel.wantMethod(item)
+ cases = [self.makeTest(getattr(cls, case), cls)
+ for case in filter(wanted, dir(cls))]
+ for test in self.config.plugins.loadTestsFromTestClass(cls):
+ cases.append(test)
+ return self.suiteClass(ContextList(cases, context=cls))
+
+ def makeTest(self, obj, parent=None):
+ try:
+ return self._makeTest(obj, parent)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ try:
+ addr = test_address(obj)
+ except KeyboardInterrupt:
+ raise
+ except:
+ addr = None
+ return Failure(exc[0], exc[1], exc[2], address=addr)
+
+ def _makeTest(self, obj, parent=None):
+ """Given a test object and its parent, return a test case
+ or test suite.
+ """
+ plug_tests = []
+ try:
+ addr = test_address(obj)
+ except KeyboardInterrupt:
+ raise
+ except:
+ addr = None
+ for test in self.config.plugins.makeTest(obj, parent):
+ plug_tests.append(test)
+ # TODO: is this try/except needed?
+ try:
+ if plug_tests:
+ return self.suiteClass(plug_tests)
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ return Failure(exc[0], exc[1], exc[2], address=addr)
+
+ if isfunction(obj) and parent and not isinstance(parent, types.ModuleType):
+ # This is a Python 3.x 'unbound method'. Wrap it with its
+ # associated class..
+ obj = unbound_method(parent, obj)
+
+ if isinstance(obj, unittest.TestCase):
+ return obj
+ elif isclass(obj):
+ if parent and obj.__module__ != parent.__name__:
+ obj = transplant_class(obj, parent.__name__)
+ if issubclass(obj, unittest.TestCase):
+ return self.loadTestsFromTestCase(obj)
+ else:
+ return self.loadTestsFromTestClass(obj)
+ elif ismethod(obj):
+ if parent is None:
+ parent = obj.__class__
+ if issubclass(parent, unittest.TestCase):
+ return parent(obj.__name__)
+ else:
+ if isgenerator(obj):
+ return self.loadTestsFromGeneratorMethod(obj, parent)
+ else:
+ return MethodTestCase(obj)
+ elif isfunction(obj):
+ if parent and obj.__module__ != parent.__name__:
+ obj = transplant_func(obj, parent.__name__)
+ if isgenerator(obj):
+ return self.loadTestsFromGenerator(obj, parent)
+ else:
+ return FunctionTestCase(obj)
+ else:
+ return Failure(TypeError,
+ "Can't make a test from %s" % obj,
+ address=addr)
+
+ def resolve(self, name, module):
+ """Resolve name within module
+ """
+ obj = module
+ parts = name.split('.')
+ for part in parts:
+ parent, obj = obj, getattr(obj, part, None)
+ if obj is None:
+ # no such test
+ obj = Failure(ValueError, "No such test %s" % name)
+ return parent, obj
+
+ def parseGeneratedTest(self, test):
+ """Given the yield value of a test generator, return a func and args.
+
+ This is used in the two loadTestsFromGenerator* methods.
+
+ """
+ if not isinstance(test, tuple): # yield test
+ test_func, arg = (test, tuple())
+ elif len(test) == 1: # yield (test,)
+ test_func, arg = (test[0], tuple())
+ else: # yield test, foo, bar, ...
+ assert len(test) > 1 # sanity check
+ test_func, arg = (test[0], test[1:])
+ return test_func, arg
+
+defaultTestLoader = TestLoader
+
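+# Editor's sketch (hypothetical dotted name): resolving a single test by
+# name with nose's loader and running it under unittest.
+def _example_loader_usage():
+    loader = TestLoader()
+    suite = loader.loadTestsFromName('tests.test_api:TestAPI.test_get')
+    unittest.TextTestRunner().run(suite)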
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py b/scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py
new file mode 100755
index 00000000..08ee8f32
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/__init__.py
@@ -0,0 +1,190 @@
+"""
+Writing Plugins
+---------------
+
+nose supports plugins for test collection, selection, observation and
+reporting. There are two basic rules for plugins:
+
+* Plugin classes should subclass :class:`nose.plugins.Plugin`.
+
+* Plugins may implement any of the methods described in the class
+ :doc:`IPluginInterface <interface>` in nose.plugins.base. Please note that
+ this class is for documentary purposes only; plugins may not subclass
+ IPluginInterface.
+
+Hello World
+===========
+
+Here's a basic plugin. It doesn't do much so read on for more ideas or dive
+into the :doc:`IPluginInterface <interface>` to see all available hooks.
+
+.. code-block:: python
+
+ import logging
+ import os
+
+ from nose.plugins import Plugin
+
+ log = logging.getLogger('nose.plugins.helloworld')
+
+ class HelloWorld(Plugin):
+ name = 'helloworld'
+
+ def options(self, parser, env=os.environ):
+ super(HelloWorld, self).options(parser, env=env)
+
+ def configure(self, options, conf):
+ super(HelloWorld, self).configure(options, conf)
+ if not self.enabled:
+ return
+
+ def finalize(self, result):
+ log.info('Hello pluginized world!')
+
+Registering
+===========
+
+.. Note::
+ Important note: the following applies only to the default
+ plugin manager. Other plugin managers may use different means to
+ locate and load plugins.
+
+For nose to find a plugin, it must be part of a package that uses
+setuptools_, and the plugin must be included in the entry points defined
+in the setup.py for the package:
+
+.. code-block:: python
+
+ setup(name='Some plugin',
+ # ...
+ entry_points = {
+ 'nose.plugins.0.10': [
+ 'someplugin = someplugin:SomePlugin'
+ ]
+ },
+ # ...
+ )
+
+Once the package is installed with install or develop, nose will be able
+to load the plugin.
+
+.. _setuptools: http://peak.telecommunity.com/DevCenter/setuptools
+
+Registering a plugin without setuptools
+=======================================
+
+It is currently possible to register a plugin programmatically by
+creating a custom nose runner like this:
+
+.. code-block:: python
+
+ import nose
+ from yourplugin import YourPlugin
+
+ if __name__ == '__main__':
+ nose.main(addplugins=[YourPlugin()])
+
+Defining options
+================
+
+All plugins must implement the methods ``options(self, parser, env)``
+and ``configure(self, options, conf)``. Subclasses of nose.plugins.Plugin
+that want the standard options should call the superclass methods.
+
+nose uses optparse.OptionParser from the standard library to parse
+arguments. A plugin's ``options()`` method receives a parser
+instance. It's good form for a plugin to use that instance only to add
+additional arguments, and only in long form (--like-this). Most
+of nose's built-in arguments get their default value from an environment
+variable.
+
+A plugin's ``configure()`` method receives the parsed ``OptionParser`` options
+object, as well as the current config object. Plugins should configure their
+behavior based on the user-selected settings, and may raise exceptions
+if the configured behavior is nonsensical.
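+
+For illustration only (an editor's sketch, not from the original docs), a
+plugin defining one hypothetical long option might look roughly like this:
+
+.. code-block:: python
+
+    import os
+    from nose.plugins import Plugin
+
+    class Stats(Plugin):
+        name = 'stats'
+
+        def options(self, parser, env=os.environ):
+            super(Stats, self).options(parser, env=env)
+            parser.add_option('--stats-level', type='int', default=0,
+                              dest='stats_level',
+                              help='Verbosity of the stats report')
+
+        def configure(self, options, conf):
+            super(Stats, self).configure(options, conf)
+            if self.enabled:
+                self.level = options.stats_level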
+
+Logging
+=======
+
+nose uses the logging classes from the standard library. To enable users
+to view debug messages easily, plugins should use ``logging.getLogger()`` to
+acquire a logger in the ``nose.plugins`` namespace.
+
+Recipes
+=======
+
+* Writing a plugin that monitors or controls test result output
+
+ Implement any or all of ``addError``, ``addFailure``, etc., to monitor test
+ results. If you also want to monitor output, implement
+ ``setOutputStream`` and keep a reference to the output stream. If you
+ want to prevent the builtin ``TextTestResult`` output, implement
+ ``setOutputStream`` and *return a dummy stream*. The default output will go
+ to the dummy stream, while you send your desired output to the real stream.
+
+ Example: `examples/html_plugin/htmlplug.py`_
+
+* Writing a plugin that handles exceptions
+
+ Subclass :doc:`ErrorClassPlugin <errorclasses>`.
+
+ Examples: :doc:`nose.plugins.deprecated <deprecated>`,
+ :doc:`nose.plugins.skip <skip>`
+
+* Writing a plugin that adds detail to error reports
+
+ Implement ``formatError`` and/or ``formatFailure``. The error tuple
+ you return (error class, error message, traceback) will replace the
+ original error tuple.
+
+ Examples: :doc:`nose.plugins.capture <capture>`,
+ :doc:`nose.plugins.failuredetail <failuredetail>`
+
+* Writing a plugin that loads tests from files other than python modules
+
+ Implement ``wantFile`` and ``loadTestsFromFile``. In ``wantFile``,
+ return True for files that you want to examine for tests. In
+ ``loadTestsFromFile``, for those files, return an iterable
+ containing TestCases (or yield them as you find them;
+ ``loadTestsFromFile`` may also be a generator).
+
+ Example: :doc:`nose.plugins.doctests <doctests>`
+
+* Writing a plugin that prints a report
+
+ Implement ``begin`` if you need to perform setup before testing
+ begins. Implement ``report`` and output your report to the provided stream.
+
+ Examples: :doc:`nose.plugins.cover <cover>`, :doc:`nose.plugins.prof <prof>`
+
+* Writing a plugin that selects or rejects tests
+
+ Implement any or all ``want*`` methods. Return False to reject the test
+ candidate, True to accept it -- which means that the test candidate
+ will pass through the rest of the system, so you must be prepared to
+ load tests from it if tests can't be loaded by the core loader or
+ another plugin -- and None if you don't care.
+
+ Examples: :doc:`nose.plugins.attrib <attrib>`,
+ :doc:`nose.plugins.doctests <doctests>`, :doc:`nose.plugins.testid <testid>`
+
+
+More Examples
+=============
+
+See any builtin plugin or example plugin in the examples_ directory in
+the nose source distribution. There is a list of third-party plugins
+`on jottit`_.
+
+.. _examples/html_plugin/htmlplug.py: http://python-nose.googlecode.com/svn/trunk/examples/html_plugin/htmlplug.py
+.. _examples: http://python-nose.googlecode.com/svn/trunk/examples
+.. _on jottit: http://nose-plugins.jottit.com/
+
+"""
+from nose.plugins.base import Plugin
+from nose.plugins.manager import *
+from nose.plugins.plugintest import PluginTester
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py b/scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py
new file mode 100755
index 00000000..1ccd7773
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/allmodules.py
@@ -0,0 +1,45 @@
+"""Use the AllModules plugin by passing ``--all-modules`` or setting the
+NOSE_ALL_MODULES environment variable to enable collection and execution of
+tests in all python modules. Normal nose behavior is to look for tests only in
+modules that match testMatch.
+
+More information: :doc:`../doc_tests/test_allmodules/test_allmodules`
+
+.. warning ::
+
+ This plugin can have surprising interactions with plugins that load tests
+ from what nose normally considers non-test modules, such as
+ the :doc:`doctest plugin <doctests>`. This is because any given
+ object in a module can't be loaded both by a plugin and the normal nose
+ :class:`test loader <nose.loader.TestLoader>`. Also, if you have functions
+ or classes in non-test modules that look like tests but aren't, you will
+ likely see errors as nose attempts to run them as tests.
+
+"""
+
+import os
+from nose.plugins.base import Plugin
+
+class AllModules(Plugin):
+ """Collect tests from all python modules.
+ """
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ env_opt = 'NOSE_ALL_MODULES'
+ parser.add_option('--all-modules',
+ action="store_true",
+ dest=self.enableOpt,
+ default=env.get(env_opt),
+ help="Enable plugin %s: %s [%s]" %
+ (self.__class__.__name__, self.help(), env_opt))
+
+ def wantFile(self, file):
+ """Override to return True for all files ending with .py"""
+ # always want .py files
+ if file.endswith('.py'):
+ return True
+
+ def wantModule(self, module):
+ """Override return True for all modules"""
+ return True
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py b/scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py
new file mode 100755
index 00000000..3d4422a2
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/attrib.py
@@ -0,0 +1,286 @@
+"""Attribute selector plugin.
+
+Oftentimes when testing you will want to select tests based on
+criteria rather than simply by filename. For example, you might want
+to run all tests except for the slow ones. You can do this with the
+Attribute selector plugin by setting attributes on your test methods.
+Here is an example:
+
+.. code-block:: python
+
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+ test_big_download.slow = 1
+
+Once you've assigned an attribute ``slow = 1`` you can exclude that
+test and all other tests having the slow attribute by running ::
+
+ $ nosetests -a '!slow'
+
+There is also a decorator available for you that will set attributes.
+Here's how to set ``slow=1`` like above with the decorator:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr('slow')
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+And here's how to set an attribute with a specific value:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr(speed='slow')
+ def test_big_download():
+ import urllib
+ # commence slowness...
+
+This test could be run with ::
+
+ $ nosetests -a speed=slow
+
+In Python 2.6 and higher, ``@attr`` can be used on a class to set attributes
+on all its test methods at once. For example:
+
+.. code-block:: python
+
+ from nose.plugins.attrib import attr
+ @attr(speed='slow')
+ class MyTestCase:
+ def test_long_integration(self):
+ pass
+ def test_end_to_end_something(self):
+ pass
+
+Below is a reference to the different syntaxes available.
+
+Simple syntax
+-------------
+
+Examples of using the ``-a`` and ``--attr`` options:
+
+* ``nosetests -a status=stable``
+ Only runs tests with attribute "status" having value "stable"
+
+* ``nosetests -a priority=2,status=stable``
+ Runs tests having both attributes and values
+
+* ``nosetests -a priority=2 -a slow``
+ Runs tests that match either attribute
+
+* ``nosetests -a tags=http``
+ If a test's ``tags`` attribute was a list and it contained the value
+ ``http`` then it would be run
+
+* ``nosetests -a slow``
+ Runs tests with the attribute ``slow`` if its value does not equal False
+ (False, [], "", etc...)
+
+* ``nosetests -a '!slow'``
+ Runs tests that do NOT have the attribute ``slow`` or have a ``slow``
+ attribute that is equal to False
+ **NOTE**:
+ if your shell (like bash) interprets '!' as a special character, make sure to
+ put single quotes around it.
+
+Expression Evaluation
+---------------------
+
+Examples using the ``-A`` and ``--eval-attr`` options:
+
+* ``nosetests -A "not slow"``
+ Evaluates the Python expression "not slow" and runs the test if True
+
+* ``nosetests -A "(priority > 5) and not slow"``
+ Evaluates a complex Python expression and runs the test if True
+
+"""
+import inspect
+import logging
+import os
+import sys
+from inspect import isfunction
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins.attrib')
+compat_24 = sys.version_info >= (2, 4)
+
+def attr(*args, **kwargs):
+ """Decorator that adds attributes to classes or functions
+ for use with the Attribute (-a) plugin.
+ """
+ def wrap_ob(ob):
+ for name in args:
+ setattr(ob, name, True)
+ for name, value in kwargs.iteritems():
+ setattr(ob, name, value)
+ return ob
+ return wrap_ob
+
+def get_method_attr(method, cls, attr_name, default = False):
+ """Look up an attribute on a method/ function.
+ If the attribute isn't found there, looking it up in the
+ method's class, if any.
+ """
+ Missing = object()
+ value = getattr(method, attr_name, Missing)
+ if value is Missing and cls is not None:
+ value = getattr(cls, attr_name, Missing)
+ if value is Missing:
+ return default
+ return value
+
+
+class ContextHelper:
+ """Object that can act as context dictionary for eval and looks up
+ names as attributes on a method/ function and its class.
+ """
+ def __init__(self, method, cls):
+ self.method = method
+ self.cls = cls
+
+ def __getitem__(self, name):
+ return get_method_attr(self.method, self.cls, name)
+
+
+class AttributeSelector(Plugin):
+ """Selects test cases to be run based on their attributes.
+ """
+
+ def __init__(self):
+ Plugin.__init__(self)
+ self.attribs = []
+
+ def options(self, parser, env):
+ """Register command line options"""
+ parser.add_option("-a", "--attr",
+ dest="attr", action="append",
+ default=env.get('NOSE_ATTR'),
+ metavar="ATTR",
+ help="Run only tests that have attributes "
+ "specified by ATTR [NOSE_ATTR]")
+ # disable in < 2.4: eval can't take needed args
+ if compat_24:
+ parser.add_option("-A", "--eval-attr",
+ dest="eval_attr", metavar="EXPR", action="append",
+ default=env.get('NOSE_EVAL_ATTR'),
+ help="Run only tests for whose attributes "
+ "the Python expression EXPR evaluates "
+ "to True [NOSE_EVAL_ATTR]")
+
+ def configure(self, options, config):
+ """Configure the plugin and system, based on selected options.
+
+ attr and eval_attr may each be lists.
+
+ self.attribs will be a list of lists of tuples. In that list, each
+ list is a group of attributes, all of which must match for the rule to
+ match.
+ """
+ self.attribs = []
+
+ # handle python eval-expression parameter
+ if compat_24 and options.eval_attr:
+ eval_attr = tolist(options.eval_attr)
+ for attr in eval_attr:
+ # "<python expression>"
+ # -> eval(expr) in attribute context must be True
+ def eval_in_context(expr, obj, cls):
+ return eval(expr, None, ContextHelper(obj, cls))
+ self.attribs.append([(attr, eval_in_context)])
+
+ # attribute requirements are a comma separated list of
+ # 'key=value' pairs
+ if options.attr:
+ std_attr = tolist(options.attr)
+ for attr in std_attr:
+ # all attributes within an attribute group must match
+ attr_group = []
+ for attrib in attr.strip().split(","):
+ # don't die on trailing comma
+ if not attrib:
+ continue
+ items = attrib.split("=", 1)
+ if len(items) > 1:
+ # "name=value"
+ # -> 'str(obj.name) == value' must be True
+ key, value = items
+ else:
+ key = items[0]
+ if key[0] == "!":
+ # "!name"
+ # 'bool(obj.name)' must be False
+ key = key[1:]
+ value = False
+ else:
+ # "name"
+ # -> 'bool(obj.name)' must be True
+ value = True
+ attr_group.append((key, value))
+ self.attribs.append(attr_group)
+ if self.attribs:
+ self.enabled = True
+
+ def validateAttrib(self, method, cls = None):
+ """Verify whether a method has the required attributes
+ The method is considered a match if it matches all attributes
+ for any attribute group.
+ ."""
+ # TODO: is there a need for case-sensitive value comparison?
+ any = False
+ for group in self.attribs:
+ match = True
+ for key, value in group:
+ attr = get_method_attr(method, cls, key)
+ if callable(value):
+ if not value(key, method, cls):
+ match = False
+ break
+ elif value is True:
+ # value must exist and be True
+ if not bool(attr):
+ match = False
+ break
+ elif value is False:
+ # value must not exist or be False
+ if bool(attr):
+ match = False
+ break
+ elif type(attr) in (list, tuple):
+ # value must be found in the list attribute
+ if not str(value).lower() in [str(x).lower()
+ for x in attr]:
+ match = False
+ break
+ else:
+ # value must match, convert to string and compare
+ if (value != attr
+ and str(value).lower() != str(attr).lower()):
+ match = False
+ break
+ any = any or match
+ if any:
+ # not True because we don't want to FORCE the selection of the
+ # item, only say that it is acceptable
+ return None
+ return False
+
+ def wantFunction(self, function):
+ """Accept the function if its attributes match.
+ """
+ return self.validateAttrib(function)
+
+ def wantMethod(self, method):
+ """Accept the method if its attributes match.
+ """
+ try:
+ cls = method.im_class
+ except AttributeError:
+ return False
+ return self.validateAttrib(method, cls)
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/base.py b/scripts/external_libs/nose-1.3.4/nose/plugins/base.py
new file mode 100755
index 00000000..f09beb69
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/base.py
@@ -0,0 +1,725 @@
+import os
+import textwrap
+from optparse import OptionConflictError
+from warnings import warn
+from nose.util import tolist
+
+class Plugin(object):
+ """Base class for nose plugins. It's recommended but not *necessary* to
+ subclass this class to create a plugin, but all plugins *must* implement
+ `options(self, parser, env)` and `configure(self, options, conf)`, and
+ must have the attributes `enabled`, `name` and `score`. The `name`
+ attribute may contain hyphens ('-').
+
+ Plugins should not be enabled by default.
+
+ Subclassing Plugin (and calling the superclass methods in
+ __init__, configure, and options, if you override them) will give
+ your plugin some friendly default behavior:
+
+ * A --with-$name option will be added to the command line interface
+ to enable the plugin, and a corresponding environment variable
+ will be used as the default value. The plugin class's docstring
+ will be used as the help for this option.
+ * The plugin will not be enabled unless this option is selected by
+ the user.
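+
+ For instance (a sketch; the plugin name is illustrative, and the class
+ docstring is shortened to a comment), a plugin that relies entirely on
+ these defaults:
+
+ .. code-block:: python
+
+ from nose.plugins.base import Plugin
+
+ class ExamplePlugin(Plugin):
+ # A real plugin would carry a docstring here; it becomes
+ # the help text for the generated --with-example-plugin
+ # option.
+ name = 'example-plugin'
+
+ With options() and configure() inherited, --with-example-plugin and the
+ NOSE_WITH_EXAMPLE_PLUGIN environment variable enable the plugin.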
+ """
+ can_configure = False
+ enabled = False
+ enableOpt = None
+ name = None
+ score = 100
+
+ def __init__(self):
+ if self.name is None:
+ self.name = self.__class__.__name__.lower()
+ if self.enableOpt is None:
+ self.enableOpt = "enable_plugin_%s" % self.name.replace('-', '_')
+
+ def addOptions(self, parser, env=None):
+ """Add command-line options for this plugin.
+
+ The base plugin class adds --with-$name by default, used to enable the
+ plugin.
+
+ .. warning :: Don't implement addOptions unless you want to override
+ all default option handling behavior, including
+ warnings for conflicting options. Implement
+ :meth:`options
+ <nose.plugins.base.IPluginInterface.options>`
+ instead.
+ """
+ self.add_options(parser, env)
+
+ def add_options(self, parser, env=None):
+ """Non-camel-case version of func name for backwards compatibility.
+
+ .. warning ::
+
+ DEPRECATED: Do not use this method,
+ use :meth:`options <nose.plugins.base.IPluginInterface.options>`
+ instead.
+
+ """
+ # FIXME raise deprecation warning if wasn't called by wrapper
+ if env is None:
+ env = os.environ
+ try:
+ self.options(parser, env)
+ self.can_configure = True
+ except OptionConflictError, e:
+ warn("Plugin %s has conflicting option string: %s and will "
+ "be disabled" % (self, e), RuntimeWarning)
+ self.enabled = False
+ self.can_configure = False
+
+ def options(self, parser, env):
+ """Register commandline options.
+
+ Implement this method for normal options behavior with protection from
+ OptionConflictErrors. If you override this method and want the default
+ --with-$name option to be registered, be sure to call super().
+ """
+ env_opt = 'NOSE_WITH_%s' % self.name.upper()
+ env_opt = env_opt.replace('-', '_')
+ parser.add_option("--with-%s" % self.name,
+ action="store_true",
+ dest=self.enableOpt,
+ default=env.get(env_opt),
+ help="Enable plugin %s: %s [%s]" %
+ (self.__class__.__name__, self.help(), env_opt))
+
+ def configure(self, options, conf):
+ """Configure the plugin and system, based on selected options.
+
+ The base plugin class sets the plugin to enabled if the enable option
+ for the plugin (self.enableOpt) is true.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ if hasattr(options, self.enableOpt):
+ self.enabled = getattr(options, self.enableOpt)
+
+ def help(self):
+ """Return help for this plugin. This will be output as the help
+ section of the --with-$name option that enables the plugin.
+ """
+ if self.__class__.__doc__:
+ # doc sections are often indented; compress the spaces
+ return textwrap.dedent(self.__class__.__doc__)
+ return "(no help available)"
+
+ # Compatibility shim
+ def tolist(self, val):
+ warn("Plugin.tolist is deprecated. Use nose.util.tolist instead",
+ DeprecationWarning)
+ return tolist(val)
+
+
+class IPluginInterface(object):
+ """
+ IPluginInterface describes the plugin API. Do not subclass or use this
+ class directly.
+ """
+ def __new__(cls, *arg, **kw):
+ raise TypeError("IPluginInterface class is for documentation only")
+
+ def addOptions(self, parser, env):
+ """Called to allow plugin to register command-line options with the
+ parser. DO NOT return a value from this method unless you want to stop
+ all other plugins from setting their options.
+
+ .. warning ::
+
+ DEPRECATED -- implement
+ :meth:`options <nose.plugins.base.IPluginInterface.options>` instead.
+ """
+ pass
+ add_options = addOptions
+ add_options.deprecated = True
+
+ def addDeprecated(self, test):
+ """Called when a deprecated test is seen. DO NOT return a value
+ unless you want to stop other plugins from seeing the deprecated
+ test.
+
+ .. warning :: DEPRECATED -- check error class in addError instead
+ """
+ pass
+ addDeprecated.deprecated = True
+
+ def addError(self, test, err):
+ """Called when a test raises an uncaught exception. DO NOT return a
+ value unless you want to stop other plugins from seeing that the
+ test has raised an error.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ addError.changed = True
+
+ def addFailure(self, test, err):
+ """Called when a test fails. DO NOT return a value unless you
+ want to stop other plugins from seeing that the test has failed.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: 3-tuple
+ :type err: sys.exc_info() tuple
+ """
+ pass
+ addFailure.changed = True
+
+ def addSkip(self, test):
+ """Called when a test is skipped. DO NOT return a value unless
+ you want to stop other plugins from seeing the skipped test.
+
+ .. warning:: DEPRECATED -- check error class in addError instead
+ """
+ pass
+ addSkip.deprecated = True
+
+ def addSuccess(self, test):
+ """Called when a test passes. DO NOT return a value unless you
+ want to stop other plugins from seeing the passing test.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ addSuccess.changed = True
+
+ def afterContext(self):
+ """Called after a context (generally a module) has been
+ lazy-loaded, imported, setup, had its tests loaded and
+ executed, and torn down.
+ """
+ pass
+ afterContext._new = True
+
+ def afterDirectory(self, path):
+ """Called after all tests have been loaded from directory at path
+ and run.
+
+ :param path: the directory that has finished processing
+ :type path: string
+ """
+ pass
+ afterDirectory._new = True
+
+ def afterImport(self, filename, module):
+ """Called after module is imported from filename. afterImport
+ is called even if the import failed.
+
+ :param filename: The file that was loaded
+ :type filename: string
+ :param module: The name of the module
+ :type module: string
+ """
+ pass
+ afterImport._new = True
+
+ def afterTest(self, test):
+ """Called after the test has been run and the result recorded
+ (after stopTest).
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ afterTest._new = True
+
+ def beforeContext(self):
+ """Called before a context (generally a module) is
+ examined. Because the context is not yet loaded, plugins don't
+ get to know what the context is; so any context operations
+ should use a stack that is pushed in `beforeContext` and popped
+ in `afterContext` to ensure they operate symmetrically.
+
+ `beforeContext` and `afterContext` are mainly useful for tracking
+ and restoring global state around possible changes from within a
+ context, whatever the context may be. If you need to operate on
+ contexts themselves, see `startContext` and `stopContext`, which
+ are passed the context in question, but are called after
+ it has been loaded (imported in the module case).
+ """
+ pass
+ beforeContext._new = True
+
+ def beforeDirectory(self, path):
+ """Called before tests are loaded from directory at path.
+
+ :param path: the directory that is about to be processed
+ """
+ pass
+ beforeDirectory._new = True
+
+ def beforeImport(self, filename, module):
+ """Called before module is imported from filename.
+
+ :param filename: The file that will be loaded
+ :param module: The name of the module found in file
+ :type module: string
+ """
+ beforeImport._new = True
+
+ def beforeTest(self, test):
+ """Called before the test is run (before startTest).
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ beforeTest._new = True
+
+ def begin(self):
+ """Called before any tests are collected or run. Use this to
+ perform any setup needed before testing begins.
+ """
+ pass
+
+ def configure(self, options, conf):
+ """Called after the command line has been parsed, with the
+ parsed options and the config container. Here, implement any
+ config storage or changes to state or operation that are set
+ by command line options.
+
+ DO NOT return a value from this method unless you want to
+ stop all other plugins from being configured.
+ """
+ pass
+
+ def finalize(self, result):
+ """Called after all report output, including output from all
+ plugins, has been sent to the stream. Use this to print final
+ test results or perform final cleanup. Return None to allow
+ other plugins to continue printing, or any other value to stop
+ them.
+
+ :param result: test result object
+
+ .. Note:: When tests are run under a test runner other than
+ :class:`nose.core.TextTestRunner`, such as
+ via ``python setup.py test``, this method may be called
+ **before** the default report output is sent.
+ """
+ pass
+
+ def describeTest(self, test):
+ """Return a test description.
+
+ Called by :meth:`nose.case.Test.shortDescription`.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ describeTest._new = True
+
+ def formatError(self, test, err):
+ """Called in result.addError, before plugin.addError. If you
+ want to replace or modify the error tuple, return a new error
+ tuple, otherwise return err, the original error tuple.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ formatError._new = True
+ formatError.chainable = True
+ # test arg is not chainable
+ formatError.static_args = (True, False)
+
+ def formatFailure(self, test, err):
+ """Called in result.addFailure, before plugin.addFailure. If you
+ want to replace or modify the error tuple, return a new error
+ tuple, otherwise return err, the original error tuple.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ formatFailure._new = True
+ formatFailure.chainable = True
+ # test arg is not chainable
+ formatFailure.static_args = (True, False)
+
+ def handleError(self, test, err):
+ """Called on addError. To handle the error yourself and prevent normal
+ error processing, return a true value.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ handleError._new = True
+
+ def handleFailure(self, test, err):
+ """Called on addFailure. To handle the failure yourself and
+ prevent normal failure processing, return a true value.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ :param err: sys.exc_info() tuple
+ :type err: 3-tuple
+ """
+ pass
+ handleFailure._new = True
+
+ def loadTestsFromDir(self, path):
+ """Return iterable of tests from a directory. May be a
+ generator. Each item returned must be a runnable
+ unittest.TestCase (or subclass) instance or suite instance.
+ Return None if your plugin cannot collect any tests from
+ directory.
+
+ :param path: The path to the directory.
+ """
+ pass
+ loadTestsFromDir.generative = True
+ loadTestsFromDir._new = True
+
+ def loadTestsFromModule(self, module, path=None):
+ """Return iterable of tests in a module. May be a
+ generator. Each item returned must be a runnable
+ unittest.TestCase (or subclass) instance.
+ Return None if your plugin cannot
+ collect any tests from module.
+
+ :param module: The module object
+ :type module: python module
+ :param path: the path of the module to search, to distinguish from
+ namespace package modules
+
+ .. note::
+
+ NEW. The ``path`` parameter will only be passed by nose 0.11
+ or above.
+ """
+ pass
+ loadTestsFromModule.generative = True
+
+ def loadTestsFromName(self, name, module=None, importPath=None):
+ """Return tests in this file or module. Return None if you are not able
+ to load any tests, or an iterable if you are. May be a
+ generator.
+
+ :param name: The test name. May be a file or module name plus a test
+ callable. Use split_test_name to split into parts. Or it might
+ be some crazy name of your own devising, in which case, do
+ whatever you want.
+ :param module: Module from which the name is to be loaded
+ :param importPath: Path from which file (must be a python module) was
+ found
+
+ .. warning:: DEPRECATED: this argument will NOT be passed.
+ """
+ pass
+ loadTestsFromName.generative = True
+
+ def loadTestsFromNames(self, names, module=None):
+ """Return a tuple of (tests loaded, remaining names). Return
+ None if you are not able to load any tests. Multiple plugins
+ may implement loadTestsFromNames; the remaining name list from
+ each will be passed to the next as input.
+
+ :param names: List of test names.
+ :type names: iterable
+ :param module: Module from which the names are to be loaded
+ """
+ pass
+ loadTestsFromNames._new = True
+ loadTestsFromNames.chainable = True
+
+ def loadTestsFromFile(self, filename):
+ """Return tests in this file. Return None if you are not
+ interested in loading any tests, or an iterable if you are and
+ can load some. May be a generator. *If you are interested in
+ loading tests from the file and encounter no errors, but find
+ no tests, yield False or return [False].*
+
+ .. Note:: This method replaces loadTestsFromPath from the 0.9
+ API.
+
+ :param filename: The full path to the file or directory.
+ """
+ pass
+ loadTestsFromFile.generative = True
+ loadTestsFromFile._new = True
+
+ def loadTestsFromPath(self, path):
+ """
+ .. warning:: DEPRECATED -- use loadTestsFromFile instead
+ """
+ pass
+ loadTestsFromPath.deprecated = True
+
+ def loadTestsFromTestCase(self, cls):
+ """Return tests in this test case class. Return None if you are
+ not able to load any tests, or an iterable if you are. May be a
+ generator.
+
+ :param cls: The test case class. Must be subclass of
+ :class:`unittest.TestCase`.
+ """
+ pass
+ loadTestsFromTestCase.generative = True
+
+ def loadTestsFromTestClass(self, cls):
+ """Return tests in this test class. Class will *not* be a
+ unittest.TestCase subclass. Return None if you are not able to
+ load any tests, an iterable if you are. May be a generator.
+
+ :param cls: The test case class. Must **not** be a subclass of
+ :class:`unittest.TestCase`.
+ """
+ pass
+ loadTestsFromTestClass._new = True
+ loadTestsFromTestClass.generative = True
+
+ def makeTest(self, obj, parent):
+ """Given an object and its parent, return or yield one or more
+ test cases. Each test must be a unittest.TestCase (or subclass)
+ instance. This is called before default test loading to allow
+ plugins to load an alternate test case or cases for an
+ object. May be a generator.
+
+ :param obj: The object to be made into a test
+ :param parent: The parent of obj (eg, for a method, the class)
+ """
+ pass
+ makeTest._new = True
+ makeTest.generative = True
+
+ def options(self, parser, env):
+ """Called to allow plugin to register command line
+ options with the parser.
+
+ DO NOT return a value from this method unless you want to stop
+ all other plugins from setting their options.
+
+ :param parser: options parser instance
+ :type parser: :class:`optparse.OptionParser`
+ :param env: environment, default is os.environ
+ """
+ pass
+ options._new = True
+
+ def prepareTest(self, test):
+ """Called before the test is run by the test runner. Please
+ note the article *the* in the previous sentence: prepareTest
+ is called *only once*, and is passed the test case or test
+ suite that the test runner will execute. It is *not* called
+ for each individual test case. If you return a non-None value,
+ that return value will be run as the test. Use this hook to
+ wrap or decorate the test with another function. If you need
+ to modify or wrap individual test cases, use `prepareTestCase`
+ instead.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def prepareTestCase(self, test):
+ """Prepare or wrap an individual test case. Called before
+ execution of the test. The test passed here is a
+ nose.case.Test instance; the case to be executed is in the
+ test attribute of the passed case. To modify the test to be
+ run, you should return a callable that takes one argument (the
+ test result object) -- it is recommended that you *do not*
+ side-effect the nose.case.Test instance you have been passed.
+
+ Keep in mind that when you replace the test callable you are
+ replacing the run() method of the test case -- including the
+ exception handling and result calls, etc.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ prepareTestCase._new = True
+
+ def prepareTestLoader(self, loader):
+ """Called before tests are loaded. To replace the test loader,
+ return a test loader. To allow other plugins to process the
+ test loader, return None. Only one plugin may replace the test
+ loader. Only valid when using nose.TestProgram.
+
+ :param loader: :class:`nose.loader.TestLoader`
+ (or other loader) instance
+ """
+ pass
+ prepareTestLoader._new = True
+
+ def prepareTestResult(self, result):
+ """Called before the first test is run. To use a different
+ test result handler for all tests than the given result,
+ return a test result handler. NOTE however that this handler
+ will only be seen by tests, that is, inside of the result
+ proxy system. The TestRunner and TestProgram -- whether nose's
+ or other -- will continue to see the original result
+ handler. For this reason, it is usually better to monkeypatch
+ the result (for instance, if you want to handle some
+ exceptions in a unique way). Only one plugin may replace the
+ result, but many may monkeypatch it. If you want to
+ monkeypatch and stop other plugins from doing so, monkeypatch
+ and return the patched result.
+
+ :param result: :class:`nose.result.TextTestResult`
+ (or other result) instance
+ """
+ pass
+ prepareTestResult._new = True
+
+ def prepareTestRunner(self, runner):
+ """Called before tests are run. To replace the test runner,
+ return a test runner. To allow other plugins to process the
+ test runner, return None. Only valid when using nose.TestProgram.
+
+ :param runner: :class:`nose.core.TextTestRunner`
+ (or other runner) instance
+ """
+ pass
+ prepareTestRunner._new = True
+
+ def report(self, stream):
+ """Called after all error output has been printed. Print your
+ plugin's report to the provided stream. Return None to allow
+ other plugins to print reports, any other value to stop them.
+
+ :param stream: stream object; send your output here
+ :type stream: file-like object
+ """
+ pass
+
+ def setOutputStream(self, stream):
+ """Called before test output begins. To direct test output to a
+ new stream, return a stream object, which must implement a
+ `write(msg)` method. If you only want to note the stream, not
+ capture or redirect it, then return None.
+
+ :param stream: stream object; send your output here
+ :type stream: file-like object
+ """
+
+ def startContext(self, context):
+ """Called before context setup and the running of tests in the
+ context. Note that tests have already been *loaded* from the
+ context before this call.
+
+ :param context: the context about to be setup. May be a module or
+ class, or any other object that contains tests.
+ """
+ pass
+ startContext._new = True
+
+ def startTest(self, test):
+ """Called before each test is run. DO NOT return a value unless
+ you want to stop other plugins from seeing the test start.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def stopContext(self, context):
+ """Called after the tests in a context have run and the
+ context has been torn down.
+
+ :param context: the context that has been torn down. May be a module or
+ class, or any other object that contains tests.
+ """
+ pass
+ stopContext._new = True
+
+ def stopTest(self, test):
+ """Called after each test is run. DO NOT return a value unless
+ you want to stop other plugins from seeing that the test has stopped.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+
+ def testName(self, test):
+ """Return a short test name. Called by `nose.case.Test.__str__`.
+
+ :param test: the test case
+ :type test: :class:`nose.case.Test`
+ """
+ pass
+ testName._new = True
+
+ def wantClass(self, cls):
+ """Return true if you want the main test selector to collect
+ tests from this class, false if you don't, and None if you don't
+ care.
+
+ :param cls: The class being examined by the selector
+ """
+ pass
+
+ def wantDirectory(self, dirname):
+ """Return true if you want test collection to descend into this
+ directory, false if you do not, and None if you don't care.
+
+ :param dirname: Full path to directory being examined by the selector
+ """
+ pass
+
+ def wantFile(self, file):
+ """Return true if you want to collect tests from this file,
+ false if you do not and None if you don't care.
+
+ Change from 0.9: The optional package parameter is no longer passed.
+
+ :param file: Full path to file being examined by the selector
+ """
+ pass
+
+ def wantFunction(self, function):
+ """Return true to collect this function as a test, false to
+ prevent it from being collected, and None if you don't care.
+
+ :param function: The function object being examined by the selector
+ """
+ pass
+
+ def wantMethod(self, method):
+ """Return true to collect this method as a test, false to
+ prevent it from being collected, and None if you don't care.
+
+ :param method: The method object being examined by the selector
+ :type method: unbound method
+ """
+ pass
+
+ def wantModule(self, module):
+ """Return true if you want to collection to descend into this
+ module, false to prevent the collector from descending into the
+ module, and None if you don't care.
+
+ :param module: The module object being examined by the selector
+ :type module: python module
+ """
+ pass
+
+ def wantModuleTests(self, module):
+ """
+ .. warning:: DEPRECATED -- this method will not be called, it has
+ been folded into wantModule.
+ """
+ pass
+ wantModuleTests.deprecated = True
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py b/scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py
new file mode 100755
index 00000000..4fcc0018
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/builtin.py
@@ -0,0 +1,34 @@
+"""
+Lists builtin plugins.
+"""
+plugins = []
+builtins = (
+ ('nose.plugins.attrib', 'AttributeSelector'),
+ ('nose.plugins.capture', 'Capture'),
+ ('nose.plugins.logcapture', 'LogCapture'),
+ ('nose.plugins.cover', 'Coverage'),
+ ('nose.plugins.debug', 'Pdb'),
+ ('nose.plugins.deprecated', 'Deprecated'),
+ ('nose.plugins.doctests', 'Doctest'),
+ ('nose.plugins.isolate', 'IsolationPlugin'),
+ ('nose.plugins.failuredetail', 'FailureDetail'),
+ ('nose.plugins.prof', 'Profile'),
+ ('nose.plugins.skip', 'Skip'),
+ ('nose.plugins.testid', 'TestId'),
+ ('nose.plugins.multiprocess', 'MultiProcess'),
+ ('nose.plugins.xunit', 'Xunit'),
+ ('nose.plugins.allmodules', 'AllModules'),
+ ('nose.plugins.collect', 'CollectOnly'),
+ )
+
+for module, cls in builtins:
+ try:
+ plugmod = __import__(module, globals(), locals(), [cls])
+ except KeyboardInterrupt:
+ raise
+ except:
+ continue
+ plug = getattr(plugmod, cls)
+ plugins.append(plug)
+ globals()[cls] = plug
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/capture.py b/scripts/external_libs/nose-1.3.4/nose/plugins/capture.py
new file mode 100755
index 00000000..fa4e5dca
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/capture.py
@@ -0,0 +1,115 @@
+"""
+This plugin captures stdout during test execution. If the test fails
+or raises an error, the captured output will be appended to the error
+or failure output. It is enabled by default but can be disabled with
+the options ``-s`` or ``--nocapture``.
+
+:Options:
+ ``--nocapture``
+ Don't capture stdout (any stdout output will be printed immediately)
+
+"""
+import logging
+import os
+import sys
+from nose.plugins.base import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.util import ln
+from StringIO import StringIO
+
+
+log = logging.getLogger(__name__)
+
+class Capture(Plugin):
+ """
+ Output capture plugin. Enabled by default. Disable with ``-s`` or
+ ``--nocapture``. This plugin captures stdout during test execution,
+ appending any output captured to the error or failure output,
+ should the test fail or raise an error.
+ """
+ enabled = True
+ env_opt = 'NOSE_NOCAPTURE'
+ name = 'capture'
+ score = 1600
+
+ def __init__(self):
+ self.stdout = []
+ self._buf = None
+
+ def options(self, parser, env):
+ """Register commandline options
+ """
+ parser.add_option(
+ "-s", "--nocapture", action="store_false",
+ default=not env.get(self.env_opt), dest="capture",
+ help="Don't capture stdout (any stdout output "
+ "will be printed immediately) [NOSE_NOCAPTURE]")
+
+ def configure(self, options, conf):
+ """Configure plugin. Plugin is enabled by default.
+ """
+ self.conf = conf
+ if not options.capture:
+ self.enabled = False
+
+ def afterTest(self, test):
+ """Clear capture buffer.
+ """
+ self.end()
+ self._buf = None
+
+ def begin(self):
+ """Replace sys.stdout with capture buffer.
+ """
+ self.start() # get an early handle on sys.stdout
+
+ def beforeTest(self, test):
+ """Flush capture buffer.
+ """
+ self.start()
+
+ def formatError(self, test, err):
+ """Add captured output to error report.
+ """
+ test.capturedOutput = output = self.buffer
+ self._buf = None
+ if not output:
+ # Don't return None, as that would prevent other formatters
+ # from running and would discard earlier formatters' output;
+ # instead return the err we got
+ return err
+ ec, ev, tb = err
+ return (ec, self.addCaptureToErr(ev, output), tb)
+
+ def formatFailure(self, test, err):
+ """Add captured output to failure report.
+ """
+ return self.formatError(test, err)
+
+ def addCaptureToErr(self, ev, output):
+ ev = exc_to_unicode(ev)
+ output = force_unicode(output)
+ return u'\n'.join([ev, ln(u'>> begin captured stdout <<'),
+ output, ln(u'>> end captured stdout <<')])
+
+ def start(self):
+ self.stdout.append(sys.stdout)
+ self._buf = StringIO()
+ sys.stdout = self._buf
+
+ def end(self):
+ if self.stdout:
+ sys.stdout = self.stdout.pop()
+
+ def finalize(self, result):
+ """Restore stdout.
+ """
+ while self.stdout:
+ self.end()
+
+ def _get_buffer(self):
+ if self._buf is not None:
+ return self._buf.getvalue()
+
+ buffer = property(_get_buffer, None, None,
+ """Captured stdout output.""")
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/collect.py b/scripts/external_libs/nose-1.3.4/nose/plugins/collect.py
new file mode 100755
index 00000000..6f9f0faa
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/collect.py
@@ -0,0 +1,94 @@
+"""
+This plugin bypasses the actual execution of tests, and instead just collects
+test names. Fixtures are also bypassed, so running nosetests with the
+collection plugin enabled should be very quick.
+
+This plugin is useful in combination with the testid plugin (``--with-id``).
+Run both together to get an indexed list of all tests, which will enable you to
+run individual tests by index number.
+
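+For example, combining the two plugins (an illustrative invocation)::
+
+ $ nosetests --collect-only --with-id -v
+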
+This plugin is also useful for counting tests in a test suite, and making
+people watching your demo think all of your tests pass.
+"""
+from nose.plugins.base import Plugin
+from nose.case import Test
+import logging
+import unittest
+
+log = logging.getLogger(__name__)
+
+
+class CollectOnly(Plugin):
+ """
+ Collect and output test names only, don't run any tests.
+ """
+ name = "collect-only"
+ enableOpt = 'collect_only'
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option('--collect-only',
+ action='store_true',
+ dest=self.enableOpt,
+ default=env.get('NOSE_COLLECT_ONLY'),
+ help="Enable collect-only: %s [COLLECT_ONLY]" %
+ (self.help()))
+
+ def prepareTestLoader(self, loader):
+ """Install collect-only suite class in TestLoader.
+ """
+ # Disable context awareness
+ log.debug("Preparing test loader")
+ loader.suiteClass = TestSuiteFactory(self.conf)
+
+ def prepareTestCase(self, test):
+ """Replace actual test with dummy that always passes.
+ """
+ # Return something that always passes
+ log.debug("Preparing test case %s", test)
+ if not isinstance(test, Test):
+ return
+ def run(result):
+ # We need to make these plugin calls because there won't be
+ # a result proxy, due to using a stripped-down test suite
+ self.conf.plugins.startTest(test)
+ result.startTest(test)
+ self.conf.plugins.addSuccess(test)
+ result.addSuccess(test)
+ self.conf.plugins.stopTest(test)
+ result.stopTest(test)
+ return run
+
+
+class TestSuiteFactory:
+ """
+ Factory for producing configured test suites.
+ """
+ def __init__(self, conf):
+ self.conf = conf
+
+ def __call__(self, tests=(), **kw):
+ return TestSuite(tests, conf=self.conf)
+
+
+class TestSuite(unittest.TestSuite):
+ """
+ Basic test suite that bypasses most proxy and plugin calls, but does
+ wrap tests in a nose.case.Test so prepareTestCase will be called.
+ """
+ def __init__(self, tests=(), conf=None):
+ self.conf = conf
+ # Exec lazy suites: makes discovery depth-first
+ if callable(tests):
+ tests = tests()
+ log.debug("TestSuite(%r)", tests)
+ unittest.TestSuite.__init__(self, tests)
+
+ def addTest(self, test):
+ log.debug("Add test %s", test)
+ if isinstance(test, unittest.TestSuite):
+ self._tests.append(test)
+ else:
+ self._tests.append(Test(test, config=self.conf))
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/cover.py b/scripts/external_libs/nose-1.3.4/nose/plugins/cover.py
new file mode 100755
index 00000000..551f3320
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/cover.py
@@ -0,0 +1,253 @@
+"""If you have Ned Batchelder's coverage_ module installed, you may activate a
+coverage report with the ``--with-coverage`` switch or NOSE_WITH_COVERAGE
+environment variable. The coverage report will cover any python source module
+imported after the start of the test run, excluding modules that match
+testMatch. If you want to include those modules too, use the ``--cover-tests``
+switch, or set the NOSE_COVER_TESTS environment variable to a true value. To
+restrict the coverage report to modules from a particular package or packages,
+use the ``--cover-package`` switch or the NOSE_COVER_PACKAGE environment
+variable.
+
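+For example (``mypackage`` is an illustrative package name)::
+
+ $ nosetests --with-coverage --cover-package=mypackage --cover-html
+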
+.. _coverage: http://www.nedbatchelder.com/code/modules/coverage.html
+"""
+import logging
+import re
+import sys
+import StringIO
+from nose.plugins.base import Plugin
+from nose.util import src, tolist
+
+log = logging.getLogger(__name__)
+
+
+class Coverage(Plugin):
+ """
+ Activate a coverage report using Ned Batchelder's coverage module.
+ """
+ coverTests = False
+ coverPackages = None
+ coverInstance = None
+ coverErase = False
+ coverMinPercentage = None
+ score = 200
+ status = {}
+
+ def options(self, parser, env):
+ """
+ Add options to command line.
+ """
+ super(Coverage, self).options(parser, env)
+ parser.add_option("--cover-package", action="append",
+ default=env.get('NOSE_COVER_PACKAGE'),
+ metavar="PACKAGE",
+ dest="cover_packages",
+ help="Restrict coverage output to selected packages "
+ "[NOSE_COVER_PACKAGE]")
+ parser.add_option("--cover-erase", action="store_true",
+ default=env.get('NOSE_COVER_ERASE'),
+ dest="cover_erase",
+ help="Erase previously collected coverage "
+ "statistics before run")
+ parser.add_option("--cover-tests", action="store_true",
+ dest="cover_tests",
+ default=env.get('NOSE_COVER_TESTS'),
+ help="Include test modules in coverage report "
+ "[NOSE_COVER_TESTS]")
+ parser.add_option("--cover-min-percentage", action="store",
+ dest="cover_min_percentage",
+ default=env.get('NOSE_COVER_MIN_PERCENTAGE'),
+ help="Minimum percentage of coverage for tests "
+ "to pass [NOSE_COVER_MIN_PERCENTAGE]")
+ parser.add_option("--cover-inclusive", action="store_true",
+ dest="cover_inclusive",
+ default=env.get('NOSE_COVER_INCLUSIVE'),
+ help="Include all python files under working "
+ "directory in coverage report. Useful for "
+ "discovering holes in test coverage if not all "
+ "files are imported by the test suite. "
+ "[NOSE_COVER_INCLUSIVE]")
+ parser.add_option("--cover-html", action="store_true",
+ default=env.get('NOSE_COVER_HTML'),
+ dest='cover_html',
+ help="Produce HTML coverage information")
+ parser.add_option('--cover-html-dir', action='store',
+ default=env.get('NOSE_COVER_HTML_DIR', 'cover'),
+ dest='cover_html_dir',
+ metavar='DIR',
+ help='Produce HTML coverage information in dir')
+ parser.add_option("--cover-branches", action="store_true",
+ default=env.get('NOSE_COVER_BRANCHES'),
+ dest="cover_branches",
+ help="Include branch coverage in coverage report "
+ "[NOSE_COVER_BRANCHES]")
+ parser.add_option("--cover-xml", action="store_true",
+ default=env.get('NOSE_COVER_XML'),
+ dest="cover_xml",
+ help="Produce XML coverage information")
+ parser.add_option("--cover-xml-file", action="store",
+ default=env.get('NOSE_COVER_XML_FILE', 'coverage.xml'),
+ dest="cover_xml_file",
+ metavar="FILE",
+ help="Produce XML coverage information in file")
+
+ def configure(self, options, conf):
+ """
+ Configure plugin.
+ """
+ try:
+ self.status.pop('active')
+ except KeyError:
+ pass
+ super(Coverage, self).configure(options, conf)
+ if conf.worker:
+ return
+ if self.enabled:
+ try:
+ import coverage
+ if not hasattr(coverage, 'coverage'):
+ raise ImportError("Unable to import coverage module")
+ except ImportError:
+ log.error("Coverage not available: "
+ "unable to import coverage module")
+ self.enabled = False
+ return
+ self.conf = conf
+ self.coverErase = options.cover_erase
+ self.coverTests = options.cover_tests
+ self.coverPackages = []
+ if options.cover_packages:
+ if isinstance(options.cover_packages, (list, tuple)):
+ cover_packages = options.cover_packages
+ else:
+ cover_packages = [options.cover_packages]
+ for pkgs in [tolist(x) for x in cover_packages]:
+ self.coverPackages.extend(pkgs)
+ self.coverInclusive = options.cover_inclusive
+ if self.coverPackages:
+ log.info("Coverage report will include only packages: %s",
+ self.coverPackages)
+ self.coverHtmlDir = None
+ if options.cover_html:
+ self.coverHtmlDir = options.cover_html_dir
+ log.debug('Will put HTML coverage report in %s', self.coverHtmlDir)
+ self.coverBranches = options.cover_branches
+ self.coverXmlFile = None
+ if options.cover_min_percentage:
+ self.coverMinPercentage = int(options.cover_min_percentage.rstrip('%'))
+ if options.cover_xml:
+ self.coverXmlFile = options.cover_xml_file
+ log.debug('Will put XML coverage report in %s', self.coverXmlFile)
+ if self.enabled:
+ self.status['active'] = True
+ self.coverInstance = coverage.coverage(auto_data=False,
+ branch=self.coverBranches, data_suffix=None,
+ source=self.coverPackages)
+
+ def begin(self):
+ """
+ Begin recording coverage information.
+ """
+ log.debug("Coverage begin")
+ self.skipModules = sys.modules.keys()[:]
+ if self.coverErase:
+ log.debug("Clearing previously collected coverage statistics")
+ self.coverInstance.combine()
+ self.coverInstance.erase()
+ self.coverInstance.exclude('#pragma[: ]+[nN][oO] [cC][oO][vV][eE][rR]')
+ self.coverInstance.load()
+ self.coverInstance.start()
+
+ def report(self, stream):
+ """
+ Output code coverage report.
+ """
+ log.debug("Coverage report")
+ self.coverInstance.stop()
+ self.coverInstance.combine()
+ self.coverInstance.save()
+ modules = [module
+ for name, module in sys.modules.items()
+ if self.wantModuleCoverage(name, module)]
+ log.debug("Coverage report will cover modules: %s", modules)
+ self.coverInstance.report(modules, file=stream)
+
+ import coverage
+ if self.coverHtmlDir:
+ log.debug("Generating HTML coverage report")
+ try:
+ self.coverInstance.html_report(modules, self.coverHtmlDir)
+ except coverage.misc.CoverageException, e:
+ log.warning("Failed to generate HTML report: %s" % str(e))
+
+ if self.coverXmlFile:
+ log.debug("Generating XML coverage report")
+ try:
+ self.coverInstance.xml_report(modules, self.coverXmlFile)
+ except coverage.misc.CoverageException, e:
+ log.warning("Failed to generate XML report: %s" % str(e))
+
+ # make sure we have minimum required coverage
+ if self.coverMinPercentage:
+ f = StringIO.StringIO()
+ self.coverInstance.report(modules, file=f)
+
+ multiPackageRe = (r'-------\s\w+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+ r'\s+(\d+)%\s+\d*\s{0,1}$')
+ singlePackageRe = (r'-------\s[\w./]+\s+\d+\s+\d+(?:\s+\d+\s+\d+)?'
+ r'\s+(\d+)%(?:\s+[-\d, ]+)\s{0,1}$')
+
+ m = re.search(multiPackageRe, f.getvalue())
+ if m is None:
+ m = re.search(singlePackageRe, f.getvalue())
+
+ if m:
+ percentage = int(m.groups()[0])
+ if percentage < self.coverMinPercentage:
+ log.error('TOTAL Coverage did not reach minimum '
+ 'required: %d%%' % self.coverMinPercentage)
+ sys.exit(1)
+ else:
+ log.error("No total percentage was found in coverage output, "
+ "something went wrong.")
+
+
+ def wantModuleCoverage(self, name, module):
+ if not hasattr(module, '__file__'):
+ log.debug("no coverage of %s: no __file__", name)
+ return False
+ module_file = src(module.__file__)
+ if not module_file or not module_file.endswith('.py'):
+ log.debug("no coverage of %s: not a python file", name)
+ return False
+ if self.coverPackages:
+ for package in self.coverPackages:
+ if (re.findall(r'^%s\b' % re.escape(package), name)
+ and (self.coverTests
+ or not self.conf.testMatch.search(name))):
+ log.debug("coverage for %s", name)
+ return True
+ if name in self.skipModules:
+ log.debug("no coverage for %s: loaded before coverage start",
+ name)
+ return False
+ if self.conf.testMatch.search(name) and not self.coverTests:
+ log.debug("no coverage for %s: is a test", name)
+ return False
+ # accept any package that passed the previous tests, unless
+ # coverPackages is on -- in that case, if we wanted this
+ # module, we would have already returned True
+ return not self.coverPackages
+
+ def wantFile(self, file, package=None):
+ """If inclusive coverage enabled, return true for all source files
+ in wanted packages.
+ """
+ if self.coverInclusive:
+ if file.endswith(".py"):
+ if package and self.coverPackages:
+ for want in self.coverPackages:
+ if package.startswith(want):
+ return True
+ else:
+ return True
+ return None
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/debug.py b/scripts/external_libs/nose-1.3.4/nose/plugins/debug.py
new file mode 100755
index 00000000..78243e60
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/debug.py
@@ -0,0 +1,67 @@
+"""
+This plugin provides the ``--pdb``, ``--pdb-failures``, and ``--pdb-errors``
+options. The ``--pdb`` option will drop the test runner into pdb when it
+encounters an error or a failure. To drop into pdb only on failures, use
+``--pdb-failures``; only on errors, ``--pdb-errors``.
+"""
+
+import pdb
+from nose.plugins.base import Plugin
+
+class Pdb(Plugin):
+ """
+ Provides --pdb, --pdb-failures and --pdb-errors options that cause the
+ test runner to drop into pdb on errors or failures, on failures only,
+ or on errors only, respectively.
+ """
+ enabled_for_errors = False
+ enabled_for_failures = False
+ score = 5 # run last, among builtins
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option(
+ "--pdb", action="store_true", dest="debugBoth",
+ default=env.get('NOSE_PDB', False),
+ help="Drop into debugger on failures or errors")
+ parser.add_option(
+ "--pdb-failures", action="store_true",
+ dest="debugFailures",
+ default=env.get('NOSE_PDB_FAILURES', False),
+ help="Drop into debugger on failures")
+ parser.add_option(
+ "--pdb-errors", action="store_true",
+ dest="debugErrors",
+ default=env.get('NOSE_PDB_ERRORS', False),
+ help="Drop into debugger on errors")
+
+ def configure(self, options, conf):
+ """Configure which kinds of exceptions trigger plugin.
+ """
+ self.conf = conf
+ self.enabled_for_errors = options.debugErrors or options.debugBoth
+ self.enabled_for_failures = options.debugFailures or options.debugBoth
+ self.enabled = self.enabled_for_failures or self.enabled_for_errors
+
+ def addError(self, test, err):
+ """Enter pdb if configured to debug errors.
+ """
+ if not self.enabled_for_errors:
+ return
+ self.debug(err)
+
+ def addFailure(self, test, err):
+ """Enter pdb if configured to debug failures.
+ """
+ if not self.enabled_for_failures:
+ return
+ self.debug(err)
+
+ def debug(self, err):
+ import sys # FIXME why is this import here?
+ ec, ev, tb = err
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ try:
+ pdb.post_mortem(tb)
+ finally:
+ sys.stdout = stdout
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py b/scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py
new file mode 100755
index 00000000..461a26be
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/deprecated.py
@@ -0,0 +1,45 @@
+"""
+This plugin installs a DEPRECATED error class for the :class:`DeprecatedTest`
+exception. When :class:`DeprecatedTest` is raised, the exception will be logged
+in the deprecated attribute of the result, ``D`` or ``DEPRECATED`` (verbose)
+will be output, and the exception will not be counted as an error or failure.
+It is enabled by default, but can be turned off by using ``--no-deprecated``.
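+
+For example (a sketch; the test and message are illustrative):
+
+.. code-block:: python
+
+ from nose.plugins.deprecated import DeprecatedTest
+
+ def test_old_api():
+ raise DeprecatedTest("superseded by a newer test")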
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+class DeprecatedTest(Exception):
+ """Raise this exception to mark a test as deprecated.
+ """
+ pass
+
+
+class Deprecated(ErrorClassPlugin):
+ """
+ Installs a DEPRECATED error class for the DeprecatedTest exception. Enabled
+ by default.
+ """
+ enabled = True
+ deprecated = ErrorClass(DeprecatedTest,
+ label='DEPRECATED',
+ isfailure=False)
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ env_opt = 'NOSE_WITHOUT_DEPRECATED'
+ parser.add_option('--no-deprecated', action='store_true',
+ dest='noDeprecated', default=env.get(env_opt, False),
+ help="Disable special handling of DeprecatedTest "
+ "exceptions.")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noDeprecated', False)
+ if disable:
+ self.enabled = False
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py b/scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py
new file mode 100755
index 00000000..5ef65799
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/doctests.py
@@ -0,0 +1,455 @@
+"""Use the Doctest plugin with ``--with-doctest`` or the NOSE_WITH_DOCTEST
+environment variable to enable collection and execution of :mod:`doctests
+<doctest>`. Because doctests are usually included in the tested package
+(instead of being grouped into packages or modules of their own), nose only
+looks for them in the non-test packages it discovers in the working directory.
+
+Doctests may also be placed into files other than python modules, in which
+case they can be collected and executed by using the ``--doctest-extension``
+switch or NOSE_DOCTEST_EXTENSION environment variable to indicate which file
+extension(s) to load.
+
+When loading doctests from non-module files, use the ``--doctest-fixtures``
+switch to specify how to find modules containing fixtures for the tests. A
+module name will be produced by appending the value of that switch to the base
+name of each doctest file loaded. For example, a doctest file "widgets.rst"
+with the switch ``--doctest-fixtures=_fixt`` will load fixtures from the module
+``widgets_fixt.py``.
+
+A fixtures module may define any or all of the following functions:
+
+* setup([module]) or setup_module([module])
+
+ Called before the test runs. You may raise SkipTest to skip all tests.
+
+* teardown([module]) or teardown_module([module])
+
+ Called after the test runs, if setup/setup_module did not raise an
+ unhandled exception.
+
+* setup_test(test)
+
+ Called before the test. NOTE: the argument passed is a
+ doctest.DocTest instance, *not* a unittest.TestCase.
+
+* teardown_test(test)
+
+ Called after the test, if setup_test did not raise an exception. NOTE: the
+ argument passed is a doctest.DocTest instance, *not* a unittest.TestCase.
+
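+For illustration, a fixtures module for the ``widgets.rst`` example above
+(``widgets_fixt.py``; the bodies are only a sketch) might contain:
+
+.. code-block:: python
+
+ def setup_module(module):
+ # called once before the doctests in widgets.rst run
+ pass
+
+ def teardown_test(test):
+ # 'test' is a doctest.DocTest instance, not a unittest.TestCase
+ pass
+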
+Doctests are run like any other test, with the exception that output
+capture does not work; doctest does its own output capture while running a
+test.
+
+.. note ::
+
+ See :doc:`../doc_tests/test_doctest_fixtures/doctest_fixtures` for
+ additional documentation and examples.
+
+"""
+from __future__ import generators
+
+import logging
+import os
+import sys
+import unittest
+from inspect import getmodule
+from nose.plugins.base import Plugin
+from nose.suite import ContextList
+from nose.util import anyp, getpackage, test_address, resolve_name, \
+ src, tolist, isproperty
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+import sys
+import __builtin__ as builtin_mod
+
+log = logging.getLogger(__name__)
+
+try:
+ import doctest
+ doctest.DocTestCase
+ # system version of doctest is acceptable, but needs a monkeypatch
+except (ImportError, AttributeError):
+ # system version is too old
+ import nose.ext.dtcompat as doctest
+
+
+#
+# Doctest and coverage don't get along, so we need to create
+# a monkeypatch that will replace the part of doctest that
+# interferes with coverage reports.
+#
+# The monkeypatch is based on this zope patch:
+# http://svn.zope.org/Zope3/trunk/src/zope/testing/doctest.py?rev=28679&r1=28703&r2=28705
+#
+_orp = doctest._OutputRedirectingPdb
+
+class NoseOutputRedirectingPdb(_orp):
+ def __init__(self, out):
+ self.__debugger_used = False
+ _orp.__init__(self, out)
+
+ def set_trace(self):
+ self.__debugger_used = True
+ _orp.set_trace(self, sys._getframe().f_back)
+
+ def set_continue(self):
+ # Calling set_continue unconditionally would break unit test
+ # coverage reporting, as Bdb.set_continue calls sys.settrace(None).
+ if self.__debugger_used:
+ _orp.set_continue(self)
+doctest._OutputRedirectingPdb = NoseOutputRedirectingPdb
+
+
+class DoctestSuite(unittest.TestSuite):
+ """
+ Doctest suites are parallelizable at the module or file level only,
+ since they may be attached to objects that are not individually
+ addressable (like properties). This suite subclass is used when
+ loading doctests from a module to ensure that behavior.
+
+ This class is used only if the plugin is not fully prepared;
+ in normal use, the loader's suiteClass is used.
+
+ """
+ can_split = False
+
+ def __init__(self, tests=(), context=None, can_split=False):
+ self.context = context
+ self.can_split = can_split
+ unittest.TestSuite.__init__(self, tests=tests)
+
+ def address(self):
+ return test_address(self.context)
+
+ def __iter__(self):
+ # 2.3 compat
+ return iter(self._tests)
+
+ def __str__(self):
+ return str(self._tests)
+
+
+class Doctest(Plugin):
+ """
+ Activate doctest plugin to find and run doctests in non-test modules.
+ """
+ extension = None
+ suiteClass = DoctestSuite
+
+ def options(self, parser, env):
+ """Register commmandline options.
+ """
+ Plugin.options(self, parser, env)
+ parser.add_option('--doctest-tests', action='store_true',
+ dest='doctest_tests',
+ default=env.get('NOSE_DOCTEST_TESTS'),
+ help="Also look for doctests in test modules. "
+ "Note that classes, methods and functions should "
+ "have either doctests or non-doctest tests, "
+ "not both. [NOSE_DOCTEST_TESTS]")
+ parser.add_option('--doctest-extension', action="append",
+ dest="doctestExtension",
+ metavar="EXT",
+ help="Also look for doctests in files with "
+ "this extension [NOSE_DOCTEST_EXTENSION]")
+ parser.add_option('--doctest-result-variable',
+ dest='doctest_result_var',
+ default=env.get('NOSE_DOCTEST_RESULT_VAR'),
+ metavar="VAR",
+ help="Change the variable name set to the result of "
+ "the last interpreter command from the default '_'. "
+ "Can be used to avoid conflicts with the _() "
+ "function used for text translation. "
+ "[NOSE_DOCTEST_RESULT_VAR]")
+ parser.add_option('--doctest-fixtures', action="store",
+ dest="doctestFixtures",
+ metavar="SUFFIX",
+ help="Find fixtures for a doctest file in module "
+ "with this name appended to the base name "
+ "of the doctest file")
+ parser.add_option('--doctest-options', action="append",
+ dest="doctestOptions",
+ metavar="OPTIONS",
+ help="Specify options to pass to doctest. " +
+ "Eg. '+ELLIPSIS,+NORMALIZE_WHITESPACE'")
+ # Set the default as a list, if given in env; otherwise
+ # an additional value set on the command line will cause
+ # an error.
+ env_setting = env.get('NOSE_DOCTEST_EXTENSION')
+ if env_setting is not None:
+ parser.set_defaults(doctestExtension=tolist(env_setting))
+
+ def configure(self, options, config):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, config)
+ self.doctest_result_var = options.doctest_result_var
+ self.doctest_tests = options.doctest_tests
+ self.extension = tolist(options.doctestExtension)
+ self.fixtures = options.doctestFixtures
+ self.finder = doctest.DocTestFinder()
+ self.optionflags = 0
+ if options.doctestOptions:
+ flags = ",".join(options.doctestOptions).split(',')
+ for flag in flags:
+ if not flag or flag[0] not in '+-':
+ raise ValueError(
+ "Must specify doctest options with starting " +
+ "'+' or '-'. Got %s" % (flag,))
+ mode, option_name = flag[0], flag[1:]
+ option_flag = doctest.OPTIONFLAGS_BY_NAME.get(option_name)
+ if not option_flag:
+ raise ValueError("Unknown doctest option %s" %
+ (option_name,))
+ if mode == '+':
+ self.optionflags |= option_flag
+ elif mode == '-':
+ self.optionflags &= ~option_flag
+
+ def prepareTestLoader(self, loader):
+ """Capture loader's suiteClass.
+
+ This is used to create test suites from doctest files.
+
+ """
+ self.suiteClass = loader.suiteClass
+
+ def loadTestsFromModule(self, module):
+ """Load doctests from the module.
+ """
+ log.debug("loading from %s", module)
+ if not self.matches(module.__name__):
+ log.debug("Doctest doesn't want module %s", module)
+ return
+ try:
+ tests = self.finder.find(module)
+ except AttributeError:
+ log.exception("Attribute error loading from %s", module)
+ # nose allows module.__test__ = False; doctest does not and throws
+ # AttributeError
+ return
+ if not tests:
+ log.debug("No tests found in %s", module)
+ return
+ tests.sort()
+ module_file = src(module.__file__)
+ # FIXME this breaks the id plugin somehow (tests probably don't
+ # get wrapped in result proxy or something)
+ cases = []
+ for test in tests:
+ if not test.examples:
+ continue
+ if not test.filename:
+ test.filename = module_file
+ cases.append(DocTestCase(test,
+ optionflags=self.optionflags,
+ result_var=self.doctest_result_var))
+ if cases:
+ yield self.suiteClass(cases, context=module, can_split=False)
+
+ def loadTestsFromFile(self, filename):
+ """Load doctests from the file.
+
+ Tests are loaded only if filename's extension matches
+ configured doctest extension.
+
+ """
+ if self.extension and anyp(filename.endswith, self.extension):
+ name = os.path.basename(filename)
+ dh = open(filename)
+ try:
+ doc = dh.read()
+ finally:
+ dh.close()
+
+ fixture_context = None
+ globs = {'__file__': filename}
+ if self.fixtures:
+ base, ext = os.path.splitext(name)
+ dirname = os.path.dirname(filename)
+ sys.path.append(dirname)
+ fixt_mod = base + self.fixtures
+ try:
+ fixture_context = __import__(
+ fixt_mod, globals(), locals(), ["nop"])
+ except ImportError, e:
+ log.debug(
+ "Could not import %s: %s (%s)", fixt_mod, e, sys.path)
+ log.debug("Fixture module %s resolved to %s",
+ fixt_mod, fixture_context)
+ if hasattr(fixture_context, 'globs'):
+ globs = fixture_context.globs(globs)
+ parser = doctest.DocTestParser()
+ test = parser.get_doctest(
+ doc, globs=globs, name=name,
+ filename=filename, lineno=0)
+ if test.examples:
+ case = DocFileCase(
+ test,
+ optionflags=self.optionflags,
+ setUp=getattr(fixture_context, 'setup_test', None),
+ tearDown=getattr(fixture_context, 'teardown_test', None),
+ result_var=self.doctest_result_var)
+ if fixture_context:
+ yield ContextList((case,), context=fixture_context)
+ else:
+ yield case
+ else:
+ yield False # no tests to load
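+ # note (illustrative, following the lookup above): with
+ # --doctest-fixtures=_fixt, fixtures for a doctest file spam.txt come
+ # from a spam_fixt module, which may provide globs(globs),
+ # setup_test(test) and teardown_test(test)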
+
+ def makeTest(self, obj, parent):
+ """Look for doctests in the given object, which will be a
+ function, method or class.
+ """
+ name = getattr(obj, '__name__', 'Unnamed %s' % type(obj))
+ doctests = self.finder.find(obj, module=getmodule(parent), name=name)
+ if doctests:
+ for test in doctests:
+ if len(test.examples) == 0:
+ continue
+ yield DocTestCase(test, obj=obj, optionflags=self.optionflags,
+ result_var=self.doctest_result_var)
+
+ def matches(self, name):
+ # FIXME this seems wrong -- nothing is ever going to
+ # fail this test, since we're given a module NAME not FILE
+ if name == '__init__.py':
+ return False
+ # FIXME don't think we need include/exclude checks here?
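+ # in short: want the module when --doctest-tests is enabled, when its
+ # name does not look like a test module, or when it is explicitly
+ # included -- as long as it is not explicitly excluded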
+ return ((self.doctest_tests or not self.conf.testMatch.search(name)
+ or (self.conf.include
+ and filter(None,
+ [inc.search(name)
+ for inc in self.conf.include])))
+ and (not self.conf.exclude
+ or not filter(None,
+ [exc.search(name)
+ for exc in self.conf.exclude])))
+
+ def wantFile(self, file):
+ """Override to select all modules and any file ending with
+ configured doctest extension.
+ """
+ # always want .py files
+ if file.endswith('.py'):
+ return True
+ # also want files that match my extension
+ if (self.extension
+ and anyp(file.endswith, self.extension)
+ and (not self.conf.exclude
+ or not filter(None,
+ [exc.search(file)
+ for exc in self.conf.exclude]))):
+ return True
+ return None
+
+
+class DocTestCase(doctest.DocTestCase):
+ """Overrides DocTestCase to
+ provide an address() method that returns the correct address for
+ the doctest case. To provide hints for address(), an obj may also
+ be passed -- this will be used as the test object for purposes of
+ determining the test address, if it is provided.
+ """
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, obj=None, result_var='_'):
+ self._result_var = result_var
+ self._nose_obj = obj
+ super(DocTestCase, self).__init__(
+ test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+ checker=checker)
+
+ def address(self):
+ if self._nose_obj is not None:
+ return test_address(self._nose_obj)
+ obj = resolve_name(self._dt_test.name)
+
+ if isproperty(obj):
+ # properties have no connection to the class they are in
+ # so we can't just look 'em up, we have to first look up
+ # the class, then stick the prop on the end
+ parts = self._dt_test.name.split('.')
+ class_name = '.'.join(parts[:-1])
+ cls = resolve_name(class_name)
+ base_addr = test_address(cls)
+ return (base_addr[0], base_addr[1],
+ '.'.join([base_addr[2], parts[-1]]))
+ else:
+ return test_address(obj)
+
+ # doctests loaded via find(obj) omit the module name
+ # so we need to override id, __repr__ and shortDescription
+ # bonus: this will squash a 2.3 vs 2.4 incompatibility
+ def id(self):
+ name = self._dt_test.name
+ filename = self._dt_test.filename
+ if filename is not None:
+ pk = getpackage(filename)
+ if pk is None:
+ return name
+ if not name.startswith(pk):
+ name = "%s.%s" % (pk, name)
+ return name
+
+ def __repr__(self):
+ name = self.id()
+ name = name.split('.')
+ return "%s (%s)" % (name[-1], '.'.join(name[:-1]))
+ __str__ = __repr__
+
+ def shortDescription(self):
+ return 'Doctest: %s' % self.id()
+
+ def setUp(self):
+ if self._result_var is not None:
+ self._old_displayhook = sys.displayhook
+ sys.displayhook = self._displayhook
+ super(DocTestCase, self).setUp()
+
+ def _displayhook(self, value):
+ if value is None:
+ return
+ setattr(builtin_mod, self._result_var, value)
+ print repr(value)
+
+ def tearDown(self):
+ super(DocTestCase, self).tearDown()
+ if self._result_var is not None:
+ sys.displayhook = self._old_displayhook
+ delattr(builtin_mod, self._result_var)
+
+
+class DocFileCase(doctest.DocFileCase):
+ """Overrides to provide address() method that returns the correct
+ address for the doc file case.
+ """
+ def __init__(self, test, optionflags=0, setUp=None, tearDown=None,
+ checker=None, result_var='_'):
+ self._result_var = result_var
+ super(DocFileCase, self).__init__(
+ test, optionflags=optionflags, setUp=setUp, tearDown=tearDown,
+ checker=checker)
+
+ def address(self):
+ return (self._dt_test.filename, None, None)
+
+ def setUp(self):
+ if self._result_var is not None:
+ self._old_displayhook = sys.displayhook
+ sys.displayhook = self._displayhook
+ super(DocFileCase, self).setUp()
+
+ def _displayhook(self, value):
+ if value is None:
+ return
+ setattr(builtin_mod, self._result_var, value)
+ print repr(value)
+
+ def tearDown(self):
+ super(DocFileCase, self).tearDown()
+ if self._result_var is not None:
+ sys.displayhook = self._old_displayhook
+ delattr(builtin_mod, self._result_var)
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py b/scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py
new file mode 100755
index 00000000..d1540e00
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/errorclass.py
@@ -0,0 +1,210 @@
+"""
+ErrorClass Plugins
+------------------
+
+ErrorClass plugins provide an easy way to add support for custom
+handling of particular classes of exceptions.
+
+An ErrorClass plugin defines one or more ErrorClasses and how each is
+handled and reported on. Each error class is stored in a different
+attribute on the result, and reported separately. Each error class must
+indicate the exceptions that fall under that class, the label to use
+for reporting, and whether exceptions of the class should be
+considered as failures for the whole test run.
+
+ErrorClasses use a declarative syntax. Assign an ErrorClass to the
+attribute you wish to add to the result object, defining the
+exceptions, label and isfailure attributes. For example, to declare an
+ErrorClassPlugin that defines TodoErrors (and subclasses of TodoError)
+as an error class with the label 'TODO' that is considered a failure,
+do this:
+
+ >>> class Todo(Exception):
+ ... pass
+ >>> class TodoError(ErrorClassPlugin):
+ ... todo = ErrorClass(Todo, label='TODO', isfailure=True)
+
+The MetaErrorClass metaclass translates the ErrorClass declarations
+into the tuples used by the error handling and reporting functions in
+the result. This is an internal format and subject to change; you
+should always use the declarative syntax for attaching ErrorClasses to
+an ErrorClass plugin.
+
+ >>> TodoError.errorClasses # doctest: +ELLIPSIS
+ ((<class ...Todo...>, ('todo', 'TODO', True)),)
+
+Let's see the plugin in action. First some boilerplate.
+
+ >>> import sys
+ >>> import unittest
+ >>> try:
+ ... # 2.7+
+ ... from unittest.runner import _WritelnDecorator
+ ... except ImportError:
+ ... from unittest import _WritelnDecorator
+ ...
+ >>> buf = _WritelnDecorator(sys.stdout)
+
+Now define a test case that raises a Todo.
+
+ >>> class TestTodo(unittest.TestCase):
+ ... def runTest(self):
+ ... raise Todo("I need to test something")
+ >>> case = TestTodo()
+
+Prepare the result using our plugin. Normally this happens during the
+course of test execution within nose -- you won't be doing this
+yourself. For the purposes of this testing document, I'm stepping
+through the internal process of nose so you can see what happens at
+each step.
+
+ >>> plugin = TodoError()
+ >>> from nose.result import _TextTestResult
+ >>> result = _TextTestResult(stream=buf, descriptions=0, verbosity=2)
+ >>> plugin.prepareTestResult(result)
+
+Now run the test. TODO is printed.
+
+ >>> _ = case(result) # doctest: +ELLIPSIS
+ runTest (....TestTodo) ... TODO: I need to test something
+
+Errors and failures are empty, but todo has our test:
+
+ >>> result.errors
+ []
+ >>> result.failures
+ []
+ >>> result.todo # doctest: +ELLIPSIS
+ [(<....TestTodo testMethod=runTest>, '...Todo: I need to test something\\n')]
+ >>> result.printErrors() # doctest: +ELLIPSIS
+ <BLANKLINE>
+ ======================================================================
+ TODO: runTest (....TestTodo)
+ ----------------------------------------------------------------------
+ Traceback (most recent call last):
+ ...
+ ...Todo: I need to test something
+ <BLANKLINE>
+
+Since we defined a Todo as a failure, the run was not successful.
+
+ >>> result.wasSuccessful()
+ False
+"""
+
+from nose.pyversion import make_instancemethod
+from nose.plugins.base import Plugin
+from nose.result import TextTestResult
+from nose.util import isclass
+
+class MetaErrorClass(type):
+ """Metaclass for ErrorClassPlugins that allows error classes to be
+ set up in a declarative manner.
+ """
+ def __init__(self, name, bases, attr):
+ errorClasses = []
+ for name, detail in attr.items():
+ if isinstance(detail, ErrorClass):
+ attr.pop(name)
+ for cls in detail:
+ errorClasses.append(
+ (cls, (name, detail.label, detail.isfailure)))
+ super(MetaErrorClass, self).__init__(name, bases, attr)
+ self.errorClasses = tuple(errorClasses)
+
+
+class ErrorClass(object):
+ def __init__(self, *errorClasses, **kw):
+ self.errorClasses = errorClasses
+ try:
+ for key in ('label', 'isfailure'):
+ setattr(self, key, kw.pop(key))
+ except KeyError:
+ raise TypeError("%r is a required named argument for ErrorClass"
+ % key)
+
+ def __iter__(self):
+ return iter(self.errorClasses)
+
+
+class ErrorClassPlugin(Plugin):
+ """
+ Base class for ErrorClass plugins. Subclass this class and declare the
+ exceptions that you wish to handle as attributes of the subclass.
+ """
+ __metaclass__ = MetaErrorClass
+ score = 1000
+ errorClasses = ()
+
+ def addError(self, test, err):
+ err_cls, a, b = err
+ if not isclass(err_cls):
+ return
+ classes = [e[0] for e in self.errorClasses]
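+ # returning a true value indicates that the error falls under one of
+ # this plugin's declared error classes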
+ if filter(lambda c: issubclass(err_cls, c), classes):
+ return True
+
+ def prepareTestResult(self, result):
+ if not hasattr(result, 'errorClasses'):
+ self.patchResult(result)
+ for cls, (storage_attr, label, isfail) in self.errorClasses:
+ if cls not in result.errorClasses:
+ storage = getattr(result, storage_attr, [])
+ setattr(result, storage_attr, storage)
+ result.errorClasses[cls] = (storage, label, isfail)
+
+ def patchResult(self, result):
+ result.printLabel = print_label_patch(result)
+ result._orig_addError, result.addError = \
+ result.addError, add_error_patch(result)
+ result._orig_wasSuccessful, result.wasSuccessful = \
+ result.wasSuccessful, wassuccessful_patch(result)
+ if hasattr(result, 'printErrors'):
+ result._orig_printErrors, result.printErrors = \
+ result.printErrors, print_errors_patch(result)
+ if hasattr(result, 'addSkip'):
+ result._orig_addSkip, result.addSkip = \
+ result.addSkip, add_skip_patch(result)
+ result.errorClasses = {}
+
+
+def add_error_patch(result):
+ """Create a new addError method to patch into a result instance
+ that recognizes the errorClasses attribute and deals with
+ errorclasses correctly.
+ """
+ return make_instancemethod(TextTestResult.addError, result)
+
+
+def print_errors_patch(result):
+ """Create a new printErrors method that prints errorClasses items
+ as well.
+ """
+ return make_instancemethod(TextTestResult.printErrors, result)
+
+
+def print_label_patch(result):
+ """Create a new printLabel method that prints errorClasses items
+ as well.
+ """
+ return make_instancemethod(TextTestResult.printLabel, result)
+
+
+def wassuccessful_patch(result):
+ """Create a new wasSuccessful method that checks errorClasses for
+ exceptions that were put into other slots than error or failure
+ but that still count as not success.
+ """
+ return make_instancemethod(TextTestResult.wasSuccessful, result)
+
+
+def add_skip_patch(result):
+ """Create a new addSkip method to patch into a result instance
+ that delegates to addError.
+ """
+ return make_instancemethod(TextTestResult.addSkip, result)
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py b/scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py
new file mode 100755
index 00000000..6462865d
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/failuredetail.py
@@ -0,0 +1,49 @@
+"""
+This plugin provides assert introspection. When the plugin is enabled
+and a test failure occurs, the traceback is displayed with extra context
+around the line in which the exception was raised. Simple variable
+substitution is also performed in the context output to provide more
+debugging information.
+"""
+
+from nose.plugins import Plugin
+from nose.pyversion import exc_to_unicode, force_unicode
+from nose.inspector import inspect_traceback
+
+class FailureDetail(Plugin):
+ """
+ Plugin that provides extra information in tracebacks of test failures.
+ """
+ score = 1600 # before capture
+
+ def options(self, parser, env):
+ """Register commmandline options.
+ """
+ parser.add_option(
+ "-d", "--detailed-errors", "--failure-detail",
+ action="store_true",
+ default=env.get('NOSE_DETAILED_ERRORS'),
+ dest="detailedErrors", help="Add detail to error"
+ " output by attempting to evaluate failed"
+ " asserts [NOSE_DETAILED_ERRORS]")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.can_configure:
+ return
+ self.enabled = options.detailedErrors
+ self.conf = conf
+
+ def formatFailure(self, test, err):
+ """Add detail from traceback inspection to error message of a failure.
+ """
+ ec, ev, tb = err
+ tbinfo, str_ev = None, exc_to_unicode(ev)
+
+ if tb:
+ tbinfo = force_unicode(inspect_traceback(tb))
+ str_ev = '\n'.join([str_ev, tbinfo])
+ test.tbinfo = tbinfo
+ return (ec, str_ev, tb)
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py b/scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py
new file mode 100755
index 00000000..13235dfb
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/isolate.py
@@ -0,0 +1,103 @@
+"""The isolation plugin resets the contents of sys.modules after running
+each test module or package. Use it by setting ``--with-isolation`` or the
+NOSE_WITH_ISOLATION environment variable.
+
+The effects are similar to wrapping the following functions around the
+import and execution of each test module::
+
+ def setup(module):
+ module._mods = sys.modules.copy()
+
+ def teardown(module):
+ to_del = [ m for m in sys.modules.keys() if m not in
+ module._mods ]
+ for mod in to_del:
+ del sys.modules[mod]
+ sys.modules.update(module._mods)
+
+Isolation works only during lazy loading. In normal use, this is only
+during discovery of modules within a directory, where the process of
+importing, loading tests and running tests from each module is
+encapsulated in a single loadTestsFromName call. This plugin
+implements loadTestsFromNames to force the same lazy-loading there,
+which allows isolation to work in directed mode as well as discovery,
+at the cost of some efficiency: lazy-loading names forces full context
+setup and teardown to run for each name, defeating the grouping that
+is normally used to ensure that context setup and teardown are run the
+fewest possible times for a given set of names.
+
+.. warning ::
+
+ This plugin should not be used in conjunction with other plugins
+ that assume that modules, once imported, will stay imported; for
+ instance, it may cause very odd results when used with the coverage
+ plugin.
+
+"""
+
+import logging
+import sys
+
+from nose.plugins import Plugin
+
+
+log = logging.getLogger('nose.plugins.isolation')
+
+class IsolationPlugin(Plugin):
+ """
+ Activate the isolation plugin to isolate changes to external
+ modules to a single test module or package. The isolation plugin
+ resets the contents of sys.modules after each test module or
+ package runs to its state before the test. PLEASE NOTE that this
+ plugin should not be used with the coverage plugin, or in any other case
+ where module reloading may produce undesirable side-effects.
+ """
+ score = 10 # I want to be last
+ name = 'isolation'
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, conf)
+ self._mod_stack = []
+
+ def beforeContext(self):
+ """Copy sys.modules onto my mod stack
+ """
+ mods = sys.modules.copy()
+ self._mod_stack.append(mods)
+
+ def afterContext(self):
+ """Pop my mod stack and restore sys.modules to the state
+ it was in when mod stack was pushed.
+ """
+ mods = self._mod_stack.pop()
+ to_del = [ m for m in sys.modules.keys() if m not in mods ]
+ if to_del:
+ log.debug('removing sys modules entries: %s', to_del)
+ for mod in to_del:
+ del sys.modules[mod]
+ sys.modules.update(mods)
+
+ def loadTestsFromNames(self, names, module=None):
+ """Create a lazy suite that calls beforeContext and afterContext
+ around each name. The side-effect of this is that full context
+ fixtures will be set up and torn down around each test named.
+ """
+ # Fast path for when we don't care
+ if not names or len(names) == 1:
+ return
+ loader = self.loader
+ plugins = self.conf.plugins
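+ # the generator below is consumed lazily by the suite, so
+ # beforeContext/afterContext run around each name as its tests are
+ # loaded and executed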
+ def lazy():
+ for name in names:
+ plugins.beforeContext()
+ yield loader.loadTestsFromName(name, module=module)
+ plugins.afterContext()
+ return (loader.suiteClass(lazy), [])
+
+ def prepareTestLoader(self, loader):
+ """Get handle on test loader so we can use it in loadTestsFromNames.
+ """
+ self.loader = loader
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py b/scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py
new file mode 100755
index 00000000..4c9a79f6
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/logcapture.py
@@ -0,0 +1,245 @@
+"""
+This plugin captures logging statements issued during test execution. When an
+error or failure occurs, the captured log messages are attached to the running
+test in the test.capturedLogging attribute, and displayed with the error or
+failure output. It is enabled by default but can be turned off with the option
+``--nologcapture``.
+
+You can filter captured logging statements with the ``--logging-filter`` option.
+If set, it specifies which logger(s) will be captured; records from loggers
+that do not match are passed through uncaptured. For example, specifying
+``--logging-filter=sqlalchemy,myapp`` ensures that only statements logged
+via the sqlalchemy.engine, myapp or myapp.foo.bar loggers are captured.
+
+You can remove other installed logging handlers with the
+``--logging-clear-handlers`` option.
+"""
+
+import logging
+from logging import Handler
+import threading
+
+from nose.plugins.base import Plugin
+from nose.util import anyp, ln, safe_str
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+log = logging.getLogger(__name__)
+
+class FilterSet(object):
+ def __init__(self, filter_components):
+ self.inclusive, self.exclusive = self._partition(filter_components)
+
+ # @staticmethod
+ def _partition(components):
+ inclusive, exclusive = [], []
+ for component in components:
+ if component.startswith('-'):
+ exclusive.append(component[1:])
+ else:
+ inclusive.append(component)
+ return inclusive, exclusive
+ _partition = staticmethod(_partition)
+
+ def allow(self, record):
+ """returns whether this record should be printed"""
+ if not self:
+ # nothing to filter
+ return True
+ return self._allow(record) and not self._deny(record)
+
+ # @staticmethod
+ def _any_match(matchers, record):
+ """return the bool of whether `record` starts with
+ any item in `matchers`"""
+ def record_matches_key(key):
+ return record == key or record.startswith(key + '.')
+ return anyp(bool, map(record_matches_key, matchers))
+ _any_match = staticmethod(_any_match)
+
+ def _allow(self, record):
+ if not self.inclusive:
+ return True
+ return self._any_match(self.inclusive, record)
+
+ def _deny(self, record):
+ if not self.exclusive:
+ return False
+ return self._any_match(self.exclusive, record)
+
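+# Illustrative behaviour of FilterSet, given the prefix-matching rule above
+# (comments only, not part of nose):
+#
+#     fs = FilterSet(['foo', '-foo.bar'])
+#     fs.allow('foo.baz')      # True: matches 'foo' and is not excluded
+#     fs.allow('foo.bar.baz')  # False: excluded by '-foo.bar'
+#     fs.allow('foobar')       # False: neither 'foo' nor under 'foo.'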
+
+class MyMemoryHandler(Handler):
+ def __init__(self, logformat, logdatefmt, filters):
+ Handler.__init__(self)
+ fmt = logging.Formatter(logformat, logdatefmt)
+ self.setFormatter(fmt)
+ self.filterset = FilterSet(filters)
+ self.buffer = []
+ def emit(self, record):
+ self.buffer.append(self.format(record))
+ def flush(self):
+ pass # do nothing
+ def truncate(self):
+ self.buffer = []
+ def filter(self, record):
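+ # records from loggers rejected by the filterset are dropped:
+ # falling through returns None, which logging treats as false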
+ if self.filterset.allow(record.name):
+ return Handler.filter(self, record)
+ def __getstate__(self):
+ state = self.__dict__.copy()
+ del state['lock']
+ return state
+ def __setstate__(self, state):
+ self.__dict__.update(state)
+ self.lock = threading.RLock()
+
+
+class LogCapture(Plugin):
+ """
+ Log capture plugin. Enabled by default. Disable with --nologcapture.
+ This plugin captures logging statements issued during test execution,
+ appending any output captured to the error or failure output,
+ should the test fail or raise an error.
+ """
+ enabled = True
+ env_opt = 'NOSE_NOLOGCAPTURE'
+ name = 'logcapture'
+ score = 500
+ logformat = '%(name)s: %(levelname)s: %(message)s'
+ logdatefmt = None
+ clear = False
+ filters = ['-nose']
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ parser.add_option(
+ "--nologcapture", action="store_false",
+ default=not env.get(self.env_opt), dest="logcapture",
+ help="Disable logging capture plugin. "
+ "Logging configuration will be left intact."
+ " [NOSE_NOLOGCAPTURE]")
+ parser.add_option(
+ "--logging-format", action="store", dest="logcapture_format",
+ default=env.get('NOSE_LOGFORMAT') or self.logformat,
+ metavar="FORMAT",
+ help="Specify custom format to print statements. "
+ "Uses the same format as used by standard logging handlers."
+ " [NOSE_LOGFORMAT]")
+ parser.add_option(
+ "--logging-datefmt", action="store", dest="logcapture_datefmt",
+ default=env.get('NOSE_LOGDATEFMT') or self.logdatefmt,
+ metavar="FORMAT",
+ help="Specify custom date/time format to print statements. "
+ "Uses the same format as used by standard logging handlers."
+ " [NOSE_LOGDATEFMT]")
+ parser.add_option(
+ "--logging-filter", action="store", dest="logcapture_filters",
+ default=env.get('NOSE_LOGFILTER'),
+ metavar="FILTER",
+ help="Specify which statements to filter in/out. "
+ "By default, everything is captured. If the output is too"
+ " verbose,\nuse this option to filter out needless output.\n"
+ "Example: filter=foo will capture statements issued ONLY to\n"
+ " foo or foo.what.ever.sub but not foobar or other logger.\n"
+ "Specify multiple loggers with comma: filter=foo,bar,baz.\n"
+ "If any logger name is prefixed with a minus, eg filter=-foo,\n"
+ "it will be excluded rather than included. Default: "
+ "exclude logging messages from nose itself (-nose)."
+ " [NOSE_LOGFILTER]\n")
+ parser.add_option(
+ "--logging-clear-handlers", action="store_true",
+ default=False, dest="logcapture_clear",
+ help="Clear all other logging handlers")
+ parser.add_option(
+ "--logging-level", action="store",
+ default='NOTSET', dest="logcapture_level",
+ help="Set the log level to capture")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ self.conf = conf
+ # Disable if explicitly disabled, or if logging is
+ # configured via logging config file
+ if not options.logcapture or conf.loggingConfig:
+ self.enabled = False
+ self.logformat = options.logcapture_format
+ self.logdatefmt = options.logcapture_datefmt
+ self.clear = options.logcapture_clear
+ self.loglevel = options.logcapture_level
+ if options.logcapture_filters:
+ self.filters = options.logcapture_filters.split(',')
+
+ def setupLoghandler(self):
+ # setup our handler with root logger
+ root_logger = logging.getLogger()
+ if self.clear:
+ if hasattr(root_logger, "handlers"):
+ for handler in root_logger.handlers:
+ root_logger.removeHandler(handler)
+ for logger in logging.Logger.manager.loggerDict.values():
+ if hasattr(logger, "handlers"):
+ for handler in logger.handlers:
+ logger.removeHandler(handler)
+ # make sure there isn't one already
+ # you can't simply use "if self.handler not in root_logger.handlers"
+ # since at least in unit tests this doesn't work --
+ # LogCapture() is instantiated for each test case while root_logger
+ # is module global
+ # so we always add new MyMemoryHandler instance
+ for handler in root_logger.handlers[:]:
+ if isinstance(handler, MyMemoryHandler):
+ root_logger.handlers.remove(handler)
+ root_logger.addHandler(self.handler)
+ # to make sure everything gets captured
+ loglevel = getattr(self, "loglevel", "NOTSET")
+ root_logger.setLevel(getattr(logging, loglevel))
+
+ def begin(self):
+ """Set up logging handler before test run begins.
+ """
+ self.start()
+
+ def start(self):
+ self.handler = MyMemoryHandler(self.logformat, self.logdatefmt,
+ self.filters)
+ self.setupLoghandler()
+
+ def end(self):
+ pass
+
+ def beforeTest(self, test):
+ """Clear buffers and handlers before test.
+ """
+ self.setupLoghandler()
+
+ def afterTest(self, test):
+ """Clear buffers after test.
+ """
+ self.handler.truncate()
+
+ def formatFailure(self, test, err):
+ """Add captured log messages to failure output.
+ """
+ return self.formatError(test, err)
+
+ def formatError(self, test, err):
+ """Add captured log messages to error output.
+ """
+ # logic flow copied from Capture.formatError
+ test.capturedLogging = records = self.formatLogRecords()
+ if not records:
+ return err
+ ec, ev, tb = err
+ return (ec, self.addCaptureToErr(ev, records), tb)
+
+ def formatLogRecords(self):
+ return map(safe_str, self.handler.buffer)
+
+ def addCaptureToErr(self, ev, records):
+ return '\n'.join([safe_str(ev), ln('>> begin captured logging <<')] + \
+ records + \
+ [ln('>> end captured logging <<')])
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/manager.py b/scripts/external_libs/nose-1.3.4/nose/plugins/manager.py
new file mode 100755
index 00000000..4d2ed22b
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/manager.py
@@ -0,0 +1,460 @@
+"""
+Plugin Manager
+--------------
+
+A plugin manager class is used to load plugins, manage the list of
+loaded plugins, and proxy calls to those plugins.
+
+The plugin managers provided with nose are:
+
+:class:`PluginManager`
+ This manager doesn't implement loadPlugins, so it can only work
+ with a static list of plugins.
+
+:class:`BuiltinPluginManager`
+ This manager loads plugins referenced in ``nose.plugins.builtin``.
+
+:class:`EntryPointPluginManager`
+ This manager uses setuptools entrypoints to load plugins.
+
+:class:`ExtraPluginsPluginManager`
+ This manager loads extra plugins specified with the keyword
+ `addplugins`.
+
+:class:`DefaultPluginManager`
+ This is the manager class that will be used by default. If
+ setuptools is installed, it is a subclass of
+ :class:`EntryPointPluginManager` and :class:`BuiltinPluginManager`;
+ otherwise, an alias to :class:`BuiltinPluginManager`.
+
+:class:`RestrictedPluginManager`
+ This manager is for use in test runs where some plugin calls are
+ not available, such as runs started with ``python setup.py test``,
+ where the test runner is the default unittest :class:`TextTestRunner`. It
+ is a subclass of :class:`DefaultPluginManager`.
+
+Writing a plugin manager
+========================
+
+If you want to load plugins via some other means, you can write a
+plugin manager and pass an instance of your plugin manager class when
+instantiating the :class:`nose.config.Config` instance that you pass to
+:class:`TestProgram` (or :func:`main` or :func:`run`).
+
+To implement your plugin loading scheme, implement ``loadPlugins()``,
+and in that method, call ``addPlugin()`` with an instance of each plugin
+you wish to make available. Make sure to call the superclass
+``loadPlugins()`` as well if you have subclassed a manager other than
+``PluginManager``.
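+
+A minimal sketch (``MyPlugin`` is a stand-in for your own plugin class)::
+
+ class MyPluginManager(PluginManager):
+ def loadPlugins(self):
+ self.addPlugin(MyPlugin())
+ super(MyPluginManager, self).loadPlugins()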
+
+"""
+import inspect
+import logging
+import os
+import sys
+from itertools import chain as iterchain
+from warnings import warn
+import nose.config
+from nose.failure import Failure
+from nose.plugins.base import IPluginInterface
+from nose.pyversion import sort_list
+
+try:
+ import cPickle as pickle
+except:
+ import pickle
+try:
+ from cStringIO import StringIO
+except:
+ from StringIO import StringIO
+
+
+__all__ = ['DefaultPluginManager', 'PluginManager', 'EntryPointPluginManager',
+ 'BuiltinPluginManager', 'RestrictedPluginManager']
+
+log = logging.getLogger(__name__)
+
+
+class PluginProxy(object):
+ """Proxy for plugin calls. Essentially a closure bound to the
+ given call and plugin list.
+
+ The plugin proxy also must be bound to a particular plugin
+ interface specification, so that it knows what calls are available
+ and any special handling that is required for each call.
+ """
+ interface = IPluginInterface
+ def __init__(self, call, plugins):
+ try:
+ self.method = getattr(self.interface, call)
+ except AttributeError:
+ raise AttributeError("%s is not a valid %s method"
+ % (call, self.interface.__name__))
+ self.call = self.makeCall(call)
+ self.plugins = []
+ for p in plugins:
+ self.addPlugin(p, call)
+
+ def __call__(self, *arg, **kw):
+ return self.call(*arg, **kw)
+
+ def addPlugin(self, plugin, call):
+ """Add plugin to my list of plugins to call, if it has the attribute
+ I'm bound to.
+ """
+ meth = getattr(plugin, call, None)
+ if meth is not None:
+ if call == 'loadTestsFromModule' and \
+ len(inspect.getargspec(meth)[0]) == 2:
+ orig_meth = meth
+ meth = lambda module, path, **kwargs: orig_meth(module)
+ self.plugins.append((plugin, meth))
+
+ def makeCall(self, call):
+ if call == 'loadTestsFromNames':
+ # special case -- load tests from names behaves somewhat differently
+ # from other chainable calls, because plugins return a tuple, only
+ # part of which can be chained to the next plugin.
+ return self._loadTestsFromNames
+
+ meth = self.method
+ if getattr(meth, 'generative', False):
+ # call all plugins and yield a flattened iterator of their results
+ return lambda *arg, **kw: list(self.generate(*arg, **kw))
+ elif getattr(meth, 'chainable', False):
+ return self.chain
+ else:
+ # return a value from the first plugin that returns non-None
+ return self.simple
+
+ def chain(self, *arg, **kw):
+ """Call plugins in a chain, where the result of each plugin call is
+ sent to the next plugin as input. The final output result is returned.
+ """
+ result = None
+ # extract the static arguments (if any) from arg so they can
+ # be passed to each plugin call in the chain
+ static = [a for (static, a)
+ in zip(getattr(self.method, 'static_args', []), arg)
+ if static]
+ for p, meth in self.plugins:
+ result = meth(*arg, **kw)
+ arg = static[:]
+ arg.append(result)
+ return result
+
+ def generate(self, *arg, **kw):
+ """Call all plugins, yielding each item in each non-None result.
+ """
+ for p, meth in self.plugins:
+ result = None
+ try:
+ result = meth(*arg, **kw)
+ if result is not None:
+ for r in result:
+ yield r
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ exc = sys.exc_info()
+ yield Failure(*exc)
+ continue
+
+ def simple(self, *arg, **kw):
+ """Call all plugins, returning the first non-None result.
+ """
+ for p, meth in self.plugins:
+ result = meth(*arg, **kw)
+ if result is not None:
+ return result
+
+ def _loadTestsFromNames(self, names, module=None):
+ """Chainable but not quite normal. Plugins return a tuple of
+ (tests, names) after processing the names. The tests are added
+ to a suite that is accumulated throughout the full call, while
+ names are input for the next plugin in the chain.
+ """
+ suite = []
+ for p, meth in self.plugins:
+ result = meth(names, module=module)
+ if result is not None:
+ suite_part, names = result
+ if suite_part:
+ suite.extend(suite_part)
+ return suite, names
+
+
+class NoPlugins(object):
+ """Null Plugin manager that has no plugins."""
+ interface = IPluginInterface
+ def __init__(self):
+ self._plugins = self.plugins = ()
+
+ def __iter__(self):
+ # return an iterator, not a bare tuple, so iter() over the manager works
+ return iter(())
+
+ def _doNothing(self, *args, **kwds):
+ pass
+
+ def _emptyIterator(self, *args, **kwds):
+ return ()
+
+ def __getattr__(self, call):
+ method = getattr(self.interface, call)
+ if getattr(method, "generative", False):
+ return self._emptyIterator
+ else:
+ return self._doNothing
+
+ def addPlugin(self, plug):
+ raise NotImplementedError()
+
+ def addPlugins(self, plugins):
+ raise NotImplementedError()
+
+ def configure(self, options, config):
+ pass
+
+ def loadPlugins(self):
+ pass
+
+ def sort(self):
+ pass
+
+
+class PluginManager(object):
+ """Base class for plugin managers. PluginManager is intended to be
+ used only with a static list of plugins. The loadPlugins() implementation
+ only reloads plugins from _extraplugins to prevent those from being
+ overridden by a subclass.
+
+ The basic functionality of a plugin manager is to proxy all unknown
+ attributes through a ``PluginProxy`` to a list of plugins.
+
+ Note that the list of plugins *may not* be changed after the first plugin
+ call.
+ """
+ proxyClass = PluginProxy
+
+ def __init__(self, plugins=(), proxyClass=None):
+ self._plugins = []
+ self._extraplugins = ()
+ self._proxies = {}
+ if plugins:
+ self.addPlugins(plugins)
+ if proxyClass is not None:
+ self.proxyClass = proxyClass
+
+ def __getattr__(self, call):
+ try:
+ return self._proxies[call]
+ except KeyError:
+ proxy = self.proxyClass(call, self._plugins)
+ self._proxies[call] = proxy
+ return proxy
+
+ def __iter__(self):
+ return iter(self.plugins)
+
+ def addPlugin(self, plug):
+ # allow, for instance, plugins loaded via entry points to
+ # supplant builtin plugins.
+ new_name = getattr(plug, 'name', object())
+ self._plugins[:] = [p for p in self._plugins
+ if getattr(p, 'name', None) != new_name]
+ self._plugins.append(plug)
+
+ def addPlugins(self, plugins=(), extraplugins=()):
+ """extraplugins are maintained in a separate list and
+ re-added by loadPlugins() to prevent their being overwritten
+ by plugins added by a subclass of PluginManager
+ """
+ self._extraplugins = extraplugins
+ for plug in iterchain(plugins, extraplugins):
+ self.addPlugin(plug)
+
+ def configure(self, options, config):
+ """Configure the set of plugins with the given options
+ and config instance. After configuration, disabled plugins
+ are removed from the plugins list.
+ """
+ log.debug("Configuring plugins")
+ self.config = config
+ cfg = PluginProxy('configure', self._plugins)
+ cfg(options, config)
+ enabled = [plug for plug in self._plugins if plug.enabled]
+ self.plugins = enabled
+ self.sort()
+ log.debug("Plugins enabled: %s", enabled)
+
+ def loadPlugins(self):
+ for plug in self._extraplugins:
+ self.addPlugin(plug)
+
+ def sort(self):
+ return sort_list(self._plugins, lambda x: getattr(x, 'score', 1), reverse=True)
+
+ def _get_plugins(self):
+ return self._plugins
+
+ def _set_plugins(self, plugins):
+ self._plugins = []
+ self.addPlugins(plugins)
+
+ plugins = property(_get_plugins, _set_plugins, None,
+ """Access the list of plugins managed by
+ this plugin manager""")
+
+
+class ZeroNinePlugin:
+ """Proxy for 0.9 plugins, adapts 0.10 calls to 0.9 standard.
+ """
+ def __init__(self, plugin):
+ self.plugin = plugin
+
+ def options(self, parser, env=os.environ):
+ self.plugin.add_options(parser, env)
+
+ def addError(self, test, err):
+ if not hasattr(self.plugin, 'addError'):
+ return
+ # dispatch to addSkip or addDeprecated for those exception types
+ from nose.exc import SkipTest, DeprecatedTest
+ ec, ev, tb = err
+ if issubclass(ec, SkipTest):
+ if not hasattr(self.plugin, 'addSkip'):
+ return
+ return self.plugin.addSkip(test.test)
+ elif issubclass(ec, DeprecatedTest):
+ if not hasattr(self.plugin, 'addDeprecated'):
+ return
+ return self.plugin.addDeprecated(test.test)
+ # add capt
+ capt = test.capturedOutput
+ return self.plugin.addError(test.test, err, capt)
+
+ def loadTestsFromFile(self, filename):
+ if hasattr(self.plugin, 'loadTestsFromPath'):
+ return self.plugin.loadTestsFromPath(filename)
+
+ def addFailure(self, test, err):
+ if not hasattr(self.plugin, 'addFailure'):
+ return
+ # add capt and tbinfo
+ capt = test.capturedOutput
+ tbinfo = test.tbinfo
+ return self.plugin.addFailure(test.test, err, capt, tbinfo)
+
+ def addSuccess(self, test):
+ if not hasattr(self.plugin, 'addSuccess'):
+ return
+ capt = test.capturedOutput
+ self.plugin.addSuccess(test.test, capt)
+
+ def startTest(self, test):
+ if not hasattr(self.plugin, 'startTest'):
+ return
+ return self.plugin.startTest(test.test)
+
+ def stopTest(self, test):
+ if not hasattr(self.plugin, 'stopTest'):
+ return
+ return self.plugin.stopTest(test.test)
+
+ def __getattr__(self, val):
+ return getattr(self.plugin, val)
+
+
+class EntryPointPluginManager(PluginManager):
+ """Plugin manager that loads plugins from the `nose.plugins` and
+ `nose.plugins.0.10` entry points.
+ """
+ entry_points = (('nose.plugins.0.10', None),
+ ('nose.plugins', ZeroNinePlugin))
+
+ def loadPlugins(self):
+ """Load plugins by iterating the `nose.plugins` entry point.
+ """
+ from pkg_resources import iter_entry_points
+ loaded = {}
+ for entry_point, adapt in self.entry_points:
+ for ep in iter_entry_points(entry_point):
+ if ep.name in loaded:
+ continue
+ loaded[ep.name] = True
+ log.debug('%s load plugin %s', self.__class__.__name__, ep)
+ try:
+ plugcls = ep.load()
+ except KeyboardInterrupt:
+ raise
+ except Exception, e:
+ # never want a plugin load to kill the test run
+ # but we can't log here because the logger is not yet
+ # configured
+ warn("Unable to load plugin %s: %s" % (ep, e),
+ RuntimeWarning)
+ continue
+ if adapt:
+ plug = adapt(plugcls())
+ else:
+ plug = plugcls()
+ self.addPlugin(plug)
+ super(EntryPointPluginManager, self).loadPlugins()
+
+
+class BuiltinPluginManager(PluginManager):
+ """Plugin manager that loads plugins from the list in
+ `nose.plugins.builtin`.
+ """
+ def loadPlugins(self):
+ """Load plugins in nose.plugins.builtin
+ """
+ from nose.plugins import builtin
+ for plug in builtin.plugins:
+ self.addPlugin(plug())
+ super(BuiltinPluginManager, self).loadPlugins()
+
+try:
+ import pkg_resources
+ class DefaultPluginManager(EntryPointPluginManager, BuiltinPluginManager):
+ pass
+
+except ImportError:
+ class DefaultPluginManager(BuiltinPluginManager):
+ pass
+
+class RestrictedPluginManager(DefaultPluginManager):
+ """Plugin manager that restricts the plugin list to those not
+ excluded by a list of exclude methods. Any plugin that implements
+ an excluded method will be removed from the manager's plugin list
+ after plugins are loaded.
+ """
+ def __init__(self, plugins=(), exclude=(), load=True):
+ DefaultPluginManager.__init__(self, plugins)
+ self.load = load
+ self.exclude = exclude
+ self.excluded = []
+ self._excludedOpts = None
+
+ def excludedOption(self, name):
+ if self._excludedOpts is None:
+ from optparse import OptionParser
+ self._excludedOpts = OptionParser(add_help_option=False)
+ for plugin in self.excluded:
+ plugin.options(self._excludedOpts, env={})
+ return self._excludedOpts.get_option('--' + name)
+
+ def loadPlugins(self):
+ if self.load:
+ DefaultPluginManager.loadPlugins(self)
+ allow = []
+ for plugin in self.plugins:
+ ok = True
+ for method in self.exclude:
+ if hasattr(plugin, method):
+ ok = False
+ self.excluded.append(plugin)
+ break
+ if ok:
+ allow.append(plugin)
+ self.plugins = allow
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py b/scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py
new file mode 100755
index 00000000..2cae744a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/multiprocess.py
@@ -0,0 +1,835 @@
+"""
+Overview
+========
+
+The multiprocess plugin enables you to distribute your test run among a set of
+worker processes that run tests in parallel. This can speed up CPU-bound test
+runs (as long as the number of worker processes is around the number of
+processors or cores available), but is mainly useful for IO-bound tests that
+spend most of their time waiting for data to arrive from someplace else.
+
+.. note ::
+
+ See :doc:`../doc_tests/test_multiprocess/multiprocess` for
+ additional documentation and examples. Use of this plugin on python
+ 2.5 or earlier requires the multiprocessing_ module, also available
+ from PyPI.
+
+.. _multiprocessing : http://code.google.com/p/python-multiprocessing/
+
+How tests are distributed
+=========================
+
+The ideal case would be to dispatch each test to a worker process
+separately. This ideal is not attainable in all cases, however, because many
+test suites depend on context (class, module or package) fixtures.
+
+The plugin can't know (unless you tell it -- see below!) if a context fixture
+can be called many times concurrently (is re-entrant), or if it can be shared
+among tests running in different processes. Therefore, if a context has
+fixtures, the default behavior is to dispatch the entire suite to a worker as
+a unit.
+
+Controlling distribution
+^^^^^^^^^^^^^^^^^^^^^^^^
+
+There are two context-level variables that you can use to control this default
+behavior.
+
+If a context's fixtures are re-entrant, set ``_multiprocess_can_split_ = True``
+in the context, and the plugin will dispatch tests in suites bound to that
+context as if the context had no fixtures. This means that the fixtures will
+execute concurrently and multiple times, typically once per test.
+
+If a context's fixtures can be shared by tests running in different processes
+-- such as a package-level fixture that starts an external http server or
+initializes a shared database -- then set ``_multiprocess_shared_ = True`` in
+the context. These fixtures will then execute in the primary nose process, and
+tests in those contexts will be individually dispatched to run in parallel.
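+
+For example, a package ``__init__.py`` whose fixtures are safe to share might
+declare (``start_server`` and ``stop_server`` are hypothetical helpers)::
+
+ _multiprocess_shared_ = True
+
+ def setup_package():
+ start_server()
+
+ def teardown_package():
+ stop_server()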
+
+How results are collected and reported
+======================================
+
+As each test or suite executes in a worker process, results (failures, errors,
+and specially handled exceptions like SkipTest) are collected in that
+process. When the worker process finishes, it returns results to the main
+nose process. There, any progress output is printed (dots!), and the
+results from the test run are combined into a consolidated result
+set. When results have been received for all dispatched tests, or all
+workers have died, the result summary is output as normal.
+
+Beware!
+=======
+
+Not all test suites will benefit from, or even operate correctly using, this
+plugin. For example, CPU-bound tests will run more slowly if you don't have
+multiple processors. There are also some differences in plugin
+interactions and behaviors due to the way in which tests are dispatched and
+loaded. In general, test loading under this plugin operates as if it were
+always in directed mode instead of discovered mode. For instance, doctests
+in test modules will always be found when using this plugin with the doctest
+plugin.
+
+But the biggest issue you will face is probably concurrency. Unless you
+have kept your tests as religiously pure unit tests, with no side-effects, no
+ordering issues, and no external dependencies, chances are you will experience
+odd, intermittent and unexplainable failures and errors when using this
+plugin. This doesn't necessarily mean the plugin is broken; it may mean that
+your test suite is not safe for concurrency.
+
+New Features in 1.1.0
+=====================
+
+* functions generated by test generators are now added to the worker queue,
+ so they are run in parallel as well.
+* fixed timeout functionality, now functions will be terminated with a
+ TimedOutException exception when they exceed their execution time. The
+ worker processes are not terminated.
+* added ``--process-restartworker`` option to restart workers once they are
+ done; this helps control memory usage, since accumulated memory leaks can
+ make long runs very difficult.
+* added global _instantiate_plugins to configure which plugins are started
+ on the worker processes.
+
+"""
+
+import logging
+import os
+import sys
+import time
+import traceback
+import unittest
+import pickle
+import signal
+import nose.case
+from nose.core import TextTestRunner
+from nose import failure
+from nose import loader
+from nose.plugins.base import Plugin
+from nose.pyversion import bytes_
+from nose.result import TextTestResult
+from nose.suite import ContextSuite
+from nose.util import test_address
+try:
+ # 2.7+
+ from unittest.runner import _WritelnDecorator
+except ImportError:
+ from unittest import _WritelnDecorator
+from Queue import Empty
+from warnings import warn
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+# this is a list of plugin classes that will be checked for and created inside
+# each worker process
+_instantiate_plugins = None
+
+log = logging.getLogger(__name__)
+
+Process = Queue = Pool = Event = Value = Array = None
+
+# have to inherit KeyboardInterrupt so it will interrupt the process properly
+class TimedOutException(KeyboardInterrupt):
+ def __init__(self, value = "Timed Out"):
+ self.value = value
+ def __str__(self):
+ return repr(self.value)
+
+def _import_mp():
+ global Process, Queue, Pool, Event, Value, Array
+ try:
+ from multiprocessing import Manager, Process
+ # prevent the server process created in the manager (which holds
+ # Python objects and allows other processes to manipulate them using
+ # proxies) from being interrupted on SIGINT (KeyboardInterrupt), so
+ # that the communication channel between subprocesses and the main
+ # process is still usable after ctrl+C is received in the main process
+ old=signal.signal(signal.SIGINT, signal.SIG_IGN)
+ m = Manager()
+ #reset it back so main process will receive a KeyboardInterrupt
+ #exception on ctrl+c
+ signal.signal(signal.SIGINT, old)
+ Queue, Pool, Event, Value, Array = (
+ m.Queue, m.Pool, m.Event, m.Value, m.Array
+ )
+ except ImportError:
+ warn("multiprocessing module is not available, multiprocess plugin "
+ "cannot be used", RuntimeWarning)
+
+
+class TestLet:
+ def __init__(self, case):
+ try:
+ self._id = case.id()
+ except AttributeError:
+ pass
+ self._short_description = case.shortDescription()
+ self._str = str(case)
+
+ def id(self):
+ return self._id
+
+ def shortDescription(self):
+ return self._short_description
+
+ def __str__(self):
+ return self._str
+
+class MultiProcess(Plugin):
+ """
+ Run tests in multiple processes. Requires the multiprocessing module.
+ """
+ score = 1000
+ status = {}
+
+ def options(self, parser, env):
+ """
+ Register command-line options.
+ """
+ parser.add_option("--processes", action="store",
+ default=env.get('NOSE_PROCESSES', 0),
+ dest="multiprocess_workers",
+ metavar="NUM",
+ help="Spread test run among this many processes. "
+ "Set a number equal to the number of processors "
+ "or cores in your machine for best results. "
+ "Pass a negative number to have the number of "
+ "processes automatically set to the number of "
+ "cores. Passing 0 means to disable parallel "
+ "testing. Default is 0 unless NOSE_PROCESSES is "
+ "set. "
+ "[NOSE_PROCESSES]")
+ parser.add_option("--process-timeout", action="store",
+ default=env.get('NOSE_PROCESS_TIMEOUT', 10),
+ dest="multiprocess_timeout",
+ metavar="SECONDS",
+ help="Set timeout for return of results from each "
+ "test runner process. Default is 10. "
+ "[NOSE_PROCESS_TIMEOUT]")
+ parser.add_option("--process-restartworker", action="store_true",
+ default=env.get('NOSE_PROCESS_RESTARTWORKER', False),
+ dest="multiprocess_restartworker",
+ help="If set, will restart each worker process once"
+ " their tests are done, this helps control memory "
+ "leaks from killing the system. "
+ "[NOSE_PROCESS_RESTARTWORKER]")
+
+ def configure(self, options, config):
+ """
+ Configure plugin.
+ """
+ try:
+ self.status.pop('active')
+ except KeyError:
+ pass
+ if not hasattr(options, 'multiprocess_workers'):
+ self.enabled = False
+ return
+ # don't start inside of a worker process
+ if config.worker:
+ return
+ self.config = config
+ try:
+ workers = int(options.multiprocess_workers)
+ except (TypeError, ValueError):
+ workers = 0
+ if workers:
+ _import_mp()
+ if Process is None:
+ self.enabled = False
+ return
+ # Negative number of workers will cause multiprocessing to hang.
+ # Set the number of workers to the CPU count to avoid this.
+ if workers < 0:
+ try:
+ import multiprocessing
+ workers = multiprocessing.cpu_count()
+ except NotImplementedError:
+ self.enabled = False
+ return
+ self.enabled = True
+ self.config.multiprocess_workers = workers
+ t = float(options.multiprocess_timeout)
+ self.config.multiprocess_timeout = t
+ r = int(options.multiprocess_restartworker)
+ self.config.multiprocess_restartworker = r
+ self.status['active'] = True
+
+ def prepareTestLoader(self, loader):
+ """Remember loader class so MultiProcessTestRunner can instantiate
+ the right loader.
+ """
+ self.loaderClass = loader.__class__
+
+ def prepareTestRunner(self, runner):
+ """Replace test runner with MultiProcessTestRunner.
+ """
+ # replace with our runner class
+ return MultiProcessTestRunner(stream=runner.stream,
+ verbosity=self.config.verbosity,
+ config=self.config,
+ loaderClass=self.loaderClass)
+
+def signalhandler(sig, frame):
+ raise TimedOutException()
+
+class MultiProcessTestRunner(TextTestRunner):
+ waitkilltime = 5.0 # max time to wait to terminate a process that does not
+ # respond to SIGILL
+ def __init__(self, **kw):
+ self.loaderClass = kw.pop('loaderClass', loader.defaultTestLoader)
+ super(MultiProcessTestRunner, self).__init__(**kw)
+
+ def collect(self, test, testQueue, tasks, to_teardown, result):
+ # dispatch and collect results
+ # put indexes only on queue because tests aren't picklable
+ for case in self.nextBatch(test):
+ log.debug("Next batch %s (%s)", case, type(case))
+ if (isinstance(case, nose.case.Test) and
+ isinstance(case.test, failure.Failure)):
+ log.debug("Case is a Failure")
+ case(result) # run here to capture the failure
+ continue
+ # handle shared fixtures
+ if isinstance(case, ContextSuite) and case.context is failure.Failure:
+ log.debug("Case is a Failure")
+ case(result) # run here to capture the failure
+ continue
+ elif isinstance(case, ContextSuite) and self.sharedFixtures(case):
+ log.debug("%s has shared fixtures", case)
+ try:
+ case.setUp()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ log.debug("%s setup failed", sys.exc_info())
+ result.addError(case, sys.exc_info())
+ else:
+ to_teardown.append(case)
+ if case.factory:
+ ancestors=case.factory.context.get(case, [])
+ for an in ancestors[:2]:
+ #log.debug('reset ancestor %s', an)
+ if getattr(an, '_multiprocess_shared_', False):
+ an._multiprocess_can_split_=True
+ #an._multiprocess_shared_=False
+ self.collect(case, testQueue, tasks, to_teardown, result)
+
+ else:
+ test_addr = self.addtask(testQueue,tasks,case)
+ log.debug("Queued test %s (%s) to %s",
+ len(tasks), test_addr, testQueue)
+
+ def startProcess(self, iworker, testQueue, resultQueue, shouldStop, result):
+ currentaddr = Value('c',bytes_(''))
+ currentstart = Value('d',time.time())
+ keyboardCaught = Event()
+ p = Process(target=runner,
+ args=(iworker, testQueue,
+ resultQueue,
+ currentaddr,
+ currentstart,
+ keyboardCaught,
+ shouldStop,
+ self.loaderClass,
+ result.__class__,
+ pickle.dumps(self.config)))
+ p.currentaddr = currentaddr
+ p.currentstart = currentstart
+ p.keyboardCaught = keyboardCaught
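+ # install the timeout handler for SIGILL around start() so the child
+ # process inherits it; the parent's previous handler is then restored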
+ old = signal.signal(signal.SIGILL, signalhandler)
+ p.start()
+ signal.signal(signal.SIGILL, old)
+ return p
+
+ def run(self, test):
+ """
+ Execute the test (which may be a test suite). If the test is a suite,
+ distribute it out among as many processes as have been configured, at
+ as fine a level as is possible given the context fixtures defined in
+ the suite or any sub-suites.
+
+ """
+ log.debug("%s.run(%s) (%s)", self, test, os.getpid())
+ wrapper = self.config.plugins.prepareTest(test)
+ if wrapper is not None:
+ test = wrapper
+
+ # plugins can decorate or capture the output stream
+ wrapped = self.config.plugins.setOutputStream(self.stream)
+ if wrapped is not None:
+ self.stream = wrapped
+
+ testQueue = Queue()
+ resultQueue = Queue()
+ tasks = []
+ completed = []
+ workers = []
+ to_teardown = []
+ shouldStop = Event()
+
+ result = self._makeResult()
+ start = time.time()
+
+ self.collect(test, testQueue, tasks, to_teardown, result)
+
+ log.debug("Starting %s workers", self.config.multiprocess_workers)
+ for i in range(self.config.multiprocess_workers):
+ p = self.startProcess(i, testQueue, resultQueue, shouldStop, result)
+ workers.append(p)
+ log.debug("Started worker process %s", i+1)
+
+ total_tasks = len(tasks)
+ # need to keep track of the next time to check for timeouts in case
+ # more than one process times out at the same time.
+ nexttimeout=self.config.multiprocess_timeout
+ thrownError = None
+
+ try:
+ while tasks:
+ log.debug("Waiting for results (%s/%s tasks), next timeout=%.3fs",
+ len(completed), total_tasks,nexttimeout)
+ try:
+ iworker, addr, newtask_addrs, batch_result = resultQueue.get(
+ timeout=nexttimeout)
+ log.debug('Results received for worker %d, %s, new tasks: %d',
+ iworker,addr,len(newtask_addrs))
+ try:
+ try:
+ tasks.remove(addr)
+ except ValueError:
+ log.warn('worker %s failed to remove from tasks: %s',
+ iworker,addr)
+ total_tasks += len(newtask_addrs)
+ tasks.extend(newtask_addrs)
+ except KeyError:
+ log.debug("Got result for unknown task? %s", addr)
+ log.debug("current: %s",str(list(tasks)[0]))
+ else:
+ completed.append([addr,batch_result])
+ self.consolidate(result, batch_result)
+ if (self.config.stopOnError
+ and not result.wasSuccessful()):
+ # set the stop condition
+ shouldStop.set()
+ break
+ if self.config.multiprocess_restartworker:
+ log.debug('joining worker %s',iworker)
+                            # wait for the worker to finish; it is not
+                            # critical if it cannot be joined, because
+                            # workers that add to testQueue do not terminate
+                            # until all of their queued items have been read

+ workers[iworker].join(timeout=1)
+ if not shouldStop.is_set() and not testQueue.empty():
+ log.debug('starting new process on worker %s',iworker)
+ workers[iworker] = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
+ except Empty:
+ log.debug("Timed out with %s tasks pending "
+ "(empty testQueue=%r): %s",
+ len(tasks),testQueue.empty(),str(tasks))
+ any_alive = False
+ for iworker, w in enumerate(workers):
+ if w.is_alive():
+ worker_addr = bytes_(w.currentaddr.value,'ascii')
+ timeprocessing = time.time() - w.currentstart.value
+ if ( len(worker_addr) == 0
+ and timeprocessing > self.config.multiprocess_timeout-0.1):
+ log.debug('worker %d has finished its work item, '
+ 'but is not exiting? do we wait for it?',
+ iworker)
+ else:
+ any_alive = True
+ if (len(worker_addr) > 0
+ and timeprocessing > self.config.multiprocess_timeout-0.1):
+ log.debug('timed out worker %s: %s',
+ iworker,worker_addr)
+ w.currentaddr.value = bytes_('')
+                                # If the process is stuck in C++ code, a
+                                # single SIGILL may not raise a Python
+                                # KeyboardInterrupt exception; therefore,
+                                # send repeated signals until an exception
+                                # is caught. If this takes too long,
+                                # terminate the process.
+ w.keyboardCaught.clear()
+ startkilltime = time.time()
+ while not w.keyboardCaught.is_set() and w.is_alive():
+ if time.time()-startkilltime > self.waitkilltime:
+ # have to terminate...
+ log.error("terminating worker %s",iworker)
+ w.terminate()
+ # there is a small probability that the
+ # terminated process might send a result,
+ # which has to be specially handled or
+ # else processes might get orphaned.
+ workers[iworker] = w = self.startProcess(iworker, testQueue, resultQueue, shouldStop, result)
+ break
+ os.kill(w.pid, signal.SIGILL)
+ time.sleep(0.1)
+ if not any_alive and testQueue.empty():
+ log.debug("All workers dead")
+ break
+ nexttimeout=self.config.multiprocess_timeout
+ for w in workers:
+ if w.is_alive() and len(w.currentaddr.value) > 0:
+ timeprocessing = time.time()-w.currentstart.value
+ if timeprocessing <= self.config.multiprocess_timeout:
+ nexttimeout = min(nexttimeout,
+ self.config.multiprocess_timeout-timeprocessing)
+ log.debug("Completed %s tasks (%s remain)", len(completed), len(tasks))
+
+ except (KeyboardInterrupt, SystemExit), e:
+ log.info('parent received ctrl-c when waiting for test results')
+ thrownError = e
+ #resultQueue.get(False)
+
+ result.addError(test, sys.exc_info())
+
+ try:
+ for case in to_teardown:
+ log.debug("Tearing down shared fixtures for %s", case)
+ try:
+ case.tearDown()
+ except (KeyboardInterrupt, SystemExit):
+ raise
+ except:
+ result.addError(case, sys.exc_info())
+
+ stop = time.time()
+
+            # write the results first, since shutting down the worker
+            # processes can freeze
+ result.printErrors()
+ result.printSummary(start, stop)
+ self.config.plugins.finalize(result)
+
+ if thrownError is None:
+ log.debug("Tell all workers to stop")
+ for w in workers:
+ if w.is_alive():
+ testQueue.put('STOP', block=False)
+
+ # wait for the workers to end
+ for iworker,worker in enumerate(workers):
+ if worker.is_alive():
+ log.debug('joining worker %s',iworker)
+ worker.join()
+ if worker.is_alive():
+ log.debug('failed to join worker %s',iworker)
+ except (KeyboardInterrupt, SystemExit):
+ log.info('parent received ctrl-c when shutting down: stop all processes')
+ for worker in workers:
+ if worker.is_alive():
+ worker.terminate()
+
+ if thrownError: raise thrownError
+ else: raise
+
+ return result
+
+ def addtask(testQueue,tasks,case):
+ arg = None
+ if isinstance(case,nose.case.Test) and hasattr(case.test,'arg'):
+ # this removes the top level descriptor and allows real function
+ # name to be returned
+ case.test.descriptor = None
+ arg = case.test.arg
+ test_addr = MultiProcessTestRunner.address(case)
+ testQueue.put((test_addr,arg), block=False)
+ if arg is not None:
+ test_addr += str(arg)
+ if tasks is not None:
+ tasks.append(test_addr)
+ return test_addr
+ addtask = staticmethod(addtask)
+
+ def address(case):
+ if hasattr(case, 'address'):
+ file, mod, call = case.address()
+ elif hasattr(case, 'context'):
+ file, mod, call = test_address(case.context)
+ else:
+ raise Exception("Unable to convert %s to address" % case)
+ parts = []
+ if file is None:
+ if mod is None:
+ raise Exception("Unaddressable case %s" % case)
+ else:
+ parts.append(mod)
+ else:
+ # strip __init__.py(c) from end of file part
+ # if present, having it there confuses loader
+ dirname, basename = os.path.split(file)
+ if basename.startswith('__init__'):
+ file = dirname
+ parts.append(file)
+ if call is not None:
+ parts.append(call)
+ return ':'.join(map(str, parts))
+ address = staticmethod(address)
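+    # For example, address() returns strings such as
+    # '/path/to/tests/test_mod.py:TestThing.test_it' (path hypothetical);
+    # workers resolve these again with loader.loadTestsFromNames().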
+
+ def nextBatch(self, test):
+ # allows tests or suites to mark themselves as not safe
+ # for multiprocess execution
+ if hasattr(test, 'context'):
+ if not getattr(test.context, '_multiprocess_', True):
+ return
+
+ if ((isinstance(test, ContextSuite)
+ and test.hasFixtures(self.checkCanSplit))
+ or not getattr(test, 'can_split', True)
+ or not isinstance(test, unittest.TestSuite)):
+ # regular test case, or a suite with context fixtures
+
+ # special case: when run like nosetests path/to/module.py
+ # the top-level suite has only one item, and it shares
+ # the same context as that item. In that case, we want the
+ # item, not the top-level suite
+ if isinstance(test, ContextSuite):
+ contained = list(test)
+ if (len(contained) == 1
+ and getattr(contained[0],
+ 'context', None) == test.context):
+ test = contained[0]
+ yield test
+ else:
+ # Suite is without fixtures at this level; but it may have
+ # fixtures at any deeper level, so we need to examine it all
+ # the way down to the case level
+ for case in test:
+ for batch in self.nextBatch(case):
+ yield batch
+
+ def checkCanSplit(context, fixt):
+ """
+ Callback that we use to check whether the fixtures found in a
+ context or ancestor are ones we care about.
+
+ Contexts can tell us that their fixtures are reentrant by setting
+ _multiprocess_can_split_. So if we see that, we return False to
+ disregard those fixtures.
+ """
+ if not fixt:
+ return False
+ if getattr(context, '_multiprocess_can_split_', False):
+ return False
+ return True
+ checkCanSplit = staticmethod(checkCanSplit)
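+    # Illustrative only (TestFast is hypothetical): a context marks its
+    # fixtures as reentrant, and therefore splittable, with:
+    #
+    #     class TestFast(unittest.TestCase):
+    #         _multiprocess_can_split_ = True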
+
+ def sharedFixtures(self, case):
+ context = getattr(case, 'context', None)
+ if not context:
+ return False
+ return getattr(context, '_multiprocess_shared_', False)
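+    # Illustrative only (TestDB is hypothetical): a context asks for its
+    # fixtures to run once, in the parent process, with:
+    #
+    #     class TestDB(unittest.TestCase):
+    #         _multiprocess_shared_ = True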
+
+ def consolidate(self, result, batch_result):
+ log.debug("batch result is %s" , batch_result)
+ try:
+ output, testsRun, failures, errors, errorClasses = batch_result
+ except ValueError:
+ log.debug("result in unexpected format %s", batch_result)
+ failure.Failure(*sys.exc_info())(result)
+ return
+ self.stream.write(output)
+ result.testsRun += testsRun
+ result.failures.extend(failures)
+ result.errors.extend(errors)
+ for key, (storage, label, isfail) in errorClasses.items():
+ if key not in result.errorClasses:
+                # Ordinarily storage is an attribute of the result, but it
+                # is only ever accessed through the errorClasses dict, so
+                # it is OK to fake it here
+ result.errorClasses[key] = ([], label, isfail)
+ mystorage, _junk, _junk = result.errorClasses[key]
+ mystorage.extend(storage)
+ log.debug("Ran %s tests (total: %s)", testsRun, result.testsRun)
+
+
+def runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config):
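+    # Worker-process entry point: runs __runner and exits quietly on
+    # KeyboardInterrupt or when the task queue stays empty past the timeout.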
+ try:
+ try:
+ return __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config)
+ except KeyboardInterrupt:
+ log.debug('Worker %s keyboard interrupt, stopping',ix)
+ except Empty:
+ log.debug("Worker %s timed out waiting for tasks", ix)
+
+def __runner(ix, testQueue, resultQueue, currentaddr, currentstart,
+ keyboardCaught, shouldStop, loaderClass, resultClass, config):
+
+ config = pickle.loads(config)
+ dummy_parser = config.parserClass()
+ if _instantiate_plugins is not None:
+ for pluginclass in _instantiate_plugins:
+ plugin = pluginclass()
+ plugin.addOptions(dummy_parser,{})
+ config.plugins.addPlugin(plugin)
+ config.plugins.configure(config.options,config)
+ config.plugins.begin()
+ log.debug("Worker %s executing, pid=%d", ix,os.getpid())
+ loader = loaderClass(config=config)
+ loader.suiteClass.suiteClass = NoSharedFixtureContextSuite
+
+ def get():
+ return testQueue.get(timeout=config.multiprocess_timeout)
+
+ def makeResult():
+ stream = _WritelnDecorator(StringIO())
+ result = resultClass(stream, descriptions=1,
+ verbosity=config.verbosity,
+ config=config)
+ plug_result = config.plugins.prepareTestResult(result)
+ if plug_result:
+ return plug_result
+ return result
+
+ def batch(result):
+ failures = [(TestLet(c), err) for c, err in result.failures]
+ errors = [(TestLet(c), err) for c, err in result.errors]
+ errorClasses = {}
+ for key, (storage, label, isfail) in result.errorClasses.items():
+ errorClasses[key] = ([(TestLet(c), err) for c, err in storage],
+ label, isfail)
+ return (
+ result.stream.getvalue(),
+ result.testsRun,
+ failures,
+ errors,
+ errorClasses)
+ for test_addr, arg in iter(get, 'STOP'):
+ if shouldStop.is_set():
+ log.exception('Worker %d STOPPED',ix)
+ break
+ result = makeResult()
+ test = loader.loadTestsFromNames([test_addr])
+ test.testQueue = testQueue
+ test.tasks = []
+ test.arg = arg
+ log.debug("Worker %s Test is %s (%s)", ix, test_addr, test)
+ try:
+ if arg is not None:
+ test_addr = test_addr + str(arg)
+ currentaddr.value = bytes_(test_addr)
+ currentstart.value = time.time()
+ test(result)
+ currentaddr.value = bytes_('')
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ except KeyboardInterrupt, e: #TimedOutException:
+ timeout = isinstance(e, TimedOutException)
+ if timeout:
+ keyboardCaught.set()
+ if len(currentaddr.value):
+ if timeout:
+ msg = 'Worker %s timed out, failing current test %s'
+ else:
+ msg = 'Worker %s keyboard interrupt, failing current test %s'
+ log.exception(msg,ix,test_addr)
+ currentaddr.value = bytes_('')
+ failure.Failure(*sys.exc_info())(result)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ else:
+ if timeout:
+ msg = 'Worker %s test %s timed out'
+ else:
+ msg = 'Worker %s test %s keyboard interrupt'
+ log.debug(msg,ix,test_addr)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ if not timeout:
+ raise
+ except SystemExit:
+ currentaddr.value = bytes_('')
+ log.exception('Worker %s system exit',ix)
+ raise
+ except:
+ currentaddr.value = bytes_('')
+ log.exception("Worker %s error running test or returning "
+ "results",ix)
+ failure.Failure(*sys.exc_info())(result)
+ resultQueue.put((ix, test_addr, test.tasks, batch(result)))
+ if config.multiprocess_restartworker:
+ break
+ log.debug("Worker %s ending", ix)
+
+
+class NoSharedFixtureContextSuite(ContextSuite):
+ """
+ Context suite that never fires shared fixtures.
+
+ When a context sets _multiprocess_shared_, fixtures in that context
+ are executed by the main process. Using this suite class prevents them
+ from executing in the runner process as well.
+
+ """
+ testQueue = None
+ tasks = None
+ arg = None
+ def setupContext(self, context):
+ if getattr(context, '_multiprocess_shared_', False):
+ return
+ super(NoSharedFixtureContextSuite, self).setupContext(context)
+
+ def teardownContext(self, context):
+ if getattr(context, '_multiprocess_shared_', False):
+ return
+        super(NoSharedFixtureContextSuite, self).teardownContext(context)
+
+    def run(self, result):
+ """Run tests in suite inside of suite fixtures.
+ """
+ # proxy the result for myself
+ log.debug("suite %s (%s) run called, tests: %s",
+ id(self), self, self._tests)
+ if self.resultProxy:
+ result, orig = self.resultProxy(result, self), result
+ else:
+ result, orig = result, result
+ try:
+ #log.debug('setUp for %s', id(self));
+ self.setUp()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'setup'
+ result.addError(self, self._exc_info())
+ return
+ try:
+ for test in self._tests:
+ if (isinstance(test,nose.case.Test)
+ and self.arg is not None):
+ test.test.arg = self.arg
+ else:
+ test.arg = self.arg
+ test.testQueue = self.testQueue
+ test.tasks = self.tasks
+ if result.shouldStop:
+ log.debug("stopping")
+ break
+ # each nose.case.Test will create its own result proxy
+ # so the cases need the original result, to avoid proxy
+ # chains
+ #log.debug('running test %s in suite %s', test, self);
+ try:
+ test(orig)
+ except KeyboardInterrupt, e:
+ timeout = isinstance(e, TimedOutException)
+ if timeout:
+ msg = 'Timeout when running test %s in suite %s'
+ else:
+ msg = 'KeyboardInterrupt when running test %s in suite %s'
+ log.debug(msg, test, self)
+ err = (TimedOutException,TimedOutException(str(test)),
+ sys.exc_info()[2])
+ test.config.plugins.addError(test,err)
+ orig.addError(test,err)
+ if not timeout:
+ raise
+ finally:
+ self.has_run = True
+ try:
+ #log.debug('tearDown for %s', id(self));
+ self.tearDown()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'teardown'
+ result.addError(self, self._exc_info())
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py b/scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py
new file mode 100755
index 00000000..76d0d2c4
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/plugintest.py
@@ -0,0 +1,416 @@
+"""
+Testing Plugins
+===============
+
+The plugin interface is well-tested enough that you can safely unit
+test your use of its hooks with some level of confidence. However,
+there is also a mixin for unittest.TestCase called PluginTester that's
+designed to test plugins in their native runtime environment.
+
+Here's a simple example with a do-nothing plugin and a composed suite.
+
+ >>> import unittest
+ >>> from nose.plugins import Plugin, PluginTester
+ >>> class FooPlugin(Plugin):
+ ... pass
+ >>> class TestPluginFoo(PluginTester, unittest.TestCase):
+ ... activate = '--with-foo'
+ ... plugins = [FooPlugin()]
+ ... def test_foo(self):
+ ... for line in self.output:
+ ... # i.e. check for patterns
+ ... pass
+ ...
+ ... # or check for a line containing ...
+ ... assert "ValueError" in self.output
+ ... def makeSuite(self):
+ ... class TC(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("I hate foo")
+ ... return [TC('runTest')]
+ ...
+ >>> res = unittest.TestResult()
+ >>> case = TestPluginFoo('test_foo')
+ >>> _ = case(res)
+ >>> res.errors
+ []
+ >>> res.failures
+ []
+ >>> res.wasSuccessful()
+ True
+ >>> res.testsRun
+ 1
+
+And here is a more complex example of testing a plugin that has extra
+arguments and reads environment variables.
+
+ >>> import unittest, os
+ >>> from nose.plugins import Plugin, PluginTester
+ >>> class FancyOutputter(Plugin):
+ ... name = "fancy"
+ ... def configure(self, options, conf):
+ ... Plugin.configure(self, options, conf)
+ ... if not self.enabled:
+ ... return
+ ... self.fanciness = 1
+ ... if options.more_fancy:
+ ... self.fanciness = 2
+ ... if 'EVEN_FANCIER' in self.env:
+ ... self.fanciness = 3
+ ...
+ ... def options(self, parser, env=os.environ):
+ ... self.env = env
+ ... parser.add_option('--more-fancy', action='store_true')
+ ... Plugin.options(self, parser, env=env)
+ ...
+ ... def report(self, stream):
+ ... stream.write("FANCY " * self.fanciness)
+ ...
+ >>> class TestFancyOutputter(PluginTester, unittest.TestCase):
+ ... activate = '--with-fancy' # enables the plugin
+ ... plugins = [FancyOutputter()]
+ ... args = ['--more-fancy']
+ ... env = {'EVEN_FANCIER': '1'}
+ ...
+ ... def test_fancy_output(self):
+ ... assert "FANCY FANCY FANCY" in self.output, (
+ ... "got: %s" % self.output)
+ ... def makeSuite(self):
+ ... class TC(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("I hate fancy stuff")
+ ... return [TC('runTest')]
+ ...
+ >>> res = unittest.TestResult()
+ >>> case = TestFancyOutputter('test_fancy_output')
+ >>> _ = case(res)
+ >>> res.errors
+ []
+ >>> res.failures
+ []
+ >>> res.wasSuccessful()
+ True
+ >>> res.testsRun
+ 1
+
+"""
+
+import re
+import sys
+from warnings import warn
+
+try:
+ from cStringIO import StringIO
+except ImportError:
+ from StringIO import StringIO
+
+__all__ = ['PluginTester', 'run']
+
+from os import getpid
+class MultiProcessFile(object):
+ """
+ helper for testing multiprocessing
+
+ multiprocessing poses a problem for doctests, since the strategy
+ of replacing sys.stdout/stderr with file-like objects then
+ inspecting the results won't work: the child processes will
+ write to the objects, but the data will not be reflected
+ in the parent doctest-ing process.
+
+ The solution is to create file-like objects which will interact with
+ multiprocessing in a more desirable way.
+
+ All processes can write to this object, but only the creator can read.
+ This allows the testing system to see a unified picture of I/O.
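+
+    Illustrative sketch (not part of the original docstring)::
+
+        buf = MultiProcessFile()
+        buf.write("hello\n")   # any process may write
+        print buf.getvalue()   # only the creating process sees merged output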
+ """
+ def __init__(self):
+ # per advice at:
+ # http://docs.python.org/library/multiprocessing.html#all-platforms
+ self.__master = getpid()
+ self.__queue = Manager().Queue()
+ self.__buffer = StringIO()
+ self.softspace = 0
+
+ def buffer(self):
+ if getpid() != self.__master:
+ return
+
+ from Queue import Empty
+ from collections import defaultdict
+ cache = defaultdict(str)
+ while True:
+ try:
+ pid, data = self.__queue.get_nowait()
+ except Empty:
+ break
+ if pid == ():
+ #show parent output after children
+ #this is what users see, usually
+ pid = ( 1e100, ) # googol!
+ cache[pid] += data
+ for pid in sorted(cache):
+ #self.__buffer.write( '%s wrote: %r\n' % (pid, cache[pid]) ) #DEBUG
+ self.__buffer.write( cache[pid] )
+ def write(self, data):
+ # note that these pids are in the form of current_process()._identity
+ # rather than OS pids
+ from multiprocessing import current_process
+ pid = current_process()._identity
+ self.__queue.put((pid, data))
+ def __iter__(self):
+        "the __getattr__ fallback doesn't work for iter()"
+ self.buffer()
+ return self.__buffer
+ def seek(self, offset, whence=0):
+ self.buffer()
+ return self.__buffer.seek(offset, whence)
+ def getvalue(self):
+ self.buffer()
+ return self.__buffer.getvalue()
+ def __getattr__(self, attr):
+ return getattr(self.__buffer, attr)
+
+try:
+ from multiprocessing import Manager
+ Buffer = MultiProcessFile
+except ImportError:
+ Buffer = StringIO
+
+class PluginTester(object):
+ """A mixin for testing nose plugins in their runtime environment.
+
+ Subclass this and mix in unittest.TestCase to run integration/functional
+ tests on your plugin. When setUp() is called, the stub test suite is
+ executed with your plugin so that during an actual test you can inspect the
+ artifacts of how your plugin interacted with the stub test suite.
+
+ - activate
+
+ - the argument to send nosetests to activate the plugin
+
+ - suitepath
+
+ - if set, this is the path of the suite to test. Otherwise, you
+ will need to use the hook, makeSuite()
+
+ - plugins
+
+ - the list of plugins to make available during the run. Note
+ that this does not mean these plugins will be *enabled* during
+ the run -- only the plugins enabled by the activate argument
+ or other settings in argv or env will be enabled.
+
+ - args
+
+ - a list of arguments to add to the nosetests command, in addition to
+ the activate argument
+
+ - env
+
+ - optional dict of environment variables to send nosetests
+
+ """
+ activate = None
+ suitepath = None
+ args = None
+ env = {}
+ argv = None
+ plugins = []
+ ignoreFiles = None
+
+ def makeSuite(self):
+ """returns a suite object of tests to run (unittest.TestSuite())
+
+ If self.suitepath is None, this must be implemented. The returned suite
+ object will be executed with all plugins activated. It may return
+ None.
+
+ Here is an example of a basic suite object you can return ::
+
+ >>> import unittest
+ >>> class SomeTest(unittest.TestCase):
+ ... def runTest(self):
+ ... raise ValueError("Now do something, plugin!")
+ ...
+ >>> unittest.TestSuite([SomeTest()]) # doctest: +ELLIPSIS
+ <unittest...TestSuite tests=[<...SomeTest testMethod=runTest>]>
+
+ """
+ raise NotImplementedError
+
+ def _execPlugin(self):
+ """execute the plugin on the internal test suite.
+ """
+ from nose.config import Config
+ from nose.core import TestProgram
+ from nose.plugins.manager import PluginManager
+
+ suite = None
+ stream = Buffer()
+ conf = Config(env=self.env,
+ stream=stream,
+ plugins=PluginManager(plugins=self.plugins))
+ if self.ignoreFiles is not None:
+ conf.ignoreFiles = self.ignoreFiles
+ if not self.suitepath:
+ suite = self.makeSuite()
+
+ self.nose = TestProgram(argv=self.argv, config=conf, suite=suite,
+ exit=False)
+ self.output = AccessDecorator(stream)
+
+ def setUp(self):
+ """runs nosetests with the specified test suite, all plugins
+ activated.
+ """
+ self.argv = ['nosetests', self.activate]
+ if self.args:
+ self.argv.extend(self.args)
+ if self.suitepath:
+ self.argv.append(self.suitepath)
+
+ self._execPlugin()
+
+
+class AccessDecorator(object):
+ stream = None
+ _buf = None
+ def __init__(self, stream):
+ self.stream = stream
+ stream.seek(0)
+ self._buf = stream.read()
+ stream.seek(0)
+ def __contains__(self, val):
+ return val in self._buf
+ def __iter__(self):
+ return iter(self.stream)
+ def __str__(self):
+ return self._buf
+
+
+def blankline_separated_blocks(text):
+ "a bunch of === characters is also considered a blank line"
+ block = []
+ for line in text.splitlines(True):
+ block.append(line)
+ line = line.strip()
+ if not line or line.startswith('===') and not line.strip('='):
+ yield "".join(block)
+ block = []
+ if block:
+ yield "".join(block)
+
+
+def remove_stack_traces(out):
+ # this regexp taken from Python 2.5's doctest
+ traceback_re = re.compile(r"""
+ # Grab the traceback header. Different versions of Python have
+ # said different things on the first traceback line.
+ ^(?P<hdr> Traceback\ \(
+ (?: most\ recent\ call\ last
+ | innermost\ last
+ ) \) :
+ )
+ \s* $ # toss trailing whitespace on the header.
+ (?P<stack> .*?) # don't blink: absorb stuff until...
+ ^(?=\w) # a line *starts* with alphanum.
+ .*?(?P<exception> \w+ ) # exception name
+ (?P<msg> [:\n] .*) # the rest
+ """, re.VERBOSE | re.MULTILINE | re.DOTALL)
+ blocks = []
+ for block in blankline_separated_blocks(out):
+ blocks.append(traceback_re.sub(r"\g<hdr>\n...\n\g<exception>\g<msg>", block))
+ return "".join(blocks)
+
+
+def simplify_warnings(out):
+ warn_re = re.compile(r"""
+ # Cut the file and line no, up to the warning name
+ ^.*:\d+:\s
+ (?P<category>\w+): \s+ # warning category
+ (?P<detail>.+) $ \n? # warning message
+ ^ .* $ # stack frame
+ """, re.VERBOSE | re.MULTILINE)
+ return warn_re.sub(r"\g<category>: \g<detail>", out)
+
+
+def remove_timings(out):
+ return re.sub(
+ r"Ran (\d+ tests?) in [0-9.]+s", r"Ran \1 in ...s", out)
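+
+# For example (illustrative):
+#   remove_timings("Ran 3 tests in 0.042s") == "Ran 3 tests in ...s"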
+
+
+def munge_nose_output_for_doctest(out):
+ """Modify nose output to make it easy to use in doctests."""
+ out = remove_stack_traces(out)
+ out = simplify_warnings(out)
+ out = remove_timings(out)
+ return out.strip()
+
+
+def run(*arg, **kw):
+ """
+ Specialized version of nose.run for use inside of doctests that
+ test test runs.
+
+ This version of run() prints the result output to stdout. Before
+ printing, the output is processed by replacing the timing
+ information with an ellipsis (...), removing traceback stacks, and
+ removing trailing whitespace.
+
+ Use this version of run wherever you are writing a doctest that
+ tests nose (or unittest) test result output.
+
+ Note: do not use doctest: +ELLIPSIS when testing nose output,
+ since ellipses ("test_foo ... ok") in your expected test runner
+ output may match multiple lines of output, causing spurious test
+ passes!
+ """
+ from nose import run
+ from nose.config import Config
+ from nose.plugins.manager import PluginManager
+
+ buffer = Buffer()
+ if 'config' not in kw:
+ plugins = kw.pop('plugins', [])
+ if isinstance(plugins, list):
+ plugins = PluginManager(plugins=plugins)
+ env = kw.pop('env', {})
+ kw['config'] = Config(env=env, plugins=plugins)
+ if 'argv' not in kw:
+ kw['argv'] = ['nosetests', '-v']
+ kw['config'].stream = buffer
+
+ # Set up buffering so that all output goes to our buffer,
+ # or warn user if deprecated behavior is active. If this is not
+ # done, prints and warnings will either be out of place or
+ # disappear.
+ stderr = sys.stderr
+ stdout = sys.stdout
+ if kw.pop('buffer_all', False):
+ sys.stdout = sys.stderr = buffer
+ restore = True
+ else:
+ restore = False
+ warn("The behavior of nose.plugins.plugintest.run() will change in "
+ "the next release of nose. The current behavior does not "
+ "correctly account for output to stdout and stderr. To enable "
+ "correct behavior, use run_buffered() instead, or pass "
+ "the keyword argument buffer_all=True to run().",
+ DeprecationWarning, stacklevel=2)
+ try:
+ run(*arg, **kw)
+ finally:
+ if restore:
+ sys.stderr = stderr
+ sys.stdout = stdout
+ out = buffer.getvalue()
+ print munge_nose_output_for_doctest(out)
+
+
+def run_buffered(*arg, **kw):
+ kw['buffer_all'] = True
+ run(*arg, **kw)
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/prof.py b/scripts/external_libs/nose-1.3.4/nose/plugins/prof.py
new file mode 100755
index 00000000..4d304a93
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/prof.py
@@ -0,0 +1,154 @@
+"""This plugin will run tests using the hotshot profiler, which is part
+of the standard library. To turn it on, use the ``--with-profile`` option
+or set the NOSE_WITH_PROFILE environment variable. Profiler output can be
+controlled with the ``--profile-sort`` and ``--profile-restrict`` options,
+and the profiler output file may be changed with ``--profile-stats-file``.
+
+See the `hotshot documentation`_ in the standard library documentation for
+more details on the various output options.
+
+.. _hotshot documentation: http://docs.python.org/library/hotshot.html
+"""
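+
+# Example invocation (illustrative; 'time' is one of the standard pstats
+# sort keys):
+#
+#   nosetests --with-profile --profile-sort=time --profile-restrict=mypkg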
+
+try:
+ import hotshot
+ from hotshot import stats
+except ImportError:
+ hotshot, stats = None, None
+import logging
+import os
+import sys
+import tempfile
+from nose.plugins.base import Plugin
+from nose.util import tolist
+
+log = logging.getLogger('nose.plugins')
+
+class Profile(Plugin):
+ """
+ Use this plugin to run tests using the hotshot profiler.
+ """
+ pfile = None
+ clean_stats_file = False
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ if not self.available():
+ return
+ Plugin.options(self, parser, env)
+ parser.add_option('--profile-sort', action='store', dest='profile_sort',
+ default=env.get('NOSE_PROFILE_SORT', 'cumulative'),
+ metavar="SORT",
+ help="Set sort order for profiler output")
+ parser.add_option('--profile-stats-file', action='store',
+ dest='profile_stats_file',
+ metavar="FILE",
+ default=env.get('NOSE_PROFILE_STATS_FILE'),
+ help='Profiler stats file; default is a new '
+ 'temp file on each run')
+ parser.add_option('--profile-restrict', action='append',
+ dest='profile_restrict',
+ metavar="RESTRICT",
+ default=env.get('NOSE_PROFILE_RESTRICT'),
+ help="Restrict profiler output. See help for "
+ "pstats.Stats for details")
+
+ def available(cls):
+ return hotshot is not None
+ available = classmethod(available)
+
+ def begin(self):
+ """Create profile stats file and load profiler.
+ """
+ if not self.available():
+ return
+ self._create_pfile()
+ self.prof = hotshot.Profile(self.pfile)
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ if not self.available():
+ self.enabled = False
+ return
+ Plugin.configure(self, options, conf)
+ self.conf = conf
+ if options.profile_stats_file:
+ self.pfile = options.profile_stats_file
+ self.clean_stats_file = False
+ else:
+ self.pfile = None
+ self.clean_stats_file = True
+ self.fileno = None
+ self.sort = options.profile_sort
+ self.restrict = tolist(options.profile_restrict)
+
+ def prepareTest(self, test):
+ """Wrap entire test run in :func:`prof.runcall`.
+ """
+ if not self.available():
+ return
+ log.debug('preparing test %s' % test)
+ def run_and_profile(result, prof=self.prof, test=test):
+ self._create_pfile()
+ prof.runcall(test, result)
+ return run_and_profile
+
+ def report(self, stream):
+ """Output profiler report.
+ """
+ log.debug('printing profiler report')
+ self.prof.close()
+ prof_stats = stats.load(self.pfile)
+ prof_stats.sort_stats(self.sort)
+
+        # 2.5 has completely different stream handling from 2.4 and earlier.
+        # Before 2.5, stats objects have no stream attribute; in 2.5 and later
+        # a reference to sys.stdout is stored before we can tweak it.
+ compat_25 = hasattr(prof_stats, 'stream')
+ if compat_25:
+ tmp = prof_stats.stream
+ prof_stats.stream = stream
+ else:
+ tmp = sys.stdout
+ sys.stdout = stream
+ try:
+ if self.restrict:
+ log.debug('setting profiler restriction to %s', self.restrict)
+ prof_stats.print_stats(*self.restrict)
+ else:
+ prof_stats.print_stats()
+ finally:
+ if compat_25:
+ prof_stats.stream = tmp
+ else:
+ sys.stdout = tmp
+
+ def finalize(self, result):
+ """Clean up stats file, if configured to do so.
+ """
+ if not self.available():
+ return
+ try:
+ self.prof.close()
+ except AttributeError:
+ # TODO: is this trying to catch just the case where not
+ # hasattr(self.prof, "close")? If so, the function call should be
+ # moved out of the try: suite.
+ pass
+ if self.clean_stats_file:
+ if self.fileno:
+ try:
+ os.close(self.fileno)
+ except OSError:
+ pass
+ try:
+ os.unlink(self.pfile)
+ except OSError:
+ pass
+ return None
+
+ def _create_pfile(self):
+ if not self.pfile:
+ self.fileno, self.pfile = tempfile.mkstemp()
+ self.clean_stats_file = True
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/skip.py b/scripts/external_libs/nose-1.3.4/nose/plugins/skip.py
new file mode 100755
index 00000000..9d1ac8f6
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/skip.py
@@ -0,0 +1,63 @@
+"""
+This plugin installs a SKIP error class for the SkipTest exception.
+When SkipTest is raised, the exception will be logged in the skipped
+attribute of the result, 'S' or 'SKIP' (verbose) will be output, and
+the exception will not be counted as an error or failure. This plugin
+is enabled by default but may be disabled with the ``--no-skip`` option.
+"""
+
+from nose.plugins.errorclass import ErrorClass, ErrorClassPlugin
+
+
+# on SkipTest:
+# - unittest SkipTest is first preference, but it's only available
+# for >= 2.7
+# - unittest2 SkipTest is second preference for older pythons. This
+# mirrors logic for choosing SkipTest exception in testtools
+# - if none of the above, provide custom class
+try:
+ from unittest.case import SkipTest
+except ImportError:
+ try:
+ from unittest2.case import SkipTest
+ except ImportError:
+ class SkipTest(Exception):
+ """Raise this exception to mark a test as skipped.
+ """
+ pass
+
+
+class Skip(ErrorClassPlugin):
+ """
+ Plugin that installs a SKIP error class for the SkipTest
+ exception. When SkipTest is raised, the exception will be logged
+ in the skipped attribute of the result, 'S' or 'SKIP' (verbose)
+ will be output, and the exception will not be counted as an error
+ or failure.
+ """
+ enabled = True
+ skipped = ErrorClass(SkipTest,
+ label='SKIP',
+ isfailure=False)
+
+ def options(self, parser, env):
+ """
+ Add my options to command line.
+ """
+ env_opt = 'NOSE_WITHOUT_SKIP'
+ parser.add_option('--no-skip', action='store_true',
+ dest='noSkip', default=env.get(env_opt, False),
+ help="Disable special handling of SkipTest "
+ "exceptions.")
+
+ def configure(self, options, conf):
+ """
+ Configure plugin. Skip plugin is enabled by default.
+ """
+ if not self.can_configure:
+ return
+ self.conf = conf
+ disable = getattr(options, 'noSkip', False)
+ if disable:
+ self.enabled = False
+
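+# Example (illustrative): with this plugin enabled (the default), a test
+# that raises SkipTest is reported as 'S'/'SKIP' rather than an error:
+#
+#     from nose.plugins.skip import SkipTest
+#     def test_optional_feature():
+#         raise SkipTest("dependency not installed")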
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/testid.py b/scripts/external_libs/nose-1.3.4/nose/plugins/testid.py
new file mode 100755
index 00000000..49fff9b1
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/testid.py
@@ -0,0 +1,306 @@
+"""
+This plugin adds a test id (like #1) to each test name output. After
+you've run once to generate test ids, you can re-run individual
+tests by activating the plugin and passing the ids (with or
+without the # prefix) instead of test names.
+
+For example, if your normal test run looks like::
+
+ % nosetests -v
+ tests.test_a ... ok
+ tests.test_b ... ok
+ tests.test_c ... ok
+
+When adding ``--with-id`` you'll see::
+
+ % nosetests -v --with-id
+ #1 tests.test_a ... ok
+ #2 tests.test_b ... ok
+ #3 tests.test_c ... ok
+
+Then you can re-run individual tests by supplying just an id number::
+
+ % nosetests -v --with-id 2
+ #2 tests.test_b ... ok
+
+You can also pass multiple id numbers::
+
+ % nosetests -v --with-id 2 3
+ #2 tests.test_b ... ok
+ #3 tests.test_c ... ok
+
+Since most shells consider '#' a special character, you can leave it out when
+specifying a test id.
+
+Note that when run without the -v switch, no special output is displayed, but
+the ids file is still written.
+
+Looping over failed tests
+-------------------------
+
+This plugin also adds a mode that will direct the test runner to record
+failed tests. Subsequent test runs will then run only the tests that failed
+last time. Activate this mode with the ``--failed`` switch::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+ #4 test.test_d ... ok
+
+On the second run, only tests #2 and #3 will run::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ERROR
+ #3 test.test_c ... FAILED
+
+As you correct errors and tests pass, they'll drop out of subsequent runs.
+
+First::
+
+ % nosetests -v --failed
+ #2 test.test_b ... ok
+ #3 test.test_c ... FAILED
+
+Second::
+
+ % nosetests -v --failed
+ #3 test.test_c ... FAILED
+
+When all tests pass, the full set will run on the next invocation.
+
+First::
+
+ % nosetests -v --failed
+ #3 test.test_c ... ok
+
+Second::
+
+ % nosetests -v --failed
+ #1 test.test_a ... ok
+ #2 test.test_b ... ok
+ #3 test.test_c ... ok
+ #4 test.test_d ... ok
+
+.. note ::
+
+ If you expect to use ``--failed`` regularly, it's a good idea to always run
+ using the ``--with-id`` option. This will ensure that an id file is always
+ created, allowing you to add ``--failed`` to the command line as soon as
+ you have failing tests. Otherwise, your first run using ``--failed`` will
+ (perhaps surprisingly) run *all* tests, because there won't be an id file
+ containing the record of failed tests from your previous run.
+
+"""
+__test__ = False
+
+import logging
+import os
+from nose.plugins import Plugin
+from nose.util import src, set
+
+try:
+ from cPickle import dump, load
+except ImportError:
+ from pickle import dump, load
+
+log = logging.getLogger(__name__)
+
+
+class TestId(Plugin):
+ """
+ Activate to add a test id (like #1) to each test name output. Activate
+ with --failed to rerun failing tests only.
+ """
+ name = 'id'
+ idfile = None
+ collecting = True
+ loopOnFailed = False
+
+ def options(self, parser, env):
+ """Register commandline options.
+ """
+ Plugin.options(self, parser, env)
+ parser.add_option('--id-file', action='store', dest='testIdFile',
+ default='.noseids', metavar="FILE",
+ help="Store test ids found in test runs in this "
+ "file. Default is the file .noseids in the "
+ "working directory.")
+ parser.add_option('--failed', action='store_true',
+ dest='failed', default=False,
+ help="Run the tests that failed in the last "
+ "test run.")
+
+ def configure(self, options, conf):
+ """Configure plugin.
+ """
+ Plugin.configure(self, options, conf)
+ if options.failed:
+ self.enabled = True
+ self.loopOnFailed = True
+ log.debug("Looping on failed tests")
+ self.idfile = os.path.expanduser(options.testIdFile)
+ if not os.path.isabs(self.idfile):
+ self.idfile = os.path.join(conf.workingDir, self.idfile)
+ self.id = 1
+ # Ids and tests are mirror images: ids are {id: test address} and
+ # tests are {test address: id}
+ self.ids = {}
+ self.tests = {}
+ self.failed = []
+ self.source_names = []
+ # used to track ids seen when tests is filled from
+ # loaded ids file
+ self._seen = {}
+ self._write_hashes = conf.verbosity >= 2
+
+ def finalize(self, result):
+ """Save new ids file, if needed.
+ """
+ if result.wasSuccessful():
+ self.failed = []
+ if self.collecting:
+ ids = dict(list(zip(list(self.tests.values()), list(self.tests.keys()))))
+ else:
+ ids = self.ids
+ fh = open(self.idfile, 'wb')
+ dump({'ids': ids,
+ 'failed': self.failed,
+ 'source_names': self.source_names}, fh)
+ fh.close()
+ log.debug('Saved test ids: %s, failed %s to %s',
+ ids, self.failed, self.idfile)
+
+ def loadTestsFromNames(self, names, module=None):
+ """Translate ids in the list of requested names into their
+ test addresses, if they are found in my dict of tests.
+ """
+ log.debug('ltfn %s %s', names, module)
+ try:
+ fh = open(self.idfile, 'rb')
+ data = load(fh)
+ if 'ids' in data:
+ self.ids = data['ids']
+ self.failed = data['failed']
+ self.source_names = data['source_names']
+ else:
+ # old ids field
+ self.ids = data
+ self.failed = []
+ self.source_names = names
+ if self.ids:
+ self.id = max(self.ids) + 1
+ self.tests = dict(list(zip(list(self.ids.values()), list(self.ids.keys()))))
+ else:
+ self.id = 1
+ log.debug(
+ 'Loaded test ids %s tests %s failed %s sources %s from %s',
+ self.ids, self.tests, self.failed, self.source_names,
+ self.idfile)
+ fh.close()
+ except IOError:
+ log.debug('IO error reading %s', self.idfile)
+
+ if self.loopOnFailed and self.failed:
+ self.collecting = False
+ names = self.failed
+ self.failed = []
+ # I don't load any tests myself, only translate names like '#2'
+ # into the associated test addresses
+ translated = []
+ new_source = []
+ really_new = []
+ for name in names:
+ trans = self.tr(name)
+ if trans != name:
+ translated.append(trans)
+ else:
+ new_source.append(name)
+ # names that are not ids and that are not in the current
+ # list of source names go into the list for next time
+ if new_source:
+ new_set = set(new_source)
+ old_set = set(self.source_names)
+ log.debug("old: %s new: %s", old_set, new_set)
+ really_new = [s for s in new_source
+ if not s in old_set]
+ if really_new:
+ # remember new sources
+ self.source_names.extend(really_new)
+ if not translated:
+ # new set of source names, no translations
+ # means "run the requested tests"
+ names = new_source
+ else:
+ # no new names to translate and add to id set
+ self.collecting = False
+ log.debug("translated: %s new sources %s names %s",
+ translated, really_new, names)
+ return (None, translated + really_new or names)
+
+ def makeName(self, addr):
+ log.debug("Make name %s", addr)
+ filename, module, call = addr
+ if filename is not None:
+ head = src(filename)
+ else:
+ head = module
+ if call is not None:
+ return "%s:%s" % (head, call)
+ return head
+
+ def setOutputStream(self, stream):
+ """Get handle on output stream so the plugin can print id #s
+ """
+ self.stream = stream
+
+ def startTest(self, test):
+ """Maybe output an id # before the test name.
+
+ Example output::
+
+ #1 test.test ... ok
+ #2 test.test_two ... ok
+
+ """
+ adr = test.address()
+ log.debug('start test %s (%s)', adr, adr in self.tests)
+ if adr in self.tests:
+ if adr in self._seen:
+ self.write(' ')
+ else:
+ self.write('#%s ' % self.tests[adr])
+ self._seen[adr] = 1
+ return
+ self.tests[adr] = self.id
+ self.write('#%s ' % self.id)
+ self.id += 1
+
+ def afterTest(self, test):
+ # None means test never ran, False means failed/err
+ if test.passed is False:
+ try:
+ key = str(self.tests[test.address()])
+ except KeyError:
+ # never saw this test -- startTest didn't run
+ pass
+ else:
+ if key not in self.failed:
+ self.failed.append(key)
+
+ def tr(self, name):
+ log.debug("tr '%s'", name)
+ try:
+ key = int(name.replace('#', ''))
+ except ValueError:
+ return name
+ log.debug("Got key %s", key)
+ # I'm running tests mapped from the ids file,
+ # not collecting new ones
+ if key in self.ids:
+ return self.makeName(self.ids[key])
+ return name
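+    # For example, tr('#2') returns the stored address for id 2 (e.g. the
+    # hypothetical 'tests/test_b.py:TestB.test_b'), while a name that is
+    # not an id is returned unchanged.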
+
+ def write(self, output):
+ if self._write_hashes:
+ self.stream.write(output)
diff --git a/scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py b/scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py
new file mode 100755
index 00000000..e1ec0e1d
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/plugins/xunit.py
@@ -0,0 +1,329 @@
+"""This plugin provides test results in the standard XUnit XML format.
+
+It's designed for the `Jenkins`_ (previously Hudson) continuous build
+system, but will probably work for anything else that understands an
+XUnit-formatted XML representation of test results.
+
+Add this shell command to your builder ::
+
+ nosetests --with-xunit
+
+And by default a file named nosetests.xml will be written to the
+working directory.
+
+In a Jenkins builder, tick the box named "Publish JUnit test result report"
+under the Post-build Actions and enter this value for Test report XMLs::
+
+ **/nosetests.xml
+
+If you need to change the name or location of the file, you can set the
+``--xunit-file`` option.
+
+Here is an abbreviated version of what an XML test report might look like::
+
+ <?xml version="1.0" encoding="UTF-8"?>
+ <testsuite name="nosetests" tests="1" errors="1" failures="0" skip="0">
+ <testcase classname="path_to_test_suite.TestSomething"
+ name="test_it" time="0">
+ <error type="exceptions.TypeError" message="oops, wrong type">
+ Traceback (most recent call last):
+ ...
+ TypeError: oops, wrong type
+ </error>
+ </testcase>
+ </testsuite>
+
+.. _Jenkins: http://jenkins-ci.org/
+
+"""
+import codecs
+import doctest
+import os
+import sys
+import traceback
+import re
+import inspect
+from StringIO import StringIO
+from time import time
+from xml.sax import saxutils
+
+from nose.plugins.base import Plugin
+from nose.exc import SkipTest
+from nose.pyversion import force_unicode, format_exception
+
+# Invalid XML characters, control characters 0-31 sans \t, \n and \r
+CONTROL_CHARACTERS = re.compile(r"[\000-\010\013\014\016-\037]")
+
+TEST_ID = re.compile(r'^(.*?)(\(.*\))$')
+
+def xml_safe(value):
+ """Replaces invalid XML characters with '?'."""
+ return CONTROL_CHARACTERS.sub('?', value)
+
+def escape_cdata(cdata):
+ """Escape a string for an XML CDATA section."""
+ return xml_safe(cdata).replace(']]>', ']]>]]&gt;<![CDATA[')
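+# For example, escape_cdata(']]>') returns ']]>]]&gt;<![CDATA[': the
+# embedded terminator closes the CDATA section, appears as escaped text,
+# and a new CDATA section is reopened.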
+
+def id_split(idval):
+ m = TEST_ID.match(idval)
+ if m:
+ name, fargs = m.groups()
+ head, tail = name.rsplit(".", 1)
+ return [head, tail+fargs]
+ else:
+ return idval.rsplit(".", 1)
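+
+# For example (illustrative):
+#   id_split('pkg.TestCase.test_it(1,)') == ['pkg.TestCase', 'test_it(1,)']
+#   id_split('pkg.TestCase.test_it') == ['pkg.TestCase', 'test_it']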
+
+def nice_classname(obj):
+ """Returns a nice name for class object or class instance.
+
+ >>> nice_classname(Exception()) # doctest: +ELLIPSIS
+ '...Exception'
+ >>> nice_classname(Exception) # doctest: +ELLIPSIS
+ '...Exception'
+
+ """
+ if inspect.isclass(obj):
+ cls_name = obj.__name__
+ else:
+ cls_name = obj.__class__.__name__
+ mod = inspect.getmodule(obj)
+ if mod:
+ name = mod.__name__
+ # jython
+ if name.startswith('org.python.core.'):
+ name = name[len('org.python.core.'):]
+ return "%s.%s" % (name, cls_name)
+ else:
+ return cls_name
+
+def exc_message(exc_info):
+ """Return the exception's message."""
+ exc = exc_info[1]
+ if exc is None:
+ # str exception
+ result = exc_info[0]
+ else:
+ try:
+ result = str(exc)
+ except UnicodeEncodeError:
+ try:
+ result = unicode(exc)
+ except UnicodeError:
+ # Fallback to args as neither str nor
+ # unicode(Exception(u'\xe6')) work in Python < 2.6
+ result = exc.args[0]
+ result = force_unicode(result, 'UTF-8')
+ return xml_safe(result)
+
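+# Tee duplicates each write to several streams; used below to capture
+# stdout/stderr for the XML report while still echoing to the originals.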
+class Tee(object):
+ def __init__(self, encoding, *args):
+ self._encoding = encoding
+ self._streams = args
+
+ def write(self, data):
+ data = force_unicode(data, self._encoding)
+ for s in self._streams:
+ s.write(data)
+
+ def writelines(self, lines):
+ for line in lines:
+ self.write(line)
+
+ def flush(self):
+ for s in self._streams:
+ s.flush()
+
+ def isatty(self):
+ return False
+
+
+class Xunit(Plugin):
+ """This plugin provides test results in the standard XUnit XML format."""
+ name = 'xunit'
+ score = 1500
+ encoding = 'UTF-8'
+ error_report_file = None
+
+ def __init__(self):
+ super(Xunit, self).__init__()
+ self._capture_stack = []
+ self._currentStdout = None
+ self._currentStderr = None
+
+ def _timeTaken(self):
+ if hasattr(self, '_timer'):
+ taken = time() - self._timer
+ else:
+ # test died before it ran (probably error in setup())
+            # or success/failure was added before the test started,
+            # probably due to custom TestResult munging
+ taken = 0.0
+ return taken
+
+ def _quoteattr(self, attr):
+ """Escape an XML attribute. Value can be unicode."""
+ attr = xml_safe(attr)
+ return saxutils.quoteattr(attr)
+
+ def options(self, parser, env):
+ """Sets additional command line options."""
+ Plugin.options(self, parser, env)
+ parser.add_option(
+ '--xunit-file', action='store',
+ dest='xunit_file', metavar="FILE",
+ default=env.get('NOSE_XUNIT_FILE', 'nosetests.xml'),
+ help=("Path to xml file to store the xunit report in. "
+ "Default is nosetests.xml in the working directory "
+ "[NOSE_XUNIT_FILE]"))
+
+ def configure(self, options, config):
+ """Configures the xunit plugin."""
+ Plugin.configure(self, options, config)
+ self.config = config
+ if self.enabled:
+ self.stats = {'errors': 0,
+ 'failures': 0,
+ 'passes': 0,
+ 'skipped': 0
+ }
+ self.errorlist = []
+ self.error_report_file_name = os.path.realpath(options.xunit_file)
+
+ def report(self, stream):
+ """Writes an Xunit-formatted XML file
+
+ The file includes a report of test errors and failures.
+
+ """
+ self.error_report_file = codecs.open(self.error_report_file_name, 'w',
+ self.encoding, 'replace')
+ self.stats['encoding'] = self.encoding
+ self.stats['total'] = (self.stats['errors'] + self.stats['failures']
+ + self.stats['passes'] + self.stats['skipped'])
+ self.error_report_file.write(
+ u'<?xml version="1.0" encoding="%(encoding)s"?>'
+ u'<testsuite name="nosetests" tests="%(total)d" '
+ u'errors="%(errors)d" failures="%(failures)d" '
+ u'skip="%(skipped)d">' % self.stats)
+ self.error_report_file.write(u''.join([force_unicode(e, self.encoding)
+ for e in self.errorlist]))
+ self.error_report_file.write(u'</testsuite>')
+ self.error_report_file.close()
+ if self.config.verbosity > 1:
+ stream.writeln("-" * 70)
+ stream.writeln("XML: %s" % self.error_report_file.name)
+
+ def _startCapture(self):
+ self._capture_stack.append((sys.stdout, sys.stderr))
+ self._currentStdout = StringIO()
+ self._currentStderr = StringIO()
+ sys.stdout = Tee(self.encoding, self._currentStdout, sys.stdout)
+ sys.stderr = Tee(self.encoding, self._currentStderr, sys.stderr)
+
+ def startContext(self, context):
+ self._startCapture()
+
+ def stopContext(self, context):
+ self._endCapture()
+
+ def beforeTest(self, test):
+ """Initializes a timer before starting a test."""
+ self._timer = time()
+ self._startCapture()
+
+ def _endCapture(self):
+ if self._capture_stack:
+ sys.stdout, sys.stderr = self._capture_stack.pop()
+
+ def afterTest(self, test):
+ self._endCapture()
+ self._currentStdout = None
+ self._currentStderr = None
+
+ def finalize(self, test):
+ while self._capture_stack:
+ self._endCapture()
+
+ def _getCapturedStdout(self):
+ if self._currentStdout:
+ value = self._currentStdout.getvalue()
+ if value:
+ return '<system-out><![CDATA[%s]]></system-out>' % escape_cdata(
+ value)
+ return ''
+
+ def _getCapturedStderr(self):
+ if self._currentStderr:
+ value = self._currentStderr.getvalue()
+ if value:
+ return '<system-err><![CDATA[%s]]></system-err>' % escape_cdata(
+ value)
+ return ''
+
+ def addError(self, test, err, capt=None):
+ """Add error output to Xunit report.
+ """
+ taken = self._timeTaken()
+
+ if issubclass(err[0], SkipTest):
+ type = 'skipped'
+ self.stats['skipped'] += 1
+ else:
+ type = 'error'
+ self.stats['errors'] += 1
+
+ tb = format_exception(err, self.encoding)
+ id = test.id()
+
+ self.errorlist.append(
+ u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+ u'<%(type)s type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+ u'</%(type)s>%(systemout)s%(systemerr)s</testcase>' %
+ {'cls': self._quoteattr(id_split(id)[0]),
+ 'name': self._quoteattr(id_split(id)[-1]),
+ 'taken': taken,
+ 'type': type,
+ 'errtype': self._quoteattr(nice_classname(err[0])),
+ 'message': self._quoteattr(exc_message(err)),
+ 'tb': escape_cdata(tb),
+ 'systemout': self._getCapturedStdout(),
+ 'systemerr': self._getCapturedStderr(),
+ })
+
+ def addFailure(self, test, err, capt=None, tb_info=None):
+ """Add failure output to Xunit report.
+ """
+ taken = self._timeTaken()
+ tb = format_exception(err, self.encoding)
+ self.stats['failures'] += 1
+ id = test.id()
+
+ self.errorlist.append(
+ u'<testcase classname=%(cls)s name=%(name)s time="%(taken).3f">'
+ u'<failure type=%(errtype)s message=%(message)s><![CDATA[%(tb)s]]>'
+ u'</failure>%(systemout)s%(systemerr)s</testcase>' %
+ {'cls': self._quoteattr(id_split(id)[0]),
+ 'name': self._quoteattr(id_split(id)[-1]),
+ 'taken': taken,
+ 'errtype': self._quoteattr(nice_classname(err[0])),
+ 'message': self._quoteattr(exc_message(err)),
+ 'tb': escape_cdata(tb),
+ 'systemout': self._getCapturedStdout(),
+ 'systemerr': self._getCapturedStderr(),
+ })
+
+ def addSuccess(self, test, capt=None):
+ """Add success output to Xunit report.
+ """
+ taken = self._timeTaken()
+ self.stats['passes'] += 1
+ id = test.id()
+ self.errorlist.append(
+ '<testcase classname=%(cls)s name=%(name)s '
+ 'time="%(taken).3f">%(systemout)s%(systemerr)s</testcase>' %
+ {'cls': self._quoteattr(id_split(id)[0]),
+ 'name': self._quoteattr(id_split(id)[-1]),
+ 'taken': taken,
+ 'systemout': self._getCapturedStdout(),
+ 'systemerr': self._getCapturedStderr(),
+ })
diff --git a/scripts/external_libs/nose-1.3.4/nose/proxy.py b/scripts/external_libs/nose-1.3.4/nose/proxy.py
new file mode 100755
index 00000000..c2676cb1
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/proxy.py
@@ -0,0 +1,188 @@
+"""
+Result Proxy
+------------
+
+The result proxy wraps the result instance given to each test. It
+performs two functions: enabling extended error/failure reporting
+and calling plugins.
+
+As each result event is fired, plugins are called with the same event;
+however, plugins are called with the nose.case.Test instance that
+wraps the actual test. So when a test fails and calls
+result.addFailure(self, err), the result proxy calls
+addFailure(self.test, err) for each plugin. This allows plugins to
+have a single stable interface for all test types, and also to
+manipulate the test object itself by setting the `test` attribute of
+the nose.case.Test that they receive.
+"""
+import logging
+from nose.config import Config
+
+
+log = logging.getLogger(__name__)
+
+
+def proxied_attribute(local_attr, proxied_attr, doc):
+ """Create a property that proxies attribute ``proxied_attr`` through
+ the local attribute ``local_attr``.
+ """
+ def fget(self):
+ return getattr(getattr(self, local_attr), proxied_attr)
+ def fset(self, value):
+ setattr(getattr(self, local_attr), proxied_attr, value)
+ def fdel(self):
+ delattr(getattr(self, local_attr), proxied_attr)
+ return property(fget, fset, fdel, doc)
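+# For example, ``errors = proxied_attribute('result', 'errors', ...)``
+# makes ``proxy.errors`` read, write and delete ``proxy.result.errors``
+# (see the proxied attributes at the bottom of this module).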
+
+
+class ResultProxyFactory(object):
+ """Factory for result proxies. Generates a ResultProxy bound to each test
+ and the result passed to the test.
+ """
+ def __init__(self, config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+ self.__prepared = False
+ self.__result = None
+
+ def __call__(self, result, test):
+ """Return a ResultProxy for the current test.
+
+ On first call, plugins are given a chance to replace the
+ result used for the remaining tests. If a plugin returns a
+ value from prepareTestResult, that object will be used as the
+ result for all tests.
+ """
+ if not self.__prepared:
+ self.__prepared = True
+ plug_result = self.config.plugins.prepareTestResult(result)
+ if plug_result is not None:
+ self.__result = result = plug_result
+ if self.__result is not None:
+ result = self.__result
+ return ResultProxy(result, test, config=self.config)
+
+
+class ResultProxy(object):
+ """Proxy to TestResults (or other results handler).
+
+ One ResultProxy is created for each nose.case.Test. The result
+ proxy calls plugins with the nose.case.Test instance (instead of
+ the wrapped test case) as each result call is made. Finally, the
+ real result method is called, also with the nose.case.Test
+ instance as the test parameter.
+
+ """
+ def __init__(self, result, test, config=None):
+ if config is None:
+ config = Config()
+ self.config = config
+ self.plugins = config.plugins
+ self.result = result
+ self.test = test
+
+ def __repr__(self):
+ return repr(self.result)
+
+ def _prepareErr(self, err):
+ if not isinstance(err[1], Exception) and isinstance(err[0], type):
+ # Turn value back into an Exception (required in Python 3.x).
+ # Plugins do all sorts of crazy things with exception values.
+ # Convert it to a custom subclass of Exception with the same
+ # name as the actual exception to make it print correctly.
+ value = type(err[0].__name__, (Exception,), {})(err[1])
+ err = (err[0], value, err[2])
+ return err
+
+ def assertMyTest(self, test):
+        # The test I was called with must be my .test, my .test's .test,
+        # or my .test.test's ._nose_case
+
+ case = getattr(self.test, 'test', None)
+ assert (test is self.test
+ or test is case
+ or test is getattr(case, '_nose_case', None)), (
+ "ResultProxy for %r (%s) was called with test %r (%s)"
+ % (self.test, id(self.test), test, id(test)))
+
+ def afterTest(self, test):
+ self.assertMyTest(test)
+ self.plugins.afterTest(self.test)
+ if hasattr(self.result, "afterTest"):
+ self.result.afterTest(self.test)
+
+ def beforeTest(self, test):
+ self.assertMyTest(test)
+ self.plugins.beforeTest(self.test)
+ if hasattr(self.result, "beforeTest"):
+ self.result.beforeTest(self.test)
+
+ def addError(self, test, err):
+ self.assertMyTest(test)
+ plugins = self.plugins
+ plugin_handled = plugins.handleError(self.test, err)
+ if plugin_handled:
+ return
+ # test.passed is set in result, to account for error classes
+ formatted = plugins.formatError(self.test, err)
+ if formatted is not None:
+ err = formatted
+ plugins.addError(self.test, err)
+ self.result.addError(self.test, self._prepareErr(err))
+ if not self.result.wasSuccessful() and self.config.stopOnError:
+ self.shouldStop = True
+
+ def addFailure(self, test, err):
+ self.assertMyTest(test)
+ plugins = self.plugins
+ plugin_handled = plugins.handleFailure(self.test, err)
+ if plugin_handled:
+ return
+ self.test.passed = False
+ formatted = plugins.formatFailure(self.test, err)
+ if formatted is not None:
+ err = formatted
+ plugins.addFailure(self.test, err)
+ self.result.addFailure(self.test, self._prepareErr(err))
+ if self.config.stopOnError:
+ self.shouldStop = True
+
+ def addSkip(self, test, reason):
+ # 2.7 compat shim
+ from nose.plugins.skip import SkipTest
+ self.assertMyTest(test)
+ plugins = self.plugins
+ if not isinstance(reason, Exception):
+ # for Python 3.2+
+ reason = Exception(reason)
+ plugins.addError(self.test, (SkipTest, reason, None))
+ self.result.addSkip(self.test, reason)
+
+ def addSuccess(self, test):
+ self.assertMyTest(test)
+ self.plugins.addSuccess(self.test)
+ self.result.addSuccess(self.test)
+
+ def startTest(self, test):
+ self.assertMyTest(test)
+ self.plugins.startTest(self.test)
+ self.result.startTest(self.test)
+
+ def stop(self):
+ self.result.stop()
+
+ def stopTest(self, test):
+ self.assertMyTest(test)
+ self.plugins.stopTest(self.test)
+ self.result.stopTest(self.test)
+
+ # proxied attributes
+ shouldStop = proxied_attribute('result', 'shouldStop',
+ """Should the test run stop?""")
+ errors = proxied_attribute('result', 'errors',
+ """Tests that raised an exception""")
+ failures = proxied_attribute('result', 'failures',
+ """Tests that failed""")
+ testsRun = proxied_attribute('result', 'testsRun',
+ """Number of tests run""")
diff --git a/scripts/external_libs/nose-1.3.4/nose/pyversion.py b/scripts/external_libs/nose-1.3.4/nose/pyversion.py
new file mode 100755
index 00000000..8b566141
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/pyversion.py
@@ -0,0 +1,214 @@
+"""
+This module contains fixups for using nose under different versions of Python.
+"""
+import sys
+import os
+import traceback
+import types
+import inspect
+import nose.util
+
+__all__ = ['make_instancemethod', 'cmp_to_key', 'sort_list', 'ClassType',
+ 'TypeType', 'UNICODE_STRINGS', 'unbound_method', 'ismethod',
+ 'bytes_', 'is_base_exception', 'force_unicode', 'exc_to_unicode',
+ 'format_exception']
+
+# In Python 3.x, all strings are unicode (the call to 'unicode()' in the 2.x
+# source will be replaced with 'str()' when running 2to3, so this test will
+# then become true)
+UNICODE_STRINGS = (type(unicode()) == type(str()))
+
+if sys.version_info[:2] < (3, 0):
+ def force_unicode(s, encoding='UTF-8'):
+ try:
+ s = unicode(s)
+ except UnicodeDecodeError:
+ s = str(s).decode(encoding, 'replace')
+
+ return s
+else:
+ def force_unicode(s, encoding='UTF-8'):
+ return str(s)
+
+# new.instancemethod() is obsolete for new-style classes (Python 3.x)
+# We need to use descriptor methods instead.
+try:
+ import new
+ def make_instancemethod(function, instance):
+ return new.instancemethod(function.im_func, instance,
+ instance.__class__)
+except ImportError:
+ def make_instancemethod(function, instance):
+ return function.__get__(instance, instance.__class__)
+
+# To be forward-compatible, we do all list sorts using keys instead of cmp
+# functions. However, part of the unittest.TestLoader API involves a
+# user-providable cmp function, so we need some way to convert that.
+def cmp_to_key(mycmp):
+ 'Convert a cmp= function into a key= function'
+ class Key(object):
+ def __init__(self, obj):
+ self.obj = obj
+ def __lt__(self, other):
+ return mycmp(self.obj, other.obj) < 0
+ def __gt__(self, other):
+ return mycmp(self.obj, other.obj) > 0
+ def __eq__(self, other):
+ return mycmp(self.obj, other.obj) == 0
+ return Key
+
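+# Illustrative sketch (not upstream code): wrap a cmp-style comparison so it
+# can drive a key-based sort:
+#
+#     mycmp = lambda a, b: (a > b) - (a < b)   # hypothetical cmp function
+#     sort_list([3, 1, 2], key=cmp_to_key(mycmp))
+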
+# Python 2.3 also does not support list-sorting by key, so we need to convert
+# keys to cmp functions if we're running on old Python.
+if sys.version_info < (2, 4):
+ def sort_list(l, key, reverse=False):
+ if reverse:
+ return l.sort(lambda a, b: cmp(key(b), key(a)))
+ else:
+ return l.sort(lambda a, b: cmp(key(a), key(b)))
+else:
+ def sort_list(l, key, reverse=False):
+ return l.sort(key=key, reverse=reverse)
+
+# In Python 3.x, all objects are "new style" objects descended from 'type', and
+# thus types.ClassType and types.TypeType don't exist anymore. For
+# compatibility, we make sure they still work.
+if hasattr(types, 'ClassType'):
+ ClassType = types.ClassType
+ TypeType = types.TypeType
+else:
+ ClassType = type
+ TypeType = type
+
+# The following emulates the behavior (we need) of an 'unbound method' under
+# Python 3.x (namely, the ability to have a class associated with a function
+# definition so that things can do stuff based on its associated class)
+class UnboundMethod:
+ def __init__(self, cls, func):
+ # Make sure we have all the same attributes as the original function,
+ # so that the AttributeSelector plugin will work correctly...
+ self.__dict__ = func.__dict__.copy()
+ self._func = func
+ self.__self__ = UnboundSelf(cls)
+ if sys.version_info < (3, 0):
+ self.im_class = cls
+
+ def address(self):
+ cls = self.__self__.cls
+ modname = cls.__module__
+ module = sys.modules[modname]
+ filename = getattr(module, '__file__', None)
+ if filename is not None:
+ filename = os.path.abspath(filename)
+ return (nose.util.src(filename), modname, "%s.%s" % (cls.__name__,
+ self._func.__name__))
+
+ def __call__(self, *args, **kwargs):
+ return self._func(*args, **kwargs)
+
+ def __getattr__(self, attr):
+ return getattr(self._func, attr)
+
+ def __repr__(self):
+ return '<unbound method %s.%s>' % (self.__self__.cls.__name__,
+ self._func.__name__)
+
+class UnboundSelf:
+ def __init__(self, cls):
+ self.cls = cls
+
+ # We have to do this hackery because Python won't let us override the
+ # __class__ attribute...
+ def __getattribute__(self, attr):
+ if attr == '__class__':
+ return self.cls
+ else:
+ return object.__getattribute__(self, attr)
+
+def unbound_method(cls, func):
+ if inspect.ismethod(func):
+ return func
+ if not inspect.isfunction(func):
+ raise TypeError('%s is not a function' % (repr(func),))
+ return UnboundMethod(cls, func)
+
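+# A hedged usage sketch (hypothetical names): associate a plain function with
+# a class so selectors and plugins can inspect its origin.
+#
+#     class SomeTests(object):
+#         pass
+#     def check():
+#         pass
+#     um = unbound_method(SomeTests, check)
+#     assert um.__self__.__class__ is SomeTests
+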
+def ismethod(obj):
+ return inspect.ismethod(obj) or isinstance(obj, UnboundMethod)
+
+
+# Make a pseudo-bytes function that can be called without the encoding arg:
+if sys.version_info >= (3, 0):
+ def bytes_(s, encoding='utf8'):
+ if isinstance(s, bytes):
+ return s
+ return bytes(s, encoding)
+else:
+ def bytes_(s, encoding=None):
+ return str(s)
+
+
+if sys.version_info[:2] >= (2, 6):
+ def isgenerator(o):
+ if isinstance(o, UnboundMethod):
+ o = o._func
+ return inspect.isgeneratorfunction(o) or inspect.isgenerator(o)
+else:
+ try:
+ from compiler.consts import CO_GENERATOR
+ except ImportError:
+ # IronPython doesn't have a compiler module
+ CO_GENERATOR=0x20
+
+ def isgenerator(func):
+ try:
+ return func.func_code.co_flags & CO_GENERATOR != 0
+ except AttributeError:
+ return False
+
+# Make a function to help check if an exception is derived from BaseException.
+# In Python 2.4, we just use Exception instead.
+if sys.version_info[:2] < (2, 5):
+ def is_base_exception(exc):
+ return isinstance(exc, Exception)
+else:
+ def is_base_exception(exc):
+ return isinstance(exc, BaseException)
+
+if sys.version_info[:2] < (3, 0):
+ def exc_to_unicode(ev, encoding='utf-8'):
+ if is_base_exception(ev):
+ if not hasattr(ev, '__unicode__'):
+ # 2.5-
+ if not hasattr(ev, 'message'):
+ # 2.4
+ msg = len(ev.args) and ev.args[0] or ''
+ else:
+ msg = ev.message
+ msg = force_unicode(msg, encoding=encoding)
+ clsname = force_unicode(ev.__class__.__name__,
+ encoding=encoding)
+ ev = u'%s: %s' % (clsname, msg)
+ elif not isinstance(ev, unicode):
+ ev = repr(ev)
+
+ return force_unicode(ev, encoding=encoding)
+else:
+ def exc_to_unicode(ev, encoding='utf-8'):
+ return str(ev)
+
+def format_exception(exc_info, encoding='UTF-8'):
+ ec, ev, tb = exc_info
+
+ # Our exception object may have been turned into a string, and Python 3's
+ # traceback.format_exception() doesn't take kindly to that (it expects an
+ # actual exception object). So we work around it, by doing the work
+ # ourselves if ev is not an exception object.
+ if not is_base_exception(ev):
+ tb_data = force_unicode(
+ ''.join(traceback.format_tb(tb)),
+ encoding)
+ ev = exc_to_unicode(ev)
+ return tb_data + ev
+ else:
+ return force_unicode(
+ ''.join(traceback.format_exception(*exc_info)),
+ encoding)
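+
+# Typical call shape (illustrative): format whatever sys.exc_info() produced,
+# even after a plugin replaced the exception object with a plain string.
+#
+#     try:
+#         raise ValueError('boom')
+#     except ValueError:
+#         print(format_exception(sys.exc_info()))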
diff --git a/scripts/external_libs/nose-1.3.4/nose/result.py b/scripts/external_libs/nose-1.3.4/nose/result.py
new file mode 100755
index 00000000..f974a14a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/result.py
@@ -0,0 +1,200 @@
+"""
+Test Result
+-----------
+
+Provides a TextTestResult that extends unittest's _TextTestResult to
+provide support for error classes (such as the builtin skip and
+deprecated classes), and hooks for plugins to take over or extend
+reporting.
+"""
+
+import logging
+try:
+ # 2.7+
+ from unittest.runner import _TextTestResult
+except ImportError:
+ from unittest import _TextTestResult
+from nose.config import Config
+from nose.util import isclass, ln as _ln # backwards compat
+
+log = logging.getLogger('nose.result')
+
+
+def _exception_detail(exc):
+ # this is what stdlib module traceback does
+ try:
+ return str(exc)
+ except:
+ return '<unprintable %s object>' % type(exc).__name__
+
+
+class TextTestResult(_TextTestResult):
+ """Text test result that extends unittest's default test result
+ support for a configurable set of errorClasses (eg, Skip,
+ Deprecated, TODO) that extend the errors/failures/success triad.
+ """
+ def __init__(self, stream, descriptions, verbosity, config=None,
+ errorClasses=None):
+ if errorClasses is None:
+ errorClasses = {}
+ self.errorClasses = errorClasses
+ if config is None:
+ config = Config()
+ self.config = config
+ _TextTestResult.__init__(self, stream, descriptions, verbosity)
+
+ def addSkip(self, test, reason):
+ # 2.7 skip compat
+ from nose.plugins.skip import SkipTest
+ if SkipTest in self.errorClasses:
+ storage, label, isfail = self.errorClasses[SkipTest]
+ storage.append((test, reason))
+ self.printLabel(label, (SkipTest, reason, None))
+
+ def addError(self, test, err):
+ """Overrides normal addError to add support for
+ errorClasses. If the exception is a registered class, the
+ error will be added to the list for that class, not errors.
+ """
+ ec, ev, tb = err
+ try:
+ exc_info = self._exc_info_to_string(err, test)
+ except TypeError:
+ # 2.3 compat
+ exc_info = self._exc_info_to_string(err)
+ for cls, (storage, label, isfail) in self.errorClasses.items():
+ #if 'Skip' in cls.__name__ or 'Skip' in ec.__name__:
+ # from nose.tools import set_trace
+ # set_trace()
+ if isclass(ec) and issubclass(ec, cls):
+ if isfail:
+ test.passed = False
+ storage.append((test, exc_info))
+ self.printLabel(label, err)
+ return
+ self.errors.append((test, exc_info))
+ test.passed = False
+ self.printLabel('ERROR')
+
+ # override to bypass changes in 2.7
+ def getDescription(self, test):
+ if self.descriptions:
+ return test.shortDescription() or str(test)
+ else:
+ return str(test)
+
+ def printLabel(self, label, err=None):
+ # Might get patched into a streamless result
+ stream = getattr(self, 'stream', None)
+ if stream is not None:
+ if self.showAll:
+ message = [label]
+ if err:
+ detail = _exception_detail(err[1])
+ if detail:
+ message.append(detail)
+ stream.writeln(": ".join(message))
+ elif self.dots:
+ stream.write(label[:1])
+
+ def printErrors(self):
+ """Overrides to print all errorClasses errors as well.
+ """
+ _TextTestResult.printErrors(self)
+ for cls in self.errorClasses.keys():
+ storage, label, isfail = self.errorClasses[cls]
+ if isfail:
+ self.printErrorList(label, storage)
+ # Might get patched into a result with no config
+ if hasattr(self, 'config'):
+ self.config.plugins.report(self.stream)
+
+ def printSummary(self, start, stop):
+ """Called by the test runner to print the final summary of test
+ run results.
+ """
+ write = self.stream.write
+ writeln = self.stream.writeln
+ taken = float(stop - start)
+ run = self.testsRun
+ plural = run != 1 and "s" or ""
+
+ writeln(self.separator2)
+ writeln("Ran %s test%s in %.3fs" % (run, plural, taken))
+ writeln()
+
+ summary = {}
+ eckeys = self.errorClasses.keys()
+ for cls in eckeys:
+ storage, label, isfail = self.errorClasses[cls]
+ count = len(storage)
+ if not count:
+ continue
+ summary[label] = count
+ if len(self.failures):
+ summary['failures'] = len(self.failures)
+ if len(self.errors):
+ summary['errors'] = len(self.errors)
+
+ if not self.wasSuccessful():
+ write("FAILED")
+ else:
+ write("OK")
+ items = summary.items()
+ if items:
+ items.sort()
+ write(" (")
+ write(", ".join(["%s=%s" % (label, count) for
+ label, count in items]))
+ writeln(")")
+ else:
+ writeln()
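+ # Sample of what this prints (illustrative):
+ #
+ #     ----------------------------------------------------------------------
+ #     Ran 3 tests in 0.012s
+ #
+ #     FAILED (SKIP=1, errors=1)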
+
+ def wasSuccessful(self):
+ """Overrides to check that there are no errors in errorClasses
+ lists that are marked as errors and should cause a run to
+ fail.
+ """
+ if self.errors or self.failures:
+ return False
+ for cls in self.errorClasses.keys():
+ storage, label, isfail = self.errorClasses[cls]
+ if not isfail:
+ continue
+ if storage:
+ return False
+ return True
+
+ def _addError(self, test, err):
+ try:
+ exc_info = self._exc_info_to_string(err, test)
+ except TypeError:
+ # 2.3: does not take test arg
+ exc_info = self._exc_info_to_string(err)
+ self.errors.append((test, exc_info))
+ if self.showAll:
+ self.stream.write('ERROR')
+ elif self.dots:
+ self.stream.write('E')
+
+ def _exc_info_to_string(self, err, test=None):
+ # 2.7 skip compat
+ from nose.plugins.skip import SkipTest
+ if isclass(err[0]) and issubclass(err[0], SkipTest):
+ return str(err[1])
+ # 2.3/2.4 -- 2.4 passes test, 2.3 does not
+ try:
+ return _TextTestResult._exc_info_to_string(self, err, test)
+ except TypeError:
+ # 2.3: does not take test arg
+ return _TextTestResult._exc_info_to_string(self, err)
+
+
+def ln(*arg, **kw):
+ from warnings import warn
+ warn("ln() has moved to nose.util from nose.result and will be removed "
+ "from nose.result in a future release. Please update your imports ",
+ DeprecationWarning)
+ return _ln(*arg, **kw)
+
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/selector.py b/scripts/external_libs/nose-1.3.4/nose/selector.py
new file mode 100755
index 00000000..c4a006a8
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/selector.py
@@ -0,0 +1,251 @@
+"""
+Test Selection
+--------------
+
+Test selection is handled by a Selector. The test loader calls the
+appropriate selector method for each object it encounters that it
+thinks may be a test.
+"""
+import logging
+import os
+import unittest
+from nose.config import Config
+from nose.util import split_test_name, src, getfilename, getpackage, ispackage
+
+log = logging.getLogger(__name__)
+
+__all__ = ['Selector', 'defaultSelector', 'TestAddress']
+
+
+# for efficiency and easier mocking
+op_join = os.path.join
+op_basename = os.path.basename
+op_exists = os.path.exists
+op_splitext = os.path.splitext
+op_isabs = os.path.isabs
+op_abspath = os.path.abspath
+
+
+class Selector(object):
+ """Core test selector. Examines test candidates and determines whether,
+ given the specified configuration, the test candidate should be selected
+ as a test.
+ """
+ def __init__(self, config):
+ if config is None:
+ config = Config()
+ self.configure(config)
+
+ def configure(self, config):
+ self.config = config
+ self.exclude = config.exclude
+ self.ignoreFiles = config.ignoreFiles
+ self.include = config.include
+ self.plugins = config.plugins
+ self.match = config.testMatch
+
+ def matches(self, name):
+ """Does the name match my requirements?
+
+ To match, a name must match config.testMatch OR config.include
+ and it must not match config.exclude
+ """
+ return ((self.match.search(name)
+ or (self.include and
+ filter(None,
+ [inc.search(name) for inc in self.include])))
+ and ((not self.exclude)
+ or not filter(None,
+ [exc.search(name) for exc in self.exclude])
+ ))
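+ # A minimal check of the rule above, assuming the default Config():
+ #
+ #     sel = Selector(Config())
+ #     assert sel.matches('test_foo') and sel.matches('foo_test')
+ #     assert not sel.matches('latest')   # 'test' follows a word character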
+
+ def wantClass(self, cls):
+ """Is the class a wanted test class?
+
+ A class must be a unittest.TestCase subclass, or match test name
+ requirements. Classes that start with _ are always excluded.
+ """
+ declared = getattr(cls, '__test__', None)
+ if declared is not None:
+ wanted = declared
+ else:
+ wanted = (not cls.__name__.startswith('_')
+ and (issubclass(cls, unittest.TestCase)
+ or self.matches(cls.__name__)))
+
+ plug_wants = self.plugins.wantClass(cls)
+ if plug_wants is not None:
+ log.debug("Plugin setting selection of %s to %s", cls, plug_wants)
+ wanted = plug_wants
+ log.debug("wantClass %s? %s", cls, wanted)
+ return wanted
+
+ def wantDirectory(self, dirname):
+ """Is the directory a wanted test directory?
+
+ All package directories match, so long as they do not match exclude.
+ All other directories must match test requirements.
+ """
+ tail = op_basename(dirname)
+ if ispackage(dirname):
+ wanted = (not self.exclude
+ or not filter(None,
+ [exc.search(tail) for exc in self.exclude]
+ ))
+ else:
+ wanted = (self.matches(tail)
+ or (self.config.srcDirs
+ and tail in self.config.srcDirs))
+ plug_wants = self.plugins.wantDirectory(dirname)
+ if plug_wants is not None:
+ log.debug("Plugin setting selection of %s to %s",
+ dirname, plug_wants)
+ wanted = plug_wants
+ log.debug("wantDirectory %s? %s", dirname, wanted)
+ return wanted
+
+ def wantFile(self, file):
+ """Is the file a wanted test file?
+
+ The file must be a python source file and match testMatch or
+ include, and not match exclude. Files that match ignore are *never*
+ wanted, regardless of plugin, testMatch, include or exclude settings.
+ """
+ # never, ever load files that match anything in ignore
+ # (.* _* and *setup*.py by default)
+ base = op_basename(file)
+ ignore_matches = [ ignore_this for ignore_this in self.ignoreFiles
+ if ignore_this.search(base) ]
+ if ignore_matches:
+ log.debug('%s matches ignoreFiles pattern; skipped',
+ base)
+ return False
+ if not self.config.includeExe and os.access(file, os.X_OK):
+ log.info('%s is executable; skipped', file)
+ return False
+ dummy, ext = op_splitext(base)
+ pysrc = ext == '.py'
+
+ wanted = pysrc and self.matches(base)
+ plug_wants = self.plugins.wantFile(file)
+ if plug_wants is not None:
+ log.debug("plugin setting want %s to %s", file, plug_wants)
+ wanted = plug_wants
+ log.debug("wantFile %s? %s", file, wanted)
+ return wanted
+
+ def wantFunction(self, function):
+ """Is the function a test function?
+ """
+ try:
+ if hasattr(function, 'compat_func_name'):
+ funcname = function.compat_func_name
+ else:
+ funcname = function.__name__
+ except AttributeError:
+ # not a function
+ return False
+ declared = getattr(function, '__test__', None)
+ if declared is not None:
+ wanted = declared
+ else:
+ wanted = not funcname.startswith('_') and self.matches(funcname)
+ plug_wants = self.plugins.wantFunction(function)
+ if plug_wants is not None:
+ wanted = plug_wants
+ log.debug("wantFunction %s? %s", function, wanted)
+ return wanted
+
+ def wantMethod(self, method):
+ """Is the method a test method?
+ """
+ try:
+ method_name = method.__name__
+ except AttributeError:
+ # not a method
+ return False
+ if method_name.startswith('_'):
+ # never collect 'private' methods
+ return False
+ declared = getattr(method, '__test__', None)
+ if declared is not None:
+ wanted = declared
+ else:
+ wanted = self.matches(method_name)
+ plug_wants = self.plugins.wantMethod(method)
+ if plug_wants is not None:
+ wanted = plug_wants
+ log.debug("wantMethod %s? %s", method, wanted)
+ return wanted
+
+ def wantModule(self, module):
+ """Is the module a test module?
+
+ The tail of the module name must match test requirements. One exception:
+ we always want __main__.
+ """
+ declared = getattr(module, '__test__', None)
+ if declared is not None:
+ wanted = declared
+ else:
+ wanted = self.matches(module.__name__.split('.')[-1]) \
+ or module.__name__ == '__main__'
+ plug_wants = self.plugins.wantModule(module)
+ if plug_wants is not None:
+ wanted = plug_wants
+ log.debug("wantModule %s? %s", module, wanted)
+ return wanted
+
+defaultSelector = Selector
+
+
+class TestAddress(object):
+ """A test address represents a user's request to run a particular
+ test. The user may specify a filename or module (or neither),
+ and/or a callable (a class, function, or method). The naming
+ format for test addresses is:
+
+ filename_or_module:callable
+
+ Filenames that are not absolute will be made absolute relative to
+ the working dir.
+
+ The filename or module part will be considered a module name if it
+ doesn't look like a file: that is, if it doesn't exist on the file
+ system, doesn't contain any directory separators, and doesn't end
+ in .py.
+
+ Callables may be a class name, function name, method name, or
+ class.method specification.
+ """
+ def __init__(self, name, workingDir=None):
+ if workingDir is None:
+ workingDir = os.getcwd()
+ self.name = name
+ self.workingDir = workingDir
+ self.filename, self.module, self.call = split_test_name(name)
+ log.debug('Test name %s resolved to file %s, module %s, call %s',
+ name, self.filename, self.module, self.call)
+ if self.filename is None:
+ if self.module is not None:
+ self.filename = getfilename(self.module, self.workingDir)
+ if self.filename:
+ self.filename = src(self.filename)
+ if not op_isabs(self.filename):
+ self.filename = op_abspath(op_join(workingDir,
+ self.filename))
+ if self.module is None:
+ self.module = getpackage(self.filename)
+ log.debug(
+ 'Final resolution of test name %s: file %s module %s call %s',
+ name, self.filename, self.module, self.call)
+
+ def totuple(self):
+ return (self.filename, self.module, self.call)
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return "%s: (%s, %s, %s)" % (self.name, self.filename,
+ self.module, self.call)
diff --git a/scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py b/scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py
new file mode 100755
index 00000000..2ae28399
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/sphinx/__init__.py
@@ -0,0 +1 @@
+pass
diff --git a/scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py b/scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py
new file mode 100755
index 00000000..d2b284ab
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/sphinx/pluginopts.py
@@ -0,0 +1,189 @@
+"""
+Adds a sphinx directive that can be used to automatically document a plugin.
+
+This::
+
+ .. autoplugin :: nose.plugins.foo
+ :plugin: Pluggy
+
+produces::
+
+ .. automodule :: nose.plugins.foo
+
+ Options
+ -------
+
+ .. cmdoption :: --foo=BAR, --fooble=BAR
+
+ Do the foo thing to the new thing.
+
+ Plugin
+ ------
+
+ .. autoclass :: nose.plugins.foo.Pluggy
+ :members:
+
+ Source
+ ------
+
+ .. include :: path/to/nose/plugins/foo.py
+ :literal:
+
+"""
+import os
+try:
+ from docutils import nodes, utils
+ from docutils.statemachine import ViewList
+ from docutils.parsers.rst import directives
+except ImportError:
+ pass # won't run anyway
+
+from nose.util import resolve_name
+from nose.plugins.base import Plugin
+from nose.plugins.manager import BuiltinPluginManager
+from nose.config import Config
+from nose.core import TestProgram
+from inspect import isclass
+
+
+def autoplugin_directive(dirname, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ mod_name = arguments[0]
+ mod = resolve_name(mod_name)
+ plug_name = options.get('plugin', None)
+ if plug_name:
+ obj = getattr(mod, plug_name)
+ else:
+ for entry in dir(mod):
+ obj = getattr(mod, entry)
+ if isclass(obj) and issubclass(obj, Plugin) and obj is not Plugin:
+ plug_name = '%s.%s' % (mod_name, entry)
+ break
+
+ # mod docstring
+ rst = ViewList()
+ rst.append('.. automodule :: %s\n' % mod_name, '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ # options
+ rst.append('Options', '<autodoc>')
+ rst.append('-------', '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ plug = obj()
+ opts = OptBucket()
+ plug.options(opts, {})
+ for opt in opts:
+ rst.append(opt.options(), '<autodoc>')
+ rst.append(' \n', '<autodoc>')
+ rst.append(' ' + opt.help + '\n', '<autodoc>')
+ rst.append('\n', '<autodoc>')
+
+ # plugin class
+ rst.append('Plugin', '<autodoc>')
+ rst.append('------', '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ rst.append('.. autoclass :: %s\n' % plug_name, '<autodoc>')
+ rst.append(' :members:\n', '<autodoc>')
+ rst.append(' :show-inheritance:\n', '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ # source
+ rst.append('Source', '<autodoc>')
+ rst.append('------', '<autodoc>')
+ rst.append(
+ '.. include :: %s\n' % utils.relative_path(
+ state_machine.document['source'],
+ os.path.abspath(mod.__file__.replace('.pyc', '.py'))),
+ '<autodoc>')
+ rst.append(' :literal:\n', '<autodoc>')
+ rst.append('', '<autodoc>')
+
+ node = nodes.section()
+ node.document = state.document
+ surrounding_title_styles = state.memo.title_styles
+ surrounding_section_level = state.memo.section_level
+ state.memo.title_styles = []
+ state.memo.section_level = 0
+ state.nested_parse(rst, 0, node, match_titles=1)
+ state.memo.title_styles = surrounding_title_styles
+ state.memo.section_level = surrounding_section_level
+
+ return node.children
+
+
+def autohelp_directive(dirname, arguments, options, content, lineno,
+ content_offset, block_text, state, state_machine):
+ """produces rst from nose help"""
+ config = Config(parserClass=OptBucket,
+ plugins=BuiltinPluginManager())
+ parser = config.getParser(TestProgram.usage())
+ rst = ViewList()
+ for line in parser.format_help().split('\n'):
+ rst.append(line, '<autodoc>')
+
+ rst.append('Options', '<autodoc>')
+ rst.append('-------', '<autodoc>')
+ rst.append('', '<autodoc>')
+ for opt in parser:
+ rst.append(opt.options(), '<autodoc>')
+ rst.append(' \n', '<autodoc>')
+ rst.append(' ' + opt.help + '\n', '<autodoc>')
+ rst.append('\n', '<autodoc>')
+ node = nodes.section()
+ node.document = state.document
+ surrounding_title_styles = state.memo.title_styles
+ surrounding_section_level = state.memo.section_level
+ state.memo.title_styles = []
+ state.memo.section_level = 0
+ state.nested_parse(rst, 0, node, match_titles=1)
+ state.memo.title_styles = surrounding_title_styles
+ state.memo.section_level = surrounding_section_level
+
+ return node.children
+
+
+class OptBucket(object):
+ def __init__(self, doc=None, prog='nosetests'):
+ self.opts = []
+ self.doc = doc
+ self.prog = prog
+
+ def __iter__(self):
+ return iter(self.opts)
+
+ def format_help(self):
+ return self.doc.replace('%prog', self.prog).replace(':\n', '::\n')
+
+ def add_option(self, *arg, **kw):
+ self.opts.append(Opt(*arg, **kw))
+
+
+class Opt(object):
+ def __init__(self, *arg, **kw):
+ self.opts = arg
+ self.action = kw.pop('action', None)
+ self.default = kw.pop('default', None)
+ self.metavar = kw.pop('metavar', None)
+ self.help = kw.pop('help', None)
+
+ def options(self):
+ buf = []
+ for optstring in self.opts:
+ desc = optstring
+ if self.action not in ('store_true', 'store_false'):
+ desc += '=%s' % self.meta(optstring)
+ buf.append(desc)
+ return '.. cmdoption :: ' + ', '.join(buf)
+
+ def meta(self, optstring):
+ # FIXME optparser default metavar?
+ return self.metavar or 'DEFAULT'
+
+
+def setup(app):
+ app.add_directive('autoplugin',
+ autoplugin_directive, 1, (1, 0, 1),
+ plugin=directives.unchanged)
+ app.add_directive('autohelp', autohelp_directive, 0, (0, 0, 1))
diff --git a/scripts/external_libs/nose-1.3.4/nose/suite.py b/scripts/external_libs/nose-1.3.4/nose/suite.py
new file mode 100755
index 00000000..a831105e
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/suite.py
@@ -0,0 +1,609 @@
+"""
+Test Suites
+-----------
+
+Provides a LazySuite, which is a suite whose test list is a generator
+function, and ContextSuite, which can run fixtures (setup/teardown
+functions or methods) for the context that contains its tests.
+
+"""
+from __future__ import generators
+
+import logging
+import sys
+import unittest
+from nose.case import Test
+from nose.config import Config
+from nose.proxy import ResultProxyFactory
+from nose.util import isclass, resolve_name, try_run
+
+if sys.platform == 'cli':
+ if sys.version_info[:2] < (2, 6):
+ import clr
+ clr.AddReference("IronPython")
+ from IronPython.Runtime.Exceptions import StringException
+ else:
+ class StringException(Exception):
+ pass
+
+log = logging.getLogger(__name__)
+#log.setLevel(logging.DEBUG)
+
+# Singleton for default value -- see ContextSuite.__init__ below
+_def = object()
+
+
+def _strclass(cls):
+ return "%s.%s" % (cls.__module__, cls.__name__)
+
+class MixedContextError(Exception):
+ """Error raised when a context suite sees tests from more than
+ one context.
+ """
+ pass
+
+
+class LazySuite(unittest.TestSuite):
+ """A suite that may use a generator as its list of tests
+ """
+ def __init__(self, tests=()):
+ """Initialize the suite. tests may be an iterable or a generator
+ """
+ super(LazySuite, self).__init__()
+ self._set_tests(tests)
+
+ def __iter__(self):
+ return iter(self._tests)
+
+ def __repr__(self):
+ return "<%s tests=generator (%s)>" % (
+ _strclass(self.__class__), id(self))
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ __str__ = __repr__
+
+ def addTest(self, test):
+ self._precache.append(test)
+
+ # added to bypass run changes in 2.7's unittest
+ def run(self, result):
+ for test in self._tests:
+ if result.shouldStop:
+ break
+ test(result)
+ return result
+
+ def __nonzero__(self):
+ log.debug("tests in %s?", id(self))
+ if self._precache:
+ return True
+ if self.test_generator is None:
+ return False
+ try:
+ test = self.test_generator.next()
+ if test is not None:
+ self._precache.append(test)
+ return True
+ except StopIteration:
+ pass
+ return False
+
+ def _get_tests(self):
+ log.debug("precache is %s", self._precache)
+ for test in self._precache:
+ yield test
+ if self.test_generator is None:
+ return
+ for test in self.test_generator:
+ yield test
+
+ def _set_tests(self, tests):
+ self._precache = []
+ is_suite = isinstance(tests, unittest.TestSuite)
+ if callable(tests) and not is_suite:
+ self.test_generator = tests()
+ elif is_suite:
+ # Suites need special treatment: they must be called like
+ # tests for their setup/teardown to run (if any)
+ self.addTests([tests])
+ self.test_generator = None
+ else:
+ self.addTests(tests)
+ self.test_generator = None
+
+ _tests = property(_get_tests, _set_tests, None,
+ "Access the tests in this suite. Access is through a "
+ "generator, so iteration may not be repeatable.")
+
+
+class ContextSuite(LazySuite):
+ """A suite with context.
+
+ A ContextSuite executes fixtures (setup and teardown functions or
+ methods) for the context containing its tests.
+
+ The context may be explicitly passed. If it is not, a context (or
+ nested set of contexts) will be constructed by examining the tests
+ in the suite.
+ """
+ failureException = unittest.TestCase.failureException
+ was_setup = False
+ was_torndown = False
+ classSetup = ('setup_class', 'setup_all', 'setupClass', 'setupAll',
+ 'setUpClass', 'setUpAll')
+ classTeardown = ('teardown_class', 'teardown_all', 'teardownClass',
+ 'teardownAll', 'tearDownClass', 'tearDownAll')
+ moduleSetup = ('setup_module', 'setupModule', 'setUpModule', 'setup',
+ 'setUp')
+ moduleTeardown = ('teardown_module', 'teardownModule', 'tearDownModule',
+ 'teardown', 'tearDown')
+ packageSetup = ('setup_package', 'setupPackage', 'setUpPackage')
+ packageTeardown = ('teardown_package', 'teardownPackage',
+ 'tearDownPackage')
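+ # Any one of the names above, defined on the context, is enough. For a
+ # module context, an illustrative sketch:
+ #
+ #     def setup_module():
+ #         connect_test_db()      # hypothetical helper
+ #     def teardown_module():
+ #         drop_test_db()         # hypothetical helper
+ #
+ # setup_module runs once before the module's tests, teardown_module once
+ # after.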
+
+ def __init__(self, tests=(), context=None, factory=None,
+ config=None, resultProxy=None, can_split=True):
+ log.debug("Context suite for %s (%s) (%s)", tests, context, id(self))
+ self.context = context
+ self.factory = factory
+ if config is None:
+ config = Config()
+ self.config = config
+ self.resultProxy = resultProxy
+ self.has_run = False
+ self.can_split = can_split
+ self.error_context = None
+ super(ContextSuite, self).__init__(tests)
+
+ def __repr__(self):
+ return "<%s context=%s>" % (
+ _strclass(self.__class__),
+ getattr(self.context, '__name__', self.context))
+ __str__ = __repr__
+
+ def id(self):
+ if self.error_context:
+ return '%s:%s' % (repr(self), self.error_context)
+ else:
+ return repr(self)
+
+ def __hash__(self):
+ return object.__hash__(self)
+
+ # 2.3 compat -- force 2.4 call sequence
+ def __call__(self, *arg, **kw):
+ return self.run(*arg, **kw)
+
+ def exc_info(self):
+ """Hook for replacing error tuple output
+ """
+ return sys.exc_info()
+
+ def _exc_info(self):
+ """Bottleneck to fix up IronPython string exceptions
+ """
+ e = self.exc_info()
+ if sys.platform == 'cli':
+ if isinstance(e[0], StringException):
+ # IronPython throws these StringExceptions, but
+ # traceback checks type(etype) == str. Make a real
+ # string here.
+ e = (str(e[0]), e[1], e[2])
+
+ return e
+
+ def run(self, result):
+ """Run tests in suite inside of suite fixtures.
+ """
+ # proxy the result for myself
+ log.debug("suite %s (%s) run called, tests: %s", id(self), self, self._tests)
+ #import pdb
+ #pdb.set_trace()
+ if self.resultProxy:
+ result, orig = self.resultProxy(result, self), result
+ else:
+ result, orig = result, result
+ try:
+ self.setUp()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'setup'
+ result.addError(self, self._exc_info())
+ return
+ try:
+ for test in self._tests:
+ if result.shouldStop:
+ log.debug("stopping")
+ break
+ # each nose.case.Test will create its own result proxy
+ # so the cases need the original result, to avoid proxy
+ # chains
+ test(orig)
+ finally:
+ self.has_run = True
+ try:
+ self.tearDown()
+ except KeyboardInterrupt:
+ raise
+ except:
+ self.error_context = 'teardown'
+ result.addError(self, self._exc_info())
+
+ def hasFixtures(self, ctx_callback=None):
+ context = self.context
+ if context is None:
+ return False
+ if self.implementsAnyFixture(context, ctx_callback=ctx_callback):
+ return True
+ # My context doesn't have any, but its ancestors might
+ factory = self.factory
+ if factory:
+ ancestors = factory.context.get(self, [])
+ for ancestor in ancestors:
+ if self.implementsAnyFixture(
+ ancestor, ctx_callback=ctx_callback):
+ return True
+ return False
+
+ def implementsAnyFixture(self, context, ctx_callback):
+ if isclass(context):
+ names = self.classSetup + self.classTeardown
+ else:
+ names = self.moduleSetup + self.moduleTeardown
+ if hasattr(context, '__path__'):
+ names += self.packageSetup + self.packageTeardown
+ # If my context has any fixture attribute, I have fixtures
+ fixt = False
+ for m in names:
+ if hasattr(context, m):
+ fixt = True
+ break
+ if ctx_callback is None:
+ return fixt
+ return ctx_callback(context, fixt)
+
+ def setUp(self):
+ log.debug("suite %s setUp called, tests: %s", id(self), self._tests)
+ if not self:
+ # I have no tests
+ log.debug("suite %s has no tests", id(self))
+ return
+ if self.was_setup:
+ log.debug("suite %s already set up", id(self))
+ return
+ context = self.context
+ if context is None:
+ return
+ # before running my own context's setup, I need to
+ # ask the factory if my context's contexts' setups have been run
+ factory = self.factory
+ if factory:
+ # get a copy, since we'll be destroying it as we go
+ ancestors = factory.context.get(self, [])[:]
+ while ancestors:
+ ancestor = ancestors.pop()
+ log.debug("ancestor %s may need setup", ancestor)
+ if ancestor in factory.was_setup:
+ continue
+ log.debug("ancestor %s does need setup", ancestor)
+ self.setupContext(ancestor)
+ if not context in factory.was_setup:
+ self.setupContext(context)
+ else:
+ self.setupContext(context)
+ self.was_setup = True
+ log.debug("completed suite setup")
+
+ def setupContext(self, context):
+ self.config.plugins.startContext(context)
+ log.debug("%s setup context %s", self, context)
+ if self.factory:
+ if context in self.factory.was_setup:
+ return
+ # note that I ran the setup for this context, so that I'll run
+ # the teardown in my teardown
+ self.factory.was_setup[context] = self
+ if isclass(context):
+ names = self.classSetup
+ else:
+ names = self.moduleSetup
+ if hasattr(context, '__path__'):
+ names = self.packageSetup + names
+ try_run(context, names)
+
+ def shortDescription(self):
+ if self.context is None:
+ return "test suite"
+ return "test suite for %s" % self.context
+
+ def tearDown(self):
+ log.debug('context teardown')
+ if not self.was_setup or self.was_torndown:
+ log.debug(
+ "No reason to teardown (was_setup? %s was_torndown? %s)"
+ % (self.was_setup, self.was_torndown))
+ return
+ self.was_torndown = True
+ context = self.context
+ if context is None:
+ log.debug("No context to tear down")
+ return
+
+ # for each ancestor... if the ancestor was setup
+ # and I did the setup, I can do teardown
+ factory = self.factory
+ if factory:
+ ancestors = factory.context.get(self, []) + [context]
+ for ancestor in ancestors:
+ log.debug('ancestor %s may need teardown', ancestor)
+ if not ancestor in factory.was_setup:
+ log.debug('ancestor %s was not setup', ancestor)
+ continue
+ if ancestor in factory.was_torndown:
+ log.debug('ancestor %s already torn down', ancestor)
+ continue
+ setup = factory.was_setup[ancestor]
+ log.debug("%s setup ancestor %s", setup, ancestor)
+ if setup is self:
+ self.teardownContext(ancestor)
+ else:
+ self.teardownContext(context)
+
+ def teardownContext(self, context):
+ log.debug("%s teardown context %s", self, context)
+ if self.factory:
+ if context in self.factory.was_torndown:
+ return
+ self.factory.was_torndown[context] = self
+ if isclass(context):
+ names = self.classTeardown
+ else:
+ names = self.moduleTeardown
+ if hasattr(context, '__path__'):
+ names = self.packageTeardown + names
+ try_run(context, names)
+ self.config.plugins.stopContext(context)
+
+ # FIXME the wrapping has to move to the factory?
+ def _get_wrapped_tests(self):
+ for test in self._get_tests():
+ if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
+ yield test
+ else:
+ yield Test(test,
+ config=self.config,
+ resultProxy=self.resultProxy)
+
+ _tests = property(_get_wrapped_tests, LazySuite._set_tests, None,
+ "Access the tests in this suite. Tests are returned "
+ "inside of a context wrapper.")
+
+
+class ContextSuiteFactory(object):
+ """Factory for ContextSuites. Called with a collection of tests,
+ the factory decides on a hierarchy of contexts by introspecting
+ the collection or the tests themselves to find the objects
+ containing the test objects. It always returns one suite, but that
+ suite may consist of a hierarchy of nested suites.
+ """
+ suiteClass = ContextSuite
+ def __init__(self, config=None, suiteClass=None, resultProxy=_def):
+ if config is None:
+ config = Config()
+ self.config = config
+ if suiteClass is not None:
+ self.suiteClass = suiteClass
+ # Using a singleton to represent default instead of None allows
+ # passing resultProxy=None to turn proxying off.
+ if resultProxy is _def:
+ resultProxy = ResultProxyFactory(config=config)
+ self.resultProxy = resultProxy
+ self.suites = {}
+ self.context = {}
+ self.was_setup = {}
+ self.was_torndown = {}
+
+ def __call__(self, tests, **kw):
+ """Return ``ContextSuite`` for tests. ``tests`` may either
+ be a callable (in which case the resulting ContextSuite will
+ have no parent context and be evaluated lazily) or an
+ iterable. In that case the tests will be wrapped in
+ nose.case.Test and examined; the context of each is found and a
+ suite of suites returned, organized into a stack with the
+ outermost suites belonging to the outermost contexts.
+ """
+ log.debug("Create suite for %s", tests)
+ context = kw.pop('context', getattr(tests, 'context', None))
+ log.debug("tests %s context %s", tests, context)
+ if context is None:
+ tests = self.wrapTests(tests)
+ try:
+ context = self.findContext(tests)
+ except MixedContextError:
+ return self.makeSuite(self.mixedSuites(tests), None, **kw)
+ return self.makeSuite(tests, context, **kw)
+
+ def ancestry(self, context):
+ """Return the ancestry of the context (that is, all of the
+ packages and modules containing the context), in order of
+ descent with the outermost ancestor last.
+ This method is a generator.
+ """
+ log.debug("get ancestry %s", context)
+ if context is None:
+ return
+ # Methods include a reference to the module they are defined in. We
+ # don't want that; instead we want the module the class is in now
+ # (classes are re-ancestored elsewhere).
+ if hasattr(context, 'im_class'):
+ context = context.im_class
+ elif hasattr(context, '__self__'):
+ context = context.__self__.__class__
+ if hasattr(context, '__module__'):
+ ancestors = context.__module__.split('.')
+ elif hasattr(context, '__name__'):
+ ancestors = context.__name__.split('.')[:-1]
+ else:
+ raise TypeError("%s has no ancestors?" % context)
+ while ancestors:
+ log.debug(" %s ancestors %s", context, ancestors)
+ yield resolve_name('.'.join(ancestors))
+ ancestors.pop()
+
+ def findContext(self, tests):
+ if callable(tests) or isinstance(tests, unittest.TestSuite):
+ return None
+ context = None
+ for test in tests:
+ # Don't look at suites for contexts, only tests
+ ctx = getattr(test, 'context', None)
+ if ctx is None:
+ continue
+ if context is None:
+ context = ctx
+ elif context != ctx:
+ raise MixedContextError(
+ "Tests with different contexts in same suite! %s != %s"
+ % (context, ctx))
+ return context
+
+ def makeSuite(self, tests, context, **kw):
+ suite = self.suiteClass(
+ tests, context=context, config=self.config, factory=self,
+ resultProxy=self.resultProxy, **kw)
+ if context is not None:
+ self.suites.setdefault(context, []).append(suite)
+ self.context.setdefault(suite, []).append(context)
+ log.debug("suite %s has context %s", suite,
+ getattr(context, '__name__', None))
+ for ancestor in self.ancestry(context):
+ self.suites.setdefault(ancestor, []).append(suite)
+ self.context[suite].append(ancestor)
+ log.debug("suite %s has ancestor %s", suite, ancestor.__name__)
+ return suite
+
+ def mixedSuites(self, tests):
+ """The complex case where there are tests that don't all share
+ the same context. Groups tests into suites with common ancestors,
+ according to the following (essentially tail-recursive) procedure:
+
+ Starting with the context of the first test, if it is not
+ None, look for tests in the remaining tests that share that
+ ancestor. If any are found, group into a suite with that
+ ancestor as the context, and replace the current suite with
+ that suite. Continue this process for each ancestor of the
+ first test, until all ancestors have been processed. At this
+ point if any tests remain, recurse with those tests as the
+ input, returning a list of the common suite (which may be the
+ suite or test we started with, if no common tests were found)
+ plus the results of recursion.
+ """
+ if not tests:
+ return []
+ head = tests.pop(0)
+ if not tests:
+ return [head] # short circuit when none are left to combine
+ suite = head # the common ancestry suite, so far
+ tail = tests[:]
+ context = getattr(head, 'context', None)
+ if context is not None:
+ ancestors = [context] + [a for a in self.ancestry(context)]
+ for ancestor in ancestors:
+ common = [suite] # tests with ancestor in common, so far
+ remain = [] # tests that remain to be processed
+ for test in tail:
+ found_common = False
+ test_ctx = getattr(test, 'context', None)
+ if test_ctx is None:
+ remain.append(test)
+ continue
+ if test_ctx is ancestor:
+ common.append(test)
+ continue
+ for test_ancestor in self.ancestry(test_ctx):
+ if test_ancestor is ancestor:
+ common.append(test)
+ found_common = True
+ break
+ if not found_common:
+ remain.append(test)
+ if common:
+ suite = self.makeSuite(common, ancestor)
+ tail = self.mixedSuites(remain)
+ return [suite] + tail
+
+ def wrapTests(self, tests):
+ log.debug("wrap %s", tests)
+ if callable(tests) or isinstance(tests, unittest.TestSuite):
+ log.debug("I won't wrap")
+ return tests
+ wrapped = []
+ for test in tests:
+ log.debug("wrapping %s", test)
+ if isinstance(test, Test) or isinstance(test, unittest.TestSuite):
+ wrapped.append(test)
+ elif isinstance(test, ContextList):
+ wrapped.append(self.makeSuite(test, context=test.context))
+ else:
+ wrapped.append(
+ Test(test, config=self.config, resultProxy=self.resultProxy)
+ )
+ return wrapped
+
+
+class ContextList(object):
+ """Not quite a suite -- a group of tests in a context. This is used
+ to hint the ContextSuiteFactory about what context the tests
+ belong to, in cases where it may be ambiguous or missing.
+ """
+ def __init__(self, tests, context=None):
+ self.tests = tests
+ self.context = context
+
+ def __iter__(self):
+ return iter(self.tests)
+
+
+class FinalizingSuiteWrapper(unittest.TestSuite):
+ """Wraps suite and calls final function after suite has
+ executed. Used to call final functions in cases (like running in
+ the standard test runner) where test running is not under nose's
+ control.
+ """
+ def __init__(self, suite, finalize):
+ super(FinalizingSuiteWrapper, self).__init__()
+ self.suite = suite
+ self.finalize = finalize
+
+ def __call__(self, *arg, **kw):
+ return self.run(*arg, **kw)
+
+ # 2.7 compat
+ def __iter__(self):
+ return iter(self.suite)
+
+ def run(self, *arg, **kw):
+ try:
+ return self.suite(*arg, **kw)
+ finally:
+ self.finalize(*arg, **kw)
+
+
+# backwards compat -- sort of
+class TestDir:
+ def __init__(*arg, **kw):
+ raise NotImplementedError(
+ "TestDir is not usable with nose 0.10. The class is present "
+ "in nose.suite for backwards compatibility purposes but it "
+ "may not be used.")
+
+
+class TestModule:
+ def __init__(*arg, **kw):
+ raise NotImplementedError(
+ "TestModule is not usable with nose 0.10. The class is present "
+ "in nose.suite for backwards compatibility purposes but it "
+ "may not be used.")
diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/__init__.py b/scripts/external_libs/nose-1.3.4/nose/tools/__init__.py
new file mode 100755
index 00000000..74dab16a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/tools/__init__.py
@@ -0,0 +1,15 @@
+"""
+Tools for testing
+-----------------
+
+nose.tools provides a few convenience functions to make writing tests
+easier. You don't have to use them; nothing in the rest of nose depends
+on any of these methods.
+
+"""
+from nose.tools.nontrivial import *
+from nose.tools.nontrivial import __all__ as nontrivial_all
+from nose.tools.trivial import *
+from nose.tools.trivial import __all__ as trivial_all
+
+__all__ = trivial_all + nontrivial_all
diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py b/scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py
new file mode 100755
index 00000000..28397324
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/tools/nontrivial.py
@@ -0,0 +1,151 @@
+"""Tools not exempt from being descended into in tracebacks"""
+
+import time
+
+
+__all__ = ['make_decorator', 'raises', 'set_trace', 'timed', 'with_setup',
+ 'TimeExpired', 'istest', 'nottest']
+
+
+class TimeExpired(AssertionError):
+ pass
+
+
+def make_decorator(func):
+ """
+ Wraps a test decorator so as to properly replicate metadata
+ of the decorated function, including nose's additional stuff
+ (namely, setup and teardown).
+ """
+ def decorate(newfunc):
+ if hasattr(func, 'compat_func_name'):
+ name = func.compat_func_name
+ else:
+ name = func.__name__
+ newfunc.__dict__ = func.__dict__
+ newfunc.__doc__ = func.__doc__
+ newfunc.__module__ = func.__module__
+ if not hasattr(newfunc, 'compat_co_firstlineno'):
+ newfunc.compat_co_firstlineno = func.func_code.co_firstlineno
+ try:
+ newfunc.__name__ = name
+ except TypeError:
+ # can't set func name in 2.3
+ newfunc.compat_func_name = name
+ return newfunc
+ return decorate
+
+
+def raises(*exceptions):
+ """Test must raise one of expected exceptions to pass.
+
+ Example use::
+
+ @raises(TypeError, ValueError)
+ def test_raises_type_error():
+ raise TypeError("This test passes")
+
+ @raises(Exception)
+ def test_that_fails_by_passing():
+ pass
+
+ If you want to test many assertions about exceptions in a single test,
+ you may want to use `assert_raises` instead.
+ """
+ valid = ' or '.join([e.__name__ for e in exceptions])
+ def decorate(func):
+ name = func.__name__
+ def newfunc(*arg, **kw):
+ try:
+ func(*arg, **kw)
+ except exceptions:
+ pass
+ except:
+ raise
+ else:
+ message = "%s() did not raise %s" % (name, valid)
+ raise AssertionError(message)
+ newfunc = make_decorator(func)(newfunc)
+ return newfunc
+ return decorate
+
+
+def set_trace():
+ """Call pdb.set_trace in the calling frame, first restoring
+ sys.stdout to the real output stream. Note that sys.stdout is NOT
+ reset to whatever it was before the call once pdb is done!
+ """
+ import pdb
+ import sys
+ stdout = sys.stdout
+ sys.stdout = sys.__stdout__
+ pdb.Pdb().set_trace(sys._getframe().f_back)
+
+
+def timed(limit):
+ """Test must finish within specified time limit to pass.
+
+ Example use::
+
+ @timed(.1)
+ def test_that_fails():
+ time.sleep(.2)
+ """
+ def decorate(func):
+ def newfunc(*arg, **kw):
+ start = time.time()
+ result = func(*arg, **kw)
+ end = time.time()
+ if end - start > limit:
+ raise TimeExpired("Time limit (%s) exceeded" % limit)
+ return result
+ newfunc = make_decorator(func)(newfunc)
+ return newfunc
+ return decorate
+
+
+def with_setup(setup=None, teardown=None):
+ """Decorator to add setup and/or teardown methods to a test function::
+
+ @with_setup(setup, teardown)
+ def test_something():
+ " ... "
+
+ Note that `with_setup` is useful *only* for test functions, not for test
+ methods or inside of TestCase subclasses.
+ """
+ def decorate(func, setup=setup, teardown=teardown):
+ if setup:
+ if hasattr(func, 'setup'):
+ _old_s = func.setup
+ def _s():
+ setup()
+ _old_s()
+ func.setup = _s
+ else:
+ func.setup = setup
+ if teardown:
+ if hasattr(func, 'teardown'):
+ _old_t = func.teardown
+ def _t():
+ _old_t()
+ teardown()
+ func.teardown = _t
+ else:
+ func.teardown = teardown
+ return func
+ return decorate
+
+
+def istest(func):
+ """Decorator to mark a function or method as a test
+ """
+ func.__test__ = True
+ return func
+
+
+def nottest(func):
+ """Decorator to mark a function or method as *not* a test
+ """
+ func.__test__ = False
+ return func
diff --git a/scripts/external_libs/nose-1.3.4/nose/tools/trivial.py b/scripts/external_libs/nose-1.3.4/nose/tools/trivial.py
new file mode 100755
index 00000000..cf83efed
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/tools/trivial.py
@@ -0,0 +1,54 @@
+"""Tools so trivial that tracebacks should not descend into them
+
+We define the ``__unittest`` symbol in their module namespace so unittest will
+skip them when printing tracebacks, just as it does for their corresponding
+methods in ``unittest`` proper.
+
+"""
+import re
+import unittest
+
+
+__all__ = ['ok_', 'eq_']
+
+# Use the same flag as unittest itself to prevent descent into these functions:
+__unittest = 1
+
+
+def ok_(expr, msg=None):
+ """Shorthand for assert. Saves 3 whole characters!
+ """
+ if not expr:
+ raise AssertionError(msg)
+
+
+def eq_(a, b, msg=None):
+ """Shorthand for 'assert a == b, "%r != %r" % (a, b)
+ """
+ if not a == b:
+ raise AssertionError(msg or "%r != %r" % (a, b))
+
+
+#
+# Expose assert* from unittest.TestCase
+# - give them pep8 style names
+#
+caps = re.compile('([A-Z])')
+
+def pep8(name):
+ return caps.sub(lambda m: '_' + m.groups()[0].lower(), name)
+
+class Dummy(unittest.TestCase):
+ def nop():
+ pass
+_t = Dummy('nop')
+
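+# The loop below re-exports unittest's assert* methods under pep8-style
+# names derived by pep8(), e.g. pep8('assertEqual') == 'assert_equal':
+#
+#     from nose.tools.trivial import assert_equal   # illustrative use
+#     assert_equal(2 + 2, 4)
+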
+for at in [ at for at in dir(_t)
+ if at.startswith('assert') and not '_' in at ]:
+ pepd = pep8(at)
+ vars()[pepd] = getattr(_t, at)
+ __all__.append(pepd)
+
+del Dummy
+del _t
+del pep8
diff --git a/scripts/external_libs/nose-1.3.4/nose/twistedtools.py b/scripts/external_libs/nose-1.3.4/nose/twistedtools.py
new file mode 100755
index 00000000..8d9c6ffe
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/twistedtools.py
@@ -0,0 +1,173 @@
+"""
+Twisted integration
+-------------------
+
+This module provides a very simple way to integrate your tests with the
+Twisted_ event loop.
+
+You must import this module *before* importing anything from Twisted itself!
+
+Example::
+
+ from nose.twistedtools import reactor, deferred
+
+ @deferred()
+ def test_resolve():
+ return reactor.resolve("www.python.org")
+
+Or, more realistically::
+
+ @deferred(timeout=5.0)
+ def test_resolve():
+ d = reactor.resolve("www.python.org")
+ def check_ip(ip):
+ assert ip == "67.15.36.43"
+ d.addCallback(check_ip)
+ return d
+
+.. _Twisted: http://twistedmatrix.com/trac/
+"""
+
+import sys
+from Queue import Queue, Empty
+from nose.tools import make_decorator, TimeExpired
+
+__all__ = [
+ 'threaded_reactor', 'reactor', 'deferred', 'TimeExpired',
+ 'stop_reactor'
+]
+
+_twisted_thread = None
+
+def threaded_reactor():
+ """
+ Start the Twisted reactor in a separate thread, if not already done.
+ Returns the reactor.
+ The thread will automatically be destroyed when all the tests are done.
+ """
+ global _twisted_thread
+ try:
+ from twisted.internet import reactor
+ except ImportError:
+ return None, None
+ if not _twisted_thread:
+ from twisted.python import threadable
+ from threading import Thread
+ _twisted_thread = Thread(target=lambda: reactor.run( \
+ installSignalHandlers=False))
+ _twisted_thread.setDaemon(True)
+ _twisted_thread.start()
+ return reactor, _twisted_thread
+
+# Export global reactor variable, as Twisted does
+reactor, reactor_thread = threaded_reactor()
+
+
+def stop_reactor():
+ """Stop the reactor and join the reactor thread until it stops.
+ Call this function in teardown at the module or package level to
+ reset the twisted system after your tests. You *must* do this if
+ you mix tests using these tools and tests using twisted.trial.
+ """
+ global _twisted_thread
+
+ def stop_reactor():
+ '''Helper for calling stop from within the thread.'''
+ reactor.stop()
+
+ reactor.callFromThread(stop_reactor)
+ reactor_thread.join()
+ for p in reactor.getDelayedCalls():
+ if p.active():
+ p.cancel()
+ _twisted_thread = None
+
+
+def deferred(timeout=None):
+ """
+ By wrapping a test function with this decorator, you can return a
+ twisted Deferred and the test will wait for the deferred to be triggered.
+ The whole test function will run inside the Twisted event loop.
+
+ The optional timeout parameter specifies the maximum duration of the test.
+ The difference from timed() is that timed() will still wait for the test
+ to end, while deferred() will stop the test when its timeout has expired.
+ The latter is more desirable when dealing with network tests, because
+ the result may never arrive.
+
+ If the callback is triggered, the test has passed.
+ If the errback is triggered or the timeout expires, the test has failed.
+
+ Example::
+
+ @deferred(timeout=5.0)
+ def test_resolve():
+ return reactor.resolve("www.python.org")
+
+ Attention! If you combine this decorator with other decorators (like
+ "raises"), deferred() must be called *first*!
+
+ In other words, this is good::
+
+ @raises(DNSLookupError)
+ @deferred()
+ def test_error():
+ return reactor.resolve("xxxjhjhj.biz")
+
+ and this is bad::
+
+ @deferred()
+ @raises(DNSLookupError)
+ def test_error():
+ return reactor.resolve("xxxjhjhj.biz")
+ """
+ reactor, reactor_thread = threaded_reactor()
+ if reactor is None:
+ raise ImportError("twisted is not available or could not be imported")
+ # Check for common syntax mistake
+ # (otherwise, tests can be silently ignored
+ # if one writes "@deferred" instead of "@deferred()")
+ try:
+ timeout is None or timeout + 0
+ except TypeError:
+ raise TypeError("'timeout' argument must be a number or None")
+
+ def decorate(func):
+ def wrapper(*args, **kargs):
+ q = Queue()
+ def callback(value):
+ q.put(None)
+ def errback(failure):
+ # Retrieve and save full exception info
+ try:
+ failure.raiseException()
+ except:
+ q.put(sys.exc_info())
+ def g():
+ try:
+ d = func(*args, **kargs)
+ try:
+ d.addCallbacks(callback, errback)
+ # Check for a common mistake and display a nice error
+ # message
+ except AttributeError:
+ raise TypeError("you must return a twisted Deferred "
+ "from your test case!")
+ # Catch exceptions raised in the test body (from the
+ # Twisted thread)
+ except:
+ q.put(sys.exc_info())
+ reactor.callFromThread(g)
+ try:
+ error = q.get(timeout=timeout)
+ except Empty:
+ raise TimeExpired("timeout expired before end of test (%f s.)"
+ % timeout)
+ # Re-raise all exceptions
+ if error is not None:
+ exc_type, exc_value, tb = error
+ raise exc_type, exc_value, tb
+ wrapper = make_decorator(func)(wrapper)
+ return wrapper
+ return decorate
+
diff --git a/scripts/external_libs/nose-1.3.4/nose/usage.txt b/scripts/external_libs/nose-1.3.4/nose/usage.txt
new file mode 100755
index 00000000..bc96894a
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/usage.txt
@@ -0,0 +1,115 @@
+nose collects tests automatically from python source files,
+directories and packages found in its working directory (which
+defaults to the current working directory). Any python source file,
+directory or package that matches the testMatch regular expression
+(by default: `(?:^|[\b_\.-])[Tt]est`) will be collected as a test (or
+source for collection of tests). In addition, all other packages
+found in the working directory will be examined for python source files
+or directories that match testMatch. Package discovery descends all
+the way down the tree, so package.tests and package.sub.tests and
+package.sub.sub2.tests will all be collected.
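+
+For example, with the default testMatch, files named test_math.py or
+math_test.py are collected, while latest.py is not, because there 'test'
+follows an ordinary word character rather than starting the name or
+following one of _ . or -.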
+
+Within a test directory or package, any python source file matching
+testMatch will be examined for test cases. Within a test module,
+functions and classes whose names match testMatch and TestCase
+subclasses with any name will be loaded and executed as tests. Tests
+may use the assert keyword or raise AssertionErrors to indicate test
+failure. TestCase subclasses may do the same or use the various
+TestCase methods available.
+
+**It is important to note that the default behavior of nose is to
+not include tests from files which are executable.** To include
+tests from such files, remove their executable bit or use
+the --exe flag (see 'Options' section below).
+
+Selecting Tests
+---------------
+
+To specify which tests to run, pass test names on the command line:
+
+ %prog only_test_this.py
+
+Test names specified may be file or module names, and may optionally
+indicate the test case to run by separating the module or file name
+from the test case name with a colon. Filenames may be relative or
+absolute. Examples:
+
+ %prog test.module
+ %prog another.test:TestCase.test_method
+ %prog a.test:TestCase
+ %prog /path/to/test/file.py:test_function
+
+You may also change the working directory where nose looks for tests
+by using the -w switch:
+
+ %prog -w /path/to/tests
+
+Note, however, that support for multiple -w arguments is now deprecated
+and will be removed in a future release. As of nose 0.10, you can get
+the same behavior by specifying the target directories *without*
+the -w switch:
+
+ %prog /path/to/tests /another/path/to/tests
+
+Further customization of test selection and loading is possible
+through the use of plugins.
+
+Test result output is identical to that of unittest, except for
+the additional features (error classes, and plugin-supplied
+features such as output capture and assert introspection) detailed
+in the options below.
+
+Configuration
+-------------
+
+In addition to passing command-line options, you may also put
+configuration options in your project's *setup.cfg* file, or a .noserc
+or nose.cfg file in your home directory. In any of these standard
+ini-style config files, you put your nosetests configuration in a
+``[nosetests]`` section. Options are the same as on the command line,
+with the -- prefix removed. For options that are simple switches, you
+must supply a value:
+
+ [nosetests]
+ verbosity=3
+ with-doctest=1
+
+All configuration files that are found will be loaded and their
+options combined. You can override the standard config file loading
+with the ``-c`` option.
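+
+For example, to bypass the standard files and read a single config
+file (path illustrative):
+
+ %prog -c /path/to/special.cfg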
+
+Using Plugins
+-------------
+
+There are numerous nose plugins available via easy_install and
+elsewhere. To use a plugin, just install it. The plugin will add
+command line options to nosetests. To verify that the plugin is installed,
+run:
+
+ nosetests --plugins
+
+You can add -v or -vv to that command to show more information
+about each plugin.
+
+If you are running nose.main() or nose.run() from a script, you
+can specify a list of plugins to use by passing a list of plugins
+with the plugins keyword argument.
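+
+A minimal sketch (``MyPlugin`` is a stand-in for your own plugin class;
+the import path is hypothetical):
+
+ import nose
+ from myproject.plugins import MyPlugin # hypothetical import
+
+ nose.main(plugins=[MyPlugin()])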
+
+0.9 plugins
+-----------
+
+nose 1.0 can use SOME plugins that were written for nose 0.9. The
+default plugin manager inserts a compatibility wrapper around 0.9
+plugins that adapts the changed plugin api calls. However, plugins
+that access nose internals are likely to fail, especially if they
+attempt to access test case or test suite classes. For example,
+plugins that try to determine if a test passed to startTest is an
+individual test or a suite will fail, partly because suites are no
+longer passed to startTest and partly because it's likely that the
+plugin is trying to find out if the test is an instance of a class
+that no longer exists.
+
+0.10 and 0.11 plugins
+---------------------
+
+All plugins written for nose 0.10 and 0.11 should work with nose 1.0.
diff --git a/scripts/external_libs/nose-1.3.4/nose/util.py b/scripts/external_libs/nose-1.3.4/nose/util.py
new file mode 100755
index 00000000..e6f735e0
--- /dev/null
+++ b/scripts/external_libs/nose-1.3.4/nose/util.py
@@ -0,0 +1,660 @@
+"""Utility functions and classes used by nose internally.
+"""
+import inspect
+import itertools
+import logging
+import os
+import re
+import sys
+import types
+import unittest
+from nose.pyversion import ClassType, TypeType, isgenerator, ismethod
+
+
+log = logging.getLogger('nose')
+
+ident_re = re.compile(r'^[A-Za-z_][A-Za-z0-9_.]*$')
+class_types = (ClassType, TypeType)
+skip_pattern = r"(?:\.svn)|(?:[^.]+\.py[co])|(?:.*~)|(?:.*\$py\.class)|(?:__pycache__)"
+
+try:
+ set()
+ set = set # make from nose.util import set happy
+except NameError:
+ try:
+ from sets import Set as set
+ except ImportError:
+ pass
+
+
+def ls_tree(dir_path="",
+ skip_pattern=skip_pattern,
+ indent="|-- ", branch_indent="| ",
+ last_indent="`-- ", last_branch_indent=" "):
+ # TODO: empty directories look like non-directory files
+ return "\n".join(_ls_tree_lines(dir_path, skip_pattern,
+ indent, branch_indent,
+ last_indent, last_branch_indent))
+
+
+def _ls_tree_lines(dir_path, skip_pattern,
+ indent, branch_indent, last_indent, last_branch_indent):
+ if dir_path == "":
+ dir_path = os.getcwd()
+
+ lines = []
+
+ names = os.listdir(dir_path)
+ names.sort()
+ dirs, nondirs = [], []
+ for name in names:
+ if re.match(skip_pattern, name):
+ continue
+ if os.path.isdir(os.path.join(dir_path, name)):
+ dirs.append(name)
+ else:
+ nondirs.append(name)
+
+ # list non-directories first
+ entries = list(itertools.chain([(name, False) for name in nondirs],
+ [(name, True) for name in dirs]))
+ def ls_entry(name, is_dir, ind, branch_ind):
+ if not is_dir:
+ yield ind + name
+ else:
+ path = os.path.join(dir_path, name)
+ if not os.path.islink(path):
+ yield ind + name
+ subtree = _ls_tree_lines(path, skip_pattern,
+ indent, branch_indent,
+ last_indent, last_branch_indent)
+ for x in subtree:
+ yield branch_ind + x
+ for name, is_dir in entries[:-1]:
+ for line in ls_entry(name, is_dir, indent, branch_indent):
+ yield line
+ if entries:
+ name, is_dir = entries[-1]
+ for line in ls_entry(name, is_dir, last_indent, last_branch_indent):
+ yield line
+
+
+def absdir(path):
+ """Return absolute, normalized path to directory, if it exists; None
+ otherwise.
+ """
+ if not os.path.isabs(path):
+ path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
+ path)))
+ if path is None or not os.path.isdir(path):
+ return None
+ return path
+
+
+def absfile(path, where=None):
+ """Return absolute, normalized path to file (optionally in directory
+ where), or None if the file can't be found either in where or the current
+ working directory.
+ """
+ orig = path
+ if where is None:
+ where = os.getcwd()
+ if isinstance(where, list) or isinstance(where, tuple):
+ for maybe_path in where:
+ maybe_abs = absfile(path, maybe_path)
+ if maybe_abs is not None:
+ return maybe_abs
+ return None
+ if not os.path.isabs(path):
+ path = os.path.normpath(os.path.abspath(os.path.join(where, path)))
+ if path is None or not os.path.exists(path):
+ if where != os.getcwd():
+ # try the cwd instead
+ path = os.path.normpath(os.path.abspath(os.path.join(os.getcwd(),
+ orig)))
+ if path is None or not os.path.exists(path):
+ return None
+ if os.path.isdir(path):
+ # might want an __init__.py from the package
+ init = os.path.join(path,'__init__.py')
+ if os.path.isfile(init):
+ return init
+ elif os.path.isfile(path):
+ return path
+ return None
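+
+# Illustrative examples (not part of the original nose source):
+# absfile('util.py', where='nose') -> absolute path to nose/util.py
+# absfile('nose') -> absolute path to nose/__init__.py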
+
+
+def anyp(predicate, iterable):
+ for item in iterable:
+ if predicate(item):
+ return True
+ return False
+
+
+def file_like(name):
+ """A name is file-like if it is a path that exists, or it has a
+ directory part, or it ends in .py, or it isn't a legal python
+ identifier.
+ """
+ return (os.path.exists(name)
+ or os.path.dirname(name)
+ or name.endswith('.py')
+ or not ident_re.match(os.path.splitext(name)[0]))
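+
+# For example (illustrative): file_like('foo.py') and file_like('dir/mod')
+# are true, while file_like('package.module') is not.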
+
+
+def func_lineno(func):
+ """Get the line number of a function. First looks for
+ compat_co_firstlineno, then func_code.co_firstlineno.
+ """
+ try:
+ return func.compat_co_firstlineno
+ except AttributeError:
+ try:
+ return func.func_code.co_firstlineno
+ except AttributeError:
+ return -1
+
+
+def isclass(obj):
+ """Is obj a class? Inspect's isclass is too liberal and returns True
+ for objects that can't be subclasses of anything.
+ """
+ obj_type = type(obj)
+ return obj_type in class_types or issubclass(obj_type, type)
+
+
+# backwards compat (issue #64)
+is_generator = isgenerator
+
+
+def ispackage(path):
+ """
+ Is this path a package directory?
+
+ >>> ispackage('nose')
+ True
+ >>> ispackage('unit_tests')
+ False
+ >>> ispackage('nose/plugins')
+ True
+ >>> ispackage('nose/loader.py')
+ False
+ """
+ if os.path.isdir(path):
+ # at least the end of the path must be a legal python identifier
+ # and __init__.py[co] must exist
+ end = os.path.basename(path)
+ if ident_re.match(end):
+ for init in ('__init__.py', '__init__.pyc', '__init__.pyo'):
+ if os.path.isfile(os.path.join(path, init)):
+ return True
+ if sys.platform.startswith('java') and \
+ os.path.isfile(os.path.join(path, '__init__$py.class')):
+ return True
+ return False
+
+
+def isproperty(obj):
+ """
+ Is this a property?
+
+ >>> class Foo:
+ ... def got(self):
+ ... return 2
+ ... def get(self):
+ ... return 1
+ ... get = property(get)
+
+ >>> isproperty(Foo.got)
+ False
+ >>> isproperty(Foo.get)
+ True
+ """
+ return type(obj) == property
+
+
+def getfilename(package, relativeTo=None):
+ """Find the python source file for a package, relative to a
+ particular directory (defaults to current working directory if not
+ given).
+ """
+ if relativeTo is None:
+ relativeTo = os.getcwd()
+ path = os.path.join(relativeTo, os.sep.join(package.split('.')))
+ suffixes = ('/__init__.py', '.py')
+ for suffix in suffixes:
+ filename = path + suffix
+ if os.path.exists(filename):
+ return filename
+ return None
+
+
+def getpackage(filename):
+ """
+ Find the full dotted package name for a given python source file
+ name. Returns None if the file is not a python source file.
+
+ >>> getpackage('foo.py')
+ 'foo'
+ >>> getpackage('biff/baf.py')
+ 'baf'
+ >>> getpackage('nose/util.py')
+ 'nose.util'
+
+ Works for directories too.
+
+ >>> getpackage('nose')
+ 'nose'
+ >>> getpackage('nose/plugins')
+ 'nose.plugins'
+
+ And __init__ files stuck onto directories
+
+ >>> getpackage('nose/plugins/__init__.py')
+ 'nose.plugins'
+
+ Absolute paths also work.
+
+ >>> path = os.path.abspath(os.path.join('nose', 'plugins'))
+ >>> getpackage(path)
+ 'nose.plugins'
+ """
+ src_file = src(filename)
+ if (os.path.isdir(src_file) or not src_file.endswith('.py')) and not ispackage(src_file):
+ return None
+ base, ext = os.path.splitext(os.path.basename(src_file))
+ if base == '__init__':
+ mod_parts = []
+ else:
+ mod_parts = [base]
+ path, part = os.path.split(os.path.split(src_file)[0])
+ while part:
+ if ispackage(os.path.join(path, part)):
+ mod_parts.append(part)
+ else:
+ break
+ path, part = os.path.split(path)
+ mod_parts.reverse()
+ return '.'.join(mod_parts)
+
+
+def ln(label):
+ """Draw a 70-char-wide divider, with label in the middle.
+
+ >>> ln('hello there')
+ '---------------------------- hello there -----------------------------'
+ """
+ label_len = len(label) + 2
+ chunk = (70 - label_len) // 2
+ out = '%s %s %s' % ('-' * chunk, label, '-' * chunk)
+ pad = 70 - len(out)
+ if pad > 0:
+ out = out + ('-' * pad)
+ return out
+
+
+def resolve_name(name, module=None):
+ """Resolve a dotted name to a module and its parts. This is stolen
+ wholesale from unittest.TestLoader.loadTestByName.
+
+ >>> resolve_name('nose.util') #doctest: +ELLIPSIS
+ <module 'nose.util' from...>
+ >>> resolve_name('nose.util.resolve_name') #doctest: +ELLIPSIS
+ <function resolve_name at...>
+ """
+ parts = name.split('.')
+ parts_copy = parts[:]
+ if module is None:
+ while parts_copy:
+ try:
+ log.debug("__import__ %s", name)
+ module = __import__('.'.join(parts_copy))
+ break
+ except ImportError:
+ del parts_copy[-1]
+ if not parts_copy:
+ raise
+ parts = parts[1:]
+ obj = module
+ log.debug("resolve: %s, %s, %s, %s", parts, name, obj, module)
+ for part in parts:
+ obj = getattr(obj, part)
+ return obj
+
+
+def split_test_name(test):
+ """Split a test name into a 3-tuple containing file, module, and callable
+ names, any of which (but not all) may be blank.
+
+ Test names are in the form:
+
+ file_or_module:callable
+
+ Either side of the : may be dotted. To change the splitting behavior, you
+ can alter nose.util.split_test_re.
+ """
+ norm = os.path.normpath
+ file_or_mod = test
+ fn = None
+ if not ':' in test:
+ # only a file or mod part
+ if file_like(test):
+ return (norm(test), None, None)
+ else:
+ return (None, test, None)
+
+ # could be path|mod:callable, or a : in the file path someplace
+ head, tail = os.path.split(test)
+ if not head:
+ # this is a case like 'foo:bar' -- generally a module
+ # name followed by a callable, but also may be a windows
+ # drive letter followed by a path
+ try:
+ file_or_mod, fn = test.split(':')
+ if file_like(fn):
+ # must be a funny path
+ file_or_mod, fn = test, None
+ except ValueError:
+ # more than one : in the test
+ # this is a case like c:\some\path.py:a_test
+ parts = test.split(':')
+ if len(parts[0]) == 1:
+ file_or_mod, fn = ':'.join(parts[:-1]), parts[-1]
+ else:
+ # nonsense like foo:bar:baz
+ raise ValueError("Test name '%s' could not be parsed. Please "
+ "format test names as path:callable or "
+ "module:callable." % (test,))
+ elif not tail:
+ # this is a case like 'foo:bar/'
+ # : must be part of the file path, so ignore it
+ file_or_mod = test
+ else:
+ if ':' in tail:
+ file_part, fn = tail.split(':')
+ else:
+ file_part = tail
+ file_or_mod = os.sep.join([head, file_part])
+ if file_or_mod:
+ if file_like(file_or_mod):
+ return (norm(file_or_mod), None, fn)
+ else:
+ return (None, file_or_mod, fn)
+ else:
+ return (None, None, fn)
+split_test_name.__test__ = False # do not collect
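+
+# Illustrative examples (not part of the original nose source):
+# split_test_name('tests/test_mod.py:TestCase.test_method')
+# -> ('tests/test_mod.py', None, 'TestCase.test_method')
+# split_test_name('pkg.module:func') -> (None, 'pkg.module', 'func')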
+
+
+def test_address(test):
+ """Find the test address for a test, which may be a module, filename,
+ class, method or function.
+ """
+ if hasattr(test, "address"):
+ return test.address()
+ # type-based polymorphism sucks in general, but I believe is
+ # appropriate here
+ t = type(test)
+ file = module = call = None
+ if t == types.ModuleType:
+ file = getattr(test, '__file__', None)
+ module = getattr(test, '__name__', None)
+ return (src(file), module, call)
+ if t == types.FunctionType or issubclass(t, type) or t == types.ClassType:
+ module = getattr(test, '__module__', None)
+ if module is not None:
+ m = sys.modules[module]
+ file = getattr(m, '__file__', None)
+ if file is not None:
+ file = os.path.abspath(file)
+ call = getattr(test, '__name__', None)
+ return (src(file), module, call)
+ if t == types.MethodType:
+ cls_adr = test_address(test.im_class)
+ return (src(cls_adr[0]), cls_adr[1],
+ "%s.%s" % (cls_adr[2], test.__name__))
+ # handle unittest.TestCase instances
+ if isinstance(test, unittest.TestCase):
+ if (hasattr(test, '_FunctionTestCase__testFunc') # pre 2.7
+ or hasattr(test, '_testFunc')): # 2.7
+ # unittest FunctionTestCase
+ try:
+ return test_address(test._FunctionTestCase__testFunc)
+ except AttributeError:
+ return test_address(test._testFunc)
+ # regular unittest.TestCase
+ cls_adr = test_address(test.__class__)
+ # 2.5 compat: __testMethodName changed to _testMethodName
+ try:
+ method_name = test._TestCase__testMethodName
+ except AttributeError:
+ method_name = test._testMethodName
+ return (src(cls_adr[0]), cls_adr[1],
+ "%s.%s" % (cls_adr[2], method_name))
+ if (hasattr(test, '__class__') and
+ test.__class__.__module__ not in ('__builtin__', 'builtins')):
+ return test_address(test.__class__)
+ raise TypeError("I don't know what %s is (%s)" % (test, t))
+test_address.__test__ = False # do not collect
+
+
+def try_run(obj, names):
+ """Given a list of possible method names, try to run them with the
+ provided object. Keep going until something works. Used to run
+ setup/teardown methods for module, package, and function tests.
+ """
+ for name in names:
+ func = getattr(obj, name, None)
+ if func is not None:
+ if type(obj) == types.ModuleType:
+ # py.test compatibility
+ if isinstance(func, types.FunctionType):
+ args, varargs, varkw, defaults = \
+ inspect.getargspec(func)
+ else:
+ # Not a function. If it's callable, call it anyway
+ if hasattr(func, '__call__') and not inspect.ismethod(func):
+ func = func.__call__
+ try:
+ args, varargs, varkw, defaults = \
+ inspect.getargspec(func)
+ args.pop(0) # pop the self off
+ except TypeError:
+ raise TypeError("Attribute %s of %r is not a python "
+ "function. Only functions or callables"
+ " may be used as fixtures." %
+ (name, obj))
+ if len(args):
+ log.debug("call fixture %s.%s(%s)", obj, name, obj)
+ return func(obj)
+ log.debug("call fixture %s.%s", obj, name)
+ return func()
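+
+# For example (illustrative): try_run(module, ('setup', 'setUp')) calls the
+# first of module.setup / module.setUp that is defined, passing the module
+# in when the fixture accepts an argument.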
+
+
+def src(filename):
+ """Find the python source file for a .pyc, .pyo or $py.class file on
+ jython. Returns the filename provided if it is not a python source
+ file.
+ """
+ if filename is None:
+ return filename
+ if sys.platform.startswith('java') and filename.endswith('$py.class'):
+ return '.'.join((filename[:-9], 'py'))
+ base, ext = os.path.splitext(filename)
+ if ext in ('.pyc', '.pyo', '.py'):
+ return '.'.join((base, 'py'))
+ return filename
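+
+# For example (illustrative): src('module.pyc') -> 'module.py', while
+# src('data.txt') is returned unchanged.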
+
+
+def regex_last_key(regex):
+ """Sort key function factory that puts items that match a
+ regular expression last.
+
+ >>> from nose.config import Config
+ >>> from nose.pyversion import sort_list
+ >>> c = Config()
+ >>> regex = c.testMatch
+ >>> entries = ['.', '..', 'a_test', 'src', 'lib', 'test', 'foo.py']
+ >>> sort_list(entries, regex_last_key(regex))
+ >>> entries
+ ['.', '..', 'foo.py', 'lib', 'src', 'a_test', 'test']
+ """
+ def k(obj):
+ if regex.search(obj):
+ return (1, obj)
+ return (0, obj)
+ return k
+
+
+def tolist(val):
+ """Convert a value that may be a list or a (possibly comma-separated)
+ string into a list. The exception: None is returned as None, not [None].
+
+ >>> tolist(["one", "two"])
+ ['one', 'two']
+ >>> tolist("hello")
+ ['hello']
+ >>> tolist("separate,values, with, commas, spaces , are ,ok")
+ ['separate', 'values', 'with', 'commas', 'spaces', 'are', 'ok']
+ """
+ if val is None:
+ return None
+ try:
+ # might already be a list
+ val.extend([])
+ return val
+ except AttributeError:
+ pass
+ # might be a string
+ try:
+ return re.split(r'\s*,\s*', val)
+ except TypeError:
+ # who knows...
+ return list(val)
+
+
+class odict(dict):
+ """Simple ordered dict implementation, based on:
+
+ http://aspn.activestate.com/ASPN/Cookbook/Python/Recipe/107747
+ """
+ def __init__(self, *arg, **kw):
+ self._keys = []
+ super(odict, self).__init__(*arg, **kw)
+
+ def __delitem__(self, key):
+ super(odict, self).__delitem__(key)
+ self._keys.remove(key)
+
+ def __setitem__(self, key, item):
+ super(odict, self).__setitem__(key, item)
+ if key not in self._keys:
+ self._keys.append(key)
+
+ def __str__(self):
+ return "{%s}" % ', '.join(["%r: %r" % (k, v) for k, v in self.items()])
+
+ def clear(self):
+ super(odict, self).clear()
+ self._keys = []
+
+ def copy(self):
+ d = super(odict, self).copy()
+ d._keys = self._keys[:]
+ return d
+
+ def items(self):
+ return zip(self._keys, self.values())
+
+ def keys(self):
+ return self._keys[:]
+
+ def setdefault(self, key, failobj=None):
+ item = super(odict, self).setdefault(key, failobj)
+ if key not in self._keys:
+ self._keys.append(key)
+ return item
+
+ def update(self, dict):
+ super(odict, self).update(dict)
+ for key in dict.keys():
+ if key not in self._keys:
+ self._keys.append(key)
+
+ def values(self):
+ return map(self.get, self._keys)
+
+
+def transplant_func(func, module):
+ """
+ Make a function imported from module A appear as if it is located
+ in module B.
+
+ >>> from pprint import pprint
+ >>> pprint.__module__
+ 'pprint'
+ >>> pp = transplant_func(pprint, __name__)
+ >>> pp.__module__
+ 'nose.util'
+
+ The original function is not modified.
+
+ >>> pprint.__module__
+ 'pprint'
+
+ Calling the transplanted function calls the original.
+
+ >>> pp([1, 2])
+ [1, 2]
+ >>> pprint([1,2])
+ [1, 2]
+
+ """
+ from nose.tools import make_decorator
+ if isgenerator(func):
+ def newfunc(*arg, **kw):
+ for v in func(*arg, **kw):
+ yield v
+ else:
+ def newfunc(*arg, **kw):
+ return func(*arg, **kw)
+
+ newfunc = make_decorator(func)(newfunc)
+ newfunc.__module__ = module
+ return newfunc
+
+
+def transplant_class(cls, module):
+ """
+ Make a class appear to reside in `module`, rather than the module in which
+ it is actually defined.
+
+ >>> from nose.failure import Failure
+ >>> Failure.__module__
+ 'nose.failure'
+ >>> Nf = transplant_class(Failure, __name__)
+ >>> Nf.__module__
+ 'nose.util'
+ >>> Nf.__name__
+ 'Failure'
+
+ """
+ class C(cls):
+ pass
+ C.__module__ = module
+ C.__name__ = cls.__name__
+ return C
+
+
+def safe_str(val, encoding='utf-8'):
+ try:
+ return str(val)
+ except UnicodeEncodeError:
+ if isinstance(val, Exception):
+ return ' '.join([safe_str(arg, encoding)
+ for arg in val])
+ return unicode(val).encode(encoding)
+
+
+if __name__ == '__main__':
+ import doctest
+ doctest.testmod()
diff --git a/scripts/external_libs/progressbar-2.2/LICENSE b/scripts/external_libs/progressbar-2.2/LICENSE
new file mode 100755
index 00000000..ffce87a8
--- /dev/null
+++ b/scripts/external_libs/progressbar-2.2/LICENSE
@@ -0,0 +1,16 @@
+progressbar - Text progressbar library for python.
+Copyright (C) 2005 Nilton Volpato
+
+This library is free software; you can redistribute it and/or
+modify it under the terms of the GNU Lesser General Public
+License as published by the Free Software Foundation; either
+version 2.1 of the License, or (at your option) any later version.
+
+This library is distributed in the hope that it will be useful,
+but WITHOUT ANY WARRANTY; without even the implied warranty of
+MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+Lesser General Public License for more details.
+
+You should have received a copy of the GNU Lesser General Public
+License along with this library; if not, write to the Free Software
+Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
diff --git a/scripts/external_libs/progressbar-2.2/MANIFEST b/scripts/external_libs/progressbar-2.2/MANIFEST
new file mode 100755
index 00000000..6e64f9f1
--- /dev/null
+++ b/scripts/external_libs/progressbar-2.2/MANIFEST
@@ -0,0 +1,6 @@
+LICENSE
+MANIFEST
+MANIFEST.in
+README
+progressbar.py
+setup.py
diff --git a/scripts/external_libs/progressbar-2.2/MANIFEST.in b/scripts/external_libs/progressbar-2.2/MANIFEST.in
new file mode 100755
index 00000000..31ed35dd
--- /dev/null
+++ b/scripts/external_libs/progressbar-2.2/MANIFEST.in
@@ -0,0 +1,3 @@
+include README MANIFEST MANIFEST.in LICENSE
+include setup.py
+include progressbar.py
diff --git a/scripts/external_libs/progressbar-2.2/PKG-INFO b/scripts/external_libs/progressbar-2.2/PKG-INFO
new file mode 100755
index 00000000..c8d81b37
--- /dev/null
+++ b/scripts/external_libs/progressbar-2.2/PKG-INFO
@@ -0,0 +1,38 @@
+Metadata-Version: 1.0
+Name: progressbar
+Version: 2.2
+Summary: Text progressbar library for python.
+Home-page: http://qubit.ic.unicamp.br/~nilton
+Author: Nilton Volpato
+Author-email: first-name dot last-name @ gmail.com
+License: UNKNOWN
+Description: Text progressbar library for python.
+
+ This library provides a text mode progressbar. This is typically used
+ to display the progress of a long running operation, providing a
+ visual cue that processing is underway.
+
+ The ProgressBar class manages the progress, and the format of the line
+ is given by a number of widgets. A widget is an object that may
+ display differently depending on the state of the progress. There are
+ three types of widget:
+ - a string, which always shows itself;
+ - a ProgressBarWidget, which may return a different value every time
+ its update method is called; and
+ - a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
+ expands to fill the remaining width of the line.
+
+ The progressbar module is very easy to use, yet very powerful, and
+ automatically supports features like auto-resizing when available.
+
+Platform: UNKNOWN
+Classifier: Development Status :: 5 - Production/Stable
+Classifier: Environment :: Console
+Classifier: Intended Audience :: Developers
+Classifier: Intended Audience :: System Administrators
+Classifier: License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)
+Classifier: Operating System :: OS Independent
+Classifier: Programming Language :: Python
+Classifier: Topic :: Software Development :: Libraries
+Classifier: Topic :: Software Development :: User Interfaces
+Classifier: Topic :: Terminals
diff --git a/scripts/external_libs/progressbar-2.2/README b/scripts/external_libs/progressbar-2.2/README
new file mode 100755
index 00000000..a5e861bf
--- /dev/null
+++ b/scripts/external_libs/progressbar-2.2/README
@@ -0,0 +1,18 @@
+Text progressbar library for python.
+
+This library provides a text mode progressbar. This is typically used
+to display the progress of a long running operation, providing a
+visual cue that processing is underway.
+
+The ProgressBar class manages the progress, and the format of the line
+is given by a number of widgets. A widget is an object that may
+display differently depending on the state of the progress. There are
+three types of widget:
+- a string, which always shows itself;
+- a ProgressBarWidget, which may return a different value every time
+its update method is called; and
+- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
+expands to fill the remaining width of the line.
+
+The progressbar module is very easy to use, yet very powerful, and
+automatically supports features like auto-resizing when available.
diff --git a/scripts/external_libs/progressbar-2.2/progressbar.py b/scripts/external_libs/progressbar-2.2/progressbar.py
new file mode 100755
index 00000000..07981a34
--- /dev/null
+++ b/scripts/external_libs/progressbar-2.2/progressbar.py
@@ -0,0 +1,381 @@
+#!/usr/bin/python
+# -*- coding: iso-8859-1 -*-
+#
+# progressbar - Text progressbar library for python.
+# Copyright (c) 2005 Nilton Volpato
+#
+# This library is free software; you can redistribute it and/or
+# modify it under the terms of the GNU Lesser General Public
+# License as published by the Free Software Foundation; either
+# version 2.1 of the License, or (at your option) any later version.
+#
+# This library is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
+# Lesser General Public License for more details.
+#
+# You should have received a copy of the GNU Lesser General Public
+# License along with this library; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
+
+
+"""Text progressbar library for python.
+
+This library provides a text mode progressbar. This is typically used
+to display the progress of a long running operation, providing a
+visual cue that processing is underway.
+
+The ProgressBar class manages the progress, and the format of the line
+is given by a number of widgets. A widget is an object that may
+display differently depending on the state of the progress. There are
+three types of widget:
+- a string, which always shows itself;
+- a ProgressBarWidget, which may return a different value every time
+its update method is called; and
+- a ProgressBarWidgetHFill, which is like ProgressBarWidget, except it
+expands to fill the remaining width of the line.
+
+The progressbar module is very easy to use, yet very powerful, and
+automatically supports features like auto-resizing when available.
+"""
+
+__author__ = "Nilton Volpato"
+__author_email__ = "first-name dot last-name @ gmail.com"
+__date__ = "2006-05-07"
+__version__ = "2.2"
+
+# Changelog
+#
+# 2006-05-07: v2.2 fixed bug in windows
+# 2005-12-04: v2.1 autodetect terminal width, added start method
+# 2005-12-04: v2.0 everything is now a widget (wow!)
+# 2005-12-03: v1.0 rewrite using widgets
+# 2005-06-02: v0.5 rewrite
+# 2004-??-??: v0.1 first version
+
+
+import sys, time
+from array import array
+try:
+ from fcntl import ioctl
+ import termios
+except ImportError:
+ pass
+import signal
+
+class ProgressBarWidget(object):
+ """This is an element of ProgressBar formatting.
+
+ The ProgressBar object will call its update method when an update
+ is needed. Its size may change between calls, but the results will
+ not be good if the size changes drastically and repeatedly.
+ """
+ def update(self, pbar):
+ """Returns the string representing the widget.
+
+ The parameter pbar is a reference to the calling ProgressBar,
+ where one can access attributes of the class describing how
+ the update must be made.
+
+ At least this function must be overridden."""
+ pass
+
+class ProgressBarWidgetHFill(object):
+ """This is a variable width element of ProgressBar formatting.
+
+ The ProgressBar object will call its update method, passing the
+ width this object must fill. This is like TeX \\hfill: it will
+ expand to fill the line. You can use more than one in the same
+ line, and they will all have the same width, and together will
+ fill the line.
+ """
+ def update(self, pbar, width):
+ """Returns the string representing the widget.
+
+ The parameter pbar is a reference to the calling ProgressBar,
+ where one can access attributes of the class describing how
+ the update must be made. The parameter width is the total
+ horizontal width the widget must have.
+
+ At least this function must be overridden."""
+ pass
+
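+# A minimal HFill widget sketch (illustrative, not part of the library):
+#
+# class Filler(ProgressBarWidgetHFill):
+#     def update(self, pbar, width):
+#         return '.' * width  # consume exactly the width it is given
+#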
+
+class ETA(ProgressBarWidget):
+ "Widget for the Estimated Time of Arrival"
+ def format_time(self, seconds):
+ return time.strftime('%H:%M:%S', time.gmtime(seconds))
+ def update(self, pbar):
+ if pbar.currval == 0:
+ return 'ETA: --:--:--'
+ elif pbar.finished:
+ return 'Time: %s' % self.format_time(pbar.seconds_elapsed)
+ else:
+ elapsed = pbar.seconds_elapsed
+ eta = elapsed * pbar.maxval / pbar.currval - elapsed
+ return 'ETA: %s' % self.format_time(eta)
+
+class FileTransferSpeed(ProgressBarWidget):
+ "Widget for showing the transfer speed (useful for file transfers)."
+ def __init__(self):
+ self.fmt = '%6.2f %s'
+ self.units = ['B','K','M','G','T','P']
+ def update(self, pbar):
+ if pbar.seconds_elapsed < 2e-6: # treat as zero elapsed time
+ bps = 0.0
+ else:
+ bps = float(pbar.currval) / pbar.seconds_elapsed
+ spd = bps
+ for u in self.units:
+ if spd < 1000:
+ break
+ spd /= 1000
+ return self.fmt % (spd, u+'/s')
+
+class RotatingMarker(ProgressBarWidget):
+ "A rotating marker for filling the bar of progress."
+ def __init__(self, markers='|/-\\'):
+ self.markers = markers
+ self.curmark = -1
+ def update(self, pbar):
+ if pbar.finished:
+ return self.markers[0]
+ self.curmark = (self.curmark + 1)%len(self.markers)
+ return self.markers[self.curmark]
+
+class Percentage(ProgressBarWidget):
+ "Just the percentage done."
+ def update(self, pbar):
+ return '%3d%%' % pbar.percentage()
+
+class Bar(ProgressBarWidgetHFill):
+ "The bar of progress. It will strech to fill the line."
+ def __init__(self, marker='#', left='|', right='|'):
+ self.marker = marker
+ self.left = left
+ self.right = right
+ def _format_marker(self, pbar):
+ if isinstance(self.marker, (str, unicode)):
+ return self.marker
+ else:
+ return self.marker.update(pbar)
+ def update(self, pbar, width):
+ percent = pbar.percentage()
+ cwidth = width - len(self.left) - len(self.right)
+ marked_width = int(percent * cwidth / 100)
+ m = self._format_marker(pbar)
+ bar = (self.left + (m*marked_width).ljust(cwidth) + self.right)
+ return bar
+
+class ReverseBar(Bar):
+ "The reverse bar of progress, or bar of regress. :)"
+ def update(self, pbar, width):
+ percent = pbar.percentage()
+ cwidth = width - len(self.left) - len(self.right)
+ marked_width = int(percent * cwidth / 100)
+ m = self._format_marker(pbar)
+ bar = (self.left + (m*marked_width).rjust(cwidth) + self.right)
+ return bar
+
+default_widgets = [Percentage(), ' ', Bar()]
+class ProgressBar(object):
+ """This is the ProgressBar class, it updates and prints the bar.
+
+ The term_width parameter may be an integer, or None, in which case
+ the terminal width is guessed; if that fails it defaults to 79 columns.
+
+ The simple use is like this:
+ >>> pbar = ProgressBar().start()
+ >>> for i in xrange(100):
+ ... # do something
+ ... pbar.update(i+1)
+ ...
+ >>> pbar.finish()
+
+ But anything you want to do is possible (well, almost anything).
+ You can supply different widgets of any type in any order. And you
+ can even write your own widgets! There are many widgets already
+ shipped and you should experiment with them.
+
+ When implementing a widget update method you may access any
+ attribute or function of the ProgressBar object calling the
+ widget's update method. The most important attributes you would
+ like to access are:
+ - currval: current value of the progress, 0 <= currval <= maxval
+ - maxval: maximum (and final) value of the progress
+ - finished: True if the bar has finished (reached 100%), False otherwise
+ - start_time: first time update() method of ProgressBar was called
+ - seconds_elapsed: seconds elapsed since start_time
+ - percentage(): percentage of the progress (this is a method)
+ """
+ def __init__(self, maxval=100, widgets=default_widgets, term_width=None,
+ fd=sys.stderr):
+ assert maxval > 0
+ self.maxval = maxval
+ self.widgets = widgets
+ self.fd = fd
+ self.signal_set = False
+ if term_width is None:
+ try:
+ self.handle_resize(None,None)
+ signal.signal(signal.SIGWINCH, self.handle_resize)
+ self.signal_set = True
+ except:
+ self.term_width = 79
+ else:
+ self.term_width = term_width
+
+ self.currval = 0
+ self.finished = False
+ self.prev_percentage = -1
+ self.start_time = None
+ self.seconds_elapsed = 0
+
+ def handle_resize(self, signum, frame):
+ h,w=array('h', ioctl(self.fd,termios.TIOCGWINSZ,'\0'*8))[:2]
+ self.term_width = w
+
+ def percentage(self):
+ "Returns the percentage of the progress."
+ return self.currval*100.0 / self.maxval
+
+ def _format_widgets(self):
+ r = []
+ hfill_inds = []
+ num_hfill = 0
+ currwidth = 0
+ for i, w in enumerate(self.widgets):
+ if isinstance(w, ProgressBarWidgetHFill):
+ r.append(w)
+ hfill_inds.append(i)
+ num_hfill += 1
+ elif isinstance(w, (str, unicode)):
+ r.append(w)
+ currwidth += len(w)
+ else:
+ weval = w.update(self)
+ currwidth += len(weval)
+ r.append(weval)
+ for iw in hfill_inds:
+ r[iw] = r[iw].update(self, (self.term_width-currwidth)/num_hfill)
+ return r
+
+ def _format_line(self):
+ return ''.join(self._format_widgets()).ljust(self.term_width)
+
+ def _need_update(self):
+ return int(self.percentage()) != int(self.prev_percentage)
+
+ def update(self, value):
+ "Updates the progress bar to a new value."
+ assert 0 <= value <= self.maxval
+ self.currval = value
+ if not self._need_update() or self.finished:
+ return
+ if not self.start_time:
+ self.start_time = time.time()
+ self.seconds_elapsed = time.time() - self.start_time
+ self.prev_percentage = self.percentage()
+ if value != self.maxval:
+ self.fd.write(self._format_line() + '\r')
+ else:
+ self.finished = True
+ self.fd.write(self._format_line() + '\n')
+
+ def start(self):
+ """Start measuring time, and prints the bar at 0%.
+
+ It returns self so you can use it like this:
+ >>> pbar = ProgressBar().start()
+ >>> for i in xrange(100):
+ ... # do something
+ ... pbar.update(i+1)
+ ...
+ >>> pbar.finish()
+ """
+ self.update(0)
+ return self
+
+ def finish(self):
+ """Used to tell the progress is finished."""
+ self.update(self.maxval)
+ if self.signal_set:
+ signal.signal(signal.SIGWINCH, signal.SIG_DFL)
+
+
+
+
+
+
+if __name__=='__main__':
+ import os
+
+ def example1():
+ widgets = ['Test: ', Percentage(), ' ', Bar(marker=RotatingMarker()),
+ ' ', ETA(), ' ', FileTransferSpeed()]
+ pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
+ for i in range(1000000):
+ # do something
+ pbar.update(10*i+1)
+ pbar.finish()
+ print
+
+ def example2():
+ class CrazyFileTransferSpeed(FileTransferSpeed):
+ "It's bigger between 45 and 80 percent"
+ def update(self, pbar):
+ if 45 < pbar.percentage() < 80:
+ return 'Bigger Now ' + FileTransferSpeed.update(self,pbar)
+ else:
+ return FileTransferSpeed.update(self,pbar)
+
+ widgets = [CrazyFileTransferSpeed(),' <<<', Bar(), '>>> ', Percentage(),' ', ETA()]
+ pbar = ProgressBar(widgets=widgets, maxval=10000000)
+ # maybe do something
+ pbar.start()
+ for i in range(2000000):
+ # do something
+ pbar.update(5*i+1)
+ pbar.finish()
+ print
+
+ def example3():
+ widgets = [Bar('>'), ' ', ETA(), ' ', ReverseBar('<')]
+ pbar = ProgressBar(widgets=widgets, maxval=10000000).start()
+ for i in range(1000000):
+ # do something
+ pbar.update(10*i+1)
+ pbar.finish()
+ print
+
+ def example4():
+ widgets = ['Test: ', Percentage(), ' ',
+ Bar(marker='0',left='[',right=']'),
+ ' ', ETA(), ' ', FileTransferSpeed()]
+ pbar = ProgressBar(widgets=widgets, maxval=500)
+ pbar.start()
+ for i in range(100,500+1,50):
+ time.sleep(0.2)
+ pbar.update(i)
+ pbar.finish()
+ print
+
+
+ example1()
+ example2()
+ example3()
+ example4()
+
+ # def timedProgressBar(time_in_secs):
+ # widgets = ['Running T-Rex: ', Percentage(), ' ',
+ # Bar(marker='>',left='[',right=']'),
+ # ' ', ETA()]
+ # pbar = ProgressBar(widgets=widgets, maxval=time_in_secs*2)
+ # pbar.start()
+ # for i in range(0, time_in_secs*2 + 1):
+ # time.sleep(0.5)
+ # pbar.update(i)
+ # pbar.finish()
+ # print
+
+ # timedProgressBar(20)
diff --git a/scripts/external_libs/progressbar-2.2/setup.py b/scripts/external_libs/progressbar-2.2/setup.py
new file mode 100755
index 00000000..97e29e15
--- /dev/null
+++ b/scripts/external_libs/progressbar-2.2/setup.py
@@ -0,0 +1,31 @@
+#!/usr/bin/python
+
+import os
+from distutils.core import setup
+import progressbar
+
+if os.stat('progressbar.py').st_mtime > os.stat('README').st_mtime:
+ file('README','w').write(progressbar.__doc__)
+
+setup(
+ name = 'progressbar',
+ version = progressbar.__version__,
+ description = progressbar.__doc__.splitlines()[0],
+ long_description = progressbar.__doc__,
+ maintainer = progressbar.__author__,
+ maintainer_email = progressbar.__author_email__,
+ url = 'http://qubit.ic.unicamp.br/~nilton',
+ py_modules = ['progressbar'],
+ classifiers = [
+ 'Development Status :: 5 - Production/Stable',
+ 'Environment :: Console',
+ 'Intended Audience :: Developers',
+ 'Intended Audience :: System Administrators',
+ 'License :: OSI Approved :: GNU Library or Lesser General Public License (LGPL)',
+ 'Operating System :: OS Independent',
+ 'Programming Language :: Python',
+ 'Topic :: Software Development :: Libraries',
+ 'Topic :: Software Development :: User Interfaces',
+ 'Topic :: Terminals',
+ ],
+)
diff --git a/scripts/external_libs/rednose-0.4.1/rednose.py b/scripts/external_libs/rednose-0.4.1/rednose.py
new file mode 100755
index 00000000..1ff892ad
--- /dev/null
+++ b/scripts/external_libs/rednose-0.4.1/rednose.py
@@ -0,0 +1,387 @@
+# Copyright (c) 2009, Tim Cuthbertson # All rights reserved.
+#
+# Redistribution and use in source and binary forms, with or without
+# modification, are permitted provided that the following conditions
+# are met:
+#
+# * Redistributions of source code must retain the above copyright
+# notice, this list of conditions and the following disclaimer.
+# * Redistributions in binary form must reproduce the above
+# copyright notice, this list of conditions and the following
+# disclaimer in the documentation and/or other materials provided
+# with the distribution.
+# * Neither the name of the organisation nor the names of its
+# contributors may be used to endorse or promote products derived
+# from this software without specific prior written permission.
+#
+# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
+# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
+# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
+# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
+# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
+# OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
+# AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
+# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY
+# WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
+# POSSIBILITY OF SUCH DAMAGE.
+
+from __future__ import print_function
+import os
+import sys
+import linecache
+import re
+import time
+
+import nose
+
+import termstyle
+
+failure = 'FAILED'
+error = 'ERROR'
+success = 'passed'
+skip = 'skipped'
+line_length = 77
+
+PY3 = sys.version_info[0] >= 3
+if PY3:
+ to_unicode = str
+else:
+ def to_unicode(s):
+ try:
+ return unicode(s)
+ except UnicodeDecodeError:
+ return unicode(repr(str(s)))
+
+BLACKLISTED_WRITERS = [
+ 'nose[\\/]result\\.pyc?$',
+ 'unittest[\\/]runner\\.pyc?$'
+]
+REDNOSE_DEBUG = False
+
+
+class RedNose(nose.plugins.Plugin):
+ env_opt = 'NOSE_REDNOSE'
+ env_opt_color = 'NOSE_REDNOSE_COLOR'
+ score = 199 # just under the `coverage` module
+
+ def __init__(self, *args):
+ super(RedNose, self).__init__(*args)
+ self.reports = []
+ self.error = self.success = self.failure = self.skip = 0
+ self.total = 0
+ self.stream = None
+ self.verbose = False
+ self.enabled = False
+ self.tree = False
+
+ def options(self, parser, env=os.environ):
+ global REDNOSE_DEBUG
+ rednose_on = bool(env.get(self.env_opt, False))
+ rednose_color = env.get(self.env_opt_color, 'auto')
+ REDNOSE_DEBUG = bool(env.get('REDNOSE_DEBUG', False))
+
+ parser.add_option(
+ "--rednose",
+ action="store_true",
+ default=rednose_on,
+ dest="rednose",
+ help="enable colour output (alternatively, set $%s=1)" % (self.env_opt,)
+ )
+ parser.add_option(
+ "--no-color",
+ action="store_false",
+ dest="rednose",
+ help="disable colour output"
+ )
+ parser.add_option(
+ "--force-color",
+ action="store_const",
+ dest='rednose_color',
+ default=rednose_color,
+ const='force',
+ help="force colour output when not using a TTY (alternatively, set $%s=force)" % (self.env_opt_color,)
+ )
+ parser.add_option(
+ "--immediate",
+ action="store_true",
+ default=False,
+ help="print errors and failures as they happen, as well as at the end"
+ )
+
+ def configure(self, options, conf):
+ if options.rednose:
+ self.enabled = True
+ termstyle_init = {
+ 'force': termstyle.enable,
+ 'off': termstyle.disable
+ }.get(options.rednose_color, termstyle.auto)
+ termstyle_init()
+
+ self.immediate = options.immediate
+ self.verbose = options.verbosity >= 2
+
+ def begin(self):
+ self.start_time = time.time()
+ self._in_test = False
+
+ def _format_test_name(self, test):
+ return test.shortDescription() or to_unicode(test)
+
+ def prepareTestResult(self, result):
+ result.stream = FilteringStream(self.stream, BLACKLISTED_WRITERS)
+
+ def beforeTest(self, test):
+ self._in_test = True
+ if self.verbose:
+ self._out(self._format_test_name(test) + ' ... ')
+
+ def afterTest(self, test):
+ if self._in_test:
+ self.addSkip()
+
+ def _print_test(self, type_, color):
+ self.total += 1
+ if self.verbose:
+ self._outln(color(type_))
+ else:
+ if type_ == failure:
+ short_ = 'F'
+ elif type_ == error:
+ short_ = 'X'
+ elif type_ == skip:
+ short_ = '-'
+ else:
+ short_ = '.'
+ self._out(color(short_))
+ if self.total % line_length == 0:
+ self._outln()
+ self._in_test = False
+
+ def _add_report(self, report):
+ failure_type, test, err = report
+ self.reports.append(report)
+ if self.immediate:
+ self._outln()
+ self._report_test(len(self.reports), *report)
+
+ def addFailure(self, test, err):
+ self.failure += 1
+ self._add_report((failure, test, err))
+ self._print_test(failure, termstyle.red)
+
+ def addError(self, test, err):
+ if err[0].__name__ == 'SkipTest':
+ self.addSkip(test, err)
+ return
+ self.error += 1
+ self._add_report((error, test, err))
+ self._print_test(error, termstyle.yellow)
+
+ def addSuccess(self, test):
+ self.success += 1
+ self._print_test(success, termstyle.green)
+
+ def addSkip(self, test=None, err=None):
+ self.skip += 1
+ self._print_test(skip, termstyle.blue)
+
+ def setOutputStream(self, stream):
+ self.stream = stream
+
+ def report(self, stream):
+ """report on all registered failures and errors"""
+ self._outln()
+ if self.immediate:
+ for x in range(0, 5):
+ self._outln()
+ report_num = 0
+ if len(self.reports) > 0:
+ for report_num, report in enumerate(self.reports):
+ self._report_test(report_num + 1, *report)
+ self._outln()
+
+ self._summarize()
+
+ def _summarize(self):
+ """summarize all tests - the number of failures, errors and successes"""
+ self._line(termstyle.black)
+ self._out("%s test%s run in %0.1f seconds" % (
+ self.total,
+ self._plural(self.total),
+ time.time() - self.start_time))
+ if self.total > self.success:
+ self._outln(". ")
+ additionals = []
+ if self.failure > 0:
+ additionals.append(termstyle.red("%s FAILED" % (
+ self.failure,)))
+ if self.error > 0:
+ additionals.append(termstyle.yellow("%s error%s" % (
+ self.error,
+ self._plural(self.error) )))
+ if self.skip > 0:
+ additionals.append(termstyle.blue("%s skipped" % (
+ self.skip)))
+ self._out(', '.join(additionals))
+
+ self._out(termstyle.green(" (%s test%s passed)" % (
+ self.success,
+ self._plural(self.success) )))
+ self._outln()
+
+ def _report_test(self, report_num, type_, test, err):
+ """report the results of a single (failing or errored) test"""
+ self._line(termstyle.black)
+ self._out("%s) " % (report_num))
+ if type_ == failure:
+ color = termstyle.red
+ self._outln(color('FAIL: %s' % (self._format_test_name(test),)))
+ else:
+ color = termstyle.yellow
+ self._outln(color('ERROR: %s' % (self._format_test_name(test),)))
+
+ exc_type, exc_instance, exc_trace = err
+
+ self._outln()
+ self._outln(self._fmt_traceback(exc_trace))
+ self._out(color(' ', termstyle.bold(color(exc_type.__name__)), ": "))
+ self._outln(self._fmt_message(exc_instance, color))
+ self._outln()
+
+ def _relative_path(self, path):
+ """
+ If path is a child of the current working directory, the relative
+ path is returned surrounded by bold xterm escape sequences.
+ If path is not a child of the working directory, path is returned
+ """
+ try:
+ here = os.path.abspath(os.path.realpath(os.getcwd()))
+ fullpath = os.path.abspath(os.path.realpath(path))
+ except OSError:
+ return path
+ if fullpath.startswith(here):
+ return termstyle.bold(fullpath[len(here)+1:])
+ return path
+
+ def _file_line(self, tb):
+ """formats the file / lineno / function line of a traceback element"""
+ prefix = "file://"
+ prefix = ""
+
+ f = tb.tb_frame
+ if '__unittest' in f.f_globals:
+ # this is the magical flag that prevents unittest internal
+ # code from junking up the stacktrace
+ return None
+
+ filename = f.f_code.co_filename
+ lineno = tb.tb_lineno
+ linecache.checkcache(filename)
+ function_name = f.f_code.co_name
+
+ line_contents = linecache.getline(filename, lineno, f.f_globals).strip()
+
+ return " %s line %s in %s\n %s" % (
+ termstyle.blue(prefix, self._relative_path(filename)),
+ lineno,
+ termstyle.cyan(function_name),
+ line_contents)
+
+ def _fmt_traceback(self, trace):
+ """format a traceback"""
+ ret = []
+ ret.append(termstyle.default(" Traceback (most recent call last):"))
+ current_trace = trace
+ while current_trace is not None:
+ line = self._file_line(current_trace)
+ if line is not None:
+ ret.append(line)
+ current_trace = current_trace.tb_next
+ return '\n'.join(ret)
+
+ def _fmt_message(self, exception, color):
+ orig_message_lines = to_unicode(exception).splitlines()
+
+ if len(orig_message_lines) == 0:
+ return ''
+ message_lines = [color(orig_message_lines[0])]
+ for line in orig_message_lines[1:]:
+ match = re.match('^---.* begin captured stdout.*----$', line)
+ if match:
+ color = None
+ message_lines.append('')
+ line = ' ' + line
+ message_lines.append(color(line) if color is not None else line)
+ return '\n'.join(message_lines)
+
+ def _out(self, msg='', newline=False):
+ self.stream.write(msg)
+ if newline:
+ self.stream.write('\n')
+
+ def _outln(self, msg=''):
+ self._out(msg, True)
+
+ def _plural(self, num):
+ return '' if num == 1 else 's'
+
+ def _line(self, color=termstyle.reset, char='-'):
+ """
+ print a line of separator characters (default '-')
+ in the given colour (default black)
+ """
+ self._outln(color(char * line_length))
+
+
+import traceback
+import sys
+
+
+class FilteringStream(object):
+ """
+ A wrapper for a stream that will filter
+ calls to `write` and `writeln` to ignore calls
+ from blacklisted callers
+ (implemented as a regex on their filename, according
+ to traceback.extract_stack())
+
+ It's super hacky, but there seems to be no other way
+ to suppress nose's default output
+ """
+ def __init__(self, stream, excludes):
+ self.__stream = stream
+ self.__excludes = list(map(re.compile, excludes))
+
+ def __should_filter(self):
+ try:
+ stack = traceback.extract_stack(limit=3)[0]
+ filename = stack[0]
+ pattern_matches_filename = lambda pattern: pattern.search(filename)
+ should_filter = any(map(pattern_matches_filename, self.__excludes))
+ if REDNOSE_DEBUG:
+ print("REDNOSE_DEBUG: got write call from %s, should_filter = %s" % (
+ filename, should_filter), file=sys.stderr)
+ return should_filter
+ except StandardError as e:
+ if REDNOSE_DEBUG:
+ print("\nError in rednose filtering: %s" % (e,), file=sys.stderr)
+ traceback.print_exc(file=sys.stderr)
+ return False
+
+ def write(self, *a):
+ if self.__should_filter():
+ return
+ return self.__stream.write(*a)
+
+ def writeln(self, *a):
+ if self.__should_filter():
+ return
+ return self.__stream.writeln(*a)
+
+ # pass non-known methods through to self.__stream
+ def __getattr__(self, name):
+ if REDNOSE_DEBUG:
+ print("REDNOSE_DEBUG: getting attr %s" % (name,), file=sys.stderr)
+ return getattr(self.__stream, name)
diff --git a/scripts/external_libs/rednose-0.4.1/setup.py b/scripts/external_libs/rednose-0.4.1/setup.py
new file mode 100755
index 00000000..34cded4b
--- /dev/null
+++ b/scripts/external_libs/rednose-0.4.1/setup.py
@@ -0,0 +1,29 @@
+#!/usr/bin/env python
+
+## NOTE: ##
+## this setup.py was generated by zero2pypi:
+## http://gfxmonk.net/dist/0install/zero2pypi.xml
+
+from setuptools import *
+setup(
+ packages = find_packages(exclude=['test', 'test.*']),
+ description='coloured output for nosetests',
+ entry_points={'nose.plugins.0.10': ['NOSETESTS_PLUGINS = rednose:RedNose']},
+ install_requires=['setuptools', 'python-termstyle >=0.1.7'],
+ long_description="\n**Note**: This package has been built automatically by\n`zero2pypi <http://gfxmonk.net/dist/0install/zero2pypi.xml>`_.\nIf possible, you should use the zero-install feed instead:\nhttp://gfxmonk.net/dist/0install/rednose.xml\n\n----------------\n\n=========\nrednose\n=========\n\nrednose is a `nosetests`_\nplugin for adding colour (and readability) to nosetest console results.\n\nInstallation:\n-------------\n::\n\n\teasy_install rednose\n\t\nor from the source::\n\n\t./setup.py develop\n\nUsage:\n------\n::\n\n\tnosetests --rednose\n\nor::\n\n\texport NOSE_REDNOSE=1\n\tnosetests\n\nRednose by default uses auto-colouring, which will only use\ncolour if you're running it on a terminal (i.e not piping it\nto a file). To control colouring, use one of::\n\n\tnosetests --rednose --force-color\n\tnosetests --no-color\n\n(you can also control this by setting the environment variable NOSE_REDNOSE_COLOR to 'force' or 'no')\n\n.. _nosetests: http://somethingaboutorange.com/mrl/projects/nose/\n",
+ name='rednose',
+ py_modules=['rednose'],
+ url='http://gfxmonk.net/dist/0install/rednose.xml',
+ version='0.4.1',
+classifiers=[
+ "License :: OSI Approved :: BSD License",
+ "Programming Language :: Python",
+ "Programming Language :: Python :: 3",
+ "Development Status :: 4 - Beta",
+ "Intended Audience :: Developers",
+ "Topic :: Software Development :: Libraries :: Python Modules",
+ "Topic :: Software Development :: Testing",
+ ],
+ keywords='test nosetests nose nosetest output colour console',
+ license='BSD',
+)
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/__init__.py b/scripts/external_libs/yaml-3.11/yaml/__init__.py
index c30973a3..76e19e13 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/__init__.py
+++ b/scripts/external_libs/yaml-3.11/yaml/__init__.py
@@ -8,21 +8,35 @@ from nodes import *
from loader import *
from dumper import *
+__version__ = '3.11'
+
+try:
+ from cyaml import *
+ __with_libyaml__ = True
+except ImportError:
+ __with_libyaml__ = False
+
def scan(stream, Loader=Loader):
"""
Scan a YAML stream and produce scanning tokens.
"""
loader = Loader(stream)
- while loader.check_token():
- yield loader.get_token()
+ try:
+ while loader.check_token():
+ yield loader.get_token()
+ finally:
+ loader.dispose()
def parse(stream, Loader=Loader):
"""
Parse a YAML stream and produce parsing events.
"""
loader = Loader(stream)
- while loader.check_event():
- yield loader.get_event()
+ try:
+ while loader.check_event():
+ yield loader.get_event()
+ finally:
+ loader.dispose()
def compose(stream, Loader=Loader):
"""
@@ -30,26 +44,22 @@ def compose(stream, Loader=Loader):
and produce the corresponding representation tree.
"""
loader = Loader(stream)
- if loader.check_node():
- return loader.get_node()
+ try:
+ return loader.get_single_node()
+ finally:
+ loader.dispose()
def compose_all(stream, Loader=Loader):
"""
Parse all YAML documents in a stream
- and produce corresponsing representation trees.
- """
- loader = Loader(stream)
- while loader.check_node():
- yield loader.get_node()
-
-def load_all(stream, Loader=Loader):
- """
- Parse all YAML documents in a stream
- and produce corresponding Python objects.
+ and produce corresponding representation trees.
"""
loader = Loader(stream)
- while loader.check_data():
- yield loader.get_data()
+ try:
+ while loader.check_node():
+ yield loader.get_node()
+ finally:
+ loader.dispose()
def load(stream, Loader=Loader):
"""
@@ -57,16 +67,22 @@ def load(stream, Loader=Loader):
and produce the corresponding Python object.
"""
loader = Loader(stream)
- if loader.check_data():
- return loader.get_data()
+ try:
+ return loader.get_single_data()
+ finally:
+ loader.dispose()
-def safe_load_all(stream):
+def load_all(stream, Loader=Loader):
"""
Parse all YAML documents in a stream
and produce corresponding Python objects.
- Resolve only basic YAML tags.
"""
- return load_all(stream, SafeLoader)
+ loader = Loader(stream)
+ try:
+ while loader.check_data():
+ yield loader.get_data()
+ finally:
+ loader.dispose()
def safe_load(stream):
"""
@@ -76,6 +92,14 @@ def safe_load(stream):
"""
return load(stream, SafeLoader)
+def safe_load_all(stream):
+ """
+ Parse all YAML documents in a stream
+ and produce corresponding Python objects.
+ Resolve only basic YAML tags.
+ """
+ return load_all(stream, SafeLoader)
+
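+# Typical use (illustrative): prefer safe_load for untrusted input:
+# data = yaml.safe_load(document_string)
+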
def emit(events, stream=None, Dumper=Dumper,
canonical=None, indent=None, width=None,
allow_unicode=None, line_break=None):
@@ -85,16 +109,16 @@ def emit(events, stream=None, Dumper=Dumper,
"""
getvalue = None
if stream is None:
- try:
- from cStringIO import StringIO
- except ImportError:
- from StringIO import StringIO
+ from StringIO import StringIO
stream = StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break)
- for event in events:
- dumper.emit(event)
+ try:
+ for event in events:
+ dumper.emit(event)
+ finally:
+ dumper.dispose()
if getvalue:
return getvalue()
@@ -109,20 +133,23 @@ def serialize_all(nodes, stream=None, Dumper=Dumper,
"""
getvalue = None
if stream is None:
- try:
- from cStringIO import StringIO
- except ImportError:
+ if encoding is None:
from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
stream = StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, canonical=canonical, indent=indent, width=width,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end)
- dumper.open()
- for node in nodes:
- dumper.serialize(node)
- dumper.close()
+ try:
+ dumper.open()
+ for node in nodes:
+ dumper.serialize(node)
+ dumper.close()
+ finally:
+ dumper.dispose()
if getvalue:
return getvalue()
@@ -145,10 +172,10 @@ def dump_all(documents, stream=None, Dumper=Dumper,
"""
getvalue = None
if stream is None:
- try:
- from cStringIO import StringIO
- except ImportError:
+ if encoding is None:
from StringIO import StringIO
+ else:
+ from cStringIO import StringIO
stream = StringIO()
getvalue = stream.getvalue
dumper = Dumper(stream, default_style=default_style,
@@ -157,10 +184,13 @@ def dump_all(documents, stream=None, Dumper=Dumper,
allow_unicode=allow_unicode, line_break=line_break,
encoding=encoding, version=version, tags=tags,
explicit_start=explicit_start, explicit_end=explicit_end)
- dumper.open()
- for data in documents:
- dumper.represent(data)
- dumper.close()
+ try:
+ dumper.open()
+ for data in documents:
+ dumper.represent(data)
+ dumper.close()
+ finally:
+ dumper.dispose()
if getvalue:
return getvalue()
@@ -260,6 +290,7 @@ class YAMLObject(object):
"""
__metaclass__ = YAMLObjectMetaclass
+ __slots__ = () # no direct instantiation, so allow immutable subclasses
yaml_loader = Loader
yaml_dumper = Dumper
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/composer.py b/scripts/external_libs/yaml-3.11/yaml/composer.py
index d256b054..06e5ac78 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/composer.py
+++ b/scripts/external_libs/yaml-3.11/yaml/composer.py
@@ -8,12 +8,16 @@ from nodes import *
class ComposerError(MarkedYAMLError):
pass
-class Composer:
+class Composer(object):
def __init__(self):
self.anchors = {}
def check_node(self):
+ # Drop the STREAM-START event.
+ if self.check_event(StreamStartEvent):
+ self.get_event()
+
        # Are there more documents available?
return not self.check_event(StreamEndEvent)
@@ -22,17 +26,28 @@ class Composer:
if not self.check_event(StreamEndEvent):
return self.compose_document()
- def __iter__(self):
- # Iterator protocol.
- while not self.check_event(StreamEndEvent):
- yield self.compose_document()
+ def get_single_node(self):
+ # Drop the STREAM-START event.
+ self.get_event()
- def compose_document(self):
+ # Compose a document if the stream is not empty.
+ document = None
+ if not self.check_event(StreamEndEvent):
+ document = self.compose_document()
- # Drop the STREAM-START event.
- if self.check_event(StreamStartEvent):
- self.get_event()
+ # Ensure that the stream contains no more documents.
+ if not self.check_event(StreamEndEvent):
+ event = self.get_event()
+ raise ComposerError("expected a single document in the stream",
+ document.start_mark, "but found another document",
+ event.start_mark)
+ # Drop the STREAM-END event.
+ self.get_event()
+
+ return document
+
+ def compose_document(self):
# Drop the DOCUMENT-START event.
self.get_event()
@@ -42,7 +57,7 @@ class Composer:
# Drop the DOCUMENT-END event.
self.get_event()
- self.complete_anchors = {}
+ self.anchors = {}
return node
def compose_node(self, parent, index):
@@ -104,19 +119,20 @@ class Composer:
tag = start_event.tag
if tag is None or tag == u'!':
tag = self.resolve(MappingNode, None, start_event.implicit)
- node = MappingNode(tag, {},
+ node = MappingNode(tag, [],
start_event.start_mark, None,
flow_style=start_event.flow_style)
if anchor is not None:
self.anchors[anchor] = node
while not self.check_event(MappingEndEvent):
- key_event = self.peek_event()
+ #key_event = self.peek_event()
item_key = self.compose_node(node, None)
- if item_key in node.value:
- raise ComposerError("while composing a mapping", start_event.start_mark,
- "found duplicate key", key_event.start_mark)
+ #if item_key in node.value:
+ # raise ComposerError("while composing a mapping", start_event.start_mark,
+ # "found duplicate key", key_event.start_mark)
item_value = self.compose_node(node, item_key)
- node.value[item_key] = item_value
+ #node.value[item_key] = item_value
+ node.value.append((item_key, item_value))
end_event = self.get_event()
node.end_mark = end_event.end_mark
return node
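
The practical effect of switching MappingNode.value from a dict to a list of (key_node, value_node) pairs: composition preserves document order and leaves duplicate-key policy to the constructor. A sketch:

    import yaml

    node = yaml.compose('b: 2\na: 1\n')
    # node.value is a list of (key_node, value_node) pairs in document order
    for key_node, value_node in node.value:
        print key_node.value, value_node.value    # b 2, then a 1
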
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/constructor.py b/scripts/external_libs/yaml-3.11/yaml/constructor.py
index 57ad53d1..635faac3 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/constructor.py
+++ b/scripts/external_libs/yaml-3.11/yaml/constructor.py
@@ -4,25 +4,15 @@ __all__ = ['BaseConstructor', 'SafeConstructor', 'Constructor',
from error import *
from nodes import *
-from composer import *
-try:
- import datetime
- datetime_available = True
-except ImportError:
- datetime_available = False
+import datetime
-try:
- set
-except NameError:
- from sets import Set as set
-
-import binascii, re, sys
+import binascii, re, sys, types
class ConstructorError(MarkedYAMLError):
pass
-class BaseConstructor(Composer):
+class BaseConstructor(object):
yaml_constructors = {}
yaml_multi_constructors = {}
@@ -30,6 +20,8 @@ class BaseConstructor(Composer):
def __init__(self):
self.constructed_objects = {}
self.recursive_objects = {}
+ self.state_generators = []
+ self.deep_construct = False
def check_data(self):
        # Are there more documents available?
@@ -40,135 +32,117 @@ class BaseConstructor(Composer):
if self.check_node():
return self.construct_document(self.get_node())
- def __iter__(self):
- # Iterator protocol.
- while self.check_node():
- yield self.construct_document(self.get_node())
+ def get_single_data(self):
+ # Ensure that the stream contains a single document and construct it.
+ node = self.get_single_node()
+ if node is not None:
+ return self.construct_document(node)
+ return None
def construct_document(self, node):
data = self.construct_object(node)
+ while self.state_generators:
+ state_generators = self.state_generators
+ self.state_generators = []
+ for generator in state_generators:
+ for dummy in generator:
+ pass
self.constructed_objects = {}
self.recursive_objects = {}
+ self.deep_construct = False
return data
- def construct_object(self, node):
+ def construct_object(self, node, deep=False):
if node in self.constructed_objects:
return self.constructed_objects[node]
+ if deep:
+ old_deep = self.deep_construct
+ self.deep_construct = True
if node in self.recursive_objects:
raise ConstructorError(None, None,
- "found recursive node", node.start_mark)
+ "found unconstructable recursive node", node.start_mark)
self.recursive_objects[node] = None
constructor = None
+ tag_suffix = None
if node.tag in self.yaml_constructors:
- constructor = lambda node: self.yaml_constructors[node.tag](self, node)
+ constructor = self.yaml_constructors[node.tag]
else:
for tag_prefix in self.yaml_multi_constructors:
if node.tag.startswith(tag_prefix):
tag_suffix = node.tag[len(tag_prefix):]
- constructor = lambda node: \
- self.yaml_multi_constructors[tag_prefix](self, tag_suffix, node)
+ constructor = self.yaml_multi_constructors[tag_prefix]
break
else:
if None in self.yaml_multi_constructors:
- constructor = lambda node: \
- self.yaml_multi_constructors[None](self, node.tag, node)
+ tag_suffix = node.tag
+ constructor = self.yaml_multi_constructors[None]
elif None in self.yaml_constructors:
- constructor = lambda node: \
- self.yaml_constructors[None](self, node)
+ constructor = self.yaml_constructors[None]
elif isinstance(node, ScalarNode):
- constructor = self.construct_scalar
+ constructor = self.__class__.construct_scalar
elif isinstance(node, SequenceNode):
- constructor = self.construct_sequence
+ constructor = self.__class__.construct_sequence
elif isinstance(node, MappingNode):
- constructor = self.construct_mapping
- else:
- print node.tag
- data = constructor(node)
+ constructor = self.__class__.construct_mapping
+ if tag_suffix is None:
+ data = constructor(self, node)
+ else:
+ data = constructor(self, tag_suffix, node)
+ if isinstance(data, types.GeneratorType):
+ generator = data
+ data = generator.next()
+ if self.deep_construct:
+ for dummy in generator:
+ pass
+ else:
+ self.state_generators.append(generator)
self.constructed_objects[node] = data
del self.recursive_objects[node]
+ if deep:
+ self.deep_construct = old_deep
return data
def construct_scalar(self, node):
if not isinstance(node, ScalarNode):
- if isinstance(node, MappingNode):
- for key_node in node.value:
- if key_node.tag == u'tag:yaml.org,2002:value':
- return self.construct_scalar(node.value[key_node])
raise ConstructorError(None, None,
"expected a scalar node, but found %s" % node.id,
node.start_mark)
return node.value
- def construct_sequence(self, node):
+ def construct_sequence(self, node, deep=False):
if not isinstance(node, SequenceNode):
raise ConstructorError(None, None,
"expected a sequence node, but found %s" % node.id,
node.start_mark)
- return [self.construct_object(child) for child in node.value]
+ return [self.construct_object(child, deep=deep)
+ for child in node.value]
- def construct_mapping(self, node):
+ def construct_mapping(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
mapping = {}
- merge = None
- for key_node in node.value:
- if key_node.tag == u'tag:yaml.org,2002:merge':
- if merge is not None:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found duplicate merge key", key_node.start_mark)
- value_node = node.value[key_node]
- if isinstance(value_node, MappingNode):
- merge = [self.construct_mapping(value_node)]
- elif isinstance(value_node, SequenceNode):
- merge = []
- for subnode in value_node.value:
- if not isinstance(subnode, MappingNode):
- raise ConstructorError("while constructing a mapping",
- node.start_mark,
- "expected a mapping for merging, but found %s"
- % subnode.id, subnode.start_mark)
- merge.append(self.construct_mapping(subnode))
- merge.reverse()
- else:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "expected a mapping or list of mappings for merging, but found %s"
- % value_node.id, value_node.start_mark)
- elif key_node.tag == u'tag:yaml.org,2002:value':
- if '=' in mapping:
- raise ConstructorError("while construction a mapping", node.start_mark,
- "found duplicate value key", key_node.start_mark)
- value = self.construct_object(node.value[key_node])
- mapping['='] = value
- else:
- key = self.construct_object(key_node)
- try:
- duplicate_key = key in mapping
- except TypeError, exc:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found unacceptable key (%s)" % exc, key_node.start_mark)
- if duplicate_key:
- raise ConstructorError("while constructing a mapping", node.start_mark,
- "found duplicate key", key_node.start_mark)
- value = self.construct_object(node.value[key_node])
- mapping[key] = value
- if merge is not None:
- merge.append(mapping)
- mapping = {}
- for submapping in merge:
- mapping.update(submapping)
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ try:
+ hash(key)
+ except TypeError, exc:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "found unacceptable key (%s)" % exc, key_node.start_mark)
+ value = self.construct_object(value_node, deep=deep)
+ mapping[key] = value
return mapping
- def construct_pairs(self, node):
+ def construct_pairs(self, node, deep=False):
if not isinstance(node, MappingNode):
raise ConstructorError(None, None,
"expected a mapping node, but found %s" % node.id,
node.start_mark)
pairs = []
- for key_node in node.value:
- key = self.construct_object(key_node)
- value = self.construct_object(node.value[key_node])
+ for key_node, value_node in node.value:
+ key = self.construct_object(key_node, deep=deep)
+ value = self.construct_object(value_node, deep=deep)
pairs.append((key, value))
return pairs
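
The hash(key) probe above replaces the old duplicate-key bookkeeping but still rejects unhashable keys. A sketch:

    import yaml

    try:
        yaml.safe_load('? [1, 2]\n: value\n')    # a sequence used as a key
    except yaml.YAMLError, exc:
        print exc    # "found unacceptable key" plus the offending mark
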
@@ -186,6 +160,53 @@ class BaseConstructor(Composer):
class SafeConstructor(BaseConstructor):
+ def construct_scalar(self, node):
+ if isinstance(node, MappingNode):
+ for key_node, value_node in node.value:
+ if key_node.tag == u'tag:yaml.org,2002:value':
+ return self.construct_scalar(value_node)
+ return BaseConstructor.construct_scalar(self, node)
+
+ def flatten_mapping(self, node):
+ merge = []
+ index = 0
+ while index < len(node.value):
+ key_node, value_node = node.value[index]
+ if key_node.tag == u'tag:yaml.org,2002:merge':
+ del node.value[index]
+ if isinstance(value_node, MappingNode):
+ self.flatten_mapping(value_node)
+ merge.extend(value_node.value)
+ elif isinstance(value_node, SequenceNode):
+ submerge = []
+ for subnode in value_node.value:
+ if not isinstance(subnode, MappingNode):
+ raise ConstructorError("while constructing a mapping",
+ node.start_mark,
+ "expected a mapping for merging, but found %s"
+ % subnode.id, subnode.start_mark)
+ self.flatten_mapping(subnode)
+ submerge.append(subnode.value)
+ submerge.reverse()
+ for value in submerge:
+ merge.extend(value)
+ else:
+ raise ConstructorError("while constructing a mapping", node.start_mark,
+ "expected a mapping or list of mappings for merging, but found %s"
+ % value_node.id, value_node.start_mark)
+ elif key_node.tag == u'tag:yaml.org,2002:value':
+ key_node.tag = u'tag:yaml.org,2002:str'
+ index += 1
+ else:
+ index += 1
+ if merge:
+ node.value = merge + node.value
+
+ def construct_mapping(self, node, deep=False):
+ if isinstance(node, MappingNode):
+ self.flatten_mapping(node)
+ return BaseConstructor.construct_mapping(self, node, deep=deep)
+
def construct_yaml_null(self, node):
self.construct_scalar(node)
return None
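
flatten_mapping is what implements YAML merge keys ('<<'). A sketch of what it enables:

    import yaml

    text = ("defaults: &defaults\n"
            "  adapter: postgres\n"
            "  host: localhost\n"
            "development:\n"
            "  <<: *defaults\n"
            "  database: dev_db\n")
    config = yaml.safe_load(text)
    print config['development']['adapter']    # postgres, merged from defaults
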
@@ -231,20 +252,22 @@ class SafeConstructor(BaseConstructor):
else:
return sign*int(value)
- inf_value = 1e300000
- nan_value = inf_value/inf_value
+ inf_value = 1e300
+ while inf_value != inf_value*inf_value:
+ inf_value *= inf_value
+ nan_value = -inf_value/inf_value # Trying to make a quiet NaN (like C99).
def construct_yaml_float(self, node):
value = str(self.construct_scalar(node))
- value = value.replace('_', '')
+ value = value.replace('_', '').lower()
sign = +1
if value[0] == '-':
sign = -1
if value[0] in '+-':
value = value[1:]
- if value.lower() == '.inf':
+ if value == '.inf':
return sign*self.inf_value
- elif value.lower() == '.nan':
+ elif value == '.nan':
return self.nan_value
elif ':' in value:
digits = [float(part) for part in value.split(':')]
@@ -256,7 +279,7 @@ class SafeConstructor(BaseConstructor):
base *= 60
return sign*value
else:
- return float(value)
+ return sign*float(value)
def construct_yaml_binary(self, node):
value = self.construct_scalar(node)
@@ -275,35 +298,47 @@ class SafeConstructor(BaseConstructor):
:(?P<minute>[0-9][0-9])
:(?P<second>[0-9][0-9])
(?:\.(?P<fraction>[0-9]*))?
- (?:[ \t]*(?:Z|(?P<tz_hour>[-+][0-9][0-9]?)
- (?::(?P<tz_minute>[0-9][0-9])?)?))?)?$''', re.X)
+ (?:[ \t]*(?P<tz>Z|(?P<tz_sign>[-+])(?P<tz_hour>[0-9][0-9]?)
+ (?::(?P<tz_minute>[0-9][0-9]))?))?)?$''', re.X)
def construct_yaml_timestamp(self, node):
value = self.construct_scalar(node)
match = self.timestamp_regexp.match(node.value)
values = match.groupdict()
- for key in values:
- if values[key]:
- values[key] = int(values[key])
- else:
- values[key] = 0
- fraction = values['fraction']
- if fraction:
- while 10*fraction < 1000000:
- fraction *= 10
- values['fraction'] = fraction
- stamp = datetime.datetime(values['year'], values['month'], values['day'],
- values['hour'], values['minute'], values['second'], values['fraction'])
- diff = datetime.timedelta(hours=values['tz_hour'], minutes=values['tz_minute'])
- return stamp-diff
+ year = int(values['year'])
+ month = int(values['month'])
+ day = int(values['day'])
+ if not values['hour']:
+ return datetime.date(year, month, day)
+ hour = int(values['hour'])
+ minute = int(values['minute'])
+ second = int(values['second'])
+ fraction = 0
+ if values['fraction']:
+ fraction = values['fraction'][:6]
+ while len(fraction) < 6:
+ fraction += '0'
+ fraction = int(fraction)
+ delta = None
+ if values['tz_sign']:
+ tz_hour = int(values['tz_hour'])
+ tz_minute = int(values['tz_minute'] or 0)
+ delta = datetime.timedelta(hours=tz_hour, minutes=tz_minute)
+ if values['tz_sign'] == '-':
+ delta = -delta
+ data = datetime.datetime(year, month, day, hour, minute, second, fraction)
+ if delta:
+ data -= delta
+ return data
def construct_yaml_omap(self, node):
# Note: we do not check for duplicate keys, because it's too
# CPU-expensive.
+ omap = []
+ yield omap
if not isinstance(node, SequenceNode):
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark)
- omap = []
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing an ordered map", node.start_mark,
@@ -313,18 +348,18 @@ class SafeConstructor(BaseConstructor):
raise ConstructorError("while constructing an ordered map", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark)
- key_node = subnode.value.keys()[0]
+ key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
- value = self.construct_object(subnode.value[key_node])
+ value = self.construct_object(value_node)
omap.append((key, value))
- return omap
def construct_yaml_pairs(self, node):
# Note: the same code as `construct_yaml_omap`.
+ pairs = []
+ yield pairs
if not isinstance(node, SequenceNode):
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a sequence, but found %s" % node.id, node.start_mark)
- pairs = []
for subnode in node.value:
if not isinstance(subnode, MappingNode):
raise ConstructorError("while constructing pairs", node.start_mark,
@@ -334,37 +369,44 @@ class SafeConstructor(BaseConstructor):
raise ConstructorError("while constructing pairs", node.start_mark,
"expected a single mapping item, but found %d items" % len(subnode.value),
subnode.start_mark)
- key_node = subnode.value.keys()[0]
+ key_node, value_node = subnode.value[0]
key = self.construct_object(key_node)
- value = self.construct_object(subnode.value[key_node])
+ value = self.construct_object(value_node)
pairs.append((key, value))
- return pairs
def construct_yaml_set(self, node):
+ data = set()
+ yield data
value = self.construct_mapping(node)
- return set(value)
+ data.update(value)
def construct_yaml_str(self, node):
value = self.construct_scalar(node)
try:
- return str(value)
+ return value.encode('ascii')
except UnicodeEncodeError:
return value
def construct_yaml_seq(self, node):
- return self.construct_sequence(node)
+ data = []
+ yield data
+ data.extend(self.construct_sequence(node))
def construct_yaml_map(self, node):
- return self.construct_mapping(node)
+ data = {}
+ yield data
+ value = self.construct_mapping(node)
+ data.update(value)
def construct_yaml_object(self, node, cls):
- state = self.construct_mapping(node)
data = cls.__new__(cls)
+ yield data
if hasattr(data, '__setstate__'):
+ state = self.construct_mapping(node, deep=True)
data.__setstate__(state)
else:
+ state = self.construct_mapping(node)
data.__dict__.update(state)
- return data
def construct_undefined(self, node):
raise ConstructorError(None, None,
@@ -391,10 +433,9 @@ SafeConstructor.add_constructor(
u'tag:yaml.org,2002:binary',
SafeConstructor.construct_yaml_binary)
-if datetime_available:
- SafeConstructor.add_constructor(
- u'tag:yaml.org,2002:timestamp',
- SafeConstructor.construct_yaml_timestamp)
+SafeConstructor.add_constructor(
+ u'tag:yaml.org,2002:timestamp',
+ SafeConstructor.construct_yaml_timestamp)
SafeConstructor.add_constructor(
u'tag:yaml.org,2002:omap',
@@ -438,7 +479,7 @@ class Constructor(SafeConstructor):
return complex(self.construct_scalar(node))
def construct_python_tuple(self, node):
- return tuple(self.construct_yaml_seq(node))
+ return tuple(self.construct_sequence(node))
def find_python_module(self, name, mark):
if not name:
@@ -456,11 +497,7 @@ class Constructor(SafeConstructor):
raise ConstructorError("while constructing a Python object", mark,
"expected non-empty name appended to the tag", mark)
if u'.' in name:
- # Python 2.4 only
- #module_name, object_name = name.rsplit('.', 1)
- items = name.split('.')
- object_name = items.pop()
- module_name = '.'.join(items)
+ module_name, object_name = name.rsplit('.', 1)
else:
module_name = '__builtin__'
object_name = name
@@ -529,9 +566,10 @@ class Constructor(SafeConstructor):
# Format:
# !!python/object:module.name { ... state ... }
instance = self.make_python_instance(suffix, node, newobj=True)
- state = self.construct_mapping(node)
+ yield instance
+ deep = hasattr(instance, '__setstate__')
+ state = self.construct_mapping(node, deep=deep)
self.set_python_instance_state(instance, state)
- return instance
def construct_python_object_apply(self, suffix, node, newobj=False):
# Format:
@@ -546,13 +584,13 @@ class Constructor(SafeConstructor):
# The difference between !!python/object/apply and !!python/object/new
# is how an object is created, check make_python_instance for details.
if isinstance(node, SequenceNode):
- args = self.construct_sequence(node)
+ args = self.construct_sequence(node, deep=True)
kwds = {}
state = {}
listitems = []
dictitems = {}
else:
- value = self.construct_mapping(node)
+ value = self.construct_mapping(node, deep=True)
args = value.get('args', [])
kwds = value.get('kwds', {})
state = value.get('state', {})
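
construct_python_object_apply imports and calls an arbitrary Python callable, which is why full yaml.load must never see untrusted input. A harmless sketch:

    import yaml

    # !!python/object/apply:NAME imports NAME and applies the given args.
    print yaml.load('!!python/object/apply:math.sqrt [9]')    # 3.0
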
@@ -571,7 +609,6 @@ class Constructor(SafeConstructor):
def construct_python_object_new(self, suffix, node):
return self.construct_python_object_apply(suffix, node, newobj=True)
-
Constructor.add_constructor(
u'tag:yaml.org,2002:python/none',
Constructor.construct_yaml_null)
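
The generator-based constructors, which yield the empty container first and populate it afterwards, are what make self-referential documents constructible. A sketch:

    import yaml

    # The alias *a refers back to the sequence being constructed; the
    # two-phase yield-then-fill protocol resolves the cycle.
    data = yaml.load('&a [1, *a]')
    print data[1] is data    # True
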
diff --git a/scripts/external_libs/yaml-3.11/yaml/cyaml.py b/scripts/external_libs/yaml-3.11/yaml/cyaml.py
new file mode 100755
index 00000000..68dcd751
--- /dev/null
+++ b/scripts/external_libs/yaml-3.11/yaml/cyaml.py
@@ -0,0 +1,85 @@
+
+__all__ = ['CBaseLoader', 'CSafeLoader', 'CLoader',
+ 'CBaseDumper', 'CSafeDumper', 'CDumper']
+
+from _yaml import CParser, CEmitter
+
+from constructor import *
+
+from serializer import *
+from representer import *
+
+from resolver import *
+
+class CBaseLoader(CParser, BaseConstructor, BaseResolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ BaseConstructor.__init__(self)
+ BaseResolver.__init__(self)
+
+class CSafeLoader(CParser, SafeConstructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ SafeConstructor.__init__(self)
+ Resolver.__init__(self)
+
+class CLoader(CParser, Constructor, Resolver):
+
+ def __init__(self, stream):
+ CParser.__init__(self, stream)
+ Constructor.__init__(self)
+ Resolver.__init__(self)
+
+class CBaseDumper(CEmitter, BaseRepresenter, BaseResolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CSafeDumper(CEmitter, SafeRepresenter, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ SafeRepresenter.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
+class CDumper(CEmitter, Serializer, Representer, Resolver):
+
+ def __init__(self, stream,
+ default_style=None, default_flow_style=None,
+ canonical=None, indent=None, width=None,
+ allow_unicode=None, line_break=None,
+ encoding=None, explicit_start=None, explicit_end=None,
+ version=None, tags=None):
+ CEmitter.__init__(self, stream, canonical=canonical,
+ indent=indent, width=width, encoding=encoding,
+ allow_unicode=allow_unicode, line_break=line_break,
+ explicit_start=explicit_start, explicit_end=explicit_end,
+ version=version, tags=tags)
+ Representer.__init__(self, default_style=default_style,
+ default_flow_style=default_flow_style)
+ Resolver.__init__(self)
+
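
cyaml.py only imports when the _yaml (LibYAML) extension is built; the usual consumer pattern, sketched here, is to fall back to the pure-Python classes:

    import yaml
    try:
        from yaml import CSafeLoader as SafeLoader    # LibYAML-backed
    except ImportError:
        from yaml import SafeLoader                   # pure Python

    print yaml.load('a: 1\n', Loader=SafeLoader)    # {'a': 1}
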
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/dumper.py b/scripts/external_libs/yaml-3.11/yaml/dumper.py
index 355c1e2f..f811d2c9 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/dumper.py
+++ b/scripts/external_libs/yaml-3.11/yaml/dumper.py
@@ -16,7 +16,7 @@ class BaseDumper(Emitter, Serializer, BaseRepresenter, BaseResolver):
version=None, tags=None):
Emitter.__init__(self, stream, canonical=canonical,
indent=indent, width=width,
- allow_uncode=allow_unicode, line_break=line_break)
+ allow_unicode=allow_unicode, line_break=line_break)
Serializer.__init__(self, encoding=encoding,
explicit_start=explicit_start, explicit_end=explicit_end,
version=version, tags=tags)
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/emitter.py b/scripts/external_libs/yaml-3.11/yaml/emitter.py
index a34c4526..e5bcdccc 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/emitter.py
+++ b/scripts/external_libs/yaml-3.11/yaml/emitter.py
@@ -11,12 +11,10 @@ __all__ = ['Emitter', 'EmitterError']
from error import YAMLError
from events import *
-import re
-
class EmitterError(YAMLError):
pass
-class ScalarAnalysis:
+class ScalarAnalysis(object):
def __init__(self, scalar, empty, multiline,
allow_flow_plain, allow_block_plain,
allow_single_quoted, allow_double_quoted,
@@ -30,7 +28,7 @@ class ScalarAnalysis:
self.allow_double_quoted = allow_double_quoted
self.allow_block = allow_block
-class Emitter:
+class Emitter(object):
DEFAULT_TAG_PREFIXES = {
u'!' : u'!',
@@ -78,6 +76,9 @@ class Emitter:
self.whitespace = True
self.indention = True
+ # Whether the document requires an explicit document indicator
+ self.open_ended = False
+
# Formatting details.
self.canonical = canonical
self.allow_unicode = allow_unicode
@@ -102,6 +103,11 @@ class Emitter:
self.analysis = None
self.style = None
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
def emit(self, event):
self.events.append(event)
while not self.need_more_events():
@@ -153,7 +159,7 @@ class Emitter:
def expect_stream_start(self):
if isinstance(self.event, StreamStartEvent):
- if self.event.encoding:
+ if self.event.encoding and not getattr(self.stream, 'encoding', None):
self.encoding = self.event.encoding
self.write_stream_start()
self.state = self.expect_first_document_start
@@ -171,6 +177,9 @@ class Emitter:
def expect_document_start(self, first=False):
if isinstance(self.event, DocumentStartEvent):
+ if (self.event.version or self.event.tags) and self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
if self.event.version:
version_text = self.prepare_version(self.event.version)
self.write_version_directive(version_text)
@@ -194,6 +203,9 @@ class Emitter:
self.write_indent()
self.state = self.expect_document_root
elif isinstance(self.event, StreamEndEvent):
+ if self.open_ended:
+ self.write_indicator(u'...', True)
+ self.write_indent()
self.write_stream_end()
self.state = self.expect_nothing
else:
@@ -492,7 +504,8 @@ class Emitter:
or (not self.flow_level and self.analysis.allow_block_plain))):
return ''
if self.event.style and self.event.style in '|>':
- if not self.flow_level and self.analysis.allow_block:
+ if (not self.flow_level and not self.simple_key_context
+ and self.analysis.allow_block):
return self.event.style
if not self.event.style or self.event.style == '\'':
if (self.analysis.allow_single_quoted and
@@ -537,7 +550,7 @@ class Emitter:
raise EmitterError("tag handle must start and end with '!': %r"
% (handle.encode('utf-8')))
for ch in handle[1:-1]:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the tag handle: %r"
% (ch.encode('utf-8'), handle.encode('utf-8')))
@@ -552,7 +565,7 @@ class Emitter:
end = 1
while end < len(prefix):
ch = prefix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?!:@&=+$,_.~*\'()[]':
end += 1
else:
@@ -573,7 +586,9 @@ class Emitter:
return tag
handle = None
suffix = tag
- for prefix in self.tag_prefixes:
+ prefixes = self.tag_prefixes.keys()
+ prefixes.sort()
+ for prefix in prefixes:
if tag.startswith(prefix) \
and (prefix == u'!' or len(prefix) < len(tag)):
handle = self.tag_prefixes[prefix]
@@ -582,7 +597,7 @@ class Emitter:
start = end = 0
while end < len(suffix):
ch = suffix[end]
- if u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ if u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?:@&=+$,_.~*\'()[]' \
or (ch == u'!' and handle != u'!'):
end += 1
@@ -605,7 +620,7 @@ class Emitter:
if not anchor:
raise EmitterError("anchor must not be empty")
for ch in anchor:
- if not (u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ if not (u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_'):
raise EmitterError("invalid character %r in the anchor: %r"
% (ch.encode('utf-8'), anchor.encode('utf-8')))
@@ -626,15 +641,13 @@ class Emitter:
line_breaks = False
special_characters = False
- # Whitespaces.
- inline_spaces = False # non-space space+ non-space
- inline_breaks = False # non-space break+ non-space
- leading_spaces = False # ^ space+ (non-space | $)
- leading_breaks = False # ^ break+ (non-space | $)
- trailing_spaces = False # (^ | non-space) space+ $
- trailing_breaks = False # (^ | non-space) break+ $
- inline_breaks_spaces = False # non-space break+ space+ non-space
- mixed_breaks_spaces = False # anything else
+ # Important whitespace combinations.
+ leading_space = False
+ leading_break = False
+ trailing_space = False
+ trailing_break = False
+ break_space = False
+ space_break = False
# Check document indicators.
if scalar.startswith(u'---') or scalar.startswith(u'...'):
@@ -642,42 +655,33 @@ class Emitter:
flow_indicators = True
# First character or preceded by a whitespace.
- preceeded_by_space = True
+ preceeded_by_whitespace = True
# Last character or followed by a whitespace.
- followed_by_space = (len(scalar) == 1 or
+ followed_by_whitespace = (len(scalar) == 1 or
scalar[1] in u'\0 \t\r\n\x85\u2028\u2029')
- # The current series of whitespaces contain plain spaces.
- spaces = False
-
- # The current series of whitespaces contain line breaks.
- breaks = False
+ # The previous character is a space.
+ previous_space = False
- # The current series of whitespaces contain a space followed by a
- # break.
- mixed = False
-
- # The current series of whitespaces start at the beginning of the
- # scalar.
- leading = False
+ # The previous character is a break.
+ previous_break = False
index = 0
while index < len(scalar):
ch = scalar[index]
# Check for indicators.
-
if index == 0:
# Leading indicators are special characters.
- if ch in u'#,[]{}#&*!|>\'\"%@`':
+ if ch in u'#,[]{}&*!|>\'\"%@`':
flow_indicators = True
block_indicators = True
if ch in u'?:':
flow_indicators = True
- if followed_by_space:
+ if followed_by_whitespace:
block_indicators = True
- if ch == u'-' and followed_by_space:
+ if ch == u'-' and followed_by_whitespace:
flow_indicators = True
block_indicators = True
else:
@@ -686,14 +690,13 @@ class Emitter:
flow_indicators = True
if ch == u':':
flow_indicators = True
- if followed_by_space:
+ if followed_by_whitespace:
block_indicators = True
- if ch == u'#' and preceeded_by_space:
+ if ch == u'#' and preceeded_by_whitespace:
flow_indicators = True
block_indicators = True
# Check for line breaks, special, and unicode characters.
-
if ch in u'\n\x85\u2028\u2029':
line_breaks = True
if not (ch == u'\n' or u'\x20' <= ch <= u'\x7E'):
@@ -705,65 +708,33 @@ class Emitter:
else:
special_characters = True
- # Spaces, line breaks, and how they are mixed. State machine.
-
- # Start or continue series of whitespaces.
- if ch in u' \n\x85\u2028\u2029':
- if spaces and breaks:
- if ch != u' ': # break+ (space+ break+) => mixed
- mixed = True
- elif spaces:
- if ch != u' ': # (space+ break+) => mixed
- breaks = True
- mixed = True
- elif breaks:
- if ch == u' ': # break+ space+
- spaces = True
- else:
- leading = (index == 0)
- if ch == u' ': # space+
- spaces = True
- else: # break+
- breaks = True
-
- # Series of whitespaces ended with a non-space.
- elif spaces or breaks:
- if leading:
- if spaces and breaks:
- mixed_breaks_spaces = True
- elif spaces:
- leading_spaces = True
- elif breaks:
- leading_breaks = True
- else:
- if mixed:
- mixed_breaks_spaces = True
- elif spaces and breaks:
- inline_breaks_spaces = True
- elif spaces:
- inline_spaces = True
- elif breaks:
- inline_breaks = True
- spaces = breaks = mixed = leading = False
-
- # Series of whitespaces reach the end.
- if (spaces or breaks) and (index == len(scalar)-1):
- if spaces and breaks:
- mixed_breaks_spaces = True
- elif spaces:
- trailing_spaces = True
- if leading:
- leading_spaces = True
- elif breaks:
- trailing_breaks = True
- if leading:
- leading_breaks = True
- spaces = breaks = mixed = leading = False
+ # Detect important whitespace combinations.
+ if ch == u' ':
+ if index == 0:
+ leading_space = True
+ if index == len(scalar)-1:
+ trailing_space = True
+ if previous_break:
+ break_space = True
+ previous_space = True
+ previous_break = False
+ elif ch in u'\n\x85\u2028\u2029':
+ if index == 0:
+ leading_break = True
+ if index == len(scalar)-1:
+ trailing_break = True
+ if previous_space:
+ space_break = True
+ previous_space = False
+ previous_break = True
+ else:
+ previous_space = False
+ previous_break = False
# Prepare for the next character.
index += 1
- preceeded_by_space = (ch in u'\0 \t\r\n\x85\u2028\u2029')
- followed_by_space = (index+1 >= len(scalar) or
+ preceeded_by_whitespace = (ch in u'\0 \t\r\n\x85\u2028\u2029')
+ followed_by_whitespace = (index+1 >= len(scalar) or
scalar[index+1] in u'\0 \t\r\n\x85\u2028\u2029')
# Let's decide what styles are allowed.
@@ -773,28 +744,28 @@ class Emitter:
allow_double_quoted = True
allow_block = True
- # Leading and trailing whitespace are bad for plain scalars. We also
- # do not want to mess with leading whitespaces for block scalars.
- if leading_spaces or leading_breaks or trailing_spaces:
- allow_flow_plain = allow_block_plain = allow_block = False
-
- # Trailing breaks are fine for block scalars, but unacceptable for
- # plain scalars.
- if trailing_breaks:
+ # Leading and trailing whitespaces are bad for plain scalars.
+ if (leading_space or leading_break
+ or trailing_space or trailing_break):
allow_flow_plain = allow_block_plain = False
- # The combination of (space+ break+) is only acceptable for block
+ # We do not permit trailing spaces for block scalars.
+ if trailing_space:
+ allow_block = False
+
+ # Spaces at the beginning of a new line are only acceptable for block
# scalars.
- if inline_breaks_spaces:
+ if break_space:
allow_flow_plain = allow_block_plain = allow_single_quoted = False
- # Mixed spaces and breaks, as well as special character are only
+    # Spaces followed by breaks, as well as special characters, are only
# allowed for double quoted scalars.
- if mixed_breaks_spaces or special_characters:
+ if space_break or special_characters:
allow_flow_plain = allow_block_plain = \
allow_single_quoted = allow_block = False
- # We don't emit multiline plain scalars.
+ # Although the plain scalar writer supports breaks, we never emit
+ # multiline plain scalars.
if line_breaks:
allow_flow_plain = allow_block_plain = False
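
The whitespace flags computed above feed the allow_* decisions, and their visible effect is the scalar style the dumper picks. A sketch:

    import yaml

    print yaml.dump('plain words')    # plain style is allowed
    print yaml.dump('trailing ')      # trailing space forces quoting
    print yaml.dump('two\nlines')     # a line break rules out plain style
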
@@ -823,7 +794,7 @@ class Emitter:
def write_stream_start(self):
# Write BOM if needed.
if self.encoding and self.encoding.startswith('utf-16'):
- self.stream.write(u'\xFF\xFE'.encode(self.encoding))
+ self.stream.write(u'\uFEFF'.encode(self.encoding))
def write_stream_end(self):
self.flush_stream()
@@ -837,6 +808,7 @@ class Emitter:
self.whitespace = whitespace
self.indention = self.indention and indention
self.column += len(data)
+ self.open_ended = False
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
@@ -922,13 +894,13 @@ class Emitter:
data = data.encode(self.encoding)
self.stream.write(data)
start = end
- if ch == u'\'':
- data = u'\'\''
- self.column += 2
- if self.encoding:
- data = data.encode(self.encoding)
- self.stream.write(data)
- start = end + 1
+ if ch == u'\'':
+ data = u'\'\''
+ self.column += 2
+ if self.encoding:
+ data = data.encode(self.encoding)
+ self.stream.write(data)
+ start = end + 1
if ch is not None:
spaces = (ch == u' ')
breaks = (ch in u'\n\x85\u2028\u2029')
@@ -1007,25 +979,26 @@ class Emitter:
end += 1
self.write_indicator(u'"', False)
- def determine_chomp(self, text):
- tail = text[-2:]
- while len(tail) < 2:
- tail = u' '+tail
- if tail[-1] in u'\n\x85\u2028\u2029':
- if tail[-2] in u'\n\x85\u2028\u2029':
- return u'+'
- else:
- return u''
- else:
- return u'-'
+ def determine_block_hints(self, text):
+ hints = u''
+ if text:
+ if text[0] in u' \n\x85\u2028\u2029':
+ hints += unicode(self.best_indent)
+ if text[-1] not in u'\n\x85\u2028\u2029':
+ hints += u'-'
+ elif len(text) == 1 or text[-2] in u'\n\x85\u2028\u2029':
+ hints += u'+'
+ return hints
def write_folded(self, text):
- chomp = self.determine_chomp(text)
- self.write_indicator(u'>'+chomp, True)
- self.write_indent()
- leading_space = False
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'>'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ leading_space = True
spaces = False
- breaks = False
+ breaks = True
start = end = 0
while end <= len(text):
ch = None
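
determine_block_hints extends the old determine_chomp: besides the '-'/'+' chomping indicator it emits an explicit indent hint when the text begins with a space or break. A sketch:

    import yaml

    # '|' requests a literal block scalar; the '-' chomping hint appears
    # because the text has no trailing line break.
    print yaml.dump('no newline at end', default_style='|')
    # |-
    #   no newline at end
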
@@ -1059,6 +1032,7 @@ class Emitter:
else:
if ch is None or ch in u' \n\x85\u2028\u2029':
data = text[start:end]
+ self.column += len(data)
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
@@ -1071,10 +1045,12 @@ class Emitter:
end += 1
def write_literal(self, text):
- chomp = self.determine_chomp(text)
- self.write_indicator(u'|'+chomp, True)
- self.write_indent()
- breaks = False
+ hints = self.determine_block_hints(text)
+ self.write_indicator(u'|'+hints, True)
+ if hints[-1:] == u'+':
+ self.open_ended = True
+ self.write_line_break()
+ breaks = True
start = end = 0
while end <= len(text):
ch = None
@@ -1104,6 +1080,8 @@ class Emitter:
end += 1
def write_plain(self, text, split=True):
+ if self.root_context:
+ self.open_ended = True
if not text:
return
if not self.whitespace:
@@ -1112,7 +1090,7 @@ class Emitter:
if self.encoding:
data = data.encode(self.encoding)
self.stream.write(data)
- self.writespace = False
+ self.whitespace = False
self.indention = False
spaces = False
breaks = False
@@ -1125,7 +1103,7 @@ class Emitter:
if ch != u' ':
if start+1 == end and self.column > self.best_width and split:
self.write_indent()
- self.writespace = False
+ self.whitespace = False
self.indention = False
else:
data = text[start:end]
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/error.py b/scripts/external_libs/yaml-3.11/yaml/error.py
index 8fa916b2..577686db 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/error.py
+++ b/scripts/external_libs/yaml-3.11/yaml/error.py
@@ -1,7 +1,7 @@
__all__ = ['Mark', 'YAMLError', 'MarkedYAMLError']
-class Mark:
+class Mark(object):
def __init__(self, name, index, line, column, buffer, pointer):
self.name = name
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/events.py b/scripts/external_libs/yaml-3.11/yaml/events.py
index 3f244fa0..f79ad389 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/events.py
+++ b/scripts/external_libs/yaml-3.11/yaml/events.py
@@ -1,7 +1,7 @@
# Abstract classes.
-class Event:
+class Event(object):
def __init__(self, start_mark=None, end_mark=None):
self.start_mark = start_mark
self.end_mark = end_mark
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/loader.py b/scripts/external_libs/yaml-3.11/yaml/loader.py
index 293ff467..293ff467 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/loader.py
+++ b/scripts/external_libs/yaml-3.11/yaml/loader.py
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/nodes.py b/scripts/external_libs/yaml-3.11/yaml/nodes.py
index cb8c1cba..c4f070c4 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/nodes.py
+++ b/scripts/external_libs/yaml-3.11/yaml/nodes.py
@@ -1,5 +1,5 @@
-class Node:
+class Node(object):
def __init__(self, tag, value, start_mark, end_mark):
self.tag = tag
self.value = value
diff --git a/scripts/external_libs/yaml-3.11/yaml/parser.py b/scripts/external_libs/yaml-3.11/yaml/parser.py
new file mode 100755
index 00000000..f9e3057f
--- /dev/null
+++ b/scripts/external_libs/yaml-3.11/yaml/parser.py
@@ -0,0 +1,589 @@
+
+# The following YAML grammar is LL(1) and is parsed by a recursive descent
+# parser.
+#
+# stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+# implicit_document ::= block_node DOCUMENT-END*
+# explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+# block_node_or_indentless_sequence ::=
+# ALIAS
+# | properties (block_content | indentless_block_sequence)?
+# | block_content
+# | indentless_block_sequence
+# block_node ::= ALIAS
+# | properties block_content?
+# | block_content
+# flow_node ::= ALIAS
+# | properties flow_content?
+# | flow_content
+# properties ::= TAG ANCHOR? | ANCHOR TAG?
+# block_content ::= block_collection | flow_collection | SCALAR
+# flow_content ::= flow_collection | SCALAR
+# block_collection ::= block_sequence | block_mapping
+# flow_collection ::= flow_sequence | flow_mapping
+# block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+# indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+# block_mapping ::= BLOCK-MAPPING_START
+# ((KEY block_node_or_indentless_sequence?)?
+# (VALUE block_node_or_indentless_sequence?)?)*
+# BLOCK-END
+# flow_sequence ::= FLOW-SEQUENCE-START
+# (flow_sequence_entry FLOW-ENTRY)*
+# flow_sequence_entry?
+# FLOW-SEQUENCE-END
+# flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+# flow_mapping ::= FLOW-MAPPING-START
+# (flow_mapping_entry FLOW-ENTRY)*
+# flow_mapping_entry?
+# FLOW-MAPPING-END
+# flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+#
+# FIRST sets:
+#
+# stream: { STREAM-START }
+# explicit_document: { DIRECTIVE DOCUMENT-START }
+# implicit_document: FIRST(block_node)
+# block_node: { ALIAS TAG ANCHOR SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_node: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_content: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# flow_content: { FLOW-SEQUENCE-START FLOW-MAPPING-START SCALAR }
+# block_collection: { BLOCK-SEQUENCE-START BLOCK-MAPPING-START }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# block_sequence: { BLOCK-SEQUENCE-START }
+# block_mapping: { BLOCK-MAPPING-START }
+# block_node_or_indentless_sequence: { ALIAS ANCHOR TAG SCALAR BLOCK-SEQUENCE-START BLOCK-MAPPING-START FLOW-SEQUENCE-START FLOW-MAPPING-START BLOCK-ENTRY }
+# indentless_sequence: { ENTRY }
+# flow_collection: { FLOW-SEQUENCE-START FLOW-MAPPING-START }
+# flow_sequence: { FLOW-SEQUENCE-START }
+# flow_mapping: { FLOW-MAPPING-START }
+# flow_sequence_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+# flow_mapping_entry: { ALIAS ANCHOR TAG SCALAR FLOW-SEQUENCE-START FLOW-MAPPING-START KEY }
+
+__all__ = ['Parser', 'ParserError']
+
+from error import MarkedYAMLError
+from tokens import *
+from events import *
+from scanner import *
+
+class ParserError(MarkedYAMLError):
+ pass
+
+class Parser(object):
+    # Since writing a recursive descent parser is a straightforward task, we
+ # do not give many comments here.
+
+ DEFAULT_TAGS = {
+ u'!': u'!',
+ u'!!': u'tag:yaml.org,2002:',
+ }
+
+ def __init__(self):
+ self.current_event = None
+ self.yaml_version = None
+ self.tag_handles = {}
+ self.states = []
+ self.marks = []
+ self.state = self.parse_stream_start
+
+ def dispose(self):
+ # Reset the state attributes (to clear self-references)
+ self.states = []
+ self.state = None
+
+ def check_event(self, *choices):
+ # Check the type of the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ if self.current_event is not None:
+ if not choices:
+ return True
+ for choice in choices:
+ if isinstance(self.current_event, choice):
+ return True
+ return False
+
+ def peek_event(self):
+ # Get the next event.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ return self.current_event
+
+ def get_event(self):
+ # Get the next event and proceed further.
+ if self.current_event is None:
+ if self.state:
+ self.current_event = self.state()
+ value = self.current_event
+ self.current_event = None
+ return value
+
+ # stream ::= STREAM-START implicit_document? explicit_document* STREAM-END
+ # implicit_document ::= block_node DOCUMENT-END*
+ # explicit_document ::= DIRECTIVE* DOCUMENT-START block_node? DOCUMENT-END*
+
+ def parse_stream_start(self):
+
+ # Parse the stream start.
+ token = self.get_token()
+ event = StreamStartEvent(token.start_mark, token.end_mark,
+ encoding=token.encoding)
+
+ # Prepare the next state.
+ self.state = self.parse_implicit_document_start
+
+ return event
+
+ def parse_implicit_document_start(self):
+
+ # Parse an implicit document.
+ if not self.check_token(DirectiveToken, DocumentStartToken,
+ StreamEndToken):
+ self.tag_handles = self.DEFAULT_TAGS
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=False)
+
+ # Prepare the next state.
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_block_node
+
+ return event
+
+ else:
+ return self.parse_document_start()
+
+ def parse_document_start(self):
+
+ # Parse any extra document end indicators.
+ while self.check_token(DocumentEndToken):
+ self.get_token()
+
+ # Parse an explicit document.
+ if not self.check_token(StreamEndToken):
+ token = self.peek_token()
+ start_mark = token.start_mark
+ version, tags = self.process_directives()
+ if not self.check_token(DocumentStartToken):
+ raise ParserError(None, None,
+ "expected '<document start>', but found %r"
+ % self.peek_token().id,
+ self.peek_token().start_mark)
+ token = self.get_token()
+ end_mark = token.end_mark
+ event = DocumentStartEvent(start_mark, end_mark,
+ explicit=True, version=version, tags=tags)
+ self.states.append(self.parse_document_end)
+ self.state = self.parse_document_content
+ else:
+ # Parse the end of the stream.
+ token = self.get_token()
+ event = StreamEndEvent(token.start_mark, token.end_mark)
+ assert not self.states
+ assert not self.marks
+ self.state = None
+ return event
+
+ def parse_document_end(self):
+
+ # Parse the document end.
+ token = self.peek_token()
+ start_mark = end_mark = token.start_mark
+ explicit = False
+ if self.check_token(DocumentEndToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ explicit = True
+ event = DocumentEndEvent(start_mark, end_mark,
+ explicit=explicit)
+
+ # Prepare the next state.
+ self.state = self.parse_document_start
+
+ return event
+
+ def parse_document_content(self):
+ if self.check_token(DirectiveToken,
+ DocumentStartToken, DocumentEndToken, StreamEndToken):
+ event = self.process_empty_scalar(self.peek_token().start_mark)
+ self.state = self.states.pop()
+ return event
+ else:
+ return self.parse_block_node()
+
+ def process_directives(self):
+ self.yaml_version = None
+ self.tag_handles = {}
+ while self.check_token(DirectiveToken):
+ token = self.get_token()
+ if token.name == u'YAML':
+ if self.yaml_version is not None:
+ raise ParserError(None, None,
+ "found duplicate YAML directive", token.start_mark)
+ major, minor = token.value
+ if major != 1:
+ raise ParserError(None, None,
+ "found incompatible YAML document (version 1.* is required)",
+ token.start_mark)
+ self.yaml_version = token.value
+ elif token.name == u'TAG':
+ handle, prefix = token.value
+ if handle in self.tag_handles:
+ raise ParserError(None, None,
+ "duplicate tag handle %r" % handle.encode('utf-8'),
+ token.start_mark)
+ self.tag_handles[handle] = prefix
+ if self.tag_handles:
+ value = self.yaml_version, self.tag_handles.copy()
+ else:
+ value = self.yaml_version, None
+ for key in self.DEFAULT_TAGS:
+ if key not in self.tag_handles:
+ self.tag_handles[key] = self.DEFAULT_TAGS[key]
+ return value
+
+ # block_node_or_indentless_sequence ::= ALIAS
+ # | properties (block_content | indentless_block_sequence)?
+ # | block_content
+ # | indentless_block_sequence
+ # block_node ::= ALIAS
+ # | properties block_content?
+ # | block_content
+ # flow_node ::= ALIAS
+ # | properties flow_content?
+ # | flow_content
+ # properties ::= TAG ANCHOR? | ANCHOR TAG?
+ # block_content ::= block_collection | flow_collection | SCALAR
+ # flow_content ::= flow_collection | SCALAR
+ # block_collection ::= block_sequence | block_mapping
+ # flow_collection ::= flow_sequence | flow_mapping
+
+ def parse_block_node(self):
+ return self.parse_node(block=True)
+
+ def parse_flow_node(self):
+ return self.parse_node()
+
+ def parse_block_node_or_indentless_sequence(self):
+ return self.parse_node(block=True, indentless_sequence=True)
+
+ def parse_node(self, block=False, indentless_sequence=False):
+ if self.check_token(AliasToken):
+ token = self.get_token()
+ event = AliasEvent(token.value, token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ else:
+ anchor = None
+ tag = None
+ start_mark = end_mark = tag_mark = None
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ start_mark = token.start_mark
+ end_mark = token.end_mark
+ anchor = token.value
+ if self.check_token(TagToken):
+ token = self.get_token()
+ tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ elif self.check_token(TagToken):
+ token = self.get_token()
+ start_mark = tag_mark = token.start_mark
+ end_mark = token.end_mark
+ tag = token.value
+ if self.check_token(AnchorToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ anchor = token.value
+ if tag is not None:
+ handle, suffix = tag
+ if handle is not None:
+ if handle not in self.tag_handles:
+ raise ParserError("while parsing a node", start_mark,
+ "found undefined tag handle %r" % handle.encode('utf-8'),
+ tag_mark)
+ tag = self.tag_handles[handle]+suffix
+ else:
+ tag = suffix
+ #if tag == u'!':
+ # raise ParserError("while parsing a node", start_mark,
+ # "found non-specific tag '!'", tag_mark,
+ # "Please check 'http://pyyaml.org/wiki/YAMLNonSpecificTag' and share your opinion.")
+ if start_mark is None:
+ start_mark = end_mark = self.peek_token().start_mark
+ event = None
+ implicit = (tag is None or tag == u'!')
+ if indentless_sequence and self.check_token(BlockEntryToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark)
+ self.state = self.parse_indentless_sequence_entry
+ else:
+ if self.check_token(ScalarToken):
+ token = self.get_token()
+ end_mark = token.end_mark
+ if (token.plain and tag is None) or tag == u'!':
+ implicit = (True, False)
+ elif tag is None:
+ implicit = (False, True)
+ else:
+ implicit = (False, False)
+ event = ScalarEvent(anchor, tag, implicit, token.value,
+ start_mark, end_mark, style=token.style)
+ self.state = self.states.pop()
+ elif self.check_token(FlowSequenceStartToken):
+ end_mark = self.peek_token().end_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_sequence_first_entry
+ elif self.check_token(FlowMappingStartToken):
+ end_mark = self.peek_token().end_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=True)
+ self.state = self.parse_flow_mapping_first_key
+ elif block and self.check_token(BlockSequenceStartToken):
+ end_mark = self.peek_token().start_mark
+ event = SequenceStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_sequence_first_entry
+ elif block and self.check_token(BlockMappingStartToken):
+ end_mark = self.peek_token().start_mark
+ event = MappingStartEvent(anchor, tag, implicit,
+ start_mark, end_mark, flow_style=False)
+ self.state = self.parse_block_mapping_first_key
+ elif anchor is not None or tag is not None:
+ # Empty scalars are allowed even if a tag or an anchor is
+ # specified.
+ event = ScalarEvent(anchor, tag, (implicit, False), u'',
+ start_mark, end_mark)
+ self.state = self.states.pop()
+ else:
+ if block:
+ node = 'block'
+ else:
+ node = 'flow'
+ token = self.peek_token()
+ raise ParserError("while parsing a %s node" % node, start_mark,
+ "expected the node content, but found %r" % token.id,
+ token.start_mark)
+ return event
+
+ # block_sequence ::= BLOCK-SEQUENCE-START (BLOCK-ENTRY block_node?)* BLOCK-END
+
+ def parse_block_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_sequence_entry()
+
+ def parse_block_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken, BlockEndToken):
+ self.states.append(self.parse_block_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_block_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block collection", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ # indentless_sequence ::= (BLOCK-ENTRY block_node?)+
+
+ def parse_indentless_sequence_entry(self):
+ if self.check_token(BlockEntryToken):
+ token = self.get_token()
+ if not self.check_token(BlockEntryToken,
+ KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_indentless_sequence_entry)
+ return self.parse_block_node()
+ else:
+ self.state = self.parse_indentless_sequence_entry
+ return self.process_empty_scalar(token.end_mark)
+ token = self.peek_token()
+ event = SequenceEndEvent(token.start_mark, token.start_mark)
+ self.state = self.states.pop()
+ return event
+
+ # block_mapping ::= BLOCK-MAPPING_START
+ # ((KEY block_node_or_indentless_sequence?)?
+ # (VALUE block_node_or_indentless_sequence?)?)*
+ # BLOCK-END
+
+ def parse_block_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_block_mapping_key()
+
+ def parse_block_mapping_key(self):
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_value)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ if not self.check_token(BlockEndToken):
+ token = self.peek_token()
+ raise ParserError("while parsing a block mapping", self.marks[-1],
+ "expected <block end>, but found %r" % token.id, token.start_mark)
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_block_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(KeyToken, ValueToken, BlockEndToken):
+ self.states.append(self.parse_block_mapping_key)
+ return self.parse_block_node_or_indentless_sequence()
+ else:
+ self.state = self.parse_block_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_block_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ # flow_sequence ::= FLOW-SEQUENCE-START
+ # (flow_sequence_entry FLOW-ENTRY)*
+ # flow_sequence_entry?
+ # FLOW-SEQUENCE-END
+ # flow_sequence_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+ #
+ # Note that while production rules for both flow_sequence_entry and
+ # flow_mapping_entry are equal, their interpretations are different.
+ # For `flow_sequence_entry`, the part `KEY flow_node? (VALUE flow_node?)?`
+# generates an inline mapping (set syntax).
+
+ def parse_flow_sequence_first_entry(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_sequence_entry(first=True)
+
+ def parse_flow_sequence_entry(self, first=False):
+ if not self.check_token(FlowSequenceEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow sequence", self.marks[-1],
+ "expected ',' or ']', but got %r" % token.id, token.start_mark)
+
+ if self.check_token(KeyToken):
+ token = self.peek_token()
+ event = MappingStartEvent(None, None, True,
+ token.start_mark, token.end_mark,
+ flow_style=True)
+ self.state = self.parse_flow_sequence_entry_mapping_key
+ return event
+ elif not self.check_token(FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = SequenceEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_sequence_entry_mapping_key(self):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+
+ def parse_flow_sequence_entry_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowSequenceEndToken):
+ self.states.append(self.parse_flow_sequence_entry_mapping_end)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_sequence_entry_mapping_end
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_sequence_entry_mapping_end(self):
+ self.state = self.parse_flow_sequence_entry
+ token = self.peek_token()
+ return MappingEndEvent(token.start_mark, token.start_mark)
+
+ # flow_mapping ::= FLOW-MAPPING-START
+ # (flow_mapping_entry FLOW-ENTRY)*
+ # flow_mapping_entry?
+ # FLOW-MAPPING-END
+ # flow_mapping_entry ::= flow_node | KEY flow_node? (VALUE flow_node?)?
+
+ def parse_flow_mapping_first_key(self):
+ token = self.get_token()
+ self.marks.append(token.start_mark)
+ return self.parse_flow_mapping_key(first=True)
+
+ def parse_flow_mapping_key(self, first=False):
+ if not self.check_token(FlowMappingEndToken):
+ if not first:
+ if self.check_token(FlowEntryToken):
+ self.get_token()
+ else:
+ token = self.peek_token()
+ raise ParserError("while parsing a flow mapping", self.marks[-1],
+ "expected ',' or '}', but got %r" % token.id, token.start_mark)
+ if self.check_token(KeyToken):
+ token = self.get_token()
+ if not self.check_token(ValueToken,
+ FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_value)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_value
+ return self.process_empty_scalar(token.end_mark)
+ elif not self.check_token(FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_empty_value)
+ return self.parse_flow_node()
+ token = self.get_token()
+ event = MappingEndEvent(token.start_mark, token.end_mark)
+ self.state = self.states.pop()
+ self.marks.pop()
+ return event
+
+ def parse_flow_mapping_value(self):
+ if self.check_token(ValueToken):
+ token = self.get_token()
+ if not self.check_token(FlowEntryToken, FlowMappingEndToken):
+ self.states.append(self.parse_flow_mapping_key)
+ return self.parse_flow_node()
+ else:
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(token.end_mark)
+ else:
+ self.state = self.parse_flow_mapping_key
+ token = self.peek_token()
+ return self.process_empty_scalar(token.start_mark)
+
+ def parse_flow_mapping_empty_value(self):
+ self.state = self.parse_flow_mapping_key
+ return self.process_empty_scalar(self.peek_token().start_mark)
+
+ def process_empty_scalar(self, mark):
+ return ScalarEvent(None, None, (True, False), u'', mark, mark)
+
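
The two grammar notes above are easy to observe in the event stream. A minimal sketch (assuming this vendored package imports as `yaml`), showing the inline mapping a KEY opens inside a flow sequence and the empty scalar process_empty_scalar() supplies for an omitted value:

    import yaml

    for event in yaml.parse('[a: 1, b]'):
        print(event)
    # ... SequenceStartEvent, MappingStartEvent, ScalarEvent(u'a'),
    # ScalarEvent(u'1'), MappingEndEvent, ScalarEvent(u'b'),
    # SequenceEndEvent ...
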
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/reader.py b/scripts/external_libs/yaml-3.11/yaml/reader.py
index beb76d0a..3249e6b9 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/reader.py
+++ b/scripts/external_libs/yaml-3.11/yaml/reader.py
@@ -21,41 +21,6 @@ from error import YAMLError, Mark
import codecs, re
-# Unfortunately, codec functions in Python 2.3 do not support the `finish`
-# argument, so we have to write our own wrappers.
-
-try:
- codecs.utf_8_decode('', 'strict', False)
- from codecs import utf_8_decode, utf_16_le_decode, utf_16_be_decode
-
-except TypeError:
-
- def utf_16_le_decode(data, errors, finish=False):
- if not finish and len(data) % 2 == 1:
- data = data[:-1]
- return codecs.utf_16_le_decode(data, errors)
-
- def utf_16_be_decode(data, errors, finish=False):
- if not finish and len(data) % 2 == 1:
- data = data[:-1]
- return codecs.utf_16_be_decode(data, errors)
-
- def utf_8_decode(data, errors, finish=False):
- if not finish:
- # We are trying to remove a possible incomplete multibyte character
- # from the suffix of the data.
- # The first byte of a multi-byte sequence is in the range 0xc0 to 0xfd.
- # All further bytes are in the range 0x80 to 0xbf.
- # UTF-8 encoded UCS characters may be up to six bytes long.
- count = 0
- while count < 5 and count < len(data) \
- and '\x80' <= data[-count-1] <= '\xBF':
-                count += 1
- if count < 5 and count < len(data) \
- and '\xC0' <= data[-count-1] <= '\xFD':
- data = data[:-count-1]
- return codecs.utf_8_decode(data, errors)
-
class ReaderError(YAMLError):
def __init__(self, name, position, character, encoding, reason):
@@ -74,10 +39,10 @@ class ReaderError(YAMLError):
else:
return "unacceptable character #x%04x: %s\n" \
" in \"%s\", position %d" \
- % (ord(self.character), self.reason,
+ % (self.character, self.reason,
self.name, self.position)
-class Reader:
+class Reader(object):
# Reader:
# - determines the data encoding and converts it to unicode,
# - checks if characters are in allowed range,
@@ -120,9 +85,11 @@ class Reader:
self.determine_encoding()
def peek(self, index=0):
- if self.pointer+index+1 >= len(self.buffer):
+ try:
+ return self.buffer[self.pointer+index]
+ except IndexError:
self.update(index+1)
- return self.buffer[self.pointer+index]
+ return self.buffer[self.pointer+index]
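
The peek() rewrite trades a bounds check on every call for an exception in the rare buffer-refill case. An illustrative sketch of the two shapes (the hypothetical refill() stands in for Reader.update(); not part of the diff):

    def refill(buf):
        # stand-in for Reader.update(): append the NUL end-of-stream marker
        return buf + u'\0'

    def peek_checked(buf, pos):        # old shape: test on every call
        if pos + 1 >= len(buf):
            buf = refill(buf)
        return buf[pos]

    def peek_eafp(buf, pos):           # new shape: free when pos is in range
        try:
            return buf[pos]
        except IndexError:
            return refill(buf)[pos]

    print(repr(peek_eafp(u'ab', 2)))   # u'\x00' after the simulated refill
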
def prefix(self, length=1):
if self.pointer+length >= len(self.buffer):
@@ -132,16 +99,17 @@ class Reader:
def forward(self, length=1):
if self.pointer+length+1 >= len(self.buffer):
self.update(length+1)
- for k in range(length):
+ while length:
ch = self.buffer[self.pointer]
self.pointer += 1
self.index += 1
if ch in u'\n\x85\u2028\u2029' \
- or (ch == u'\r' and self.buffer[self.pointer+1] != u'\n'):
+ or (ch == u'\r' and self.buffer[self.pointer] != u'\n'):
self.line += 1
self.column = 0
elif ch != u'\uFEFF':
self.column += 1
+ length -= 1
def get_mark(self):
if self.stream is None:
@@ -156,13 +124,13 @@ class Reader:
self.update_raw()
if not isinstance(self.raw_buffer, unicode):
if self.raw_buffer.startswith(codecs.BOM_UTF16_LE):
- self.raw_decode = utf_16_le_decode
+ self.raw_decode = codecs.utf_16_le_decode
self.encoding = 'utf-16-le'
elif self.raw_buffer.startswith(codecs.BOM_UTF16_BE):
- self.raw_decode = utf_16_be_decode
+ self.raw_decode = codecs.utf_16_be_decode
self.encoding = 'utf-16-be'
else:
- self.raw_decode = utf_8_decode
+ self.raw_decode = codecs.utf_8_decode
self.encoding = 'utf-8'
self.update(1)
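
With the Python 2.3 wrappers gone, determine_encoding() maps BOM sniffing straight onto the codecs module. A self-contained sketch of the same dispatch (names are illustrative, not part of the diff):

    import codecs

    def sniff(raw):
        if raw.startswith(codecs.BOM_UTF16_LE):
            return 'utf-16-le'
        if raw.startswith(codecs.BOM_UTF16_BE):
            return 'utf-16-be'
        return 'utf-8'

    print(sniff(codecs.BOM_UTF16_LE + u'a'.encode('utf-16-le')))  # utf-16-le
    print(sniff(b'plain ascii'))                                  # utf-8
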
@@ -172,7 +140,7 @@ class Reader:
if match:
character = match.group()
position = self.index+(len(self.buffer)-self.pointer)+match.start()
- raise ReaderError(self.name, position, character,
+ raise ReaderError(self.name, position, ord(character),
'unicode', "special characters are not allowed")
def update(self, length):
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/representer.py b/scripts/external_libs/yaml-3.11/yaml/representer.py
index cb37169d..5f4fc70d 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/representer.py
+++ b/scripts/external_libs/yaml-3.11/yaml/representer.py
@@ -5,23 +5,14 @@ __all__ = ['BaseRepresenter', 'SafeRepresenter', 'Representer',
from error import *
from nodes import *
-try:
- import datetime
- datetime_available = True
-except ImportError:
- datetime_available = False
+import datetime
-try:
- set
-except NameError:
- from sets import Set as set
-
-import sys, copy_reg
+import sys, copy_reg, types
class RepresenterError(YAMLError):
pass
-class BaseRepresenter:
+class BaseRepresenter(object):
yaml_representers = {}
yaml_multi_representers = {}
@@ -30,21 +21,15 @@ class BaseRepresenter:
self.default_style = default_style
self.default_flow_style = default_flow_style
self.represented_objects = {}
+ self.object_keeper = []
+ self.alias_key = None
def represent(self, data):
node = self.represent_data(data)
self.serialize(node)
self.represented_objects = {}
-
- class C: pass
- c = C()
- def f(): pass
- classobj_type = type(C)
- instance_type = type(c)
- function_type = type(f)
- builtin_function_type = type(abs)
- module_type = type(sys)
- del C, c, f
+ self.object_keeper = []
+ self.alias_key = None
def get_classobj_bases(self, cls):
bases = [cls]
@@ -54,18 +39,19 @@ class BaseRepresenter:
def represent_data(self, data):
if self.ignore_aliases(data):
- alias_key = None
+ self.alias_key = None
else:
- alias_key = id(data)
- if alias_key is not None:
- if alias_key in self.represented_objects:
- node = self.represented_objects[alias_key]
- if node is None:
- raise RepresenterError("recursive objects are not allowed: %r" % data)
+ self.alias_key = id(data)
+ if self.alias_key is not None:
+ if self.alias_key in self.represented_objects:
+ node = self.represented_objects[self.alias_key]
+ #if node is None:
+ # raise RepresenterError("recursive objects are not allowed: %r" % data)
return node
- self.represented_objects[alias_key] = None
+ #self.represented_objects[alias_key] = None
+ self.object_keeper.append(data)
data_types = type(data).__mro__
- if type(data) is self.instance_type:
+ if type(data) is types.InstanceType:
data_types = self.get_classobj_bases(data.__class__)+list(data_types)
if data_types[0] in self.yaml_representers:
node = self.yaml_representers[data_types[0]](self, data)
@@ -81,8 +67,8 @@ class BaseRepresenter:
node = self.yaml_representers[None](self, data)
else:
node = ScalarNode(None, unicode(data))
- if alias_key is not None:
- self.represented_objects[alias_key] = node
+ #if alias_key is not None:
+ # self.represented_objects[alias_key] = node
return node
def add_representer(cls, data_type, representer):
@@ -100,50 +86,52 @@ class BaseRepresenter:
def represent_scalar(self, tag, value, style=None):
if style is None:
style = self.default_style
- return ScalarNode(tag, value, style=style)
+ node = ScalarNode(tag, value, style=style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ return node
def represent_sequence(self, tag, sequence, flow_style=None):
- best_style = True
value = []
+ node = SequenceNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
+ best_style = True
for item in sequence:
node_item = self.represent_data(item)
if not (isinstance(node_item, ScalarNode) and not node_item.style):
best_style = False
- value.append(self.represent_data(item))
+ value.append(node_item)
if flow_style is None:
- flow_style = self.default_flow_style
- if flow_style is None:
- flow_style = best_style
- return SequenceNode(tag, value, flow_style=flow_style)
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
def represent_mapping(self, tag, mapping, flow_style=None):
+ value = []
+ node = MappingNode(tag, value, flow_style=flow_style)
+ if self.alias_key is not None:
+ self.represented_objects[self.alias_key] = node
best_style = True
- if hasattr(mapping, 'keys'):
- value = {}
- for item_key in mapping.keys():
- item_value = mapping[item_key]
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value[node_key] = node_value
- else:
- value = []
- for item_key, item_value in mapping:
- node_key = self.represent_data(item_key)
- node_value = self.represent_data(item_value)
- if not (isinstance(node_key, ScalarNode) and not node_key.style):
- best_style = False
- if not (isinstance(node_value, ScalarNode) and not node_value.style):
- best_style = False
- value.append((node_key, node_value))
- if flow_style is None:
- flow_style = self.default_flow_style
+ if hasattr(mapping, 'items'):
+ mapping = mapping.items()
+ mapping.sort()
+ for item_key, item_value in mapping:
+ node_key = self.represent_data(item_key)
+ node_value = self.represent_data(item_value)
+ if not (isinstance(node_key, ScalarNode) and not node_key.style):
+ best_style = False
+ if not (isinstance(node_value, ScalarNode) and not node_value.style):
+ best_style = False
+ value.append((node_key, node_value))
if flow_style is None:
- flow_style = best_style
- return MappingNode(tag, value, flow_style=flow_style)
+ if self.default_flow_style is not None:
+ node.flow_style = self.default_flow_style
+ else:
+ node.flow_style = best_style
+ return node
def ignore_aliases(self, data):
return False
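
The represented_objects bookkeeping above is the point of this hunk: a node is now registered *before* its children are represented, so a child that refers back to an ancestor finds the ancestor's node and becomes an alias, instead of hitting the old "recursive objects are not allowed" error. A quick check (assumes the upgraded yaml-3.11 package):

    import yaml

    lst = [1, 2]
    lst.append(lst)        # the list now contains itself
    print(yaml.dump(lst))
    # &id001
    # - 1
    # - 2
    # - *id001
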
@@ -192,36 +180,44 @@ class SafeRepresenter(BaseRepresenter):
def represent_long(self, data):
return self.represent_scalar(u'tag:yaml.org,2002:int', unicode(data))
- repr_pos_inf = repr(1e300000)
- repr_neg_inf = repr(-1e300000)
- repr_nan = repr(1e300000/1e300000)
+ inf_value = 1e300
+ while repr(inf_value) != repr(inf_value*inf_value):
+ inf_value *= inf_value
def represent_float(self, data):
- repr_data = repr(data)
- if repr_data == self.repr_pos_inf:
+ if data != data or (data == 0.0 and data == 1.0):
+ value = u'.nan'
+ elif data == self.inf_value:
value = u'.inf'
- elif repr_data == self.repr_neg_inf:
+ elif data == -self.inf_value:
value = u'-.inf'
- elif repr_data == self.repr_nan:
- value = u'.nan'
else:
- value = unicode(repr_data)
+ value = unicode(repr(data)).lower()
+ # Note that in some cases `repr(data)` represents a float number
+        # without the decimal part. For instance:
+        #   >>> repr(1e17)
+        #   '1e+17'
+ # Unfortunately, this is not a valid float representation according
+ # to the definition of the `!!float` tag. We fix this by adding
+ # '.0' before the 'e' symbol.
+ if u'.' not in value and u'e' in value:
+ value = value.replace(u'e', u'.0e', 1)
return self.represent_scalar(u'tag:yaml.org,2002:float', value)
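
A float round trip exercises all three new branches: NaN fails self-comparison, infinities compare equal to the squared-up inf_value, and exponent-only reprs get '.0' spliced in so the scalar still matches the !!float patterns. Sketch (dump() also appends '...' document-end markers for bare scalars):

    import yaml

    print(yaml.dump(float('nan')))   # .nan
    print(yaml.dump(float('inf')))   # .inf
    print(yaml.dump(1e17))           # 1.0e+17
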
def represent_list(self, data):
- pairs = (len(data) > 0 and isinstance(data, list))
- if pairs:
- for item in data:
- if not isinstance(item, tuple) or len(item) != 2:
- pairs = False
- break
- if not pairs:
+ #pairs = (len(data) > 0 and isinstance(data, list))
+ #if pairs:
+ # for item in data:
+ # if not isinstance(item, tuple) or len(item) != 2:
+ # pairs = False
+ # break
+ #if not pairs:
return self.represent_sequence(u'tag:yaml.org,2002:seq', data)
- value = []
- for item_key, item_value in data:
- value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
- [(item_key, item_value)]))
- return SequenceNode(u'tag:yaml.org,2002:pairs', value)
+ #value = []
+ #for item_key, item_value in data:
+ # value.append(self.represent_mapping(u'tag:yaml.org,2002:map',
+ # [(item_key, item_value)]))
+ #return SequenceNode(u'tag:yaml.org,2002:pairs', value)
def represent_dict(self, data):
return self.represent_mapping(u'tag:yaml.org,2002:map', data)
@@ -233,17 +229,11 @@ class SafeRepresenter(BaseRepresenter):
return self.represent_mapping(u'tag:yaml.org,2002:set', value)
def represent_date(self, data):
- value = u'%04d-%02d-%02d' % (data.year, data.month, data.day)
+ value = unicode(data.isoformat())
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
def represent_datetime(self, data):
- value = u'%04d-%02d-%02d %02d:%02d:%02d' \
- % (data.year, data.month, data.day,
- data.hour, data.minute, data.second)
- if data.microsecond:
- value += u'.' + unicode(data.microsecond/1000000.0).split(u'.')[1]
- if data.utcoffset():
- value += unicode(data.utcoffset())
+ value = unicode(data.isoformat(' '))
return self.represent_scalar(u'tag:yaml.org,2002:timestamp', value)
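
The isoformat() one-liners produce the same timestamp scalars the hand-rolled formatting did, microseconds included. For instance:

    import datetime

    print(datetime.date(2016, 1, 5).isoformat())        # 2016-01-05
    print(datetime.datetime(2016, 1, 5, 15, 22, 22).isoformat(' '))
    # 2016-01-05 15:22:22
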
def represent_yaml_object(self, tag, data, cls, flow_style=None):
@@ -251,9 +241,6 @@ class SafeRepresenter(BaseRepresenter):
state = data.__getstate__()
else:
state = data.__dict__.copy()
- if isinstance(state, dict):
- state = state.items()
- state.sort()
return self.represent_mapping(tag, state, flow_style=flow_style)
def represent_undefined(self, data):
@@ -292,11 +279,11 @@ SafeRepresenter.add_representer(dict,
SafeRepresenter.add_representer(set,
SafeRepresenter.represent_set)
-if datetime_available:
- SafeRepresenter.add_representer(datetime.date,
- SafeRepresenter.represent_date)
- SafeRepresenter.add_representer(datetime.datetime,
- SafeRepresenter.represent_datetime)
+SafeRepresenter.add_representer(datetime.date,
+ SafeRepresenter.represent_date)
+
+SafeRepresenter.add_representer(datetime.datetime,
+ SafeRepresenter.represent_datetime)
SafeRepresenter.add_representer(None,
SafeRepresenter.represent_undefined)
@@ -385,8 +372,6 @@ class Representer(SafeRepresenter):
else:
state = data.__dict__
if args is None and isinstance(state, dict):
- state = state.items()
- state.sort()
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+class_name, state)
if isinstance(state, dict) and not state:
@@ -418,7 +403,7 @@ class Representer(SafeRepresenter):
cls = type(data)
if cls in copy_reg.dispatch_table:
- reduce = copy_reg.dispatch_table[cls]
+ reduce = copy_reg.dispatch_table[cls](data)
elif hasattr(data, '__reduce_ex__'):
reduce = data.__reduce_ex__(2)
elif hasattr(data, '__reduce__'):
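
The copy_reg fix matters because dispatch_table maps a class to the reduce *function*, not to a reduce tuple, so it has to be called with the instance. A minimal sketch with a hypothetical Point class:

    import copy_reg   # spelled `copyreg` on Python 3

    class Point(object):
        def __init__(self, x, y):
            self.x, self.y = x, y

    def reduce_point(p):
        return Point, (p.x, p.y)

    copy_reg.pickle(Point, reduce_point)      # registers in dispatch_table
    print(copy_reg.dispatch_table[Point](Point(1, 2)))
    # (<class '__main__.Point'>, (1, 2))
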
@@ -445,8 +430,6 @@ class Representer(SafeRepresenter):
function_name = u'%s.%s' % (function.__module__, function.__name__)
if not args and not listitems and not dictitems \
and isinstance(state, dict) and newobj:
- state = state.items()
- state.sort()
return self.represent_mapping(
u'tag:yaml.org,2002:python/object:'+function_name, state)
if not listitems and not dictitems \
@@ -481,19 +464,19 @@ Representer.add_representer(tuple,
Representer.add_representer(type,
Representer.represent_name)
-Representer.add_representer(Representer.classobj_type,
+Representer.add_representer(types.ClassType,
Representer.represent_name)
-Representer.add_representer(Representer.function_type,
+Representer.add_representer(types.FunctionType,
Representer.represent_name)
-Representer.add_representer(Representer.builtin_function_type,
+Representer.add_representer(types.BuiltinFunctionType,
Representer.represent_name)
-Representer.add_representer(Representer.module_type,
+Representer.add_representer(types.ModuleType,
Representer.represent_module)
-Representer.add_multi_representer(Representer.instance_type,
+Representer.add_multi_representer(types.InstanceType,
Representer.represent_instance)
Representer.add_multi_representer(object,
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/resolver.py b/scripts/external_libs/yaml-3.11/yaml/resolver.py
index 7e580e98..6b5ab875 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/resolver.py
+++ b/scripts/external_libs/yaml-3.11/yaml/resolver.py
@@ -9,7 +9,7 @@ import re
class ResolverError(YAMLError):
pass
-class BaseResolver:
+class BaseResolver(object):
DEFAULT_SCALAR_TAG = u'tag:yaml.org,2002:str'
DEFAULT_SEQUENCE_TAG = u'tag:yaml.org,2002:seq'
@@ -32,6 +32,18 @@ class BaseResolver:
add_implicit_resolver = classmethod(add_implicit_resolver)
def add_path_resolver(cls, tag, path, kind=None):
+ # Note: `add_path_resolver` is experimental. The API could be changed.
+        # `path` is a pattern that is matched against the path from the
+        # root to the node that is being considered. Its elements are
+        # tuples `(node_check, index_check)`. `node_check` is a node class:
+        # `ScalarNode`, `SequenceNode`, `MappingNode` or `None`. `None`
+        # matches any kind of node. `index_check` can be `None`, a boolean
+        # value, a string value, or a number. `None` and `False` match
+        # against any _value_ of sequence and mapping nodes. `True` matches
+        # against any _key_ of a mapping node. A string `index_check`
+        # matches a mapping value whose scalar key content equals the
+        # `index_check` value. An integer `index_check` matches a sequence
+        # value whose index equals `index_check`.
if not 'yaml_path_resolvers' in cls.__dict__:
cls.yaml_path_resolvers = cls.yaml_path_resolvers.copy()
new_path = []
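
A hedged usage sketch of the experimental API documented above (the !upper tag and its constructor are invented for illustration): tag the scalar under the top-level `name` key and leave everything else to the implicit resolvers.

    import yaml

    yaml.add_path_resolver(u'!upper', ['name'], str)
    yaml.add_constructor(u'!upper',
        lambda loader, node: loader.construct_scalar(node).upper())
    print(yaml.load('name: trex\nid: 5'))
    # {'name': u'TREX', 'id': 5} -- path resolvers apply only when no
    # implicit resolver claims the scalar first
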
@@ -51,7 +63,7 @@ class BaseResolver:
node_check = ScalarNode
elif node_check is list:
node_check = SequenceNode
- elif node_check is map:
+ elif node_check is dict:
node_check = MappingNode
elif node_check not in [ScalarNode, SequenceNode, MappingNode] \
and not isinstance(node_check, basestring) \
@@ -65,7 +77,7 @@ class BaseResolver:
kind = ScalarNode
elif kind is list:
kind = SequenceNode
- elif kind is map:
+ elif kind is dict:
kind = MappingNode
elif kind not in [ScalarNode, SequenceNode, MappingNode] \
and kind is not None:
@@ -74,6 +86,8 @@ class BaseResolver:
add_path_resolver = classmethod(add_path_resolver)
def descend_resolver(self, current_node, current_index):
+ if not self.yaml_path_resolvers:
+ return
exact_paths = {}
prefix_paths = []
if current_node:
@@ -95,6 +109,8 @@ class BaseResolver:
self.resolver_prefix_paths.append(prefix_paths)
def ascend_resolver(self):
+ if not self.yaml_path_resolvers:
+ return
self.resolver_exact_paths.pop()
self.resolver_prefix_paths.pop()
@@ -109,13 +125,14 @@ class BaseResolver:
return
if index_check is True and current_index is not None:
return
- if index_check in [False, None] and current_index is None:
+ if (index_check is False or index_check is None) \
+ and current_index is None:
return
if isinstance(index_check, basestring):
if not (isinstance(current_index, ScalarNode)
and index_check == current_index.value):
return
- elif isinstance(index_check, int):
+ elif isinstance(index_check, int) and not isinstance(index_check, bool):
if index_check != current_index:
return
return True
@@ -131,11 +148,12 @@ class BaseResolver:
if regexp.match(value):
return tag
implicit = implicit[1]
- exact_paths = self.resolver_exact_paths[-1]
- if kind in exact_paths:
- return exact_paths[kind]
- if None in exact_paths:
- return exact_paths[None]
+ if self.yaml_path_resolvers:
+ exact_paths = self.resolver_exact_paths[-1]
+ if kind in exact_paths:
+ return exact_paths[kind]
+ if None in exact_paths:
+ return exact_paths[None]
if kind is ScalarNode:
return self.DEFAULT_SCALAR_TAG
elif kind is SequenceNode:
@@ -148,14 +166,15 @@ class Resolver(BaseResolver):
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:bool',
- re.compile(ur'''^(?:yes|Yes|YES|n|N|no|No|NO
+ re.compile(ur'''^(?:yes|Yes|YES|no|No|NO
|true|True|TRUE|false|False|FALSE
|on|On|ON|off|Off|OFF)$''', re.X),
list(u'yYnNtTfFoO'))
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:float',
- re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)?\.[0-9_]*(?:[eE][-+][0-9]+)?
+ re.compile(ur'''^(?:[-+]?(?:[0-9][0-9_]*)\.[0-9_]*(?:[eE][-+][0-9]+)?
+ |\.[0-9_]+(?:[eE][-+][0-9]+)?
|[-+]?[0-9][0-9_]*(?::[0-5]?[0-9])+\.[0-9_]*
|[-+]?\.(?:inf|Inf|INF)
|\.(?:nan|NaN|NAN))$''', re.X),
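
The tightened patterns change what plain scalars resolve to: single letters are no longer booleans, a bare dot is no longer an (invalid) empty float, and '.5' keeps working through the new leading-dot branch. Sketch:

    import yaml

    print(repr(yaml.load('n')))    # 'n'   (old pattern read n/N as bools)
    print(repr(yaml.load('.5')))   # 0.5
    print(repr(yaml.load('.')))    # '.'
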
@@ -173,7 +192,7 @@ Resolver.add_implicit_resolver(
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:merge',
re.compile(ur'^(?:<<)$'),
- ['<'])
+ [u'<'])
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:null',
@@ -194,7 +213,7 @@ Resolver.add_implicit_resolver(
Resolver.add_implicit_resolver(
u'tag:yaml.org,2002:value',
re.compile(ur'^(?:=)$'),
- ['='])
+ [u'='])
# The following resolver is only for documentation purposes. It cannot work
# because plain scalars cannot start with '!', '&', or '*'.
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/scanner.py b/scripts/external_libs/yaml-3.11/yaml/scanner.py
index cf2478f9..5228fad6 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/scanner.py
+++ b/scripts/external_libs/yaml-3.11/yaml/scanner.py
@@ -19,7 +19,7 @@
# ALIAS(value)
# ANCHOR(value)
# TAG(value)
-# SCALAR(value, plain)
+# SCALAR(value, plain, style)
#
# Read comments in the Scanner code for more details.
#
@@ -32,7 +32,7 @@ from tokens import *
class ScannerError(MarkedYAMLError):
pass
-class SimpleKey:
+class SimpleKey(object):
# See below simple keys treatment.
def __init__(self, token_number, required, index, line, column, mark):
@@ -43,7 +43,7 @@ class SimpleKey:
self.column = column
self.mark = mark
-class Scanner:
+class Scanner(object):
def __init__(self):
"""Initialize the scanner."""
@@ -137,16 +137,6 @@ class Scanner:
self.tokens_taken += 1
return self.tokens.pop(0)
- def __iter__(self):
- # Iterator protocol.
- while self.need_more_tokens():
- self.fetch_more_tokens()
- while self.tokens:
- self.tokens_taken += 1
- yield self.tokens.pop(0)
- while self.need_more_tokens():
- self.fetch_more_tokens()
-
# Private methods.
def need_more_tokens(self):
@@ -214,11 +204,11 @@ class Scanner:
return self.fetch_flow_mapping_end()
# Is it the flow entry indicator?
- if ch in u',':
+ if ch == u',':
return self.fetch_flow_entry()
# Is it the block entry indicator?
- if ch in u'-' and self.check_block_entry():
+ if ch == u'-' and self.check_block_entry():
return self.fetch_block_entry()
# Is it the key indicator?
@@ -325,11 +315,11 @@ class Scanner:
if self.flow_level in self.possible_simple_keys:
key = self.possible_simple_keys[self.flow_level]
- # I don't think it's possible, but I could be wrong.
- assert not key.required
- #if key.required:
- # raise ScannerError("while scanning a simple key", key.mark,
- # "could not found expected ':'", self.get_mark())
+ if key.required:
+ raise ScannerError("while scanning a simple key", key.mark,
+ "could not found expected ':'", self.get_mark())
+
+ del self.possible_simple_keys[self.flow_level]
# Indentation functions.
@@ -384,7 +374,8 @@ class Scanner:
         # Set the current indentation to -1.
self.unwind_indent(-1)
- # Reset everything (not really needed).
+ # Reset simple keys.
+ self.remove_possible_simple_key()
self.allow_simple_key = False
self.possible_simple_keys = {}
@@ -588,6 +579,14 @@ class Scanner:
"mapping values are not allowed here",
self.get_mark())
+ # If this value starts a new block mapping, we need to add
+ # BLOCK-MAPPING-START. It will be detected as an error later by
+ # the parser.
+ if not self.flow_level:
+ if self.add_indent(self.column):
+ mark = self.get_mark()
+ self.tokens.append(BlockMappingStartToken(mark, mark))
+
# Simple keys are allowed after ':' in the block context.
self.allow_simple_key = not self.flow_level
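
A token-level view of the new BLOCK-MAPPING-START insertion (sketch): a value indicator with no simple key before it, e.g. a mapping with an empty key, now opens the block mapping itself.

    import yaml

    for token in yaml.scan(': v'):
        print(token)
    # StreamStartToken, BlockMappingStartToken, ValueToken,
    # ScalarToken(u'v'), BlockEndToken, StreamEndToken
    print(yaml.load(': v'))        # {None: 'v'}
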
@@ -809,7 +808,7 @@ class Scanner:
# See the specification for details.
length = 0
ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
@@ -848,7 +847,7 @@ class Scanner:
def scan_yaml_directive_number(self, start_mark):
# See the specification for details.
ch = self.peek()
- if not (u'0' <= ch <= '9'):
+ if not (u'0' <= ch <= u'9'):
raise ScannerError("while scanning a directive", start_mark,
"expected a digit, but found %r" % ch.encode('utf-8'),
self.get_mark())
@@ -914,14 +913,14 @@ class Scanner:
# Therefore we restrict aliases to numbers and ASCII letters.
start_mark = self.get_mark()
indicator = self.peek()
- if indicator == '*':
+ if indicator == u'*':
name = 'alias'
else:
name = 'anchor'
self.forward()
length = 0
ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
@@ -1297,13 +1296,13 @@ class Scanner:
ch = self.peek(length)
if ch in u'\0 \t\r\n\x85\u2028\u2029' \
or (not self.flow_level and ch == u':' and
- self.peek(length+1) in u'\0 \t\r\n\x28\u2028\u2029') \
+ self.peek(length+1) in u'\0 \t\r\n\x85\u2028\u2029') \
or (self.flow_level and ch in u',:?[]{}'):
break
length += 1
# It's not clear what we should do with ':' in the flow context.
if (self.flow_level and ch == u':'
- and self.peek(length+1) not in u'\0 \t\r\n\x28\u2028\u2029,[]{}'):
+ and self.peek(length+1) not in u'\0 \t\r\n\x85\u2028\u2029,[]{}'):
self.forward(length)
raise ScannerError("while scanning a plain scalar", start_mark,
"found unexpected ':'", self.get_mark(),
@@ -1370,7 +1369,7 @@ class Scanner:
length = 1
ch = self.peek(length)
if ch != u' ':
- while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-_':
length += 1
ch = self.peek(length)
@@ -1390,7 +1389,7 @@ class Scanner:
chunks = []
length = 0
ch = self.peek(length)
- while u'0' <= ch <= u'9' or u'A' <= ch <= 'Z' or u'a' <= ch <= 'z' \
+ while u'0' <= ch <= u'9' or u'A' <= ch <= u'Z' or u'a' <= ch <= u'z' \
or ch in u'-;/?:@&=+$,_.!~*\'()[]%':
if ch == u'%':
chunks.append(self.prefix(length))
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/serializer.py b/scripts/external_libs/yaml-3.11/yaml/serializer.py
index 937be9a9..0bf1e96d 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/serializer.py
+++ b/scripts/external_libs/yaml-3.11/yaml/serializer.py
@@ -8,7 +8,7 @@ from nodes import *
class SerializerError(YAMLError):
pass
-class Serializer:
+class Serializer(object):
ANCHOR_TEMPLATE = u'id%03d'
@@ -55,7 +55,7 @@ class Serializer:
self.emit(DocumentEndEvent(explicit=self.use_explicit_end))
self.serialized_nodes = {}
self.anchors = {}
- self.last_alias_id = 0
+ self.last_anchor_id = 0
def anchor_node(self, node):
if node in self.anchors:
@@ -67,14 +67,9 @@ class Serializer:
for item in node.value:
self.anchor_node(item)
elif isinstance(node, MappingNode):
- if hasattr(node.value, 'keys'):
- for key in node.value.keys():
- self.anchor_node(key)
- self.anchor_node(node.value[key])
- else:
- for key, value in node.value:
- self.anchor_node(key)
- self.anchor_node(value)
+ for key, value in node.value:
+ self.anchor_node(key)
+ self.anchor_node(value)
def generate_anchor(self, node):
self.last_anchor_id += 1
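
With the counter's name fixed, the reset in close/serialize now targets the field generate_anchor() actually increments, so ANCHOR_TEMPLATE numbering restarts at id001 for each document. Shared nodes still alias as expected:

    import yaml

    shared = {'x': 1}
    print(yaml.dump([shared, shared]))
    # - &id001 {x: 1}
    # - *id001
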
@@ -108,14 +103,9 @@ class Serializer:
== self.resolve(MappingNode, node.value, True))
self.emit(MappingStartEvent(alias, node.tag, implicit,
flow_style=node.flow_style))
- if hasattr(node.value, 'keys'):
- for key in node.value.keys():
- self.serialize_node(key, node, None)
- self.serialize_node(node.value[key], node, key)
- else:
- for key, value in node.value:
- self.serialize_node(key, node, None)
- self.serialize_node(value, node, key)
+ for key, value in node.value:
+ self.serialize_node(key, node, None)
+ self.serialize_node(value, node, key)
self.emit(MappingEndEvent())
self.ascend_resolver()
diff --git a/scripts/external_libs/PyYAML-3.01/lib/yaml/tokens.py b/scripts/external_libs/yaml-3.11/yaml/tokens.py
index 4fe4522e..4d0b48a3 100644..100755
--- a/scripts/external_libs/PyYAML-3.01/lib/yaml/tokens.py
+++ b/scripts/external_libs/yaml-3.11/yaml/tokens.py
@@ -1,5 +1,5 @@
-class Token:
+class Token(object):
def __init__(self, start_mark, end_mark):
self.start_mark = start_mark
self.end_mark = end_mark
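
A recurring theme of this commit is turning every `class X:` into `class X(object)`. A short sketch of why the new-style base (and the types.InstanceType checks in the representer) matter under Python 2:

    import types

    class Old: pass             # old-style: all instances share one type
    class New(object): pass     # new-style: the type carries the class

    print(type(Old()) is types.InstanceType)   # True -> get_classobj_bases path
    print(type(New()) is New)                  # True -> normal MRO lookup works
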